/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
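#if 0
/* Illustrative sketch, not part of the original file: HWI_SIGN_EXTEND
   produces the high half of a (low, high) pair as if LOW were signed.  */
static void
example_hwi_sign_extend (void)
{
  unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -5;
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);	/* high == -1 */
  low = 5;
  high = HWI_SIGN_EXTEND (low);			/* high == 0 */
}
#endif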
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
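#if 0
/* Illustrative sketch, not part of the original file: in QImode the only
   value accepted by mode_signbit_p is 0x80, i.e. 1 << (8 - 1).  */
static void
example_mode_signbit_p (void)
{
  gcc_assert (mode_signbit_p (QImode, GEN_INT (0x80)));
  gcc_assert (!mode_signbit_p (QImode, GEN_INT (0x40)));
}
#endif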
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
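#if 0
/* Illustrative sketch, not part of the original file: the three
   predicates above distinguish "is exactly the sign bit" from "has the
   sign bit set/clear".  For VAL == 0x90 in QImode:  */
static void
example_signbit_predicates (void)
{
  gcc_assert (val_signbit_known_set_p (QImode, 0x90));
  gcc_assert (!val_signbit_known_clear_p (QImode, 0x90));
  gcc_assert (!val_signbit_p (QImode, 0x90));	/* 0x90 != 0x80 */
}
#endif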
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
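#if 0
/* Illustrative sketch, not part of the original file: when no fold
   applies, simplify_gen_binary still canonicalizes commutative operands,
   putting the constant second.  */
static rtx
example_commutative_order (rtx reg)
{
  /* Yields (plus:SI reg (const_int 4)), never (plus:SI (const_int 4) reg).  */
  return simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg);
}
#endif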
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
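#if 0
/* Illustrative sketch, not part of the original file: a MEM whose address
   is a constant-pool SYMBOL_REF reads back as the pooled constant, so
   callers can fold through it.  */
static rtx
example_unpool (rtx mem)
{
  /* If MEM is (mem:DF (symbol_ref [pool entry])), this returns the
     CONST_DOUBLE stored in the pool; otherwise it returns MEM itself.  */
  return avoid_constant_pool_reference (mem);
}
#endif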
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
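#if 0
/* Illustrative sketch, not part of the original file: substituting a
   known constant for a register and letting the result fold.  */
static rtx
example_replace_and_fold (rtx expr, rtx reg)
{
  /* With EXPR == (plus:SI reg (const_int 1)), the replacement below
     returns (const_int 42) directly.  */
  return simplify_replace_rtx (expr, reg, GEN_INT (41));
}
#endif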
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
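      /* Worked example (added for illustration, not in the original):
	 with X and Y in SImode, the De Morgan rewrite above gives

	     (not:SI (and:SI x y))  ->  (ior:SI (not:SI x) (not:SI y))
	     (not:SI (ior:SI x y))  ->  (and:SI (not:SI x) (not:SI y))

	 so a machine with an and-not or nand insn can match the result
	 directly.  */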
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (mode, XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
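      /* Worked example (added for illustration, not in the original):
	 in SImode with STORE_FLAG_VALUE == 1, the LT rule above rewrites

	     (neg:SI (lt:SI x (const_int 0)))
	       ->  (ashiftrt:SI x (const_int 31))

	 Both sides are -1 when x is negative and 0 otherwise, and the
	 shift form needs no comparison instruction.  */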
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
				- GET_MODE_PRECISION (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe
	 (hence the flag_unsafe_math_optimizations test below).

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;
    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					 ? SIGN_EXTEND : ZERO_EXTEND,
					 mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT
	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
	/* We should never get a negative number.  */
	gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
	  break;

	case CLRSB:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    val = GET_MODE_PRECISION (mode) - 1;
	  else if (arg0 >= 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
	  else if (arg0 < 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_PRECISION (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & GET_MODE_MASK (op_mode);
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  op_width = GET_MODE_PRECISION (op_mode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (op_width < HOST_BITS_PER_WIDE_INT)
	    {
	      val = arg0 & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, val))
		val |= ~GET_MODE_MASK (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	case SS_ABS:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
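  /* Worked example (added for illustration, not in the original file):
     the single-word arm above folds e.g. (popcount:SI (const_int 7)) to
     (const_int 3), and (neg:QI (const_int -128)) back to (const_int -128),
     since gen_int_mode truncates the two's-complement result to QImode.  */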
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (CONST_DOUBLE_AS_INT_P (op))
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 != 0)
	    lv = ffs_hwi (l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
	  else
	    lv = 0;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_PRECISION (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = ctz_hwi (l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_PRECISION (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    hv = 0;
	    lv = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (l1 >> s) & 0xff;
		else
		  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  lv |= byte << d;
		else
		  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, lv))
		lv |= ~GET_MODE_MASK (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (unsigned HOST_WIDE_INT) (-1)
		   << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == HOST_BITS_PER_DOUBLE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
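/* Worked example (added for illustration, not in the original file):
   with CODE == PLUS the canonicalizations above combine to fold

       (plus (plus x (const_int 1)) (const_int 2))
	 -> (plus x (const_int 3))

   because "(a op b) op c" is retried as "a op (b op c)" and the two
   constants fold.  */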
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
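#if 0
/* Illustrative sketch, not part of the original file: the usual entry
   point for callers.  Constant operands fold outright; otherwise the
   code-specific rules in simplify_binary_operation_1 get a chance.  */
static rtx
example_fold_add (void)
{
  rtx six = GEN_INT (6), seven = GEN_INT (7);
  /* Returns (const_int 13).  */
  return simplify_binary_operation (PLUS, SImode, six, seven);
}
#endif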
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, coeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  coeff1 = double_int_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = double_int_minus_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, coeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
	  && GET_CODE (op0) == XOR
	  && (CONST_INT_P (XEXP (op0, 1))
	      || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;
2111 /* We can't assume x-x is 0 even with non-IEEE floating point,
2112 but since it is zero except in very strange circumstances, we
2113 will treat it as zero with -ffinite-math-only. */
2114 if (rtx_equal_p (trueop0
, trueop1
)
2115 && ! side_effects_p (op0
)
2116 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2117 return CONST0_RTX (mode
);
2119 /* Change subtraction from zero into negation. (0 - x) is the
2120 same as -x when x is NaN, infinite, or finite and nonzero.
2121 But if the mode has signed zeros, and does not round towards
2122 -infinity, then 0 - 0 is 0, not -0. */
2123 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2124 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2126 /* (-1 - a) is ~a. */
2127 if (trueop0
== constm1_rtx
)
2128 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2130 /* Subtracting 0 has no effect unless the mode has signed zeros
2131 and supports rounding towards -infinity. In such a case,
2133 if (!(HONOR_SIGNED_ZEROS (mode
)
2134 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2135 && trueop1
== CONST0_RTX (mode
))
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, negcoeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  negcoeff1 = double_int_minus_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = double_int_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1 = double_int_setbit (double_int_zero,
					     INTVAL (XEXP (rhs, 1)));
	      negcoeff1 = double_int_neg (negcoeff1);
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, negcoeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		     ? tem : 0;
	    }
	}
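      /* As a worked instance of the transformation above (illustrative
	 values, not from the original sources): (minus (mult x 3) x) has
	 coeff0 = 3 and negcoeff1 = -1, so it folds to (mult x 2), and
	 (minus (ashift x 2) x) folds to (mult x 3).  Either result is
	 kept only when it is no more costly than the original form.  */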
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));
      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
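      /* A concrete instance of the identity above: with x = 0b1101 and
	 y = 0b1011, (x & y) = 0b1001 and x - (x & y) = 0b0100, which is
	 exactly x & ~y.  The subtraction is always exact because x & y
	 can only contain bits that are already set in x.  */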
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}
      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;
      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (CONST_DOUBLE_AS_INT_P (trueop1)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
	  && (val < HOST_BITS_PER_DOUBLE_INT - 1
	      || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
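      /* For example, (mult:SI x (const_int 8)) becomes
	 (ashift:SI x (const_int 3)), since exact_log2 (8) == 3.  */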
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;
      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
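      /* For example, with C1 == 0x0f and C2 == 0x03 only the last case
	 applies: C1 & ~C2 == 0x0c, so (X & 0x0f) | 0x03 is rewritten as
	 (X & 0x0c) | 0x03.  With C2 == 0xff instead, the first case
	 would fold the whole expression to 0xff.  */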
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
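      /* For example, in SImode (precision 32),
	 (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
	 matches with CX + CY == 32 and becomes
	 (rotate x (const_int 24)).  */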
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (UINTVAL (XEXP (op0, 1))
						    & ~UINTVAL (op1))),
				    op1);
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	 return CONST0_RTX (mode);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
	  && GET_CODE (op0) == PLUS
	  && (CONST_INT_P (XEXP (op0, 1))
	      || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));
      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  rtx na_c
	    = simplify_binary_operation (AND, mode,
					 simplify_gen_unary (NOT, mode, a, mode),
					 c);
	  if ((~cval & bval) == 0)
	    {
	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    GEN_INT (~bval & cval));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (na_c == const0_rtx)
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    GEN_INT (~cval & bval));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      GEN_INT (~bval & cval));
		}
	    }
	}
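      /* A numeric instance of the derivation above (illustrative values):
	 with B == 0x0c and C == 0x0f we have (~C & B) == 0, so
	 (A & 0x0c) ^ 0x0f becomes (~A & 0x0f) | 0x03; for A == 5 both
	 forms evaluate to 11.  */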
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  enum machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
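      /* For instance, with M == 0xff (so M + 1 is a power of two):
	 ((A | 0x100) + B) & 0xff drops the IOR because 0x100 & 0xff == 0,
	 giving (A + B) & 0xff, and (A + 0x300) & 0xff simplifies
	 directly to A & 0xff.  */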
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && op0 == XEXP (XEXP (op1, 0), 0))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && op1 == XEXP (XEXP (op0, 0), 0))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
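      /* For example, (udiv:SI x (const_int 16)) becomes
	 (lshiftrt:SI x (const_int 4)).  A divisor of 1 (exact_log2 == 0)
	 is already handled just above.  */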
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
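      /* For example, (umod:SI x (const_int 8)) becomes
	 (and:SI x (const_int 7)): the remainder modulo a power of two
	 is just the low-order bits.  */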
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0,
				     INTVAL (XVECEXP (trueop1, 0, 0)));
	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract a scalar element from a vector using a chain
	     of nested VEC_SELECT expressions.  When the input operand is
	     a memory operand, this operation can be simplified to a
	     simple scalar load from an offsetted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      enum machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  enum machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }

	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }
	}
      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}

      return 0;

    case VEC_CONCAT:
      {
	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				      ? GET_MODE (trueop0)
				      : GET_MODE_INNER (mode));
	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				      ? GET_MODE (trueop1)
				      : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_P (op0))
      && (CONST_INT_P (op1)
	  || CONST_DOUBLE_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  o1 = double_int_neg (o1);

	  /* Fall through....  */

	case PLUS:
	  res = double_int_add (o0, o1);
	  break;

	case MULT:
	  res = double_int_mul (o0, o1);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
				    o0.low, o0.high, o1.low, o1.high,
				    &res.low, &res.high,
				    &tmp.low, &tmp.high))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
				    o0.low, o0.high, o1.low, o1.high,
				    &tmp.low, &tmp.high,
				    &res.low, &res.high))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
				    o0.low, o0.high, o1.low, o1.high,
				    &res.low, &res.high,
				    &tmp.low, &tmp.high))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
				    o0.low, o0.high, o1.low, o1.high,
				    &tmp.low, &tmp.high,
				    &res.low, &res.high))
	    return 0;
	  break;

	case AND:
	  res = double_int_and (o0, o1);
	  break;

	case IOR:
	  res = double_int_ior (o0, o1);
	  break;

	case XOR:
	  res = double_int_xor (o0, o1);
	  break;

	case SMIN:
	  res = double_int_smin (o0, o1);
	  break;

	case SMAX:
	  res = double_int_smax (o0, o1);
	  break;

	case UMIN:
	  res = double_int_umin (o0, o1);
	  break;

	case UMAX:
	  res = double_int_umax (o0, o1);
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  {
	    unsigned HOST_WIDE_INT cnt;

	    if (SHIFT_COUNT_TRUNCATED)
	      {
		o1.high = 0;
		o1.low &= GET_MODE_PRECISION (mode) - 1;
	      }

	    if (!double_int_fits_in_uhwi_p (o1)
		|| double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
	      return 0;

	    cnt = double_int_to_uhwi (o1);

	    if (code == LSHIFTRT || code == ASHIFTRT)
	      res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
				       code == ASHIFTRT);
	    else if (code == ASHIFT)
	      res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
				       true);
	    else if (code == ROTATE)
	      res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
	    else /* code == ROTATERT */
	      res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
	  }
	  break;

	default:
	  return 0;
	}

      return immed_double_int_const (res, mode);
    }
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= GET_MODE_MASK (mode);
	  arg1 &= GET_MODE_MASK (mode);

	  arg0s = arg0;
	  if (val_signbit_known_set_p (mode, arg0s))
	    arg0s |= ~GET_MODE_MASK (mode);

	  arg1s = arg1;
	  if (val_signbit_known_set_p (mode, arg1s))
	    arg1s |= ~GET_MODE_MASK (mode);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}
      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;

	case AND:
	  val = arg0 & arg1;
	  break;

	case IOR:
	  val = arg0 | arg1;
	  break;

	case XOR:
	  val = arg0 ^ arg1;
	  break;
	case LSHIFTRT:
	case ASHIFT:
	case ASHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
	  break;
	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;
	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	case SS_MULT:
	case US_MULT:
	case SS_DIV:
	case US_DIV:
	case SS_ASHIFT:
	case US_ASHIFT:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);
  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}
      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;
      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }
  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0)
      || CC0_P (op1))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);
  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
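  /* The canonicalizations above all rewrite a comparison against 0, 1
     or -1 as a comparison against zero; e.g. (gtu x 0) becomes (ne x 0)
     and (ge x 1) becomes (gt x 0), so later passes only need to
     recognize the zero-based forms.  */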
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
      && (CONST_INT_P (XEXP (op0, 1))
	  || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
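  /* All the XOR rules follow from "x ^ y is zero iff x == y"; e.g.
     (eq:SI (xor:SI x (const_int 5)) (const_int 3)) folds to
     (eq:SI x (const_int 6)), since 5 ^ 3 == 6.  */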
  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}

enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
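/* A worked example of the mapping (illustrative only): comparing
   (const_int -1) with (const_int 1) yields KNOWN_RESULTS of
   (CMP_LT | CMP_GTU) -- signed less-than, unsigned greater-than -- so
   CODE == LT maps to const_true_rtx while CODE == LTU maps to
   const0_rtx.  */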
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If no simplification is possible,
   this function returns zero.  Otherwise, it returns either
   const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }
  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B, we may be able to simplify A - B
     and then simplify a comparison of that with zero.  If A and B are
     both either a register or a CONST_INT, this can't help; testing for
     these cases will prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);
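  /* Illustrative example: for (eq (plus x (const_int 1))
     (plus x (const_int 2))), the MINUS can fold to (const_int -1), and
     the recursive call then reduces (eq (const_int -1) (const_int 0))
     to const0_rtx.  */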
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
      && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (CONST_DOUBLE_AS_INT_P (trueop0))
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (CONST_DOUBLE_AS_INT_P (trueop1))
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}
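      /* E.g. a CONST_INT of -1 unpacks to low == -1 and high == -1 via
	 HWI_SIGN_EXTEND, matching the (low, high) pair a CONST_DOUBLE
	 would carry for the same value.  */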
      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= GET_MODE_MASK (mode);
	  l1u &= GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l0s))
	    l0s |= ~GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l1s))
	    l1s |= ~GET_MODE_MASK (mode);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr;
	  cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
	  cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
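  /* After the masking and sign-extension above, e.g. a QImode 0xff is
     described both as l0u == 0xff and as l0s == -1, so the signed and
     unsigned orderings can both be read off one pair of words.  */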
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}
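      /* E.g. if nonzero_bits shows that only the low eight bits of
	 TRUEOP0 can ever be set, the range collapses to [0, 0xff],
	 which is far tighter than the full bounds of MODE.  */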
      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
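  /* With the [0, 0xff] range from the example above, a test such as
     (gtu x (const_int 0xff)) hits the "y >= mmax" arm of GTU and folds
     to const0_rtx.  */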
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));
	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;
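	  /* E.g. (ge:SF (abs:SF x) (const_double 0.0)) folds to
	     const_true_rtx when NaNs need not be honored, since |x| is
	     then always >= 0.0.  */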
	default:
	  break;
	}
    }

  return 0;
}

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
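      /* E.g. (fma (neg a) (neg b) c) simplifies here to (fma a b c):
	 the first NEG is stripped and the second folds away when
	 simplify_unary_operation cancels it.  */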
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
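      /* Worked example with BITS_BIG_ENDIAN clear:
	   (zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 4))
	 shifts VAL right by 4 and masks to 4 bits, yielding
	 (const_int 3) -- bits 4..7 of 0x1234.  */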
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
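	      /* For the happy-constant case above: with
		 STORE_FLAG_VALUE == 1,
		 (if_then_else (lt x y) (const_int 1) (const_int 0)) is
		 exactly how (lt x y) itself would be materialized, so
		 it folds to the bare comparison.  */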
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
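	      /* E.g. in V4SImode a mask of (const_int 5) (binary 0101)
		 takes elements 0 and 2 from OP0 and elements 1 and 3
		 from OP1 when both operands are CONST_VECTORs.  */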
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}

/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;
  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
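  /* E.g. for the SImode lowpart of a DImode constant: little-endian
     targets keep BYTE == 0, while big-endian targets renumber
     BYTE == 4 to 0, because there the least significant word lives at
     the highest address.  */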
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
		    << (i - HOST_BITS_PER_WIDE_INT);

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;
	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || CONST_DOUBLE_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to
	     specify the offset inside the original regno, so do so only
	     for the lowpart.  The information is used only by alias
	     analysis, which cannot grok partial registers anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
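  /* E.g. for lowpart subregs on a little-endian target,
     (subreg:QI (zero_extend:SI (x:QI)) 0) folds to x itself, while
     (subreg:HI (zero_extend:SI (x:QI)) 0) re-extends to
     (zero_extend:HI (x:QI)).  */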
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
      && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }
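  /* E.g. on a little-endian target with 32-bit words,
     (subreg:SI (lshiftrt:DI x (const_int 32)) 0) selects X's high word:
     SHIFTED_BYTES is 4, so the result is (subreg:SI x 4).  */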
  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))