/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
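
/* E.g. negating the QImode value -128 mathematically yields +128, which
   does not fit in QImode; gen_int_mode wraps it back to -128, matching
   two's complement target arithmetic.  */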
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* We don't have a representation for wider modes yet.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
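
/* E.g. for SImode this accepts only the constant whose sole set bit is
   bit 31 (the value 0x80000000) and rejects every other constant.  */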
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
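
/* E.g. simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds to X;
   when nothing folds, a commutative code such as (plus (const_int 4) (reg))
   is still canonicalized so that the constant becomes the second operand.  */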
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
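
/* E.g. a float load of the form (mem/u (symbol_ref ".LC0")) whose pool
   entry holds the SFmode constant 1.0 is replaced here by the CONST_DOUBLE
   for 1.0, letting later folding see the actual value (".LC0" being an
   illustrative pool label).  */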
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += INTVAL (MEM_OFFSET (x));

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
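
/* E.g. a legitimized -fpic or TLS address that still carries a MEM_EXPR
   for a static variable "v" with a known offset can be rewritten here in
   terms of DECL_RTL ("v"), adjusted by that offset ("v" being an
   illustrative declaration).  */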
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
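
/* E.g. replacing a register REG with const0_rtx in (plus:SI REG REG) by
   simplify_replace_rtx substitutes both occurrences and folds the
   resulting (plus 0 0) to (const_int 0).  */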
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
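
      /* E.g. (not (ior A B)) becomes (and (not A) (not B)) here, which a
         machine with and-not or nor instructions can often match with a
         single insn; likewise (not (and A B)) becomes (ior (not A)
         (not B)).  */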
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
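
      /* E.g. with STORE_FLAG_VALUE == 1, (neg:SI (lt:SI X (const_int 0)))
         becomes (ashiftrt:SI X (const_int 31)): the arithmetic shift
         replicates the sign bit, yielding 0 or -1 directly.  */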
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
                                - GET_MODE_PRECISION (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
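
      /* E.g. (truncate:QI (sign_extend:SI foo:QI)) folds back to foo:QI
         above, and an SImode value with at least 25 sign-bit copies
         (more than 32 - 8) can be truncated to QImode by simply taking
         its low part.  */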
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                            0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned) significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                         ? SIGN_EXTEND : ZERO_EXTEND,
                                         mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
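
      /* E.g. (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
         (sign_extend:SI y:HI))) accumulates bits = 16 + 16 <= 32 above,
         so it becomes (mult:DI (sign_extend:DI x:HI)
         (sign_extend:DI y:HI)), the form that widening-multiply
         patterns recognize.  */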
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */

rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_PRECISION (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT
      && op_width <= HOST_BITS_PER_WIDE_INT && op_width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          arg0 &= GET_MODE_MASK (op_mode);
          val = ffs_hwi (arg0);
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (op_mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
            ;
          else
            val = GET_MODE_PRECISION (op_mode) - floor_log2 (arg0) - 1;
          break;

        case CLRSB:
          arg0 &= GET_MODE_MASK (op_mode);
          if (arg0 == 0)
            val = GET_MODE_PRECISION (op_mode) - 1;
          else if (arg0 >= 0)
            val = GET_MODE_PRECISION (op_mode) - floor_log2 (arg0) - 2;
          else if (arg0 < 0)
            val = GET_MODE_PRECISION (op_mode) - floor_log2 (~arg0) - 2;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (op_mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
                val = GET_MODE_PRECISION (op_mode);
            }
          else
            val = ctz_hwi (arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (op_mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (op_mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & GET_MODE_MASK (op_mode);
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          op_width = GET_MODE_PRECISION (op_mode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (op_width < HOST_BITS_PER_WIDE_INT)
            {
              val = arg0 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, val))
                val |= ~GET_MODE_MASK (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
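
  /* E.g. a BSWAP of the SImode constant 0x12345678 folds above to
     (const_int 0x78563412), and a ZERO_EXTEND to SImode of the QImode
     constant -1 folds to (const_int 255).  */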
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 != 0)
            lv = ffs_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
          else
            lv = 0;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = ctz_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (op_width > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || op_width > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, lv))
                lv |= ~GET_MODE_MASK (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (unsigned HOST_WIDE_INT) (-1)
                   << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
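
/* E.g. (plus (plus X (const_int 4)) (const_int 3)) is reassociated here:
   the inner constant meets the outer one and the two fold, giving
   (plus X (const_int 7)).  */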
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
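
/* E.g. simplify_binary_operation (PLUS, SImode, (const_int 2),
   (const_int 3)) folds to (const_int 5) via
   simplify_const_binary_operation, while non-constant operands fall
   through to simplify_binary_operation_1.  */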
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, coeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          coeff1 = double_int_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = double_int_minus_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, coeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : orig;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
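
      /* E.g. (plus (mult X (const_int 3)) X) matches the X * C +/- X
         distribution above with coefficients 3 and 1, giving
         (mult X (const_int 4)), which is kept only if it costs no more
         than the original.  */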
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
2122 /* We can't assume x-x is 0 even with non-IEEE floating point,
2123 but since it is zero except in very strange circumstances, we
2124 will treat it as zero with -ffinite-math-only. */
2125 if (rtx_equal_p (trueop0
, trueop1
)
2126 && ! side_effects_p (op0
)
2127 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2128 return CONST0_RTX (mode
);
2130 /* Change subtraction from zero into negation. (0 - x) is the
2131 same as -x when x is NaN, infinite, or finite and nonzero.
2132 But if the mode has signed zeros, and does not round towards
2133 -infinity, then 0 - 0 is 0, not -0. */
2134 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2135 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2137 /* (-1 - a) is ~a. */
2138 if (trueop0
== constm1_rtx
)
2139 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2141 /* Subtracting 0 has no effect unless the mode has signed zeros
2142 and supports rounding towards -infinity. In such a case,
2144 if (!(HONOR_SIGNED_ZEROS (mode
)
2145 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2146 && trueop1
== CONST0_RTX (mode
))
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, negcoeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  negcoeff1 = double_int_minus_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = double_int_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1 = double_int_setbit (double_int_zero,
					     INTVAL (XEXP (rhs, 1)));
	      negcoeff1 = double_int_neg (negcoeff1);
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, negcoeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
		? tem : orig;
	    }
	}
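
      /* A worked instance of the distribution above, with operands chosen
	 for illustration: for X * 3 - X, coeff0 is 3 and negcoeff1 is -1,
	 so (minus:SI (mult:SI (reg:SI 1) (const_int 3)) (reg:SI 1))
	 is rebuilt as (mult:SI (reg:SI 1) (const_int 2)) and kept only
	 if it is no more expensive than the original form.  */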
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_INT_P (op1)
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
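
      /* For example (operands chosen for illustration),
	   (minus:SI (reg:SI 1) (and:SI (reg:SI 1) (reg:SI 2)))
	 becomes (and:SI (reg:SI 1) (not:SI (reg:SI 2))): the bits of x
	 cleared by the subtraction are exactly those selected by y.  */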
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
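
      /* For example, (mult:SI (reg:SI 1) (const_int 8)) becomes
	 (ashift:SI (reg:SI 1) (const_int 3)), since exact_log2 (8) is 3.  */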
      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && (GET_MODE (trueop1) == VOIDmode
	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (CONST_INT_P (trueop1)
	  && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
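
      /* A worked instance with constants chosen for illustration: for
	 C1 == 0x0f and C2 == 0x05, (C1 & ~C2) is 0x0a, which differs from
	 C1, so (ior (and X (const_int 15)) (const_int 5)) is rewritten
	 as (ior (and X (const_int 10)) (const_int 5)).  */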
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
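
      /* For example, in SImode (operands chosen for illustration),
	   (ior:SI (ashift:SI (reg:SI 1) (const_int 24))
		   (lshiftrt:SI (reg:SI 1) (const_int 8)))
	 has shift counts summing to the mode precision of 32, and so
	 becomes (rotate:SI (reg:SI 1) (const_int 24)).  */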
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (UINTVAL (XEXP (op0, 1))
						    & ~UINTVAL (op1))),
				    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (CONST_INT_P (trueop1)
	  && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
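
      /* For example, in QImode the sign bit constant is -128, so
	 (xor:QI (reg:QI 1) (const_int -128)) becomes
	 (plus:QI (reg:QI 1) (const_int -128)): flipping the top bit and
	 adding the sign-bit constant agree modulo 2**8.  */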
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (CONST_INT_P (XEXP (op0, 1))
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  rtx na_c
	    = simplify_binary_operation (AND, mode,
					 simplify_gen_unary (NOT, mode, a, mode),
					 c);
	  if ((~cval & bval) == 0)
	    {
	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    GEN_INT (~bval & cval));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (na_c == const0_rtx)
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    GEN_INT (~cval & bval));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      GEN_INT (~bval & cval));
		}
	    }
	}
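
      /* A worked instance with constants chosen for illustration: for
	 B == 0x0f and C == 0xff, (~C & B) is 0, so when ~A&C simplifies,
	 (xor (and A (const_int 15)) (const_int 255)) becomes
	 (ior (and (not A) (const_int 255)) (const_int 240)),
	 i.e. ~A&C | ~B&C with ~B&C == 0xf0.  */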
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  enum machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
			 == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
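
      /* A worked instance with constants chosen for illustration: with
	 M == 15 and N == 255, (N & M) == M, so
	   (and:SI (plus:SI (and:SI (reg:SI 1) (const_int 255)) (reg:SI 2))
		   (const_int 15))
	 drops the inner AND to give
	   (and:SI (plus:SI (reg:SI 1) (reg:SI 2)) (const_int 15)):
	 the bits masked off in A cannot influence the low four bits of
	 the sum, since carries only propagate upward.  */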
      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && op0 == XEXP (XEXP (op1, 0), 0))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && op1 == XEXP (XEXP (op0, 0), 0))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
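
      /* For example, (umod:SI (reg:SI 1) (const_int 8)) becomes
	 (and:SI (reg:SI 1) (const_int 7)): an unsigned modulus by a
	 power of two just keeps the low-order bits.  */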
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
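
      /* For example, on a SHIFT_COUNT_TRUNCATED target in SImode,
	 (ashiftrt:SI (reg:SI 1) (const_int 33)) is canonicalized to
	 (ashiftrt:SI (reg:SI 1) (const_int 1)), since only the low five
	 bits of the count matter.  */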
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));

	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract scalar element from a vector using chain of
	     nested VEC_SELECT expressions.  When input operand is a memory
	     operand, this operation can be simplified to a simple scalar
	     load from an offseted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      enum machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  enum machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }
	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}

      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}

      return 0;
    case VEC_CONCAT:
      {
	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				      ? GET_MODE (trueop0)
				      : GET_MODE_INNER (mode));
	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				      ? GET_MODE (trueop1)
				      : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_INT_P (trueop0)
	     || GET_CODE (trueop0) == CONST_DOUBLE)
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_INT_P (trueop1)
		|| GET_CODE (trueop1) == CONST_DOUBLE))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
      }
      return 0;

    default:
      break;
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
	  || GET_CODE (op0) == CONST_DOUBLE
	  || GET_CODE (op0) == CONST_FIXED)
      && (CONST_INT_P (op1)
	  || GET_CODE (op1) == CONST_DOUBLE
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  o1 = double_int_neg (o1);

	  /* Fall through....  */

	case PLUS:
	  res = double_int_add (o0, o1);
	  break;

	case MULT:
	  res = double_int_mul (o0, o1);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
				    o0.low, o0.high, o1.low, o1.high,
				    &res.low, &res.high,
				    &tmp.low, &tmp.high))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
				    o0.low, o0.high, o1.low, o1.high,
				    &tmp.low, &tmp.high,
				    &res.low, &res.high))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
				    o0.low, o0.high, o1.low, o1.high,
				    &res.low, &res.high,
				    &tmp.low, &tmp.high))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
				    o0.low, o0.high, o1.low, o1.high,
				    &tmp.low, &tmp.high,
				    &res.low, &res.high))
	    return 0;
	  break;

	case AND:
	  res = double_int_and (o0, o1);
	  break;

	case IOR:
	  res = double_int_ior (o0, o1);
	  break;

	case XOR:
	  res = double_int_xor (o0, o1);
	  break;

	case SMIN:
	  res = double_int_smin (o0, o1);
	  break;

	case SMAX:
	  res = double_int_smax (o0, o1);
	  break;

	case UMIN:
	  res = double_int_umin (o0, o1);
	  break;

	case UMAX:
	  res = double_int_umax (o0, o1);
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  {
	    unsigned HOST_WIDE_INT cnt;

	    if (SHIFT_COUNT_TRUNCATED)
	      o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));

	    if (!double_int_fits_in_uhwi_p (o1)
		|| double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
	      return 0;

	    cnt = double_int_to_uhwi (o1);

	    if (code == LSHIFTRT || code == ASHIFTRT)
	      res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
				       code == ASHIFTRT);
	    else if (code == ASHIFT)
	      res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
				       true);
	    else if (code == ROTATE)
	      res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
	    else /* code == ROTATERT */
	      res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
	  }
	  break;

	default:
	  return 0;
	}

      return immed_double_int_const (res, mode);
    }
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= GET_MODE_MASK (mode);
	  arg1 &= GET_MODE_MASK (mode);

	  arg0s = arg0;
	  if (val_signbit_known_set_p (mode, arg0s))
	    arg0s |= ~GET_MODE_MASK (mode);

	  arg1s = arg1;
	  if (val_signbit_known_set_p (mode, arg1s))
	    arg1s |= ~GET_MODE_MASK (mode);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}
      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;
	case AND:
	  val = arg0 & arg1;
	  break;

	case IOR:
	  val = arg0 | arg1;
	  break;

	case XOR:
	  val = arg0 ^ arg1;
	  break;

	case LSHIFTRT:
	case ASHIFT:
	case ASHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
	  break;

	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;
	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	case SS_MULT:
	case US_MULT:
	case SS_DIV:
	case US_DIV:
	case SS_ASHIFT:
	case US_ASHIFT:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
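
/* For example, folding (plus:QI (const_int 100) (const_int 50)) above
   computes val == 150, which gen_int_mode wraps into QImode as
   (const_int -106).  */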
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
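
/* For example, simplify_plus_minus can flatten (operands chosen for
   illustration)
     (plus:SI (minus:SI (reg:SI 1) (reg:SI 2))
	      (minus:SI (reg:SI 2) (const_int 4)))
   into the operand list r1, -r2, r2, -4; the r2 terms cancel, leaving
   (plus:SI (reg:SI 1) (const_int -4)).  */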
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
4257 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
4258 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
4260 enum rtx_code op0code
= GET_CODE (op0
);
4262 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4264 /* If op0 is a comparison, extract the comparison arguments
4268 if (GET_MODE (op0
) == mode
)
4269 return simplify_rtx (op0
);
4271 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4272 XEXP (op0
, 0), XEXP (op0
, 1));
4274 else if (code
== EQ
)
4276 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
4277 if (new_code
!= UNKNOWN
)
4278 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4279 XEXP (op0
, 0), XEXP (op0
, 1));
4283 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4284 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4285 if ((code
== LTU
|| code
== GEU
)
4286 && GET_CODE (op0
) == PLUS
4287 && CONST_INT_P (XEXP (op0
, 1))
4288 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4289 || rtx_equal_p (op1
, XEXP (op0
, 1))))
4292 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4293 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4294 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4297 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4298 if ((code
== LTU
|| code
== GEU
)
4299 && GET_CODE (op0
) == PLUS
4300 && rtx_equal_p (op1
, XEXP (op0
, 1))
4301 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4302 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4303 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4304 copy_rtx (XEXP (op0
, 0)));
4306 if (op1
== const0_rtx
)
4308 /* Canonicalize (GTU x 0) as (NE x 0). */
4310 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4311 /* Canonicalize (LEU x 0) as (EQ x 0). */
4313 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4315 else if (op1
== const1_rtx
)
4320 /* Canonicalize (GE x 1) as (GT x 0). */
4321 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4324 /* Canonicalize (GEU x 1) as (NE x 0). */
4325 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4328 /* Canonicalize (LT x 1) as (LE x 0). */
4329 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4332 /* Canonicalize (LTU x 1) as (EQ x 0). */
4333 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4339 else if (op1
== constm1_rtx
)
4341 /* Canonicalize (LE x -1) as (LT x 0). */
4343 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4344 /* Canonicalize (GT x -1) as (GE x 0). */
4346 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4349 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4350 if ((code
== EQ
|| code
== NE
)
4351 && (op0code
== PLUS
|| op0code
== MINUS
)
4353 && CONSTANT_P (XEXP (op0
, 1))
4354 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4356 rtx x
= XEXP (op0
, 0);
4357 rtx c
= XEXP (op0
, 1);
4359 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
4361 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
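
  /* For example, (eq:SI (plus:SI (reg:SI 1) (const_int 4)) (const_int 6))
     becomes (eq:SI (reg:SI 1) (const_int 2)): the constant is folded to
     the other side of the comparison.  */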
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1)
	  || GET_CODE (op1) == CONST_DOUBLE)
      && (CONST_INT_P (XEXP (op0, 1))
	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
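
/* For example, when the constant folding below proves that op0 is
   strictly greater than op1 both signed and unsigned, it passes
   CMP_GT | CMP_GTU, and comparison_result then maps code GE to
   const_true_rtx and code LE to const0_rtx.  */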
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);
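
  /* For instance, for (eq (plus x 1) x) the subtraction above folds to
     the constant 1, and the recursive call reduces the whole comparison
     to (eq 1 0), i.e. const0_rtx.  */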
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || CONST_INT_P (trueop0))
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= GET_MODE_MASK (mode);
          l1u &= GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l0s))
            l0s |= ~GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l1s))
            l1s |= ~GET_MODE_MASK (mode);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
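
  /* The signed and unsigned orderings are computed together because a
     double-word value compares lexicographically as (high, low): the
     high words carry the requested signedness, while the low words are
     always compared unsigned.  */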
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
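
  /* For instance, when nonzero_bits proves that only the low eight bits
     of trueop0 can be set, mmax is 255 and (gtu trueop0 (const_int 255))
     folds to const0_rtx above.  */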
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case GE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case LE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications are possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
      break;
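
      /* For the extraction above: pulling the 3-bit field at offset 2
         out of the constant 0x5c (0b1011100) yields 0b111; SIGN_EXTRACT
         then propagates the set sign bit, giving (const_int -1).  */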
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              else
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;
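
          /* Otherwise, when both arms are constant vectors, merge them
             elementwise: bit I of the mask picks element I of op0 when
             set and of op1 when clear.  */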
          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                    << (i - HOST_BITS_PER_WIDE_INT);

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
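
/* For example, on a little-endian target simplify_immed_subreg
   (QImode, GEN_INT (0x12345678), SImode, 0) unpacks the SImode constant
   into the byte array 78 56 34 12, selects byte 0, and repacks it as
   (const_int 0x78).  */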
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }

  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that can not
             grok partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }

  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
      && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  return NULL_RTX;
}
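
/* As an example of the extension handling above, the lowpart
   (subreg:QI (zero_extend:SI (reg:QI r)) 0) folds back to (reg:QI r),
   since the low byte of the extension is just the original value.  */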
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
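
/* Note that, unlike gen_rtx_SUBREG, simplify_gen_subreg may return
   NULL_RTX when no valid SUBREG can be formed, so callers must be
   prepared to handle failure.  */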
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pour over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}