/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
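/* For illustration: HWI_SIGN_EXTEND (-1) yields HOST_WIDE_INT_M1 and
   HWI_SIGN_EXTEND (1) yields HOST_WIDE_INT_0, i.e. the high half of the
   (low, high) pair is all ones exactly when LOW is negative.  */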
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (!HWI_COMPUTABLE_MODE_P (mode)
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
					   mode);
  return gen_int_mode (val, mode);
}
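/* Illustrative note (our reading of the check above): in a mode wider
   than a host wide int, a value whose negation equals itself -- zero or
   the sign-bit-only value -- is handed to the generic constant folder,
   which can build a CONST_WIDE_INT, instead of producing a CONST_INT
   that would misrepresent the wide value.  */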
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
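/* For example, in QImode the value (const_int -128) satisfies
   mode_signbit_p: masked to 8 bits it is 0x80, which is exactly
   1 << (8 - 1).  */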
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
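/* Example of the trio above: in SImode, val_signbit_p is true for
   0x80000000, val_signbit_known_set_p inspects only bit 31 and so is
   also true for 0x80000001, and val_signbit_known_clear_p is true for
   values such as 0x7fffffff.  */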
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
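/* Usage sketch: simplify_gen_binary (PLUS, SImode, const1_rtx, reg)
   first tries to fold; failing that it emits (plus:SI reg (const_int 1)),
   with the constant canonically placed second.  */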
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
	return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
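/* Illustrative example: an SFmode MEM whose address is a
   CONSTANT_POOL_ADDRESS_P SYMBOL_REF holding 1.0 is rewritten to
   (const_double:SF 1.0) itself, so later folding sees the constant
   directly rather than a memory load.  */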
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
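/* Usage sketch: simplify_replace_rtx (x, reg, const0_rtx) replaces every
   occurrence of REG in X by (const_int 0) and refolds the result, e.g.
   turning (plus:SI reg (const_int 4)) into (const_int 4).  */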
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    {
	      pos -= op_precision - precision;
	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					   XEXP (op, 1), GEN_INT (pos));
	    }
	}
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					 XEXP (op, 1), XEXP (op, 2));
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
	return simplify_gen_unary (TRUNCATE, int_mode, inner,
				   GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
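/* Illustrative example of the shift rules above:
   (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) (const_int 2)))
   becomes (lshiftrt:QI (x:QI) (const_int 2)), since the shifted-in
   zeros are discarded by the truncation.  */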
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
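/* Illustrative example: a FLOAT from HImode to SFmode is always exact,
   since at most 16 significant input bits fit into SFmode's 24-bit
   significand, so this predicate returns true.  */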
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return simplify_gen_relational (GE, int_mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}
      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If comparison is not reversible use
	 x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	      || (GET_CODE (false_rtx) == NEG
		  && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					   mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
	{
	  int_mode = as_a <scalar_int_mode> (mode);
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	}
      break;
    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_UNIT_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	  && (flag_unsafe_math_optimizations
	      || exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x).

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	      && exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && (num_sign_bit_copies (op, int_mode)
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_NEG (int_mode, op);
      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>).  */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  gcc_assert (GET_MODE_BITSIZE (int_mode)
		      > GET_MODE_BITSIZE (op_mode));
	  if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   int_mode, inner, tmode);
	    }
	}

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
	 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (op, 1) != const0_rtx)
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
	{
	  scalar_int_mode tmode;
	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, int_mode,
					   inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
				     op0_mode);
	}

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    default:
      break;
    }

  return 0;
}
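/* Illustrative example for this function: simplify_unary_operation_1
   (NOT, SImode, (not:SI (reg:SI x))) returns (reg:SI x) by the
   (not (not X)) rule at the top of the NOT case above.  */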
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */

rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
				rtx op, machine_mode op_mode)
{
  scalar_int_mode result_mode;

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
	return gen_const_vec_duplicate (mode, op);
      if (GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  machine_mode inmode = GET_MODE (op);
	  int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
	  unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	  gcc_assert (in_n_elts < n_elts);
	  gcc_assert ((n_elts % in_n_elts) == 0);
	  for (i = 0; i < n_elts; i++)
	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_UNIT_SIZE (mode);
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
	 operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      wide_int result;
      scalar_int_mode imode = (op_mode == VOIDmode
			       ? result_mode
			       : as_a <scalar_int_mode> (op_mode));
      rtx_mode_t op0 = rtx_mode_t (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something and so you if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), result_mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    int_value = GET_MODE_PRECISION (imode);
	  result = wi::shwi (int_value, result_mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), result_mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
	    int_value = GET_MODE_PRECISION (imode);
	  result = wi::shwi (int_value, result_mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), result_mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), result_mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	default:
	  return 0;
	}

      return immed_wide_int_const (result, result_mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  /* Don't perform the operation if flag_signaling_nans is on
	     and the operand is a signaling NaN.  */
	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
	    return NULL_RTX;
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4], i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && is_int_mode (mode, &result_mode))
    {
      unsigned int width = GET_MODE_PRECISION (result_mode);
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (*x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (real_less (x, &t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (real_less (&t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */
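/* For example, with CODE == AND in SImode,
   (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))).  */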
static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */
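/* For example, with CODE == PLUS the rules below rewrite
   (plus (plus x (const_int 1)) (const_int 2)) as (plus x (const_int 3)):
   the two constants meet once the operator is linearized to the left.  */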
static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */
static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  scalar_int_mode int_mode, inner_mode;

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));
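      /* E.g. (plus (symbol_ref "x") (const_int 4)) folds to the single
         relocatable constant (const (plus (symbol_ref "x") (const_int 4)))
         via plus_constant.  */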
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
          wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                            GET_MODE_PRECISION (int_mode));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);

              tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
              return (set_src_cost (tem, int_mode, speed)
                      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
            }
        }
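      /* E.g. (plus (mult x (const_int 3)) x) is rewritten above as
         (mult x (const_int 4)), provided the new multiply is no more
         expensive than the original expression.  */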
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
            return xop00;

          if (REG_P (xop00) && REG_P (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE (xop00) == mode
              && GET_MODE (xop10) == mode
              && GET_MODE_CLASS (mode) == MODE_CC)
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a, unless the expression contains symbolic
         constants, in which case not retaining additions and
         subtractions could cause invalid assembly to be produced.  */
      if (trueop0 == constm1_rtx
          && !contains_symbolic_reference_p (op1))
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (is_a <scalar_int_mode> (mode, &int_mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
          wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (int_mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
            {
              negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                               GET_MODE_PRECISION (int_mode));
              negcoeff1 = -negcoeff1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);

              tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
              return (set_src_cost (tem, int_mode, speed)
                      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
            }
        }
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
        {
          val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
          if (val >= 0)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
        }
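      /* E.g. (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3)).  */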
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

          if (real_equal (d1, &dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && real_equal (d1, &dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode)
          && trueop1 == CONSTM1_RTX (mode)
          && !side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
          && !side_effects_p (op0))
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_UNIT_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
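      /* E.g. in SImode, (ior (ashift x (const_int 24))
         (lshiftrt x (const_int 8))) has 24 + 8 == 32 and becomes
         (rotate x (const_int 24)).  */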
      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && is_a <scalar_int_mode> (mode, &int_mode)
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
                                     &inner_mode)
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
              + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (int_mode)))
        return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = UINTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && trunc_int_for_mode (mask, mode) == mask
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (mode, XEXP (op0, 0),
                                                       mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
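      /* E.g. in QImode, (xor x (const_int -128)) becomes
         (plus x (const_int -128)): flipping the sign bit is the same
         as adding it, modulo 2**8.  */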
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == PLUS
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
         constants, simplify to (xor (ior A C) (B&~C)^D), canceling
         out bits inverted twice and not set by C.  Similarly, given
         (xor (and (xor A B) C) D), simplify without inverting C in
         the xor operand: (xor (and A C) (B&C)^D).
         */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
               && GET_CODE (XEXP (op0, 0)) == XOR
               && CONST_INT_P (op1)
               && CONST_INT_P (XEXP (op0, 1))
               && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
        {
          enum rtx_code op = GET_CODE (op0);
          rtx a = XEXP (XEXP (op0, 0), 0);
          rtx b = XEXP (XEXP (op0, 0), 1);
          rtx c = XEXP (op0, 1);
          rtx d = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);
          HOST_WIDE_INT dval = INTVAL (d);
          HOST_WIDE_INT xcval;

          if (op == IOR)
            xcval = ~cval;
          else
            xcval = cval;

          return simplify_gen_binary (XOR, mode,
                                      simplify_gen_binary (op, mode, a, c),
                                      gen_int_mode ((bval & xcval) ^ dval,
                                                    mode));
        }

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          /* Instead of computing ~A&C, we compute its negated value,
             ~(A|~C).  If it yields -1, ~A&C is zero, so we can
             optimize for sure.  If it does not simplify, we still try
             to compute ~A&C below, but since that always allocates
             RTL, we don't try that before committing to returning a
             simplified expression.  */
          rtx n_na_c = simplify_binary_operation (IOR, mode, a,
                                                  GEN_INT (~cval));

          if ((~cval & bval) == 0)
            {
              rtx na_c = NULL_RTX;
              if (n_na_c)
                na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
              else
                {
                  /* If ~A does not simplify, don't bother: we don't
                     want to simplify 2 operations into 3, and if na_c
                     were to simplify with na, n_na_c would have
                     simplified as well.  */
                  rtx na = simplify_unary_operation (NOT, mode, a, mode);
                  if (na)
                    na_c = simplify_gen_binary (AND, mode, na, c);
                }

              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            gen_int_mode (~bval & cval, mode));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (n_na_c == CONSTM1_RTX (mode))
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    gen_int_mode (~cval & bval,
                                                                  mode));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              gen_int_mode (~bval & cval,
                                                            mode));
                }
            }
        }
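      /* Illustration of the (~C & B) == 0 case above: with B == 4 and
         C == 6, (A & 4) ^ 6 equals (~A & 6) | (~4 & 6), i.e. ~A&C | ~B&C
         with ~B&C == 2; the rewrite is only emitted when ~A&C actually
         simplifies.  */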
      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
         do (ior (and A ~C) (and B C)) which is a machine instruction on some
         machines, and also has shorter instruction path length.  */
      if (GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 0)) == XOR
          && CONST_INT_P (XEXP (op0, 1))
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
        {
          rtx a = trueop1;
          rtx b = XEXP (XEXP (op0, 0), 1);
          rtx c = XEXP (op0, 1);
          rtx nc = simplify_gen_unary (NOT, mode, c, mode);
          rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
          rtx bc = simplify_gen_binary (AND, mode, b, c);
          return simplify_gen_binary (IOR, mode, a_nc, bc);
        }
      /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
      else if (GET_CODE (op0) == AND
               && GET_CODE (XEXP (op0, 0)) == XOR
               && CONST_INT_P (XEXP (op0, 1))
               && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
        {
          rtx a = XEXP (XEXP (op0, 0), 0);
          rtx b = trueop1;
          rtx c = XEXP (op0, 1);
          rtx nc = simplify_gen_unary (NOT, mode, c, mode);
          rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
          rtx ac = simplify_gen_binary (AND, mode, a, c);
          return simplify_gen_binary (IOR, mode, ac, b_nc);
        }

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (is_a <scalar_int_mode> (mode, &int_mode)
          && STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
        return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && val_signbit_p (int_mode, STORE_FLAG_VALUE)
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, int_mode)))
        return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & UINTVAL (trueop1)) == 0)
        {
          machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0), op1),
                                      gen_int_mode (tmp, mode));
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                      == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 1)) == NOT
          && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 1)) == NOT
          && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode)
          && !cfun->can_throw_non_call_exceptions)
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        {
          tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
          if (tem)
            return tem;
        }
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
              && trueop1 != CONST0_RTX (mode))
            {
              const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

              /* x/-1.0 is -x.  */
              if (real_equal (d1, &dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !real_equal (d1, &dconst0))
                {
                  REAL_VALUE_TYPE d;

                  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
                  tem = const_double_from_real_value (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
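      /* E.g. with -freciprocal-math, (div:DF x (const_double 2.0))
         becomes (mult:DF x (const_double 0.5)).  */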
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (of x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (INTVAL (op1) - 1, mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
                       GET_MODE_UNIT_PRECISION (mode) - 1))
        return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                    mode, op0,
                                    GEN_INT (GET_MODE_UNIT_PRECISION (mode)
                                             - INTVAL (trueop1)));
#endif
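      /* E.g. in SImode, (rotate x (const_int 24)) is canonicalized to
         (rotatert x (const_int 8)), and (rotatert x (const_int 24)) to
         (rotate x (const_int 8)).  */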
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0)
          && HWI_COMPUTABLE_MODE_P (mode)
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

    canonicalize_shift:
      /* Given:
         scalar modes M1, M2
         scalar constants c1, c2
         size (M2) > size (M1)
         c1 == size (M2) - size (M1)
         optimize:
         ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
                                     <low_part>)
                          (const_int <c2>))
         to:
         (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
                    <low_part>).  */
      if ((code == ASHIFTRT || code == LSHIFTRT)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && SUBREG_P (op0)
          && CONST_INT_P (op1)
          && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
                                     &inner_mode)
          && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
          && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
          && (INTVAL (XEXP (SUBREG_REG (op0), 1))
              == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
          && subreg_lowpart_p (op0))
        {
          rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
                             + INTVAL (op1));
          tmp = simplify_gen_binary (code, inner_mode,
                                     XEXP (SUBREG_REG (op0), 0),
                                     tmp);
          return lowpart_subreg (int_mode, tmp, inner_mode);
        }

      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
        {
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
              && zero_val == GET_MODE_PRECISION (inner_mode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, inner_mode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;
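      /* For the (lshiftrt (clz X) C) rule above: on a target where CLZ
         of zero is defined as the mode precision (say 32), the shifted
         result is nonzero exactly when clz (X) == 32, i.e. when X == 0,
         which is (eq X (const_int 0)).  */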
    case SMIN:
      if (HWI_COMPUTABLE_MODE_P (mode)
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (HWI_COMPUTABLE_MODE_P (mode)
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));
3492 (with optional nested VEC_CONCAT expression). Some targets
3493 (i386) extract scalar element from a vector using chain of
3494 nested VEC_SELECT expressions. When input operand is a memory
3495 operand, this operation can be simplified to a simple scalar
3496 load from an offseted memory address. */
3497 if (GET_CODE (trueop0
) == VEC_SELECT
)
3499 rtx op0
= XEXP (trueop0
, 0);
3500 rtx op1
= XEXP (trueop0
, 1);
3502 machine_mode opmode
= GET_MODE (op0
);
3503 int elt_size
= GET_MODE_UNIT_SIZE (opmode
);
3504 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3506 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3512 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3513 gcc_assert (i
< n_elts
);
3515 /* Select element, pointed by nested selector. */
3516 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3518 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3519 if (GET_CODE (op0
) == VEC_CONCAT
)
3521 rtx op00
= XEXP (op0
, 0);
3522 rtx op01
= XEXP (op0
, 1);
3524 machine_mode mode00
, mode01
;
3525 int n_elts00
, n_elts01
;
3527 mode00
= GET_MODE (op00
);
3528 mode01
= GET_MODE (op01
);
3530 /* Find out number of elements of each operand. */
3531 if (VECTOR_MODE_P (mode00
))
3533 elt_size
= GET_MODE_UNIT_SIZE (mode00
);
3534 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3539 if (VECTOR_MODE_P (mode01
))
3541 elt_size
= GET_MODE_UNIT_SIZE (mode01
);
3542 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3547 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3549 /* Select correct operand of VEC_CONCAT
3550 and adjust selector. */
3551 if (elem
< n_elts01
)
3562 vec
= rtvec_alloc (1);
3563 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3565 tmp
= gen_rtx_fmt_ee (code
, mode
,
3566 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3569 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3570 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3571 return XEXP (trueop0
, 0);
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_UNIT_SIZE (mode);
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }

          /* Recognize the identity.  */
          if (GET_MODE (trueop0) == mode)
            {
              bool maybe_ident = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (j) || INTVAL (j) != i)
                    {
                      maybe_ident = false;
                      break;
                    }
                }
              if (maybe_ident)
                return trueop0;
            }
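          /* E.g. (vec_select:V4SI x (parallel [(const_int 0) (const_int 1)
             (const_int 2) (const_int 3)])) is recognized above as x
             itself.  */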
          /* If we build {a,b} then permute it, build the result directly.  */
          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 0)) == mode
              && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 1)) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 4 && i1 < 4);
              subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
              subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_MODE (trueop0) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 2 && i1 < 2);
              subop0 = XEXP (trueop0, i0);
              subop1 = XEXP (trueop0, i1);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }
          /* If we select one half of a vec_concat, return that.  */
          if (GET_CODE (trueop0) == VEC_CONCAT
              && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
            {
              rtx subop0 = XEXP (trueop0, 0);
              rtx subop1 = XEXP (trueop0, 1);
              machine_mode mode0 = GET_MODE (subop0);
              machine_mode mode1 = GET_MODE (subop1);
              int li = GET_MODE_UNIT_SIZE (mode0);
              int l0 = GET_MODE_SIZE (mode0) / li;
              int l1 = GET_MODE_SIZE (mode1) / li;
              int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
                {
                  bool success = true;
                  for (int i = 1; i < l0; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop0;
                }
              if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
                {
                  bool success = true;
                  for (int i = 1; i < l1; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop1;
                }
            }
          if (XVECLEN (trueop1, 0) == 1
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && GET_CODE (trueop0) == VEC_CONCAT)
            {
              rtx vec = trueop0;
              int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

              /* Try to find the element in the VEC_CONCAT.  */
              while (GET_MODE (vec) != mode
                     && GET_CODE (vec) == VEC_CONCAT)
                {
                  HOST_WIDE_INT vec_size;

                  if (CONST_INT_P (XEXP (vec, 0)))
                    {
                      /* vec_concat of two const_ints doesn't make sense with
                         respect to modes.  */
                      if (CONST_INT_P (XEXP (vec, 1)))
                        return 0;

                      vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
                                 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
                    }
                  else
                    vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

                  if (offset < vec_size)
                    vec = XEXP (vec, 0);
                  else
                    {
                      offset -= vec_size;
                      vec = XEXP (vec, 1);
                    }
                  vec = avoid_constant_pool_reference (vec);
                }

              if (GET_MODE (vec) == mode)
                return vec;
            }
          /* If we select elements in a vec_merge that all come from the same
             operand, select from that operand directly.  */
          if (GET_CODE (op0) == VEC_MERGE)
            {
              rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (trueop02))
                {
                  unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
                  bool all_operand0 = true;
                  bool all_operand1 = true;
                  for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
                        all_operand1 = false;
                      else
                        all_operand0 = false;
                    }
                  if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
                  if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
                }
            }
          /* If we have two nested selects that are inverses of each
             other, replace them with the source operand.  */
          if (GET_CODE (trueop0) == VEC_SELECT
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            {
              rtx op0_subop1 = XEXP (trueop0, 1);
              gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
              gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

              /* Apply the outer ordering vector to the inner one.  (The inner
                 ordering vector is expressly permitted to be of a different
                 length than the outer one.)  If the result is { 0, 1, ..., n-1 }
                 then the two VEC_SELECTs cancel.  */
              for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
                {
                  rtx x = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (x))
                    return 0;
                  rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
                  if (!CONST_INT_P (y) || i != INTVAL (y))
                    return 0;
                }
              return XEXP (trueop0, 0);
            }
        }

      return 0;
    case VEC_CONCAT:
      {
        machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                 ? GET_MODE (trueop0)
                                 : GET_MODE_INNER (mode));
        machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                 ? GET_MODE (trueop1)
                                 : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_SCALAR_INT_P (trueop0)
             || CONST_DOUBLE_AS_FLOAT_P (trueop0))
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_SCALAR_INT_P (trueop1)
                || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
          {
            int elt_size = GET_MODE_UNIT_SIZE (mode);
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
	/* Try to merge two VEC_SELECTs from the same vector into a single
	   one.  Restrict the transformation to avoid generating a VEC_SELECT
	   with a mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }

	return 0;
      }
    default:
      gcc_unreachable ();
    }

  return 0;
}
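
/* Illustrative example for the VEC_SELECT merging above (added): with two
   V2SI selects from the same V4SI vector X,

     (vec_concat:V4SI (vec_select:V2SI X (parallel [0 1]))
		      (vec_select:V2SI X (parallel [2 3])))

   is rewritten as a single (vec_select:V4SI X (parallel [0 1 2 3])),
   which later simplification can recognize as X itself.  */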
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
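
  /* Illustrative example (added): binary operations on two constant
     vectors fold lane by lane, e.g.
       (plus:V2SI (const_vector [1 2]) (const_vector [10 20]))
	 --> (const_vector:V2SI [11 22]),
     and the whole fold is abandoned if any single lane fails.  */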
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    switch (code)
	      {
	      case AND:
		tmp0[i] &= tmp1[i];
		break;
	      case IOR:
		tmp0[i] |= tmp1[i];
		break;
	      case XOR:
		tmp0[i] ^= tmp1[i];
		break;
	      default:
		gcc_unreachable ();
	      }
	  real_from_target (&r, tmp0, mode);
	  return const_double_from_real_value (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  const REAL_VALUE_TYPE *opr0, *opr1;
	  bool inexact;

	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
		  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
	    return 0;

	  real_convert (&f0, mode, opr0);
	  real_convert (&f1, mode, opr1);

	  if (code == DIV
	      && real_equal (&f1, &dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && real_equal (&f0, &dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return const_double_from_real_value (result, mode);
	}
    }
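
  /* Illustrative note (added): the guards above suppress the fold exactly
     when it would hide a run-time exception; e.g. (plus:DF 1.0 2.0) still
     folds to 3.0, but with flag_trapping_math a product that overflows to
     +Inf from finite inputs is left for the hardware to evaluate.  */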
  /* We can fold some multi-word operations.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
      rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test above
	 the code would die later anyway.  If this assert happens,
	 you just need to make the port support wide int.  */
      gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
	    else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, int_mode);
    }

  return NULL_RTX;
}
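
/* Worked example for the shift handling above (added): on a target with
   SHIFT_COUNT_TRUNCATED, (ashift:SI (const_int 1) (const_int 33)) folds
   with the count reduced modulo the precision, 33 % 32 = 1, yielding
   (const_int 2); otherwise an out-of-range count simply blocks the
   fold.  */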
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (--j >= 0
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }

  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization only if all operands are registers and
	 all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
	 around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
	if (ops[i].neg
	    || !REG_P (ops[i].op)
	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
		&& fixed_regs[REGNO (ops[i].op)]
		&& !global_regs[REGNO (ops[i].op)]
		&& ops[i].op != frame_pointer_rtx
		&& ops[i].op != arg_pointer_rtx
		&& ops[i].op != stack_pointer_rtx))
	  return NULL_RTX;
      goto gen_result;
    }

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      if (CONST_INT_P (value))
	{
	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					     INTVAL (value));
	  n_ops--;
	}
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
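
/* Worked example (added for illustration): given
   (plus (minus A B) (minus B C)), the expansion loop flattens the tree
   into { A, -B, B, -C }, the pairwise loop cancels -B against B, and
   the result is rebuilt as (minus A C).  */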
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return const_double_from_real_value (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    return gen_const_vec_duplicate (mode, val);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
     transformed into (LTU a -C).  */
  if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
      && CONST_INT_P (XEXP (op0, 1))
      && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational (LTU, mode, cmp_mode,
				      XEXP (op0, 0), new_cmp);
    }
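
  /* Worked example (added): in SImode, (gtu (plus a 4) 3) becomes
     (ltu a -4); it fails exactly for a = -4 .. -1, where a + 4 lands
     in 0 .. 3, and holds for every other a.  */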
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable
	 expressions, usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
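
  /* Worked example (added): (eq (plus x (const_int 1)) (const_int 2))
     folds the constant side, 2 - 1 = 1, and becomes
     (eq x (const_int 1)).  */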
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  scalar_int_mode int_mode, int_cmp_mode;
  if (code == NE
      && op1 == const0_rtx
      && is_int_mode (mode, &int_mode)
      && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && int_mode != BImode
      && int_cmp_mode != BImode
      && nonzero_bits (op0, int_cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
	   : lowpart_subreg (int_mode, op0, int_cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
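
  /* Worked example (added): (eq (xor x (const_int 5)) (const_int 3))
     moves both constants to one side, 5 ^ 3 = 6, giving
     (eq x (const_int 6)).  */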
  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
      rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
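
  /* Illustrative note (added): both orderings are computed at once
     because they can disagree; comparing -1 with 1 gives CMP_LT signed
     but CMP_GTU unsigned, and comparison_result picks whichever bit the
     requested CODE actually asks about.  */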
  /* Optimize comparisons with upper and lower bounds.  */
  scalar_int_mode int_mode;
  if (CONST_INT_P (trueop1)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && HWI_COMPUTABLE_MODE_P (int_mode)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies
		= num_sign_bit_copies (trueop0, int_mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val
	      <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val
	      > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val
	      >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val
	      < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val
	      >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val
	      < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val
	      <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val
	      > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
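
  /* Worked example (added): if nonzero_bits proves that x fits in
     8 bits (0 <= x <= 255 in SImode), then (ltu x (const_int 256))
     folds to const_true_rtx and (gtu x (const_int 255)) folds to
     const0_rtx.  */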
  /* Optimize integer comparisons with zero.  */
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && trueop1 == const0_rtx
      && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & (HOST_WIDE_INT_1U
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
	    return const0_rtx;
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
	    return const_true_rtx;
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
   where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the
   expression can be simplified to that or NULL_RTX if not.
   Assume X is compared against zero with CMP_CODE and the true
   arm is TRUE_VAL and the false arm is FALSE_VAL.  */

static rtx
simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
{
  if (cmp_code != EQ && cmp_code != NE)
    return NULL_RTX;

  /* Result on X == 0 and X != 0 respectively.  */
  rtx on_zero, on_nonzero;
  if (cmp_code == EQ)
    {
      on_zero = true_val;
      on_nonzero = false_val;
    }
  else
    {
      on_zero = false_val;
      on_nonzero = true_val;
    }

  rtx_code op_code = GET_CODE (on_nonzero);
  if ((op_code != CLZ && op_code != CTZ)
      || !rtx_equal_p (XEXP (on_nonzero, 0), x)
      || !CONST_INT_P (on_zero))
    return NULL_RTX;

  HOST_WIDE_INT op_val;
  scalar_int_mode mode ATTRIBUTE_UNUSED
    = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
  if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
       || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
      && op_val == INTVAL (on_zero))
    return on_nonzero;

  return NULL_RTX;
}
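
/* Worked example (added for illustration): on a target where
   CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode, the RTL for
   "x == 0 ? 32 : __builtin_clz (x)" collapses to a bare (clz:SI x),
   because the conditional only restates what clz already returns
   for zero.  */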
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  bool any_change = false;
  rtx tem, trueop2;
  scalar_int_mode int_mode, int_op0_mode;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;
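
      /* Illustrative note (added): (fma (neg a) (neg b) c) re-emerges
	 as (fma a b c); the two negations cancel through the product,
	 exposing the plain multiply-add to later passes.  */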
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (!BITS_BIG_ENDIAN)
	    val >>= op2val;
	  else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
	    val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
	  else
	    /* Not enough information to calculate the bit position.  */
	    break;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & (HOST_WIDE_INT_1U << (op1val - 1))) != 0)
		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
	    }

	  return gen_int_mode (val, int_mode);
	}
      break;
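
      /* Worked example (added): with !BITS_BIG_ENDIAN,
	 (zero_extract (const_int 0xAB) (const_int 4) (const_int 4))
	 shifts 0xAB right by 4 and masks to 4 bits, giving 0xA, while
	 sign_extract of the same field gives -6 because bit 3 of 0xA
	 is set.  */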
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits = CONST_VECTOR_NUNITS (cv);
	  bool ok = true;
	  for (int i = 0; i < nunits; ++i)
	    if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
	      {
		ok = false;
		break;
	      }
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}
      /* Convert x == 0 ? N : clz (x) into clz (x) when
	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
	 Similarly for ctz (x).  */
      if (COMPARISON_P (op0) && !side_effects_p (op0)
	  && XEXP (op0, 1) == const0_rtx)
	{
	  rtx simplified
	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
				     op1, op2);
	  if (simplified)
	    return simplified;
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i)))
	     a 1 << i) with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
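
/* Worked example for the VEC_MERGE case above (added): a V4SI selector of
   (const_int 5) = 0b0101 takes lanes 0 and 2 from op0 and lanes 1 and 3
   from op1, so merging two constant vectors folds to a single
   const_vector assembled lane by lane from that mask.  */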
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s = NULL;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  scalar_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
	    unsigned char extend = wi::sign_mask (val);
	    int prec = wi::get_precision (val);

	    for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      scalar_float_mode el_mode;

	      el_mode = as_a <scalar_float_mode> (GET_MODE (el));
	      int bitsize = GET_MODE_BITSIZE (el_mode);

	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
		/ HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
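
/* Illustrative example (added): taking the low SImode part of the DImode
   constant 0x1122334455667788 unpacks the value into bytes, selects the
   four bytes at the lowpart offset, and repacks them, yielding
   (const_int 0x55667788) on a little-endian target.  */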
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* Work out the memory offset of the final OUTERMODE value relative
	 to the inner value of OP.  */
      HOST_WIDE_INT mem_offset = subreg_memory_offset (outermode,
						       innermode, byte);
      HOST_WIDE_INT op_mem_offset = subreg_memory_offset (op);
      HOST_WIDE_INT final_offset = mem_offset + op_mem_offset;

      /* See whether resulting subreg will be paradoxical.  */
      if (!paradoxical_subreg_p (outermode, innermostmode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  HOST_WIDE_INT required_offset
	    = subreg_memory_offset (outermode, innermostmode, 0);
	  if (final_offset != required_offset)
	    return NULL_RTX;
	  /* Paradoxical subregs always have byte offset 0.  */
	  final_offset = 0;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
				      subreg_memory_offset (outermode,
							    innermode, byte));

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      part_size = GET_MODE_SIZE (part_mode);
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  scalar_int_mode int_outermode, int_innermode;
  if (is_a <scalar_int_mode> (outermode, &int_outermode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && (GET_MODE_PRECISION (int_outermode)
	  < GET_MODE_PRECISION (int_innermode))
      && byte == subreg_lowpart_offset (int_outermode, int_innermode))
    {
      rtx tem = simplify_truncation (int_outermode, op, int_innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
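
/* Illustrative example (added): (subreg:SI (zero_extend:DI (reg:SI r)) 4)
   on a little-endian target names bits 32..63, which the ZERO_EXTEND
   guarantees to be zero, so the ZERO_EXTEND rule above folds it to
   (const_int 0).  */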
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
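
/* Usage note (added): lowpart_subreg (SImode, x, DImode) is the usual way
   to name the low 32 bits of a DImode value without hard-coding
   endianness; subreg_lowpart_offset supplies byte 0 on little-endian
   targets and byte 4 on big-endian ones.  */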
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;