/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
29 #include "fold-const.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
38 #include "insn-codes.h"
47 #include "diagnostic-core.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
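
/* E.g. HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is (HOST_WIDE_INT) -1 and
   HWI_SIGN_EXTEND ((HOST_WIDE_INT) 42) is (HOST_WIDE_INT) 0: the macro
   smears the sign bit of the low word across an entire high word.  */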

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
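
/* E.g. for a 32-bit SImode, the sign bit is bit 31, so mode_signbit_p
   holds exactly for the constant whose masked value is 0x80000000.  */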

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
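
/* E.g. for an 8-bit QImode, val_signbit_known_set_p (QImode, 0x80) and
   val_signbit_known_clear_p (QImode, 0x7f) both hold; both functions
   answer false outright once the precision of MODE exceeds
   HOST_BITS_PER_WIDE_INT.  */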

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
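
/* E.g. simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds to X
   itself for integral modes, and a commutative operation given a
   constant first operand has its operands reordered before the rtx is
   built, so the constant always ends up second.  */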

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
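
/* E.g. replacing (reg:SI 1) with (const_int 4) in
   (plus:SI (reg:SI 1) (const_int 3)) does not merely substitute; the
   rebuilt PLUS is simplified as well, so the result is (const_int 7).  */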

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (reg:DI Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }
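
  /* E.g. (truncate:QI (zero_extend:SI foo:QI)) collapses to foo:QI:
     truncating back to the extension's original mode makes both
     operations redundant.  */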

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
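
      /* The net effect of the above is De Morgan: (not (and X Y))
         becomes (ior (not X) (not Y)) and (not (ior X Y)) becomes
         (and (not X) (not Y)).  */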

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
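
      /* E.g. with STORE_FLAG_VALUE == 1 and a 32-bit SImode,
         (neg:SI (lt:SI foo:SI (const_int 0))) becomes
         (ashiftrt:SI foo:SI (const_int 31)), broadcasting the sign bit
         of FOO across the whole word.  */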
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned) significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x).

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent, i.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }
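
      /* E.g. (sign_extend:DI (mult:SI (sign_extend:SI foo:HI)
                                       (sign_extend:SI bar:HI)))
         satisfies the check above (16 + 16 bits fit in SImode), so it
         becomes the wider widening multiplication
         (mult:DI (sign_extend:DI foo:HI) (sign_extend:DI bar:HI)).  */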

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent, i.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
                                rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INTs have VOIDmode as their mode.  We assume that all
             the bits of the constant are significant, though this is a
             dangerous assumption, as CONST_INTs are often created and
             used with garbage in the bits outside of the precision of
             the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INTs have VOIDmode as their mode.  We assume that all
             the bits of the constant are significant, though this is a
             dangerous assumption, as CONST_INTs are often created and
             used with garbage in the bits outside of the precision of
             the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE, but a lot of
         upstream callers expect that this function never fails to
         simplify something, so if you added this to the test above
         the code would die later anyway.  If this assert happens, you
         just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
        {
        case NOT:
          result = wi::bit_not (op0);
          break;

        case NEG:
          result = wi::neg (op0);
          break;

        case ABS:
          result = wi::abs (op0);
          break;

        case FFS:
          result = wi::shwi (wi::ffs (op0), mode);
          break;

        case CLZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::clz (op0);
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case CLRSB:
          result = wi::shwi (wi::clrsb (op0), mode);
          break;

        case CTZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::ctz (op0);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case POPCOUNT:
          result = wi::shwi (wi::popcount (op0), mode);
          break;

        case PARITY:
          result = wi::shwi (wi::parity (op0), mode);
          break;

        case BSWAP:
          result = wide_int (op0).bswap ();
          break;

        case TRUNCATE:
        case ZERO_EXTEND:
          result = wide_int::from (op0, width, UNSIGNED);
          break;

        case SIGN_EXTEND:
          result = wide_int::from (op0, width, SIGNED);
          break;

        default:
          return 0;
        }

      return immed_wide_int_const (result, mode);
    }
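
  /* E.g. a NEG of (const_int 1) in QImode reaches the wide-int code
     above and folds to (const_int -1); CLZ and CTZ of zero fall back
     to the mode precision unless the target defines a value at zero.  */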
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          return 0;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
         things before making this call.  */
      bool fail;

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          wmax = wi::max_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmax, SIGNED);
          if (REAL_VALUES_LESS (t, x))
            return immed_wide_int_const (wmax, mode);

          /* Test against the signed lower bound.  */
          wmin = wi::min_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmin, SIGNED);
          if (REAL_VALUES_LESS (x, t))
            return immed_wide_int_const (wmin, mode);

          return immed_wide_int_const (real_to_integer (&x, &fail, width),
                                       mode);

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          wmax = wi::max_value (width, UNSIGNED);
          real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
          if (REAL_VALUES_LESS (t, x))
            return immed_wide_int_const (wmax, mode);

          return immed_wide_int_const (real_to_integer (&x, &fail, width),
                                       mode);

        default:
          gcc_unreachable ();
        }
    }

  return 0;
}

/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
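
/* E.g. (and:SI (bswap:SI foo) (const_int 0xff)) becomes
   (bswap:SI (and:SI foo C')), where C' is the byte-swapped image of
   the constant (0xff000000 for SImode), exposing the constant to
   further folding.  */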

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
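
/* E.g. (plus (plus X (const_int 1)) Y) is canonicalized by the rules
   above to (plus (plus X Y) (const_int 1)), moving the constant
   outermost where later folding can combine it with other constants.  */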

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));
2033 /* See if this is something like X * C - X or vice versa or
2034 if the multiplication is written as a shift. If so, we can
2035 distribute and make a new multiply, shift, or maybe just
2036 have X (if C is 2 in the example above). But don't make
2037 something more expensive than we had before. */
2039 if (SCALAR_INT_MODE_P (mode
))
2041 rtx lhs
= op0
, rhs
= op1
;
2043 wide_int coeff0
= wi::one (GET_MODE_PRECISION (mode
));
2044 wide_int coeff1
= wi::one (GET_MODE_PRECISION (mode
));
2046 if (GET_CODE (lhs
) == NEG
)
2048 coeff0
= wi::minus_one (GET_MODE_PRECISION (mode
));
2049 lhs
= XEXP (lhs
, 0);
2051 else if (GET_CODE (lhs
) == MULT
2052 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2054 coeff0
= std::make_pair (XEXP (lhs
, 1), mode
);
2055 lhs
= XEXP (lhs
, 0);
2057 else if (GET_CODE (lhs
) == ASHIFT
2058 && CONST_INT_P (XEXP (lhs
, 1))
2059 && INTVAL (XEXP (lhs
, 1)) >= 0
2060 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (mode
))
2062 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2063 GET_MODE_PRECISION (mode
));
2064 lhs
= XEXP (lhs
, 0);
2067 if (GET_CODE (rhs
) == NEG
)
2069 coeff1
= wi::minus_one (GET_MODE_PRECISION (mode
));
2070 rhs
= XEXP (rhs
, 0);
2072 else if (GET_CODE (rhs
) == MULT
2073 && CONST_INT_P (XEXP (rhs
, 1)))
2075 coeff1
= std::make_pair (XEXP (rhs
, 1), mode
);
2076 rhs
= XEXP (rhs
, 0);
2078 else if (GET_CODE (rhs
) == ASHIFT
2079 && CONST_INT_P (XEXP (rhs
, 1))
2080 && INTVAL (XEXP (rhs
, 1)) >= 0
2081 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (mode
))
2083 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2084 GET_MODE_PRECISION (mode
));
2085 rhs
= XEXP (rhs
, 0);
2088 if (rtx_equal_p (lhs
, rhs
))
2090 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2092 bool speed
= optimize_function_for_speed_p (cfun
);
2094 coeff
= immed_wide_int_const (coeff0
+ coeff1
, mode
);
2096 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2097 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2102 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2103 if (CONST_SCALAR_INT_P (op1
)
2104 && GET_CODE (op0
) == XOR
2105 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2106 && mode_signbit_p (mode
, op1
))
2107 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2108 simplify_gen_binary (XOR
, mode
, op1
,
2111 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2112 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2113 && GET_CODE (op0
) == MULT
2114 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2118 in1
= XEXP (XEXP (op0
, 0), 0);
2119 in2
= XEXP (op0
, 1);
2120 return simplify_gen_binary (MINUS
, mode
, op1
,
2121 simplify_gen_binary (MULT
, mode
,
2125 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2126 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2128 if (COMPARISON_P (op0
)
2129 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2130 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2131 && (reversed
= reversed_comparison (op0
, mode
)))
2133 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2135 /* If one of the operands is a PLUS or a MINUS, see if we can
2136 simplify this by the associative law.
2137 Don't use the associative law for floating point.
2138 The inaccuracy makes it nonassociative,
2139 and subtle programs can break if operations are associated. */
2141 if (INTEGRAL_MODE_P (mode
)
2142 && (plus_minus_operand_p (op0
)
2143 || plus_minus_operand_p (op1
))
2144 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2147 /* Reassociate floating point addition only when the user
2148 specifies associative math operations. */
2149 if (FLOAT_MODE_P (mode
)
2150 && flag_associative_math
)
2152 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2159 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2160 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2161 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2162 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2164 rtx xop00
= XEXP (op0
, 0);
2165 rtx xop10
= XEXP (op1
, 0);
2167 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2170 if (REG_P (xop00
) && REG_P (xop10
)
2171 && GET_MODE (xop00
) == GET_MODE (xop10
)
2172 && REGNO (xop00
) == REGNO (xop10
)
2173 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2174 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
2180 /* We can't assume x-x is 0 even with non-IEEE floating point,
2181 but since it is zero except in very strange circumstances, we
2182 will treat it as zero with -ffinite-math-only. */
2183 if (rtx_equal_p (trueop0
, trueop1
)
2184 && ! side_effects_p (op0
)
2185 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2186 return CONST0_RTX (mode
);
2188 /* Change subtraction from zero into negation. (0 - x) is the
2189 same as -x when x is NaN, infinite, or finite and nonzero.
2190 But if the mode has signed zeros, and does not round towards
2191 -infinity, then 0 - 0 is 0, not -0. */
2192 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2193 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2195 /* (-1 - a) is ~a. */
2196 if (trueop0
== constm1_rtx
)
2197 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2199 /* Subtracting 0 has no effect unless the mode has signed zeros
2200 and supports rounding towards -infinity. In such a case,
2202 if (!(HONOR_SIGNED_ZEROS (mode
)
2203 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2204 && trueop1
== CONST0_RTX (mode
))
2207 /* See if this is something like X * C - X or vice versa or
2208 if the multiplication is written as a shift. If so, we can
2209 distribute and make a new multiply, shift, or maybe just
2210 have X (if C is 2 in the example above). But don't make
2211 something more expensive than we had before. */
2213 if (SCALAR_INT_MODE_P (mode
))
2215 rtx lhs
= op0
, rhs
= op1
;
2217 wide_int coeff0
= wi::one (GET_MODE_PRECISION (mode
));
2218 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (mode
));
2220 if (GET_CODE (lhs
) == NEG
)
2222 coeff0
= wi::minus_one (GET_MODE_PRECISION (mode
));
2223 lhs
= XEXP (lhs
, 0);
2225 else if (GET_CODE (lhs
) == MULT
2226 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2228 coeff0
= std::make_pair (XEXP (lhs
, 1), mode
);
2229 lhs
= XEXP (lhs
, 0);
2231 else if (GET_CODE (lhs
) == ASHIFT
2232 && CONST_INT_P (XEXP (lhs
, 1))
2233 && INTVAL (XEXP (lhs
, 1)) >= 0
2234 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (mode
))
2236 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2237 GET_MODE_PRECISION (mode
));
2238 lhs
= XEXP (lhs
, 0);
2241 if (GET_CODE (rhs
) == NEG
)
2243 negcoeff1
= wi::one (GET_MODE_PRECISION (mode
));
2244 rhs
= XEXP (rhs
, 0);
2246 else if (GET_CODE (rhs
) == MULT
2247 && CONST_INT_P (XEXP (rhs
, 1)))
2249 negcoeff1
= wi::neg (std::make_pair (XEXP (rhs
, 1), mode
));
2250 rhs
= XEXP (rhs
, 0);
2252 else if (GET_CODE (rhs
) == ASHIFT
2253 && CONST_INT_P (XEXP (rhs
, 1))
2254 && INTVAL (XEXP (rhs
, 1)) >= 0
2255 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (mode
))
2257 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2258 GET_MODE_PRECISION (mode
));
2259 negcoeff1
= -negcoeff1
;
2260 rhs
= XEXP (rhs
, 0);
2263 if (rtx_equal_p (lhs
, rhs
))
2265 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2267 bool speed
= optimize_function_for_speed_p (cfun
);
2269 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, mode
);
2271 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2272 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2277 /* (a - (-b)) -> (a + b). True even for IEEE. */
2278 if (GET_CODE (op1
) == NEG
)
2279 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2281 /* (-x - c) may be simplified as (-c - x). */
2282 if (GET_CODE (op0
) == NEG
2283 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2285 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2287 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2290 /* Don't let a relocatable value get a negative coeff. */
2291 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2292 return simplify_gen_binary (PLUS
, mode
,
2294 neg_const_int (mode
, op1
));
2296 /* (x - (x & y)) -> (x & ~y) */
2297 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2299 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2301 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2302 GET_MODE (XEXP (op1
, 1)));
2303 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2305 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2307 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2308 GET_MODE (XEXP (op1
, 0)));
2309 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2313 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2314 by reversing the comparison code if valid. */
2315 if (STORE_FLAG_VALUE
== 1
2316 && trueop0
== const1_rtx
2317 && COMPARISON_P (op1
)
2318 && (reversed
= reversed_comparison (op1
, mode
)))
2321 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2322 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2323 && GET_CODE (op1
) == MULT
2324 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2328 in1
= XEXP (XEXP (op1
, 0), 0);
2329 in2
= XEXP (op1
, 1);
2330 return simplify_gen_binary (PLUS
, mode
,
2331 simplify_gen_binary (MULT
, mode
,
2336 /* Canonicalize (minus (neg A) (mult B C)) to
2337 (minus (mult (neg B) C) A). */
2338 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2339 && GET_CODE (op1
) == MULT
2340 && GET_CODE (op0
) == NEG
)
2344 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2345 in2
= XEXP (op1
, 1);
2346 return simplify_gen_binary (MINUS
, mode
,
2347 simplify_gen_binary (MULT
, mode
,
2352 /* If one of the operands is a PLUS or a MINUS, see if we can
2353 simplify this by the associative law. This will, for example,
2354 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2355 Don't use the associative law for floating point.
2356 The inaccuracy makes it nonassociative,
2357 and subtle programs can break if operations are associated. */
2359 if (INTEGRAL_MODE_P (mode
)
2360 && (plus_minus_operand_p (op0
)
2361 || plus_minus_operand_p (op1
))
2362 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2367 if (trueop1
== constm1_rtx
)
2368 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2370 if (GET_CODE (op0
) == NEG
)
2372 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2373 /* If op1 is a MULT as well and simplify_unary_operation
2374 just moved the NEG to the second operand, simplify_gen_binary
2375 below could through simplify_associative_operation move
2376 the NEG around again and recurse endlessly. */
2378 && GET_CODE (op1
) == MULT
2379 && GET_CODE (temp
) == MULT
2380 && XEXP (op1
, 0) == XEXP (temp
, 0)
2381 && GET_CODE (XEXP (temp
, 1)) == NEG
2382 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2385 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2387 if (GET_CODE (op1
) == NEG
)
2389 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2390 /* If op0 is a MULT as well and simplify_unary_operation
2391 just moved the NEG to the second operand, simplify_gen_binary
2392 below could through simplify_associative_operation move
2393 the NEG around again and recurse endlessly. */
2395 && GET_CODE (op0
) == MULT
2396 && GET_CODE (temp
) == MULT
2397 && XEXP (op0
, 0) == XEXP (temp
, 0)
2398 && GET_CODE (XEXP (temp
, 1)) == NEG
2399 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2402 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2405 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2406 x is NaN, since x * 0 is then also NaN. Nor is it valid
2407 when the mode has signed zeros, since multiplying a negative
2408 number by 0 will give -0, not 0. */
2409 if (!HONOR_NANS (mode
)
2410 && !HONOR_SIGNED_ZEROS (mode
)
2411 && trueop1
== CONST0_RTX (mode
)
2412 && ! side_effects_p (op0
))
2415 /* In IEEE floating point, x*1 is not equivalent to x for
2417 if (!HONOR_SNANS (mode
)
2418 && trueop1
== CONST1_RTX (mode
))
2421 /* Convert multiply by constant power of two into shift. */
2422 if (CONST_SCALAR_INT_P (trueop1
))
2424 val
= wi::exact_log2 (std::make_pair (trueop1
, mode
));
2426 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2429 /* x*2 is x+x and x*(-1) is -x */
2430 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2431 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2432 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2433 && GET_MODE (op0
) == mode
)
2436 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2438 if (REAL_VALUES_EQUAL (d
, dconst2
))
2439 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2441 if (!HONOR_SNANS (mode
)
2442 && REAL_VALUES_EQUAL (d
, dconstm1
))
2443 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2446 /* Optimize -x * -x as x * x. */
2447 if (FLOAT_MODE_P (mode
)
2448 && GET_CODE (op0
) == NEG
2449 && GET_CODE (op1
) == NEG
2450 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2451 && !side_effects_p (XEXP (op0
, 0)))
2452 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2454 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2455 if (SCALAR_FLOAT_MODE_P (mode
)
2456 && GET_CODE (op0
) == ABS
2457 && GET_CODE (op1
) == ABS
2458 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2459 && !side_effects_p (XEXP (op0
, 0)))
2460 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2462 /* Reassociate multiplication, but for floating point MULTs
2463 only when the user specifies unsafe math optimizations. */
2464 if (! FLOAT_MODE_P (mode
)
2465 || flag_unsafe_math_optimizations
)
2467 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2474 if (trueop1
== CONST0_RTX (mode
))
2476 if (INTEGRAL_MODE_P (mode
)
2477 && trueop1
== CONSTM1_RTX (mode
)
2478 && !side_effects_p (op0
))
2480 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2482 /* A | (~A) -> -1 */
2483 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2484 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2485 && ! side_effects_p (op0
)
2486 && SCALAR_INT_MODE_P (mode
))
2489 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2490 if (CONST_INT_P (op1
)
2491 && HWI_COMPUTABLE_MODE_P (mode
)
2492 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2493 && !side_effects_p (op0
))
2496 /* Canonicalize (X & C1) | C2. */
2497 if (GET_CODE (op0
) == AND
2498 && CONST_INT_P (trueop1
)
2499 && CONST_INT_P (XEXP (op0
, 1)))
2501 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2502 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2503 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2505 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2507 && !side_effects_p (XEXP (op0
, 0)))
2510 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2511 if (((c1
|c2
) & mask
) == mask
)
2512 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2514 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2515 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2517 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2518 gen_int_mode (c1
& ~c2
, mode
));
2519 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2523 /* Convert (A & B) | A to A. */
2524 if (GET_CODE (op0
) == AND
2525 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2526 || rtx_equal_p (XEXP (op0
, 1), op1
))
2527 && ! side_effects_p (XEXP (op0
, 0))
2528 && ! side_effects_p (XEXP (op0
, 1)))
2531 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2532 mode size to (rotate A CX). */
2534 if (GET_CODE (op1
) == ASHIFT
2535 || GET_CODE (op1
) == SUBREG
)
2546 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2547 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2548 && CONST_INT_P (XEXP (opleft
, 1))
2549 && CONST_INT_P (XEXP (opright
, 1))
2550 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2551 == GET_MODE_PRECISION (mode
)))
2552 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2554 /* Same, but for ashift that has been "simplified" to a wider mode
2555 by simplify_shift_const. */
2557 if (GET_CODE (opleft
) == SUBREG
2558 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2559 && GET_CODE (opright
) == LSHIFTRT
2560 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2561 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2562 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2563 && (GET_MODE_SIZE (GET_MODE (opleft
))
2564 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2565 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2566 SUBREG_REG (XEXP (opright
, 0)))
2567 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2568 && CONST_INT_P (XEXP (opright
, 1))
2569 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2570 == GET_MODE_PRECISION (mode
)))
2571 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2572 XEXP (SUBREG_REG (opleft
), 1));
2574 /* If we have (ior (and (X C1) C2)), simplify this by making
2575 C1 as small as possible if C1 actually changes. */
2576 if (CONST_INT_P (op1
)
2577 && (HWI_COMPUTABLE_MODE_P (mode
)
2578 || INTVAL (op1
) > 0)
2579 && GET_CODE (op0
) == AND
2580 && CONST_INT_P (XEXP (op0
, 1))
2581 && CONST_INT_P (op1
)
2582 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2584 rtx tmp
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2585 gen_int_mode (UINTVAL (XEXP (op0
, 1))
2588 return simplify_gen_binary (IOR
, mode
, tmp
, op1
);
2591 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2592 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2593 the PLUS does not affect any of the bits in OP1: then we can do
2594 the IOR as a PLUS and we can associate. This is valid if OP1
2595 can be safely shifted left C bits. */
2596 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2597 && GET_CODE (XEXP (op0
, 0)) == PLUS
2598 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2599 && CONST_INT_P (XEXP (op0
, 1))
2600 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2602 int count
= INTVAL (XEXP (op0
, 1));
2603 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2605 if (mask
>> count
== INTVAL (trueop1
)
2606 && trunc_int_for_mode (mask
, mode
) == mask
2607 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2608 return simplify_gen_binary (ASHIFTRT
, mode
,
2609 plus_constant (mode
, XEXP (op0
, 0),
2614 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2618 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2624 if (trueop1
== CONST0_RTX (mode
))
2626 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2627 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2628 if (rtx_equal_p (trueop0
, trueop1
)
2629 && ! side_effects_p (op0
)
2630 && GET_MODE_CLASS (mode
) != MODE_CC
)
2631 return CONST0_RTX (mode
);
2633 /* Canonicalize XOR of the most significant bit to PLUS. */
2634 if (CONST_SCALAR_INT_P (op1
)
2635 && mode_signbit_p (mode
, op1
))
2636 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2637 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2638 if (CONST_SCALAR_INT_P (op1
)
2639 && GET_CODE (op0
) == PLUS
2640 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2641 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2642 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2643 simplify_gen_binary (XOR
, mode
, op1
,
2646 /* If we are XORing two things that have no bits in common,
2647 convert them into an IOR. This helps to detect rotation encoded
2648 using those methods and possibly other simplifications. */
2650 if (HWI_COMPUTABLE_MODE_P (mode
)
2651 && (nonzero_bits (op0
, mode
)
2652 & nonzero_bits (op1
, mode
)) == 0)
2653 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2655 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2656 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2659 int num_negated
= 0;
2661 if (GET_CODE (op0
) == NOT
)
2662 num_negated
++, op0
= XEXP (op0
, 0);
2663 if (GET_CODE (op1
) == NOT
)
2664 num_negated
++, op1
= XEXP (op1
, 0);
2666 if (num_negated
== 2)
2667 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2668 else if (num_negated
== 1)
2669 return simplify_gen_unary (NOT
, mode
,
2670 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2674 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2675 correspond to a machine insn or result in further simplifications
2676 if B is a constant. */
2678 if (GET_CODE (op0
) == AND
2679 && rtx_equal_p (XEXP (op0
, 1), op1
)
2680 && ! side_effects_p (op1
))
2681 return simplify_gen_binary (AND
, mode
,
2682 simplify_gen_unary (NOT
, mode
,
2683 XEXP (op0
, 0), mode
),
2686 else if (GET_CODE (op0
) == AND
2687 && rtx_equal_p (XEXP (op0
, 0), op1
)
2688 && ! side_effects_p (op1
))
2689 return simplify_gen_binary (AND
, mode
,
2690 simplify_gen_unary (NOT
, mode
,
2691 XEXP (op0
, 1), mode
),
2694 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2695 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2696 out bits inverted twice and not set by C. Similarly, given
2697 (xor (and (xor A B) C) D), simplify without inverting C in
2698 the xor operand: (xor (and A C) (B&C)^D).
2700 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2701 && GET_CODE (XEXP (op0
, 0)) == XOR
2702 && CONST_INT_P (op1
)
2703 && CONST_INT_P (XEXP (op0
, 1))
2704 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2706 enum rtx_code op
= GET_CODE (op0
);
2707 rtx a
= XEXP (XEXP (op0
, 0), 0);
2708 rtx b
= XEXP (XEXP (op0
, 0), 1);
2709 rtx c
= XEXP (op0
, 1);
2711 HOST_WIDE_INT bval
= INTVAL (b
);
2712 HOST_WIDE_INT cval
= INTVAL (c
);
2713 HOST_WIDE_INT dval
= INTVAL (d
);
2714 HOST_WIDE_INT xcval
;
2721 return simplify_gen_binary (XOR
, mode
,
2722 simplify_gen_binary (op
, mode
, a
, c
),
2723 gen_int_mode ((bval
& xcval
) ^ dval
,
2727 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2728 we can transform like this:
2729 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2730 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2731 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2732 Attempt a few simplifications when B and C are both constants. */
2733 if (GET_CODE (op0
) == AND
2734 && CONST_INT_P (op1
)
2735 && CONST_INT_P (XEXP (op0
, 1)))
2737 rtx a
= XEXP (op0
, 0);
2738 rtx b
= XEXP (op0
, 1);
2740 HOST_WIDE_INT bval
= INTVAL (b
);
2741 HOST_WIDE_INT cval
= INTVAL (c
);
2743 /* Instead of computing ~A&C, we compute its negated value,
2744 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2745 optimize for sure. If it does not simplify, we still try
2746 to compute ~A&C below, but since that always allocates
2747 RTL, we don't try that before committing to returning a
2748 simplified expression. */
2749 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
2752 if ((~cval
& bval
) == 0)
2754 rtx na_c
= NULL_RTX
;
2756 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
2759 /* If ~A does not simplify, don't bother: we don't
2760 want to simplify 2 operations into 3, and if na_c
2761 were to simplify with na, n_na_c would have
2762 simplified as well. */
2763 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
2765 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
2768 /* Try to simplify ~A&C | ~B&C. */
2769 if (na_c
!= NULL_RTX
)
2770 return simplify_gen_binary (IOR
, mode
, na_c
,
2771 gen_int_mode (~bval
& cval
, mode
));
2775 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2776 if (n_na_c
== CONSTM1_RTX (mode
))
2778 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2779 gen_int_mode (~cval
& bval
,
2781 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2782 gen_int_mode (~bval
& cval
,
2788 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2789 comparison if STORE_FLAG_VALUE is 1. */
2790 if (STORE_FLAG_VALUE
== 1
2791 && trueop1
== const1_rtx
2792 && COMPARISON_P (op0
)
2793 && (reversed
= reversed_comparison (op0
, mode
)))
2796 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2797 is (lt foo (const_int 0)), so we can perform the above
2798 simplification if STORE_FLAG_VALUE is 1. */
2800 if (STORE_FLAG_VALUE
== 1
2801 && trueop1
== const1_rtx
2802 && GET_CODE (op0
) == LSHIFTRT
2803 && CONST_INT_P (XEXP (op0
, 1))
2804 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2805 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2807 /* (xor (comparison foo bar) (const_int sign-bit))
2808 when STORE_FLAG_VALUE is the sign bit. */
2809 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2810 && trueop1
== const_true_rtx
2811 && COMPARISON_P (op0
)
2812 && (reversed
= reversed_comparison (op0
, mode
)))
2815 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2819 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2825 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2827 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2829 if (HWI_COMPUTABLE_MODE_P (mode
))
2831 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2832 HOST_WIDE_INT nzop1
;
2833 if (CONST_INT_P (trueop1
))
2835 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2836 /* If we are turning off bits already known off in OP0, we need
2838 if ((nzop0
& ~val1
) == 0)
2841 nzop1
= nonzero_bits (trueop1
, mode
);
2842 /* If we are clearing all the nonzero bits, the result is zero. */
2843 if ((nzop1
& nzop0
) == 0
2844 && !side_effects_p (op0
) && !side_effects_p (op1
))
2845 return CONST0_RTX (mode
);
2847 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2848 && GET_MODE_CLASS (mode
) != MODE_CC
)
2851 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2852 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2853 && ! side_effects_p (op0
)
2854 && GET_MODE_CLASS (mode
) != MODE_CC
)
2855 return CONST0_RTX (mode
);
2857 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2858 there are no nonzero bits of C outside of X's mode. */
2859 if ((GET_CODE (op0
) == SIGN_EXTEND
2860 || GET_CODE (op0
) == ZERO_EXTEND
)
2861 && CONST_INT_P (trueop1
)
2862 && HWI_COMPUTABLE_MODE_P (mode
)
2863 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2864 & UINTVAL (trueop1
)) == 0)
2866 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2867 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2868 gen_int_mode (INTVAL (trueop1
),
2870 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2873 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2874 we might be able to further simplify the AND with X and potentially
2875 remove the truncation altogether. */
2876 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2878 rtx x
= XEXP (op0
, 0);
2879 machine_mode xmode
= GET_MODE (x
);
2880 tem
= simplify_gen_binary (AND
, xmode
, x
,
2881 gen_int_mode (INTVAL (trueop1
), xmode
));
2882 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2885 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2886 if (GET_CODE (op0
) == IOR
2887 && CONST_INT_P (trueop1
)
2888 && CONST_INT_P (XEXP (op0
, 1)))
2890 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2891 return simplify_gen_binary (IOR
, mode
,
2892 simplify_gen_binary (AND
, mode
,
2893 XEXP (op0
, 0), op1
),
2894 gen_int_mode (tmp
, mode
));
2897 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2898 insn (and may simplify more). */
2899 if (GET_CODE (op0
) == XOR
2900 && rtx_equal_p (XEXP (op0
, 0), op1
)
2901 && ! side_effects_p (op1
))
2902 return simplify_gen_binary (AND
, mode
,
2903 simplify_gen_unary (NOT
, mode
,
2904 XEXP (op0
, 1), mode
),
2907 if (GET_CODE (op0
) == XOR
2908 && rtx_equal_p (XEXP (op0
, 1), op1
)
2909 && ! side_effects_p (op1
))
2910 return simplify_gen_binary (AND
, mode
,
2911 simplify_gen_unary (NOT
, mode
,
2912 XEXP (op0
, 0), mode
),
2915 /* Similarly for (~(A ^ B)) & A. */
2916 if (GET_CODE (op0
) == NOT
2917 && GET_CODE (XEXP (op0
, 0)) == XOR
2918 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2919 && ! side_effects_p (op1
))
2920 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2922 if (GET_CODE (op0
) == NOT
2923 && GET_CODE (XEXP (op0
, 0)) == XOR
2924 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2925 && ! side_effects_p (op1
))
2926 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2928 /* Convert (A | B) & A to A. */
2929 if (GET_CODE (op0
) == IOR
2930 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2931 || rtx_equal_p (XEXP (op0
, 1), op1
))
2932 && ! side_effects_p (XEXP (op0
, 0))
2933 && ! side_effects_p (XEXP (op0
, 1)))
2936 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2937 ((A & N) + B) & M -> (A + B) & M
2938 Similarly if (N & M) == 0,
2939 ((A | N) + B) & M -> (A + B) & M
2940 and for - instead of + and/or ^ instead of |.
2941 Also, if (N & M) == 0, then
2942 (A +- N) & M -> A & M. */
2943 if (CONST_INT_P (trueop1
)
2944 && HWI_COMPUTABLE_MODE_P (mode
)
2945 && ~UINTVAL (trueop1
)
2946 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
2947 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2952 pmop
[0] = XEXP (op0
, 0);
2953 pmop
[1] = XEXP (op0
, 1);
2955 if (CONST_INT_P (pmop
[1])
2956 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
2957 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
2959 for (which
= 0; which
< 2; which
++)
2962 switch (GET_CODE (tem
))
2965 if (CONST_INT_P (XEXP (tem
, 1))
2966 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
2967 == UINTVAL (trueop1
))
2968 pmop
[which
] = XEXP (tem
, 0);
2972 if (CONST_INT_P (XEXP (tem
, 1))
2973 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
2974 pmop
[which
] = XEXP (tem
, 0);
2981 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2983 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2985 return simplify_gen_binary (code
, mode
, tem
, op1
);
2989 /* (and X (ior (not X) Y) -> (and X Y) */
2990 if (GET_CODE (op1
) == IOR
2991 && GET_CODE (XEXP (op1
, 0)) == NOT
2992 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
2993 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
2995 /* (and (ior (not X) Y) X) -> (and X Y) */
2996 if (GET_CODE (op0
) == IOR
2997 && GET_CODE (XEXP (op0
, 0)) == NOT
2998 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
2999 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3001 /* (and X (ior Y (not X)) -> (and X Y) */
3002 if (GET_CODE (op1
) == IOR
3003 && GET_CODE (XEXP (op1
, 1)) == NOT
3004 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3005 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3007 /* (and (ior Y (not X)) X) -> (and X Y) */
3008 if (GET_CODE (op0
) == IOR
3009 && GET_CODE (XEXP (op0
, 1)) == NOT
3010 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3011 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3013 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3017 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3023 /* 0/x is 0 (or x&0 if x has side-effects). */
3024 if (trueop0
== CONST0_RTX (mode
))
3026 if (side_effects_p (op1
))
3027 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3031 if (trueop1
== CONST1_RTX (mode
))
3033 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3037 /* Convert divide by power of two into shift. */
3038 if (CONST_INT_P (trueop1
)
3039 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3040 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
3044 /* Handle floating point and integers separately. */
3045 if (SCALAR_FLOAT_MODE_P (mode
))
3047 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3048 safe for modes with NaNs, since 0.0 / 0.0 will then be
3049 NaN rather than 0.0. Nor is it safe for modes with signed
3050 zeros, since dividing 0 by a negative number gives -0.0 */
3051 if (trueop0
== CONST0_RTX (mode
)
3052 && !HONOR_NANS (mode
)
3053 && !HONOR_SIGNED_ZEROS (mode
)
3054 && ! side_effects_p (op1
))
3057 if (trueop1
== CONST1_RTX (mode
)
3058 && !HONOR_SNANS (mode
))
3061 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3062 && trueop1
!= CONST0_RTX (mode
))
3065 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
3068 if (REAL_VALUES_EQUAL (d
, dconstm1
)
3069 && !HONOR_SNANS (mode
))
3070 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3072 /* Change FP division by a constant into multiplication.
3073 Only do this with -freciprocal-math. */
3074 if (flag_reciprocal_math
3075 && !REAL_VALUES_EQUAL (d
, dconst0
))
3077 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
3078 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
3079 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3083 else if (SCALAR_INT_MODE_P (mode
))
3085 /* 0/x is 0 (or x&0 if x has side-effects). */
3086 if (trueop0
== CONST0_RTX (mode
)
3087 && !cfun
->can_throw_non_call_exceptions
)
3089 if (side_effects_p (op1
))
3090 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3094 if (trueop1
== CONST1_RTX (mode
))
3096 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3101 if (trueop1
== constm1_rtx
)
3103 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3105 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3111 /* 0%x is 0 (or x&0 if x has side-effects). */
3112 if (trueop0
== CONST0_RTX (mode
))
3114 if (side_effects_p (op1
))
3115 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3118 /* x%1 is 0 (of x&0 if x has side-effects). */
3119 if (trueop1
== CONST1_RTX (mode
))
3121 if (side_effects_p (op0
))
3122 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3123 return CONST0_RTX (mode
);
3125 /* Implement modulus by power of two as AND. */
3126 if (CONST_INT_P (trueop1
)
3127 && exact_log2 (UINTVAL (trueop1
)) > 0)
3128 return simplify_gen_binary (AND
, mode
, op0
,
3129 gen_int_mode (INTVAL (op1
) - 1, mode
));
3133 /* 0%x is 0 (or x&0 if x has side-effects). */
3134 if (trueop0
== CONST0_RTX (mode
))
3136 if (side_effects_p (op1
))
3137 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3140 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3141 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3143 if (side_effects_p (op0
))
3144 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3145 return CONST0_RTX (mode
);
3151 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3152 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3153 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3155 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3156 if (CONST_INT_P (trueop1
)
3157 && IN_RANGE (INTVAL (trueop1
),
3158 GET_MODE_PRECISION (mode
) / 2 + (code
== ROTATE
),
3159 GET_MODE_PRECISION (mode
) - 1))
3160 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3161 mode
, op0
, GEN_INT (GET_MODE_PRECISION (mode
)
3162 - INTVAL (trueop1
)));
3166 if (trueop1
== CONST0_RTX (mode
))
3168 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3170 /* Rotating ~0 always results in ~0. */
3171 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3172 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3173 && ! side_effects_p (op1
))
3177 scalar constants c1, c2
3178 size (M2) > size (M1)
3179 c1 == size (M2) - size (M1)
3181 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3185 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3187 if (code
== ASHIFTRT
3188 && !VECTOR_MODE_P (mode
)
3190 && CONST_INT_P (op1
)
3191 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3192 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0
)))
3193 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3194 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3195 > GET_MODE_BITSIZE (mode
))
3196 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3197 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3198 - GET_MODE_BITSIZE (mode
)))
3199 && subreg_lowpart_p (op0
))
3201 rtx tmp
= GEN_INT (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3203 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op0
));
3204 tmp
= simplify_gen_binary (ASHIFTRT
,
3205 GET_MODE (SUBREG_REG (op0
)),
3206 XEXP (SUBREG_REG (op0
), 0),
3208 return simplify_gen_subreg (mode
, tmp
, inner_mode
,
3209 subreg_lowpart_offset (mode
,
3213 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3215 val
= INTVAL (op1
) & (GET_MODE_PRECISION (mode
) - 1);
3216 if (val
!= INTVAL (op1
))
3217 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3224 if (trueop1
== CONST0_RTX (mode
))
3226 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3228 goto canonicalize_shift
;
3231 if (trueop1
== CONST0_RTX (mode
))
3233 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3235 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3236 if (GET_CODE (op0
) == CLZ
3237 && CONST_INT_P (trueop1
)
3238 && STORE_FLAG_VALUE
== 1
3239 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3241 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3242 unsigned HOST_WIDE_INT zero_val
= 0;
3244 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3245 && zero_val
== GET_MODE_PRECISION (imode
)
3246 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3247 return simplify_gen_relational (EQ
, mode
, imode
,
3248 XEXP (op0
, 0), const0_rtx
);
3250 goto canonicalize_shift
;
3253 if (width
<= HOST_BITS_PER_WIDE_INT
3254 && mode_signbit_p (mode
, trueop1
)
3255 && ! side_effects_p (op0
))
3257 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3259 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3265 if (width
<= HOST_BITS_PER_WIDE_INT
3266 && CONST_INT_P (trueop1
)
3267 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3268 && ! side_effects_p (op0
))
3270 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3272 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3278 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3280 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3282 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3288 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3290 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3292 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3305 /* ??? There are simplifications that can be done. */
3309 if (!VECTOR_MODE_P (mode
))
3311 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3312 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3313 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3314 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3315 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3317 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3318 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3321 /* Extract a scalar element from a nested VEC_SELECT expression
3322 (with optional nested VEC_CONCAT expression). Some targets
3323 (i386) extract scalar element from a vector using chain of
3324 nested VEC_SELECT expressions. When input operand is a memory
3325 operand, this operation can be simplified to a simple scalar
3326 load from an offseted memory address. */
3327 if (GET_CODE (trueop0
) == VEC_SELECT
)
3329 rtx op0
= XEXP (trueop0
, 0);
3330 rtx op1
= XEXP (trueop0
, 1);
3332 machine_mode opmode
= GET_MODE (op0
);
3333 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3334 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3336 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3342 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3343 gcc_assert (i
< n_elts
);
3345 /* Select element, pointed by nested selector. */
3346 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3348 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3349 if (GET_CODE (op0
) == VEC_CONCAT
)
3351 rtx op00
= XEXP (op0
, 0);
3352 rtx op01
= XEXP (op0
, 1);
3354 machine_mode mode00
, mode01
;
3355 int n_elts00
, n_elts01
;
3357 mode00
= GET_MODE (op00
);
3358 mode01
= GET_MODE (op01
);
3360 /* Find out number of elements of each operand. */
3361 if (VECTOR_MODE_P (mode00
))
3363 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3364 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3369 if (VECTOR_MODE_P (mode01
))
3371 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3372 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3377 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3379 /* Select correct operand of VEC_CONCAT
3380 and adjust selector. */
3381 if (elem
< n_elts01
)
3392 vec
= rtvec_alloc (1);
3393 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3395 tmp
= gen_rtx_fmt_ee (code
, mode
,
3396 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3399 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3400 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3401 return XEXP (trueop0
, 0);
3405 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3406 gcc_assert (GET_MODE_INNER (mode
)
3407 == GET_MODE_INNER (GET_MODE (trueop0
)));
3408 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3410 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3412 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3413 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3414 rtvec v
= rtvec_alloc (n_elts
);
3417 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3418 for (i
= 0; i
< n_elts
; i
++)
3420 rtx x
= XVECEXP (trueop1
, 0, i
);
3422 gcc_assert (CONST_INT_P (x
));
3423 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3427 return gen_rtx_CONST_VECTOR (mode
, v
);
3430 /* Recognize the identity. */
3431 if (GET_MODE (trueop0
) == mode
)
3433 bool maybe_ident
= true;
3434 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3436 rtx j
= XVECEXP (trueop1
, 0, i
);
3437 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3439 maybe_ident
= false;
3447 /* If we build {a,b} then permute it, build the result directly. */
3448 if (XVECLEN (trueop1
, 0) == 2
3449 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3450 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3451 && GET_CODE (trueop0
) == VEC_CONCAT
3452 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3453 && GET_MODE (XEXP (trueop0
, 0)) == mode
3454 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3455 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3457 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3458 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3461 gcc_assert (i0
< 4 && i1
< 4);
3462 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3463 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3465 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3468 if (XVECLEN (trueop1
, 0) == 2
3469 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3470 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3471 && GET_CODE (trueop0
) == VEC_CONCAT
3472 && GET_MODE (trueop0
) == mode
)
3474 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3475 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3478 gcc_assert (i0
< 2 && i1
< 2);
3479 subop0
= XEXP (trueop0
, i0
);
3480 subop1
= XEXP (trueop0
, i1
);
3482 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3485 /* If we select one half of a vec_concat, return that. */
3486 if (GET_CODE (trueop0
) == VEC_CONCAT
3487 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3489 rtx subop0
= XEXP (trueop0
, 0);
3490 rtx subop1
= XEXP (trueop0
, 1);
3491 machine_mode mode0
= GET_MODE (subop0
);
3492 machine_mode mode1
= GET_MODE (subop1
);
3493 int li
= GET_MODE_SIZE (GET_MODE_INNER (mode0
));
3494 int l0
= GET_MODE_SIZE (mode0
) / li
;
3495 int l1
= GET_MODE_SIZE (mode1
) / li
;
3496 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3497 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3499 bool success
= true;
3500 for (int i
= 1; i
< l0
; ++i
)
3502 rtx j
= XVECEXP (trueop1
, 0, i
);
3503 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3512 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3514 bool success
= true;
3515 for (int i
= 1; i
< l1
; ++i
)
3517 rtx j
= XVECEXP (trueop1
, 0, i
);
3518 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3530 if (XVECLEN (trueop1
, 0) == 1
3531 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3532 && GET_CODE (trueop0
) == VEC_CONCAT
)
3535 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3537 /* Try to find the element in the VEC_CONCAT. */
3538 while (GET_MODE (vec
) != mode
3539 && GET_CODE (vec
) == VEC_CONCAT
)
3541 HOST_WIDE_INT vec_size
;
3543 if (CONST_INT_P (XEXP (vec
, 0)))
3545 /* vec_concat of two const_ints doesn't make sense with
3546 respect to modes. */
3547 if (CONST_INT_P (XEXP (vec
, 1)))
3550 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3551 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3554 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3556 if (offset
< vec_size
)
3557 vec
= XEXP (vec
, 0);
3561 vec
= XEXP (vec
, 1);
3563 vec
= avoid_constant_pool_reference (vec
);
3566 if (GET_MODE (vec
) == mode
)
3570 /* If we select elements in a vec_merge that all come from the same
3571 operand, select from that operand directly. */
3572 if (GET_CODE (op0
) == VEC_MERGE
)
3574 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3575 if (CONST_INT_P (trueop02
))
3577 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3578 bool all_operand0
= true;
3579 bool all_operand1
= true;
3580 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3582 rtx j
= XVECEXP (trueop1
, 0, i
);
3583 if (sel
& (1 << UINTVAL (j
)))
3584 all_operand1
= false;
3586 all_operand0
= false;
3588 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3589 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3590 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3591 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3595 /* If we have two nested selects that are inverses of each
3596 other, replace them with the source operand. */
3597 if (GET_CODE (trueop0
) == VEC_SELECT
3598 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3600 rtx op0_subop1
= XEXP (trueop0
, 1);
3601 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3602 gcc_assert (XVECLEN (trueop1
, 0) == GET_MODE_NUNITS (mode
));
3604 /* Apply the outer ordering vector to the inner one. (The inner
3605 ordering vector is expressly permitted to be of a different
3606 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3607 then the two VEC_SELECTs cancel. */
3608 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3610 rtx x
= XVECEXP (trueop1
, 0, i
);
3611 if (!CONST_INT_P (x
))
3613 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3614 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3617 return XEXP (trueop0
, 0);
3623 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3624 ? GET_MODE (trueop0
)
3625 : GET_MODE_INNER (mode
));
3626 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3627 ? GET_MODE (trueop1
)
3628 : GET_MODE_INNER (mode
));
3630 gcc_assert (VECTOR_MODE_P (mode
));
3631 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3632 == GET_MODE_SIZE (mode
));
3634 if (VECTOR_MODE_P (op0_mode
))
3635 gcc_assert (GET_MODE_INNER (mode
)
3636 == GET_MODE_INNER (op0_mode
));
3638 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3640 if (VECTOR_MODE_P (op1_mode
))
3641 gcc_assert (GET_MODE_INNER (mode
)
3642 == GET_MODE_INNER (op1_mode
));
3644 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3646 if ((GET_CODE (trueop0
) == CONST_VECTOR
3647 || CONST_SCALAR_INT_P (trueop0
)
3648 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3649 && (GET_CODE (trueop1
) == CONST_VECTOR
3650 || CONST_SCALAR_INT_P (trueop1
)
3651 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3653 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3654 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3655 rtvec v
= rtvec_alloc (n_elts
);
3657 unsigned in_n_elts
= 1;
3659 if (VECTOR_MODE_P (op0_mode
))
3660 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3661 for (i
= 0; i
< n_elts
; i
++)
3665 if (!VECTOR_MODE_P (op0_mode
))
3666 RTVEC_ELT (v
, i
) = trueop0
;
3668 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3672 if (!VECTOR_MODE_P (op1_mode
))
3673 RTVEC_ELT (v
, i
) = trueop1
;
3675 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3680 return gen_rtx_CONST_VECTOR (mode
, v
);
3683 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3684 Restrict the transformation to avoid generating a VEC_SELECT with a
3685 mode unrelated to its operand. */
3686 if (GET_CODE (trueop0
) == VEC_SELECT
3687 && GET_CODE (trueop1
) == VEC_SELECT
3688 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3689 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3691 rtx par0
= XEXP (trueop0
, 1);
3692 rtx par1
= XEXP (trueop1
, 1);
3693 int len0
= XVECLEN (par0
, 0);
3694 int len1
= XVECLEN (par1
, 0);
3695 rtvec vec
= rtvec_alloc (len0
+ len1
);
3696 for (int i
= 0; i
< len0
; i
++)
3697 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3698 for (int i
= 0; i
< len1
; i
++)
3699 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3700 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3701 gen_rtx_PARALLEL (VOIDmode
, vec
));
3714 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
3717 unsigned int width
= GET_MODE_PRECISION (mode
);
3719 if (VECTOR_MODE_P (mode
)
3720 && code
!= VEC_CONCAT
3721 && GET_CODE (op0
) == CONST_VECTOR
3722 && GET_CODE (op1
) == CONST_VECTOR
)
3724 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3725 machine_mode op0mode
= GET_MODE (op0
);
3726 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3727 machine_mode op1mode
= GET_MODE (op1
);
3728 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3729 rtvec v
= rtvec_alloc (n_elts
);
3732 gcc_assert (op0_n_elts
== n_elts
);
3733 gcc_assert (op1_n_elts
== n_elts
);
3734 for (i
= 0; i
< n_elts
; i
++)
3736 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3737 CONST_VECTOR_ELT (op0
, i
),
3738 CONST_VECTOR_ELT (op1
, i
));
3741 RTVEC_ELT (v
, i
) = x
;
3744 return gen_rtx_CONST_VECTOR (mode
, v
);
3747 if (VECTOR_MODE_P (mode
)
3748 && code
== VEC_CONCAT
3749 && (CONST_SCALAR_INT_P (op0
)
3750 || GET_CODE (op0
) == CONST_FIXED
3751 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3752 && (CONST_SCALAR_INT_P (op1
)
3753 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3754 || GET_CODE (op1
) == CONST_FIXED
))
3756 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3757 rtvec v
= rtvec_alloc (n_elts
);
3759 gcc_assert (n_elts
>= 2);
3762 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3763 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3765 RTVEC_ELT (v
, 0) = op0
;
3766 RTVEC_ELT (v
, 1) = op1
;
3770 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3771 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3774 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3775 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3776 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3778 for (i
= 0; i
< op0_n_elts
; ++i
)
3779 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3780 for (i
= 0; i
< op1_n_elts
; ++i
)
3781 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3784 return gen_rtx_CONST_VECTOR (mode
, v
);
3787 if (SCALAR_FLOAT_MODE_P (mode
)
3788 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3789 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3790 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3801 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3803 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3805 for (i
= 0; i
< 4; i
++)
3822 real_from_target (&r
, tmp0
, mode
);
3823 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3827 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3830 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3831 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3832 real_convert (&f0
, mode
, &f0
);
3833 real_convert (&f1
, mode
, &f1
);
3835 if (HONOR_SNANS (mode
)
3836 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3840 && REAL_VALUES_EQUAL (f1
, dconst0
)
3841 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3844 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3845 && flag_trapping_math
3846 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3848 int s0
= REAL_VALUE_NEGATIVE (f0
);
3849 int s1
= REAL_VALUE_NEGATIVE (f1
);
3854 /* Inf + -Inf = NaN plus exception. */
3859 /* Inf - Inf = NaN plus exception. */
3864 /* Inf / Inf = NaN plus exception. */
3871 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3872 && flag_trapping_math
3873 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3874 || (REAL_VALUE_ISINF (f1
)
3875 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3876 /* Inf * 0 = NaN plus exception. */
3879 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3881 real_convert (&result
, mode
, &value
);
3883 /* Don't constant fold this floating point operation if
3884 the result has overflowed and flag_trapping_math. */
3886 if (flag_trapping_math
3887 && MODE_HAS_INFINITIES (mode
)
3888 && REAL_VALUE_ISINF (result
)
3889 && !REAL_VALUE_ISINF (f0
)
3890 && !REAL_VALUE_ISINF (f1
))
3891 /* Overflow plus exception. */
3894 /* Don't constant fold this floating point operation if the
3895 result may dependent upon the run-time rounding mode and
3896 flag_rounding_math is set, or if GCC's software emulation
3897 is unable to accurately represent the result. */
3899 if ((flag_rounding_math
3900 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3901 && (inexact
|| !real_identical (&result
, &value
)))
3904 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3908 /* We can fold some multi-word operations. */
3909 if ((GET_MODE_CLASS (mode
) == MODE_INT
3910 || GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
3911 && CONST_SCALAR_INT_P (op0
)
3912 && CONST_SCALAR_INT_P (op1
))
3916 rtx_mode_t pop0
= std::make_pair (op0
, mode
);
3917 rtx_mode_t pop1
= std::make_pair (op1
, mode
);
3919 #if TARGET_SUPPORTS_WIDE_INT == 0
3920 /* This assert keeps the simplification from producing a result
3921 that cannot be represented in a CONST_DOUBLE but a lot of
3922 upstream callers expect that this function never fails to
3923 simplify something and so you if you added this to the test
3924 above the code would die later anyway. If this assert
3925 happens, you just need to make the port support wide int. */
3926 gcc_assert (width
<= HOST_BITS_PER_DOUBLE_INT
);
3931 result
= wi::sub (pop0
, pop1
);
3935 result
= wi::add (pop0
, pop1
);
3939 result
= wi::mul (pop0
, pop1
);
3943 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3949 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3955 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3961 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3967 result
= wi::bit_and (pop0
, pop1
);
3971 result
= wi::bit_or (pop0
, pop1
);
3975 result
= wi::bit_xor (pop0
, pop1
);
3979 result
= wi::smin (pop0
, pop1
);
3983 result
= wi::smax (pop0
, pop1
);
3987 result
= wi::umin (pop0
, pop1
);
3991 result
= wi::umax (pop0
, pop1
);
3998 wide_int wop1
= pop1
;
3999 if (SHIFT_COUNT_TRUNCATED
)
4000 wop1
= wi::umod_trunc (wop1
, width
);
4001 else if (wi::geu_p (wop1
, width
))
4007 result
= wi::lrshift (pop0
, wop1
);
4011 result
= wi::arshift (pop0
, wop1
);
4015 result
= wi::lshift (pop0
, wop1
);
4026 if (wi::neg_p (pop1
))
4032 result
= wi::lrotate (pop0
, pop1
);
4036 result
= wi::rrotate (pop0
, pop1
);
4047 return immed_wide_int_const (result
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      canonicalized |= this_neg || i != n_ops - 2;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;

	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, i.e. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
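
/* Worked example (illustrative; A, B, C stand for arbitrary
   non-constant operands): given code == MINUS, op0 == A and
   op1 == (plus B C), the setup records { A, + } and { (plus B C), - };
   the expansion loop then splits the negated PLUS into { B, - } and
   { C, - }.  After sorting and pairwise recombination, the rebuild
   loop produces (minus (minus A B) C), modulo any further folding of
   individual pairs.  */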
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
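
/* E.g. a COMPARE against zero is unwrapped here: a caller's
   (eq (compare A B) (const_int 0)) is rewritten via
   simplify_gen_relational (EQ, mode, VOIDmode, A, B), letting the
   comparison arguments be simplified directly.  (Illustrative.)  */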
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};


/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
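
/* For example (illustrative values), comparing the SImode constants -1
   and 1 yields KNOWN_RESULTS == (CMP_LT | CMP_GTU): signed -1 is less
   than 1, while the same bits read unsigned (0xffffffff) are greater
   than 1.  LT then maps to const_true_rtx and LTU to const0_rtx.  */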
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined,
     since we do not know the signedness of the operation on either the
     left or the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }

  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }

  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
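
/* The bounds optimization above catches cases such as (illustrative
   values): (gtu (and:SI x (const_int 255)) (const_int 300)).  Here
   nonzero_bits gives mmax == 255, and since 300 >= mmax the unsigned
   comparison is known false, i.e. const0_rtx.  */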
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i)))
	     a (1 << i)) with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
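
/* For the FMA case above (illustrative): given (fma (neg A) (neg B) C),
   the code first tries to negate the other multiplicand; NEG of
   (neg B) simplifies to B, so the pair of negations cancels and the
   result is (fma A B C).  */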
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);

	    for (i = 0; i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
	      / HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
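
/* As an example of the unpack/repack scheme above (illustrative
   values; offsets differ on big-endian targets): on a little-endian,
   8-bit-unit target, simplify_immed_subreg (QImode, (const_int 0x1234),
   HImode, 0) unpacks the HImode value into bytes { 0x34, 0x12 },
   selects byte 0, and repacks it as (const_int 0x34).  */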
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis, which cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
5945 simplify_gen_subreg (machine_mode outermode
, rtx op
,
5946 machine_mode innermode
, unsigned int byte
)
5950 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
5954 if (GET_CODE (op
) == SUBREG
5955 || GET_CODE (op
) == CONCAT
5956 || GET_MODE (op
) == VOIDmode
)
5959 if (validate_subreg (outermode
, innermode
, op
, byte
))
5960 return gen_rtx_SUBREG (outermode
, op
, byte
);
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}