/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "varasm.h"
#include "flags.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "stmt.h"
#include "expr.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
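
/* For example, on a host with a 64-bit HOST_WIDE_INT,
   HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is -1 and HWI_SIGN_EXTEND (5)
   is 0, i.e. the value that the high half of a (low, high) pair must
   hold for the pair to represent the sign extension of LOW.  */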

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
\f
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
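
/* Note the unsigned negation above: for SImode, negating the most
   negative value (const_int -2147483648) wraps to the same value
   modulo 2^32, and casting to unsigned first keeps the arithmetic
   well defined in C, where signed overflow is undefined.  */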

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
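
/* For instance, for SImode (precision 32) the predicate above holds
   exactly when the low 32 bits of the constant are 0x80000000, and is
   false for every other value.  */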

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
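
/* E.g. for QImode the three predicates above all test against the bit
   0x80: val_signbit_p requires (VAL & 0xff) to equal 0x80 exactly,
   while the known_set/known_clear variants look only at bit 7 and
   ignore the lower bits.  */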
\f
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
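
/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, reg) does
   not fold, but the commutative swap above canonicalizes it to
   (plus:SI reg (const_int 1)), so later pattern matching only needs to
   handle constants in the second operand.  */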
\f
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
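
/* As an illustration: if X is (mem:DF (symbol_ref ...)) referencing a
   constant-pool entry holding the DFmode constant 1.0, the function
   above returns that CONST_DOUBLE directly; an access in a different
   mode or at a nonzero offset instead goes through simplify_subreg and
   falls back to returning X when no constant results.  */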
\f
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
\f
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
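
/* Like simplify_gen_binary, the three helpers above fall back to
   building the raw rtx when no simplification applies; e.g. an
   unsimplifiable (eq:SI x y) comes back as a freshly built rtx of
   exactly that shape.  */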
\f
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X; if it returns non-NULL, replace X with its return value and simplify
   the result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
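
/* A typical use is substituting a known value for a register, e.g.
   simplify_replace_rtx (x, reg, const0_rtx) rewrites every occurrence
   of REG within X as zero and refolds the enclosing expressions as far
   as possible.  */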
d9c695ff 590\f
40c5ed5b
RS
591/* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
593
594 RTL provides two ways of truncating a value:
595
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
600
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
606
607 2. a TRUNCATE. This form handles both scalar and compound integers.
608
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
613
614 simplify_gen_unary (TRUNCATE, ...)
615
616 and leave simplify_unary_operation to work out which representation
617 should be used.
618
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
624
625 (and:DI X Y)
626
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
631
632 (and:DI (reg:DI X) (const_int 63))
633
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
637
638static rtx
ef4bddc2
RS
639simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
40c5ed5b
RS
641{
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 gcc_assert (precision <= op_precision);
645
646 /* Optimize truncations of zero and sign extended values. */
647 if (GET_CODE (op) == ZERO_EXTEND
648 || GET_CODE (op) == SIGN_EXTEND)
649 {
650 /* There are three possibilities. If MODE is the same as the
651 origmode, we can omit both the extension and the subreg.
652 If MODE is not larger than the origmode, we can apply the
653 truncation without the extension. Finally, if the outermode
654 is larger than the origmode, we can just extend to the appropriate
655 mode. */
ef4bddc2 656 machine_mode origmode = GET_MODE (XEXP (op, 0));
40c5ed5b
RS
657 if (mode == origmode)
658 return XEXP (op, 0);
659 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
660 return simplify_gen_unary (TRUNCATE, mode,
661 XEXP (op, 0), origmode);
662 else
663 return simplify_gen_unary (GET_CODE (op), mode,
664 XEXP (op, 0), origmode);
665 }
666
808c4303
EB
667 /* If the machine can perform operations in the truncated mode, distribute
668 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
669 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
670 if (1
9e11bfef 671 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
808c4303
EB
672 && (GET_CODE (op) == PLUS
673 || GET_CODE (op) == MINUS
674 || GET_CODE (op) == MULT))
40c5ed5b
RS
675 {
676 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
677 if (op0)
678 {
679 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
680 if (op1)
681 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
682 }
683 }
684
685 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
686 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
687 the outer subreg is effectively a truncation to the original mode. */
688 if ((GET_CODE (op) == LSHIFTRT
689 || GET_CODE (op) == ASHIFTRT)
690 /* Ensure that OP_MODE is at least twice as wide as MODE
691 to avoid the possibility that an outer LSHIFTRT shifts by more
692 than the sign extension's sign_bit_copies and introduces zeros
693 into the high bits of the result. */
694 && 2 * precision <= op_precision
695 && CONST_INT_P (XEXP (op, 1))
696 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
697 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
0365ba7c 698 && UINTVAL (XEXP (op, 1)) < precision)
40c5ed5b
RS
699 return simplify_gen_binary (ASHIFTRT, mode,
700 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
701
702 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
703 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
704 the outer subreg is effectively a truncation to the original mode. */
705 if ((GET_CODE (op) == LSHIFTRT
706 || GET_CODE (op) == ASHIFTRT)
707 && CONST_INT_P (XEXP (op, 1))
708 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
709 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
0365ba7c 710 && UINTVAL (XEXP (op, 1)) < precision)
40c5ed5b
RS
711 return simplify_gen_binary (LSHIFTRT, mode,
712 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
713
714 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
715 to (ashift:QI (x:QI) C), where C is a suitable small constant and
716 the outer subreg is effectively a truncation to the original mode. */
717 if (GET_CODE (op) == ASHIFT
718 && CONST_INT_P (XEXP (op, 1))
719 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
720 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
721 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
0365ba7c 722 && UINTVAL (XEXP (op, 1)) < precision)
40c5ed5b
RS
723 return simplify_gen_binary (ASHIFT, mode,
724 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
725
726 /* Recognize a word extraction from a multi-word subreg. */
727 if ((GET_CODE (op) == LSHIFTRT
728 || GET_CODE (op) == ASHIFTRT)
729 && SCALAR_INT_MODE_P (mode)
730 && SCALAR_INT_MODE_P (op_mode)
731 && precision >= BITS_PER_WORD
732 && 2 * precision <= op_precision
733 && CONST_INT_P (XEXP (op, 1))
734 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
0365ba7c 735 && UINTVAL (XEXP (op, 1)) < op_precision)
40c5ed5b
RS
736 {
737 int byte = subreg_lowpart_offset (mode, op_mode);
738 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
739 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
740 (WORDS_BIG_ENDIAN
741 ? byte - shifted_bytes
742 : byte + shifted_bytes));
743 }
744
745 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
746 and try replacing the TRUNCATE and shift with it. Don't do this
747 if the MEM has a mode-dependent address. */
748 if ((GET_CODE (op) == LSHIFTRT
749 || GET_CODE (op) == ASHIFTRT)
750 && SCALAR_INT_MODE_P (op_mode)
751 && MEM_P (XEXP (op, 0))
752 && CONST_INT_P (XEXP (op, 1))
753 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
754 && INTVAL (XEXP (op, 1)) > 0
755 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
756 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
757 MEM_ADDR_SPACE (XEXP (op, 0)))
758 && ! MEM_VOLATILE_P (XEXP (op, 0))
759 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
760 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
761 {
762 int byte = subreg_lowpart_offset (mode, op_mode);
763 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
764 return adjust_address_nv (XEXP (op, 0), mode,
765 (WORDS_BIG_ENDIAN
766 ? byte - shifted_bytes
767 : byte + shifted_bytes));
768 }
769
770 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
771 (OP:SI foo:SI) if OP is NEG or ABS. */
772 if ((GET_CODE (op) == ABS
773 || GET_CODE (op) == NEG)
774 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
775 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
776 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
777 return simplify_gen_unary (GET_CODE (op), mode,
778 XEXP (XEXP (op, 0), 0), mode);
779
780 /* (truncate:A (subreg:B (truncate:C X) 0)) is
781 (truncate:A X). */
782 if (GET_CODE (op) == SUBREG
783 && SCALAR_INT_MODE_P (mode)
784 && SCALAR_INT_MODE_P (op_mode)
785 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
786 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
787 && subreg_lowpart_p (op))
86efb5cd
JJ
788 {
789 rtx inner = XEXP (SUBREG_REG (op), 0);
790 if (GET_MODE_PRECISION (mode)
791 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
792 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
793 else
794 /* If subreg above is paradoxical and C is narrower
795 than A, return (subreg:A (truncate:C X) 0). */
796 return simplify_gen_subreg (mode, SUBREG_REG (op),
797 GET_MODE (SUBREG_REG (op)), 0);
798 }
40c5ed5b
RS
799
800 /* (truncate:A (truncate:B X)) is (truncate:A X). */
801 if (GET_CODE (op) == TRUNCATE)
802 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
803 GET_MODE (XEXP (op, 0)));
804
805 return NULL_RTX;
806}
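
/* As an example of the word-extraction case above: on a little-endian
   target with 32-bit words, (truncate:SI (lshiftrt:DI (reg:DI)
   (const_int 32))) becomes (subreg:SI (reg:DI) 4), a direct reference
   to the high word of the register.  */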
\f
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
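
/* For instance, a FLOAT from SImode to SFmode is exact whenever the
   operand is known to need at most 24 significant bits -- SFmode's
   significand size -- e.g. after masking with
   (and:SI x (const_int 0xffff)).  */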

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If the comparison is not reversible, use
         x ? y : (neg y) instead.  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
              || (GET_CODE (false_rtx) == NEG
                  && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x).

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                  (MULT, mode,
                   simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                   simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is a mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                  (MULT, mode,
                   simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                   simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is a mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
1592/* Try to compute the value of a unary operation CODE whose output mode is to
1593 be MODE with input operand OP whose mode was originally OP_MODE.
1594 Return zero if the value cannot be computed. */
1595rtx
ef4bddc2
RS
1596simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1597 rtx op, machine_mode op_mode)
0cedb36c 1598{
5511bc5a 1599 unsigned int width = GET_MODE_PRECISION (mode);
0cedb36c 1600
d9deed68
JH
1601 if (code == VEC_DUPLICATE)
1602 {
41374e13 1603 gcc_assert (VECTOR_MODE_P (mode));
0a67e02c 1604 if (GET_MODE (op) != VOIDmode)
41374e13 1605 {
0a67e02c
PB
1606 if (!VECTOR_MODE_P (GET_MODE (op)))
1607 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
41374e13
NS
1608 else
1609 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
0a67e02c 1610 (GET_MODE (op)));
41374e13 1611 }
33ffb5c5 1612 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
0a67e02c 1613 || GET_CODE (op) == CONST_VECTOR)
d9deed68 1614 {
cb5ca315 1615 int elt_size = GET_MODE_UNIT_SIZE (mode);
d9deed68
JH
1616 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1617 rtvec v = rtvec_alloc (n_elts);
1618 unsigned int i;
1619
0a67e02c 1620 if (GET_CODE (op) != CONST_VECTOR)
d9deed68 1621 for (i = 0; i < n_elts; i++)
0a67e02c 1622 RTVEC_ELT (v, i) = op;
d9deed68
JH
1623 else
1624 {
ef4bddc2 1625 machine_mode inmode = GET_MODE (op);
cb5ca315 1626 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
d9deed68
JH
1627 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1628
41374e13
NS
1629 gcc_assert (in_n_elts < n_elts);
1630 gcc_assert ((n_elts % in_n_elts) == 0);
d9deed68 1631 for (i = 0; i < n_elts; i++)
0a67e02c 1632 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
d9deed68
JH
1633 }
1634 return gen_rtx_CONST_VECTOR (mode, v);
1635 }
1636 }
1637
0a67e02c 1638 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
852c8ba1 1639 {
cb5ca315 1640 int elt_size = GET_MODE_UNIT_SIZE (mode);
852c8ba1 1641 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
ef4bddc2 1642 machine_mode opmode = GET_MODE (op);
cb5ca315 1643 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
852c8ba1
JH
1644 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1645 rtvec v = rtvec_alloc (n_elts);
1646 unsigned int i;
1647
41374e13 1648 gcc_assert (op_n_elts == n_elts);
852c8ba1
JH
1649 for (i = 0; i < n_elts; i++)
1650 {
1651 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
0a67e02c 1652 CONST_VECTOR_ELT (op, i),
852c8ba1
JH
1653 GET_MODE_INNER (opmode));
1654 if (!x)
1655 return 0;
1656 RTVEC_ELT (v, i) = x;
1657 }
1658 return gen_rtx_CONST_VECTOR (mode, v);
1659 }
1660
0cedb36c
JL
1661 /* The order of these tests is critical so that, for example, we don't
1662 check the wrong mode (input vs. output) for a conversion operation,
1663 such as FIX. At some point, this should be simplified. */
1664
33ffb5c5 1665 if (code == FLOAT && CONST_SCALAR_INT_P (op))
0cedb36c 1666 {
0cedb36c
JL
1667 REAL_VALUE_TYPE d;
1668
807e902e
KZ
1669 if (op_mode == VOIDmode)
1670 {
1671 /* CONST_INTs have VOIDmode as their mode. We assume that all
1672 the bits of the constant are significant, though this is
1673 a dangerous assumption: CONST_INTs are often created and
1674 used with garbage in the bits outside of the precision of
1675 the implied mode of the const_int. */
1676 op_mode = MAX_MODE_INT;
1677 }
0cedb36c 1678
807e902e 1679 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
0cedb36c 1680 d = real_value_truncate (mode, d);
555affd7 1681 return const_double_from_real_value (d, mode);
0cedb36c 1682 }
33ffb5c5 1683 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
0cedb36c 1684 {
0cedb36c
JL
1685 REAL_VALUE_TYPE d;
1686
807e902e
KZ
1687 if (op_mode == VOIDmode)
1688 {
1689 /* CONST_INTs have VOIDmode as their mode. We assume that all
1690 the bits of the constant are significant, though this is
1691 a dangerous assumption: CONST_INTs are often created and
1692 used with garbage in the bits outside of the precision of
1693 the implied mode of the const_int. */
1694 op_mode = MAX_MODE_INT;
1695 }
0cedb36c 1696
807e902e 1697 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
0cedb36c 1698 d = real_value_truncate (mode, d);
555affd7 1699 return const_double_from_real_value (d, mode);
0cedb36c 1700 }
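 /* For example, (float:SF (const_int -1)) folds to -1.0 under the
 signed interpretation above, whereas (unsigned_float:SF
 (const_int -1)) with an SImode operand folds to 4294967295.0,
 the unsigned reading of the same bit pattern. */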
0cedb36c 1701
807e902e 1702 if (CONST_SCALAR_INT_P (op) && width > 0)
0cedb36c 1703 {
807e902e 1704 wide_int result;
ef4bddc2 1705 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
807e902e
KZ
1706 rtx_mode_t op0 = std::make_pair (op, imode);
1707 int int_value;
1708
1709#if TARGET_SUPPORTS_WIDE_INT == 0
1710 /* This assert keeps the simplification from producing a result
1711 that cannot be represented in a CONST_DOUBLE. A lot of
1712 upstream callers expect that this function never fails to
1713 simplify something, so if you added this check to the test
1714 above, the code would die later anyway. If this assert
1715 triggers, you just need to make the port support wide int. */
1716 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1717#endif
0cedb36c
JL
1718
1719 switch (code)
1720 {
1721 case NOT:
807e902e 1722 result = wi::bit_not (op0);
0cedb36c
JL
1723 break;
1724
1725 case NEG:
807e902e 1726 result = wi::neg (op0);
0cedb36c
JL
1727 break;
1728
1729 case ABS:
807e902e 1730 result = wi::abs (op0);
0cedb36c
JL
1731 break;
1732
1733 case FFS:
807e902e 1734 result = wi::shwi (wi::ffs (op0), mode);
0cedb36c
JL
1735 break;
1736
2928cd7a 1737 case CLZ:
807e902e
KZ
1738 if (wi::ne_p (op0, 0))
1739 int_value = wi::clz (op0);
1740 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1741 int_value = GET_MODE_PRECISION (mode);
1742 result = wi::shwi (int_value, mode);
3801c801
BS
1743 break;
1744
1745 case CLRSB:
807e902e 1746 result = wi::shwi (wi::clrsb (op0), mode);
2928cd7a
RH
1747 break;
1748
1749 case CTZ:
807e902e
KZ
1750 if (wi::ne_p (op0, 0))
1751 int_value = wi::ctz (op0);
1752 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1753 int_value = GET_MODE_PRECISION (mode);
1754 result = wi::shwi (int_value, mode);
2928cd7a
RH
1755 break;
1756
1757 case POPCOUNT:
807e902e 1758 result = wi::shwi (wi::popcount (op0), mode);
2928cd7a
RH
1759 break;
1760
1761 case PARITY:
807e902e 1762 result = wi::shwi (wi::parity (op0), mode);
2928cd7a
RH
1763 break;
1764
167fa32c 1765 case BSWAP:
807e902e 1766 result = wide_int (op0).bswap ();
9f05adb0 1767 break;
167fa32c 1768
0cedb36c 1769 case TRUNCATE:
0cedb36c 1770 case ZERO_EXTEND:
807e902e 1771 result = wide_int::from (op0, width, UNSIGNED);
0cedb36c
JL
1772 break;
1773
1774 case SIGN_EXTEND:
807e902e 1775 result = wide_int::from (op0, width, SIGNED);
0cedb36c
JL
1776 break;
1777
1778 case SQRT:
0cedb36c
JL
1779 default:
1780 return 0;
1781 }
1782
807e902e 1783 return immed_wide_int_const (result, mode);
0cedb36c
JL
1784 }
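 /* Worked examples in QImode (width 8): (not 0x0f) folds to 0xf0,
 (neg 0x80) wraps back to 0x80, (ffs 0x18) is 4 (FFS numbers the
 lowest set bit from 1), (clz 0x01) is 7, (ctz 0x08) is 3,
 (popcount 0x36) is 4 and (parity 0x36) is 0. */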
1785
48175537 1786 else if (CONST_DOUBLE_AS_FLOAT_P (op)
6f0c9f06
JJ
1787 && SCALAR_FLOAT_MODE_P (mode)
1788 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
0cedb36c 1789 {
34a72c33 1790 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
15e5ad76
ZW
1791 switch (code)
1792 {
1793 case SQRT:
3c8e8595 1794 return 0;
94313f35 1795 case ABS:
d49b6e1e 1796 d = real_value_abs (&d);
94313f35
RH
1797 break;
1798 case NEG:
d49b6e1e 1799 d = real_value_negate (&d);
94313f35
RH
1800 break;
1801 case FLOAT_TRUNCATE:
1802 d = real_value_truncate (mode, d);
1803 break;
1804 case FLOAT_EXTEND:
6f0c9f06
JJ
1805 /* All this does is change the mode, unless the mode class
1806 changes, in which case a real conversion is needed. */
1807 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1808 real_convert (&d, mode, &d);
94313f35
RH
1809 break;
1810 case FIX:
1811 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1812 break;
79ae63b1
JH
1813 case NOT:
1814 {
1815 long tmp[4];
1816 int i;
1817
0a67e02c 1818 real_to_target (tmp, &d, GET_MODE (op));
79ae63b1
JH
1819 for (i = 0; i < 4; i++)
1820 tmp[i] = ~tmp[i];
1821 real_from_target (&d, tmp, mode);
0a67e02c 1822 break;
79ae63b1 1823 }
15e5ad76 1824 default:
41374e13 1825 gcc_unreachable ();
15e5ad76 1826 }
555affd7 1827 return const_double_from_real_value (d, mode);
0cedb36c 1828 }
48175537 1829 else if (CONST_DOUBLE_AS_FLOAT_P (op)
3d8bf70f 1830 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
0cedb36c 1831 && GET_MODE_CLASS (mode) == MODE_INT
807e902e 1832 && width > 0)
0cedb36c 1833 {
875eda9c 1834 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2067c116 1835 operators are intentionally left unspecified (to ease implementation
875eda9c
RS
1836 by target backends), for consistency, this routine implements the
1837 same semantics for constant folding as used by the middle-end. */
1838
0a67e02c
PB
1839 /* This was formerly used only for non-IEEE float.
1840 eggert@twinsun.com says it is safe for IEEE also. */
34a72c33
RS
1841 REAL_VALUE_TYPE t;
1842 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
807e902e
KZ
1843 wide_int wmax, wmin;
1844 /* This is part of the ABI of real_to_integer, but we check
1845 things before making this call. */
1846 bool fail;
1847
15e5ad76
ZW
1848 switch (code)
1849 {
875eda9c 1850 case FIX:
34a72c33 1851 if (REAL_VALUE_ISNAN (*x))
875eda9c
RS
1852 return const0_rtx;
1853
1854 /* Test against the signed upper bound. */
807e902e
KZ
1855 wmax = wi::max_value (width, SIGNED);
1856 real_from_integer (&t, VOIDmode, wmax, SIGNED);
34a72c33 1857 if (real_less (&t, x))
807e902e 1858 return immed_wide_int_const (wmax, mode);
875eda9c
RS
1859
1860 /* Test against the signed lower bound. */
807e902e
KZ
1861 wmin = wi::min_value (width, SIGNED);
1862 real_from_integer (&t, VOIDmode, wmin, SIGNED);
34a72c33 1863 if (real_less (x, &t))
807e902e
KZ
1864 return immed_wide_int_const (wmin, mode);
1865
34a72c33
RS
1866 return immed_wide_int_const (real_to_integer (x, &fail, width),
1867 mode);
875eda9c
RS
1868
1869 case UNSIGNED_FIX:
34a72c33 1870 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
875eda9c
RS
1871 return const0_rtx;
1872
1873 /* Test against the unsigned upper bound. */
807e902e
KZ
1874 wmax = wi::max_value (width, UNSIGNED);
1875 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
34a72c33 1876 if (real_less (&t, x))
807e902e 1877 return immed_wide_int_const (wmax, mode);
875eda9c 1878
34a72c33 1879 return immed_wide_int_const (real_to_integer (x, &fail, width),
807e902e 1880 mode);
875eda9c 1881
15e5ad76 1882 default:
41374e13 1883 gcc_unreachable ();
15e5ad76 1884 }
0cedb36c 1885 }
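 /* For example, folding to QImode: (fix 300.0) saturates to 127,
 (fix -300.0) saturates to -128, NaN folds to 0, and
 (unsigned_fix -5.0) folds to 0, matching the middle-end's
 constant-folding semantics. */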
ba31d94e 1886
0a67e02c 1887 return NULL_RTX;
0cedb36c
JL
1888}
1889\f
b17c024f
EB
1890/* Subroutine of simplify_binary_operation to simplify a binary operation
1891 CODE that can commute with byte swapping, with result mode MODE and
1892 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1893 Return zero if no simplification or canonicalization is possible. */
1894
1895static rtx
ef4bddc2 1896simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
b17c024f
EB
1897 rtx op0, rtx op1)
1898{
1899 rtx tem;
1900
1901 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
a8c50132 1902 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
b17c024f
EB
1903 {
1904 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1905 simplify_gen_unary (BSWAP, mode, op1, mode));
1906 return simplify_gen_unary (BSWAP, mode, tem, mode);
1907 }
1908
1909 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1910 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1911 {
1912 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1913 return simplify_gen_unary (BSWAP, mode, tem, mode);
1914 }
1915
1916 return NULL_RTX;
1917}
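/* For example, with AND in SImode, (and (bswap x) (const_int 0xff))
 becomes (bswap (and x (const_int 0xff000000))): the constant is
 byte-swapped so that the AND can be applied before the BSWAP. */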
1918
9ce79a7a
RS
1919/* Subroutine of simplify_binary_operation to simplify a commutative,
1920 associative binary operation CODE with result mode MODE, operating
1921 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1922 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1923 canonicalization is possible. */
dd61aa98 1924
dd61aa98 1925static rtx
ef4bddc2 1926simplify_associative_operation (enum rtx_code code, machine_mode mode,
dd61aa98
RS
1927 rtx op0, rtx op1)
1928{
1929 rtx tem;
1930
9ce79a7a
RS
1931 /* Linearize the operator to the left. */
1932 if (GET_CODE (op1) == code)
dd61aa98 1933 {
9ce79a7a
RS
1934 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1935 if (GET_CODE (op0) == code)
1936 {
1937 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1938 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1939 }
dd61aa98 1940
9ce79a7a
RS
1941 /* "a op (b op c)" becomes "(b op c) op a". */
1942 if (! swap_commutative_operands_p (op1, op0))
1943 return simplify_gen_binary (code, mode, op1, op0);
dd61aa98 1944
e2be0590 1945 std::swap (op0, op1);
dd61aa98
RS
1946 }
1947
9ce79a7a 1948 if (GET_CODE (op0) == code)
dd61aa98 1949 {
9ce79a7a
RS
1950 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1951 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1952 {
1953 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1954 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1955 }
1956
1957 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
7e0b4eae 1958 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
9ce79a7a
RS
1959 if (tem != 0)
1960 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1961
1962 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
7e0b4eae 1963 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
9ce79a7a
RS
1964 if (tem != 0)
1965 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
dd61aa98
RS
1966 }
1967
1968 return 0;
1969}
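/* For example, (plus (plus a b) (plus c d)) is linearized to
 (plus (plus (plus a b) c) d), and (plus (plus x (const_int 1)) y)
 is canonicalized to (plus (plus x y) (const_int 1)), sinking the
 constant to the outermost position. */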
1970
0a67e02c 1971
0cedb36c
JL
1972/* Simplify a binary operation CODE with result mode MODE, operating on OP0
1973 and OP1. Return 0 if no simplification is possible.
1974
1975 Don't use this for relational operations such as EQ or LT.
1976 Use simplify_relational_operation instead. */
0cedb36c 1977rtx
ef4bddc2 1978simplify_binary_operation (enum rtx_code code, machine_mode mode,
46c5ad27 1979 rtx op0, rtx op1)
0cedb36c 1980{
9ce79a7a 1981 rtx trueop0, trueop1;
0cedb36c
JL
1982 rtx tem;
1983
1984 /* Relational operations don't work here. We must know the mode
1985 of the operands in order to do the comparison correctly.
1986 Assuming a full word can give incorrect results.
1987 Consider comparing 128 with -128 in QImode. */
41374e13
NS
1988 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1989 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
0cedb36c 1990
4ba5f925 1991 /* Make sure the constant is second. */
ec8e098d 1992 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9ce79a7a 1993 && swap_commutative_operands_p (op0, op1))
e2be0590 1994 std::swap (op0, op1);
4ba5f925 1995
9ce79a7a
RS
1996 trueop0 = avoid_constant_pool_reference (op0);
1997 trueop1 = avoid_constant_pool_reference (op1);
1998
0a67e02c
PB
1999 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2000 if (tem)
2001 return tem;
bfb792b6
KT
2002 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2003
2004 if (tem)
2005 return tem;
2006
2007 /* If the above steps did not result in a simplification and op0 or op1
2008 were constant pool references, use the referenced constants directly. */
2009 if (trueop0 != op0 || trueop1 != op1)
2010 return simplify_gen_binary (code, mode, trueop0, trueop1);
2011
2012 return NULL_RTX;
0a67e02c
PB
2013}
2014
1753331b
RS
2015/* Subroutine of simplify_binary_operation. Simplify a binary operation
2016 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2017 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2018 actual constants. */
2019
0a67e02c 2020static rtx
ef4bddc2 2021simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
0a67e02c
PB
2022 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2023{
bd1ef757 2024 rtx tem, reversed, opleft, opright;
0a67e02c 2025 HOST_WIDE_INT val;
5511bc5a 2026 unsigned int width = GET_MODE_PRECISION (mode);
0a67e02c
PB
2027
2028 /* Even if we can't compute a constant result,
2029 there are some cases worth simplifying. */
2030
2031 switch (code)
852c8ba1 2032 {
0a67e02c
PB
2033 case PLUS:
2034 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2035 when x is NaN, infinite, or finite and nonzero. They aren't
2036 when x is -0 and the rounding mode is not towards -infinity,
2037 since (-0) + 0 is then 0. */
2038 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2039 return op0;
2040
2041 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2042 transformations are safe even for IEEE. */
2043 if (GET_CODE (op0) == NEG)
2044 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2045 else if (GET_CODE (op1) == NEG)
2046 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2047
2048 /* (~a) + 1 -> -a */
2049 if (INTEGRAL_MODE_P (mode)
2050 && GET_CODE (op0) == NOT
2051 && trueop1 == const1_rtx)
2052 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2053
2054 /* Handle both-operands-constant cases. We can only add
2055 CONST_INTs to constants since the sum of relocatable symbols
2056 can't be handled by most assemblers. Don't add CONST_INT
2057 to CONST_INT since overflow won't be computed properly if wider
2058 than HOST_BITS_PER_WIDE_INT. */
2059
dd59ef13
RS
2060 if ((GET_CODE (op0) == CONST
2061 || GET_CODE (op0) == SYMBOL_REF
2062 || GET_CODE (op0) == LABEL_REF)
481683e1 2063 && CONST_INT_P (op1))
0a81f074 2064 return plus_constant (mode, op0, INTVAL (op1));
dd59ef13
RS
2065 else if ((GET_CODE (op1) == CONST
2066 || GET_CODE (op1) == SYMBOL_REF
2067 || GET_CODE (op1) == LABEL_REF)
481683e1 2068 && CONST_INT_P (op0))
0a81f074 2069 return plus_constant (mode, op1, INTVAL (op0));
0a67e02c
PB
2070
2071 /* See if this is something like X * C - X or vice versa or
2072 if the multiplication is written as a shift. If so, we can
2073 distribute and make a new multiply, shift, or maybe just
2074 have X (if C is 2 in the example above). But don't make
2075 something more expensive than we had before. */
2076
6800ea5c 2077 if (SCALAR_INT_MODE_P (mode))
0a67e02c 2078 {
0a67e02c
PB
2079 rtx lhs = op0, rhs = op1;
2080
807e902e
KZ
2081 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2082 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
54fb1ae0 2083
0a67e02c 2084 if (GET_CODE (lhs) == NEG)
fab2f52c 2085 {
807e902e 2086 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
fab2f52c
AO
2087 lhs = XEXP (lhs, 0);
2088 }
0a67e02c 2089 else if (GET_CODE (lhs) == MULT
807e902e 2090 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
fab2f52c 2091 {
807e902e 2092 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
fab2f52c
AO
2093 lhs = XEXP (lhs, 0);
2094 }
0a67e02c 2095 else if (GET_CODE (lhs) == ASHIFT
481683e1 2096 && CONST_INT_P (XEXP (lhs, 1))
54fb1ae0 2097 && INTVAL (XEXP (lhs, 1)) >= 0
807e902e 2098 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
0a67e02c 2099 {
807e902e
KZ
2100 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2101 GET_MODE_PRECISION (mode));
0a67e02c
PB
2102 lhs = XEXP (lhs, 0);
2103 }
852c8ba1 2104
0a67e02c 2105 if (GET_CODE (rhs) == NEG)
fab2f52c 2106 {
807e902e 2107 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
fab2f52c
AO
2108 rhs = XEXP (rhs, 0);
2109 }
0a67e02c 2110 else if (GET_CODE (rhs) == MULT
481683e1 2111 && CONST_INT_P (XEXP (rhs, 1)))
0a67e02c 2112 {
807e902e 2113 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
fab2f52c 2114 rhs = XEXP (rhs, 0);
0a67e02c
PB
2115 }
2116 else if (GET_CODE (rhs) == ASHIFT
481683e1 2117 && CONST_INT_P (XEXP (rhs, 1))
0a67e02c 2118 && INTVAL (XEXP (rhs, 1)) >= 0
807e902e 2119 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
0a67e02c 2120 {
807e902e
KZ
2121 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2122 GET_MODE_PRECISION (mode));
0a67e02c
PB
2123 rhs = XEXP (rhs, 0);
2124 }
2125
2126 if (rtx_equal_p (lhs, rhs))
2127 {
2128 rtx orig = gen_rtx_PLUS (mode, op0, op1);
fab2f52c 2129 rtx coeff;
f40751dd 2130 bool speed = optimize_function_for_speed_p (cfun);
fab2f52c 2131
807e902e 2132 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
fab2f52c
AO
2133
2134 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
e548c9df
AM
2135 return (set_src_cost (tem, mode, speed)
2136 <= set_src_cost (orig, mode, speed) ? tem : 0);
0a67e02c
PB
2137 }
2138 }
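 /* For example, (plus (mult x (const_int 3)) x) yields
 coeff0 == 3 and coeff1 == 1, so it folds to
 (mult x (const_int 4)) whenever that is no more costly. */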
2139
2140 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
33ffb5c5 2141 if (CONST_SCALAR_INT_P (op1)
0a67e02c 2142 && GET_CODE (op0) == XOR
33ffb5c5 2143 && CONST_SCALAR_INT_P (XEXP (op0, 1))
0a67e02c
PB
2144 && mode_signbit_p (mode, op1))
2145 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2146 simplify_gen_binary (XOR, mode, op1,
2147 XEXP (op0, 1)));
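 /* For example, in QImode, (plus (xor x (const_int 15))
 (const_int -128)) becomes (xor x (const_int -113)): adding the
 sign bit is the same as XORing it, because the carry out of the
 top bit is discarded. */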
2148
bd1ef757 2149 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
4bf371ea
RG
2150 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2151 && GET_CODE (op0) == MULT
bd1ef757
PB
2152 && GET_CODE (XEXP (op0, 0)) == NEG)
2153 {
2154 rtx in1, in2;
2155
2156 in1 = XEXP (XEXP (op0, 0), 0);
2157 in2 = XEXP (op0, 1);
2158 return simplify_gen_binary (MINUS, mode, op1,
2159 simplify_gen_binary (MULT, mode,
2160 in1, in2));
2161 }
2162
2163 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2164 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2165 is 1. */
2166 if (COMPARISON_P (op0)
2167 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2168 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2169 && (reversed = reversed_comparison (op0, mode)))
2170 return
2171 simplify_gen_unary (NEG, mode, reversed, mode);
2172
0a67e02c
PB
2173 /* If one of the operands is a PLUS or a MINUS, see if we can
2174 simplify this by the associative law.
2175 Don't use the associative law for floating point.
2176 The inaccuracy makes it nonassociative,
2177 and subtle programs can break if operations are associated. */
2178
2179 if (INTEGRAL_MODE_P (mode)
2180 && (plus_minus_operand_p (op0)
2181 || plus_minus_operand_p (op1))
1941069a 2182 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
0a67e02c
PB
2183 return tem;
2184
2185 /* Reassociate floating point addition only when the user
a1a82611 2186 specifies associative math operations. */
0a67e02c 2187 if (FLOAT_MODE_P (mode)
a1a82611 2188 && flag_associative_math)
852c8ba1 2189 {
0a67e02c
PB
2190 tem = simplify_associative_operation (code, mode, op0, op1);
2191 if (tem)
2192 return tem;
852c8ba1 2193 }
0a67e02c 2194 break;
852c8ba1 2195
0a67e02c 2196 case COMPARE:
0a67e02c
PB
2197 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2198 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2199 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2200 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
3198b947 2201 {
0a67e02c
PB
2202 rtx xop00 = XEXP (op0, 0);
2203 rtx xop10 = XEXP (op1, 0);
3198b947 2204
0a67e02c 2205 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
176cb568
TS
2206 return xop00;
2207
0a67e02c
PB
2208 if (REG_P (xop00) && REG_P (xop10)
2209 && GET_MODE (xop00) == GET_MODE (xop10)
2210 && REGNO (xop00) == REGNO (xop10)
2211 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2212 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
0a67e02c 2213 return xop00;
3198b947 2214 }
0a67e02c
PB
2215 break;
2216
2217 case MINUS:
2218 /* We can't assume x-x is 0 even with non-IEEE floating point,
2219 but since it is zero except in very strange circumstances, we
81d2fb02 2220 will treat it as zero with -ffinite-math-only. */
0a67e02c
PB
2221 if (rtx_equal_p (trueop0, trueop1)
2222 && ! side_effects_p (op0)
81d2fb02 2223 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
0a67e02c
PB
2224 return CONST0_RTX (mode);
2225
2226 /* Change subtraction from zero into negation. (0 - x) is the
2227 same as -x when x is NaN, infinite, or finite and nonzero.
2228 But if the mode has signed zeros, and does not round towards
2229 -infinity, then 0 - 0 is 0, not -0. */
2230 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2231 return simplify_gen_unary (NEG, mode, op1, mode);
2232
2233 /* (-1 - a) is ~a. */
2234 if (trueop0 == constm1_rtx)
2235 return simplify_gen_unary (NOT, mode, op1, mode);
2236
2237 /* Subtracting 0 has no effect unless the mode has signed zeros
2238 and supports rounding towards -infinity. In such a case,
2239 0 - 0 is -0. */
2240 if (!(HONOR_SIGNED_ZEROS (mode)
2241 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2242 && trueop1 == CONST0_RTX (mode))
2243 return op0;
2244
2245 /* See if this is something like X * C - X or vice versa or
2246 if the multiplication is written as a shift. If so, we can
2247 distribute and make a new multiply, shift, or maybe just
2248 have X (if C is 2 in the example above). But don't make
2249 something more expensive than we had before. */
2250
6800ea5c 2251 if (SCALAR_INT_MODE_P (mode))
3198b947 2252 {
0a67e02c 2253 rtx lhs = op0, rhs = op1;
3198b947 2254
807e902e
KZ
2255 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2256 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
54fb1ae0 2257
0a67e02c 2258 if (GET_CODE (lhs) == NEG)
fab2f52c 2259 {
807e902e 2260 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
fab2f52c
AO
2261 lhs = XEXP (lhs, 0);
2262 }
0a67e02c 2263 else if (GET_CODE (lhs) == MULT
807e902e 2264 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
0a67e02c 2265 {
807e902e 2266 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
fab2f52c 2267 lhs = XEXP (lhs, 0);
0a67e02c
PB
2268 }
2269 else if (GET_CODE (lhs) == ASHIFT
481683e1 2270 && CONST_INT_P (XEXP (lhs, 1))
0a67e02c 2271 && INTVAL (XEXP (lhs, 1)) >= 0
807e902e 2272 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
0a67e02c 2273 {
807e902e
KZ
2274 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2275 GET_MODE_PRECISION (mode));
0a67e02c
PB
2276 lhs = XEXP (lhs, 0);
2277 }
3198b947 2278
0a67e02c 2279 if (GET_CODE (rhs) == NEG)
fab2f52c 2280 {
807e902e 2281 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
fab2f52c
AO
2282 rhs = XEXP (rhs, 0);
2283 }
0a67e02c 2284 else if (GET_CODE (rhs) == MULT
481683e1 2285 && CONST_INT_P (XEXP (rhs, 1)))
0a67e02c 2286 {
807e902e 2287 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
fab2f52c 2288 rhs = XEXP (rhs, 0);
0a67e02c
PB
2289 }
2290 else if (GET_CODE (rhs) == ASHIFT
481683e1 2291 && CONST_INT_P (XEXP (rhs, 1))
0a67e02c 2292 && INTVAL (XEXP (rhs, 1)) >= 0
807e902e 2293 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
0a67e02c 2294 {
807e902e
KZ
2295 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2296 GET_MODE_PRECISION (mode));
27bcd47c 2297 negcoeff1 = -negcoeff1;
0a67e02c
PB
2298 rhs = XEXP (rhs, 0);
2299 }
2300
2301 if (rtx_equal_p (lhs, rhs))
2302 {
2303 rtx orig = gen_rtx_MINUS (mode, op0, op1);
fab2f52c 2304 rtx coeff;
f40751dd 2305 bool speed = optimize_function_for_speed_p (cfun);
fab2f52c 2306
807e902e 2307 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
fab2f52c
AO
2308
2309 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
e548c9df
AM
2310 return (set_src_cost (tem, mode, speed)
2311 <= set_src_cost (orig, mode, speed) ? tem : 0);
0a67e02c 2312 }
3198b947
RH
2313 }
2314
0a67e02c
PB
2315 /* (a - (-b)) -> (a + b). True even for IEEE. */
2316 if (GET_CODE (op1) == NEG)
2317 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3198b947 2318
0a67e02c
PB
2319 /* (-x - c) may be simplified as (-c - x). */
2320 if (GET_CODE (op0) == NEG
33ffb5c5 2321 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
79ae63b1 2322 {
0a67e02c
PB
2323 tem = simplify_unary_operation (NEG, mode, op1, mode);
2324 if (tem)
2325 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2326 }
79ae63b1 2327
0a67e02c 2328 /* Don't let a relocatable value get a negative coeff. */
481683e1 2329 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
0a67e02c
PB
2330 return simplify_gen_binary (PLUS, mode,
2331 op0,
2332 neg_const_int (mode, op1));
2333
2334 /* (x - (x & y)) -> (x & ~y) */
6b74529d 2335 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
0a67e02c
PB
2336 {
2337 if (rtx_equal_p (op0, XEXP (op1, 0)))
79ae63b1 2338 {
0a67e02c
PB
2339 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2340 GET_MODE (XEXP (op1, 1)));
2341 return simplify_gen_binary (AND, mode, op0, tem);
2342 }
2343 if (rtx_equal_p (op0, XEXP (op1, 1)))
2344 {
2345 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2346 GET_MODE (XEXP (op1, 0)));
2347 return simplify_gen_binary (AND, mode, op0, tem);
79ae63b1 2348 }
79ae63b1 2349 }
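 /* For example, x = 0b1101, y = 0b1011: (x & y) == 0b1001 and
 x - 0b1001 == 0b0100 == (x & ~y). The subtraction never
 borrows, because the bits of (x & y) are a subset of the bits
 of x. */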
1941069a 2350
bd1ef757
PB
2351 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2352 by reversing the comparison code if valid. */
2353 if (STORE_FLAG_VALUE == 1
2354 && trueop0 == const1_rtx
2355 && COMPARISON_P (op1)
2356 && (reversed = reversed_comparison (op1, mode)))
2357 return reversed;
2358
2359 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
4bf371ea
RG
2360 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2361 && GET_CODE (op1) == MULT
bd1ef757
PB
2362 && GET_CODE (XEXP (op1, 0)) == NEG)
2363 {
2364 rtx in1, in2;
2365
2366 in1 = XEXP (XEXP (op1, 0), 0);
2367 in2 = XEXP (op1, 1);
2368 return simplify_gen_binary (PLUS, mode,
2369 simplify_gen_binary (MULT, mode,
2370 in1, in2),
2371 op0);
2372 }
2373
2374 /* Canonicalize (minus (neg A) (mult B C)) to
2375 (minus (mult (neg B) C) A). */
4bf371ea
RG
2376 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2377 && GET_CODE (op1) == MULT
bd1ef757
PB
2378 && GET_CODE (op0) == NEG)
2379 {
2380 rtx in1, in2;
2381
2382 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2383 in2 = XEXP (op1, 1);
2384 return simplify_gen_binary (MINUS, mode,
2385 simplify_gen_binary (MULT, mode,
2386 in1, in2),
2387 XEXP (op0, 0));
2388 }
2389
1941069a
PB
2390 /* If one of the operands is a PLUS or a MINUS, see if we can
2391 simplify this by the associative law. This will, for example,
2392 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2393 Don't use the associative law for floating point.
2394 The inaccuracy makes it nonassociative,
2395 and subtle programs can break if operations are associated. */
2396
2397 if (INTEGRAL_MODE_P (mode)
2398 && (plus_minus_operand_p (op0)
2399 || plus_minus_operand_p (op1))
2400 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2401 return tem;
0a67e02c 2402 break;
15e5ad76 2403
0a67e02c
PB
2404 case MULT:
2405 if (trueop1 == constm1_rtx)
2406 return simplify_gen_unary (NEG, mode, op0, mode);
2407
29b40d79
BS
2408 if (GET_CODE (op0) == NEG)
2409 {
2410 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
707f9919
JJ
2411 /* If op1 is a MULT as well and simplify_unary_operation
2412 just moved the NEG to the second operand, simplify_gen_binary
2413 below could, through simplify_associative_operation, move
2414 the NEG around again and recurse endlessly. */
2415 if (temp
2416 && GET_CODE (op1) == MULT
2417 && GET_CODE (temp) == MULT
2418 && XEXP (op1, 0) == XEXP (temp, 0)
2419 && GET_CODE (XEXP (temp, 1)) == NEG
2420 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2421 temp = NULL_RTX;
29b40d79
BS
2422 if (temp)
2423 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2424 }
2425 if (GET_CODE (op1) == NEG)
2426 {
2427 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
707f9919
JJ
2428 /* If op0 is a MULT as well and simplify_unary_operation
2429 just moved the NEG to the second operand, simplify_gen_binary
2430 below could through simplify_associative_operation move
2431 the NEG around again and recurse endlessly. */
2432 if (temp
2433 && GET_CODE (op0) == MULT
2434 && GET_CODE (temp) == MULT
2435 && XEXP (op0, 0) == XEXP (temp, 0)
2436 && GET_CODE (XEXP (temp, 1)) == NEG
2437 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2438 temp = NULL_RTX;
29b40d79
BS
2439 if (temp)
2440 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2441 }
2442
0a67e02c
PB
2443 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2444 x is NaN, since x * 0 is then also NaN. Nor is it valid
2445 when the mode has signed zeros, since multiplying a negative
2446 number by 0 will give -0, not 0. */
2447 if (!HONOR_NANS (mode)
2448 && !HONOR_SIGNED_ZEROS (mode)
2449 && trueop1 == CONST0_RTX (mode)
2450 && ! side_effects_p (op0))
2451 return op1;
2452
2453 /* In IEEE floating point, x*1 is not equivalent to x for
2454 signalling NaNs. */
2455 if (!HONOR_SNANS (mode)
2456 && trueop1 == CONST1_RTX (mode))
2457 return op0;
2458
807e902e
KZ
2459 /* Convert multiply by constant power of two into shift. */
2460 if (CONST_SCALAR_INT_P (trueop1))
2461 {
2462 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2463 if (val >= 0)
2464 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2465 }
fab2f52c 2466
0a67e02c 2467 /* x*2 is x+x and x*(-1) is -x */
48175537 2468 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3d8bf70f 2469 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
50cd60be 2470 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
0a67e02c
PB
2471 && GET_MODE (op0) == mode)
2472 {
34a72c33 2473 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
15e5ad76 2474
34a72c33 2475 if (real_equal (d1, &dconst2))
0a67e02c 2476 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
3e4093b6 2477
1753331b 2478 if (!HONOR_SNANS (mode)
34a72c33 2479 && real_equal (d1, &dconstm1))
0a67e02c
PB
2480 return simplify_gen_unary (NEG, mode, op0, mode);
2481 }
15e5ad76 2482
1753331b
RS
2483 /* Optimize -x * -x as x * x. */
2484 if (FLOAT_MODE_P (mode)
2485 && GET_CODE (op0) == NEG
2486 && GET_CODE (op1) == NEG
2487 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2488 && !side_effects_p (XEXP (op0, 0)))
2489 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2490
2491 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2492 if (SCALAR_FLOAT_MODE_P (mode)
2493 && GET_CODE (op0) == ABS
2494 && GET_CODE (op1) == ABS
2495 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2496 && !side_effects_p (XEXP (op0, 0)))
2497 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2498
0a67e02c
PB
2499 /* Reassociate multiplication, but for floating point MULTs
2500 only when the user specifies unsafe math optimizations. */
2501 if (! FLOAT_MODE_P (mode)
2502 || flag_unsafe_math_optimizations)
2503 {
2504 tem = simplify_associative_operation (code, mode, op0, op1);
2505 if (tem)
2506 return tem;
2507 }
2508 break;
6355b2d5 2509
0a67e02c 2510 case IOR:
a82e045d 2511 if (trueop1 == CONST0_RTX (mode))
0a67e02c 2512 return op0;
e7160b27
JM
2513 if (INTEGRAL_MODE_P (mode)
2514 && trueop1 == CONSTM1_RTX (mode)
2515 && !side_effects_p (op0))
0a67e02c
PB
2516 return op1;
2517 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2518 return op0;
2519 /* A | (~A) -> -1 */
2520 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2521 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2522 && ! side_effects_p (op0)
3f2960d5 2523 && SCALAR_INT_MODE_P (mode))
0a67e02c 2524 return constm1_rtx;
bd1ef757
PB
2525
2526 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
481683e1 2527 if (CONST_INT_P (op1)
46c9550f 2528 && HWI_COMPUTABLE_MODE_P (mode)
e7160b27
JM
2529 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2530 && !side_effects_p (op0))
bd1ef757 2531 return op1;
b8698a0f 2532
49e7a9d4
RS
2533 /* Canonicalize (X & C1) | C2. */
2534 if (GET_CODE (op0) == AND
481683e1
SZ
2535 && CONST_INT_P (trueop1)
2536 && CONST_INT_P (XEXP (op0, 1)))
49e7a9d4
RS
2537 {
2538 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2539 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2540 HOST_WIDE_INT c2 = INTVAL (trueop1);
2541
2542 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2543 if ((c1 & c2) == c1
2544 && !side_effects_p (XEXP (op0, 0)))
2545 return trueop1;
2546
2547 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2548 if (((c1|c2) & mask) == mask)
2549 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2550
2551 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2552 if (((c1 & ~c2) & mask) != (c1 & mask))
2553 {
2554 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2555 gen_int_mode (c1 & ~c2, mode));
2556 return simplify_gen_binary (IOR, mode, tem, op1);
2557 }
2558 }
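 /* For example, (ior (and x (const_int 0x3c)) (const_int 0x0f)):
 C1 & ~C2 == 0x30, so C1 shrinks and the result is
 (ior (and x (const_int 0x30)) (const_int 0x0f)). */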
2559
bd1ef757
PB
2560 /* Convert (A & B) | A to A. */
2561 if (GET_CODE (op0) == AND
2562 && (rtx_equal_p (XEXP (op0, 0), op1)
2563 || rtx_equal_p (XEXP (op0, 1), op1))
2564 && ! side_effects_p (XEXP (op0, 0))
2565 && ! side_effects_p (XEXP (op0, 1)))
2566 return op1;
2567
2568 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2569 mode size to (rotate A CX). */
2570
2571 if (GET_CODE (op1) == ASHIFT
2572 || GET_CODE (op1) == SUBREG)
2573 {
2574 opleft = op1;
2575 opright = op0;
2576 }
2577 else
2578 {
2579 opright = op1;
2580 opleft = op0;
2581 }
2582
2583 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2584 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
481683e1
SZ
2585 && CONST_INT_P (XEXP (opleft, 1))
2586 && CONST_INT_P (XEXP (opright, 1))
bd1ef757 2587 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
5511bc5a 2588 == GET_MODE_PRECISION (mode)))
bd1ef757
PB
2589 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
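 /* For example, in SImode, (ior (ashift a (const_int 24))
 (lshiftrt a (const_int 8))) becomes (rotate a (const_int 24)),
 since the two shift counts sum to the mode precision of 32. */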
2590
2591 /* Same, but for ashift that has been "simplified" to a wider mode
2592 by simplify_shift_const. */
2593
2594 if (GET_CODE (opleft) == SUBREG
2595 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2596 && GET_CODE (opright) == LSHIFTRT
2597 && GET_CODE (XEXP (opright, 0)) == SUBREG
2598 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2599 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2600 && (GET_MODE_SIZE (GET_MODE (opleft))
2601 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2602 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2603 SUBREG_REG (XEXP (opright, 0)))
481683e1
SZ
2604 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2605 && CONST_INT_P (XEXP (opright, 1))
bd1ef757 2606 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
5511bc5a 2607 == GET_MODE_PRECISION (mode)))
bd1ef757 2608 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
01578564 2609 XEXP (SUBREG_REG (opleft), 1));
bd1ef757
PB
2610
2611 /* If we have (ior (and X C1) C2), simplify this by making
2612 C1 as small as possible if C1 actually changes. */
481683e1 2613 if (CONST_INT_P (op1)
46c9550f 2614 && (HWI_COMPUTABLE_MODE_P (mode)
bd1ef757
PB
2615 || INTVAL (op1) > 0)
2616 && GET_CODE (op0) == AND
481683e1
SZ
2617 && CONST_INT_P (XEXP (op0, 1))
2618 && CONST_INT_P (op1)
43c36287 2619 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
69a59f0f
RS
2620 {
2621 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2622 gen_int_mode (UINTVAL (XEXP (op0, 1))
2623 & ~UINTVAL (op1),
2624 mode));
2625 return simplify_gen_binary (IOR, mode, tmp, op1);
2626 }
bd1ef757
PB
2627
2628 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2629 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2630 the PLUS does not affect any of the bits in OP1: then we can do
2631 the IOR as a PLUS and we can associate. This is valid if OP1
2632 can be safely shifted left C bits. */
481683e1 2633 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
bd1ef757 2634 && GET_CODE (XEXP (op0, 0)) == PLUS
481683e1
SZ
2635 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2636 && CONST_INT_P (XEXP (op0, 1))
bd1ef757
PB
2637 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2638 {
2639 int count = INTVAL (XEXP (op0, 1));
2640 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2641
2642 if (mask >> count == INTVAL (trueop1)
046f1eee 2643 && trunc_int_for_mode (mask, mode) == mask
bd1ef757
PB
2644 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2645 return simplify_gen_binary (ASHIFTRT, mode,
0a81f074
RS
2646 plus_constant (mode, XEXP (op0, 0),
2647 mask),
bd1ef757
PB
2648 XEXP (op0, 1));
2649 }
2650
b17c024f
EB
2651 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2652 if (tem)
2653 return tem;
2654
0a67e02c
PB
2655 tem = simplify_associative_operation (code, mode, op0, op1);
2656 if (tem)
2657 return tem;
2658 break;
2659
2660 case XOR:
a82e045d 2661 if (trueop1 == CONST0_RTX (mode))
0a67e02c 2662 return op0;
e7c82a99 2663 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
0a67e02c 2664 return simplify_gen_unary (NOT, mode, op0, mode);
f5d1572a 2665 if (rtx_equal_p (trueop0, trueop1)
0a67e02c
PB
2666 && ! side_effects_p (op0)
2667 && GET_MODE_CLASS (mode) != MODE_CC)
6bd13540 2668 return CONST0_RTX (mode);
0a67e02c
PB
2669
2670 /* Canonicalize XOR of the most significant bit to PLUS. */
33ffb5c5 2671 if (CONST_SCALAR_INT_P (op1)
0a67e02c
PB
2672 && mode_signbit_p (mode, op1))
2673 return simplify_gen_binary (PLUS, mode, op0, op1);
2674 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
33ffb5c5 2675 if (CONST_SCALAR_INT_P (op1)
0a67e02c 2676 && GET_CODE (op0) == PLUS
33ffb5c5 2677 && CONST_SCALAR_INT_P (XEXP (op0, 1))
0a67e02c
PB
2678 && mode_signbit_p (mode, XEXP (op0, 1)))
2679 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2680 simplify_gen_binary (XOR, mode, op1,
2681 XEXP (op0, 1)));
bd1ef757
PB
2682
2683 /* If we are XORing two things that have no bits in common,
2684 convert them into an IOR. This helps to detect rotation encoded
2685 using those methods and possibly other simplifications. */
2686
46c9550f 2687 if (HWI_COMPUTABLE_MODE_P (mode)
bd1ef757
PB
2688 && (nonzero_bits (op0, mode)
2689 & nonzero_bits (op1, mode)) == 0)
2690 return (simplify_gen_binary (IOR, mode, op0, op1));
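 /* For example, (xor (and a (const_int 0xf0))
 (and b (const_int 0x0f))) has disjoint nonzero bits, so it is
 rewritten as the equivalent IOR, which the rotate detection
 in the IOR case can then recognize. */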
2691
2692 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2693 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2694 (NOT y). */
2695 {
2696 int num_negated = 0;
2697
2698 if (GET_CODE (op0) == NOT)
2699 num_negated++, op0 = XEXP (op0, 0);
2700 if (GET_CODE (op1) == NOT)
2701 num_negated++, op1 = XEXP (op1, 0);
2702
2703 if (num_negated == 2)
2704 return simplify_gen_binary (XOR, mode, op0, op1);
2705 else if (num_negated == 1)
2706 return simplify_gen_unary (NOT, mode,
2707 simplify_gen_binary (XOR, mode, op0, op1),
2708 mode);
2709 }
2710
2711 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2712 correspond to a machine insn or result in further simplifications
2713 if B is a constant. */
2714
2715 if (GET_CODE (op0) == AND
2716 && rtx_equal_p (XEXP (op0, 1), op1)
2717 && ! side_effects_p (op1))
2718 return simplify_gen_binary (AND, mode,
2719 simplify_gen_unary (NOT, mode,
2720 XEXP (op0, 0), mode),
2721 op1);
2722
2723 else if (GET_CODE (op0) == AND
2724 && rtx_equal_p (XEXP (op0, 0), op1)
2725 && ! side_effects_p (op1))
2726 return simplify_gen_binary (AND, mode,
2727 simplify_gen_unary (NOT, mode,
2728 XEXP (op0, 1), mode),
2729 op1);
2730
af9f5d95
AO
2731 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2732 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2733 out bits inverted twice and not set by C. Similarly, given
2734 (xor (and (xor A B) C) D), simplify without inverting C in
2735 the xor operand: (xor (and A C) (B&C)^D).
2736 */
2737 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2738 && GET_CODE (XEXP (op0, 0)) == XOR
2739 && CONST_INT_P (op1)
2740 && CONST_INT_P (XEXP (op0, 1))
2741 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2742 {
2743 enum rtx_code op = GET_CODE (op0);
2744 rtx a = XEXP (XEXP (op0, 0), 0);
2745 rtx b = XEXP (XEXP (op0, 0), 1);
2746 rtx c = XEXP (op0, 1);
2747 rtx d = op1;
2748 HOST_WIDE_INT bval = INTVAL (b);
2749 HOST_WIDE_INT cval = INTVAL (c);
2750 HOST_WIDE_INT dval = INTVAL (d);
2751 HOST_WIDE_INT xcval;
2752
2753 if (op == IOR)
af9f5d95 2754 xcval = ~cval;
d26ac279
JJ
2755 else
2756 xcval = cval;
af9f5d95
AO
2757
2758 return simplify_gen_binary (XOR, mode,
2759 simplify_gen_binary (op, mode, a, c),
2760 gen_int_mode ((bval & xcval) ^ dval,
2761 mode));
2762 }
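 /* For example, (xor (ior (xor a (const_int 6)) (const_int 3))
 (const_int 5)): B == 6, C == 3, D == 5, so (B&~C)^D == 4^5 == 1
 and the result is (xor (ior a (const_int 3)) (const_int 1)). */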
2763
54833ec0
CLT
2764 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2765 we can transform like this:
2766 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2767 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2768 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2769 Attempt a few simplifications when B and C are both constants. */
2770 if (GET_CODE (op0) == AND
2771 && CONST_INT_P (op1)
2772 && CONST_INT_P (XEXP (op0, 1)))
2773 {
2774 rtx a = XEXP (op0, 0);
2775 rtx b = XEXP (op0, 1);
2776 rtx c = op1;
2777 HOST_WIDE_INT bval = INTVAL (b);
2778 HOST_WIDE_INT cval = INTVAL (c);
2779
a720f0ef
AO
2780 /* Instead of computing ~A&C, we compute its negated value,
2781 A|~C (note ~A&C == ~(A|~C)). If that yields -1, ~A&C is zero,
2782 so we can optimize for sure. If it does not simplify, we still try
2783 to compute ~A&C below, but since that always allocates
2784 RTL, we don't try that before committing to returning a
2785 simplified expression. */
2786 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2787 GEN_INT (~cval));
2788
54833ec0
CLT
2789 if ((~cval & bval) == 0)
2790 {
a720f0ef
AO
2791 rtx na_c = NULL_RTX;
2792 if (n_na_c)
2793 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2794 else
2795 {
2796 /* If ~A does not simplify, don't bother: we don't
2797 want to simplify 2 operations into 3, and if na_c
2798 were to simplify with na, n_na_c would have
2799 simplified as well. */
2800 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2801 if (na)
2802 na_c = simplify_gen_binary (AND, mode, na, c);
2803 }
2804
54833ec0
CLT
2805 /* Try to simplify ~A&C | ~B&C. */
2806 if (na_c != NULL_RTX)
2807 return simplify_gen_binary (IOR, mode, na_c,
69a59f0f 2808 gen_int_mode (~bval & cval, mode));
54833ec0
CLT
2809 }
2810 else
2811 {
2812 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
a720f0ef 2813 if (n_na_c == CONSTM1_RTX (mode))
54833ec0
CLT
2814 {
2815 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
69a59f0f
RS
2816 gen_int_mode (~cval & bval,
2817 mode));
54833ec0 2818 return simplify_gen_binary (IOR, mode, a_nc_b,
69a59f0f
RS
2819 gen_int_mode (~bval & cval,
2820 mode));
54833ec0
CLT
2821 }
2822 }
2823 }
2824
bd1ef757
PB
2825 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2826 comparison if STORE_FLAG_VALUE is 1. */
2827 if (STORE_FLAG_VALUE == 1
2828 && trueop1 == const1_rtx
2829 && COMPARISON_P (op0)
2830 && (reversed = reversed_comparison (op0, mode)))
2831 return reversed;
2832
2833 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2834 is (lt foo (const_int 0)), so we can perform the above
2835 simplification if STORE_FLAG_VALUE is 1. */
2836
2837 if (STORE_FLAG_VALUE == 1
2838 && trueop1 == const1_rtx
2839 && GET_CODE (op0) == LSHIFTRT
481683e1 2840 && CONST_INT_P (XEXP (op0, 1))
5511bc5a 2841 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
bd1ef757
PB
2842 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2843
2844 /* (xor (comparison foo bar) (const_int sign-bit))
2845 when STORE_FLAG_VALUE is the sign bit. */
2d0c270f 2846 if (val_signbit_p (mode, STORE_FLAG_VALUE)
bd1ef757
PB
2847 && trueop1 == const_true_rtx
2848 && COMPARISON_P (op0)
2849 && (reversed = reversed_comparison (op0, mode)))
2850 return reversed;
2851
b17c024f
EB
2852 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2853 if (tem)
2854 return tem;
2855
0a67e02c
PB
2856 tem = simplify_associative_operation (code, mode, op0, op1);
2857 if (tem)
2858 return tem;
2859 break;
2860
2861 case AND:
3f2960d5
RH
2862 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2863 return trueop1;
e7c82a99
JJ
2864 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2865 return op0;
46c9550f 2866 if (HWI_COMPUTABLE_MODE_P (mode))
dc5b3407
ZD
2867 {
2868 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
f5a17c43 2869 HOST_WIDE_INT nzop1;
481683e1 2870 if (CONST_INT_P (trueop1))
f5a17c43
BS
2871 {
2872 HOST_WIDE_INT val1 = INTVAL (trueop1);
2873 /* If we are turning off bits already known off in OP0, we need
2874 not do an AND. */
2875 if ((nzop0 & ~val1) == 0)
2876 return op0;
2877 }
2878 nzop1 = nonzero_bits (trueop1, mode);
dc5b3407 2879 /* If we are clearing all the nonzero bits, the result is zero. */
f5a17c43
BS
2880 if ((nzop1 & nzop0) == 0
2881 && !side_effects_p (op0) && !side_effects_p (op1))
dc5b3407
ZD
2882 return CONST0_RTX (mode);
2883 }
f5d1572a 2884 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
0a67e02c
PB
2885 && GET_MODE_CLASS (mode) != MODE_CC)
2886 return op0;
2887 /* A & (~A) -> 0 */
2888 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2889 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2890 && ! side_effects_p (op0)
2891 && GET_MODE_CLASS (mode) != MODE_CC)
3f2960d5 2892 return CONST0_RTX (mode);
0a67e02c
PB
2893
2894 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2895 there are no nonzero bits of C outside of X's mode. */
2896 if ((GET_CODE (op0) == SIGN_EXTEND
2897 || GET_CODE (op0) == ZERO_EXTEND)
481683e1 2898 && CONST_INT_P (trueop1)
46c9550f 2899 && HWI_COMPUTABLE_MODE_P (mode)
0a67e02c 2900 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
43c36287 2901 & UINTVAL (trueop1)) == 0)
0a67e02c 2902 {
ef4bddc2 2903 machine_mode imode = GET_MODE (XEXP (op0, 0));
0a67e02c
PB
2904 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2905 gen_int_mode (INTVAL (trueop1),
2906 imode));
2907 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2908 }
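 /* For example, (and (sign_extend:SI (reg:QI x)) (const_int 0x7f))
 becomes (zero_extend:SI (and:QI x (const_int 0x7f))), since
 0x7f has no nonzero bits outside QImode. */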
2909
fcaf7e12
AN
2910 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2911 we might be able to further simplify the AND with X and potentially
2912 remove the truncation altogether. */
2913 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2914 {
2915 rtx x = XEXP (op0, 0);
ef4bddc2 2916 machine_mode xmode = GET_MODE (x);
fcaf7e12
AN
2917 tem = simplify_gen_binary (AND, xmode, x,
2918 gen_int_mode (INTVAL (trueop1), xmode));
2919 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2920 }
2921
49e7a9d4
RS
2922 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2923 if (GET_CODE (op0) == IOR
481683e1
SZ
2924 && CONST_INT_P (trueop1)
2925 && CONST_INT_P (XEXP (op0, 1)))
49e7a9d4
RS
2926 {
2927 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2928 return simplify_gen_binary (IOR, mode,
2929 simplify_gen_binary (AND, mode,
2930 XEXP (op0, 0), op1),
2931 gen_int_mode (tmp, mode));
2932 }
2933
bd1ef757
PB
2934 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2935 insn (and may simplify more). */
2936 if (GET_CODE (op0) == XOR
2937 && rtx_equal_p (XEXP (op0, 0), op1)
2938 && ! side_effects_p (op1))
2939 return simplify_gen_binary (AND, mode,
2940 simplify_gen_unary (NOT, mode,
2941 XEXP (op0, 1), mode),
2942 op1);
2943
2944 if (GET_CODE (op0) == XOR
2945 && rtx_equal_p (XEXP (op0, 1), op1)
2946 && ! side_effects_p (op1))
2947 return simplify_gen_binary (AND, mode,
2948 simplify_gen_unary (NOT, mode,
2949 XEXP (op0, 0), mode),
2950 op1);
2951
2952 /* Similarly for (~(A ^ B)) & A. */
2953 if (GET_CODE (op0) == NOT
2954 && GET_CODE (XEXP (op0, 0)) == XOR
2955 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2956 && ! side_effects_p (op1))
2957 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2958
2959 if (GET_CODE (op0) == NOT
2960 && GET_CODE (XEXP (op0, 0)) == XOR
2961 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2962 && ! side_effects_p (op1))
2963 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2964
2965 /* Convert (A | B) & A to A. */
2966 if (GET_CODE (op0) == IOR
2967 && (rtx_equal_p (XEXP (op0, 0), op1)
2968 || rtx_equal_p (XEXP (op0, 1), op1))
2969 && ! side_effects_p (XEXP (op0, 0))
2970 && ! side_effects_p (XEXP (op0, 1)))
2971 return op1;
2972
0a67e02c
PB
2973 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2974 ((A & N) + B) & M -> (A + B) & M
2975 Similarly if (N & M) == 0,
2976 ((A | N) + B) & M -> (A + B) & M
dc5b3407
ZD
2977 and for - instead of + and/or ^ instead of |.
2978 Also, if (N & M) == 0, then
2979 (A +- N) & M -> A & M. */
481683e1 2980 if (CONST_INT_P (trueop1)
46c9550f 2981 && HWI_COMPUTABLE_MODE_P (mode)
43c36287
EB
2982 && ~UINTVAL (trueop1)
2983 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
0a67e02c
PB
2984 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2985 {
2986 rtx pmop[2];
2987 int which;
2988
2989 pmop[0] = XEXP (op0, 0);
2990 pmop[1] = XEXP (op0, 1);
2991
481683e1 2992 if (CONST_INT_P (pmop[1])
43c36287 2993 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
dc5b3407
ZD
2994 return simplify_gen_binary (AND, mode, pmop[0], op1);
2995
0a67e02c
PB
2996 for (which = 0; which < 2; which++)
2997 {
2998 tem = pmop[which];
2999 switch (GET_CODE (tem))
6355b2d5 3000 {
0a67e02c 3001 case AND:
481683e1 3002 if (CONST_INT_P (XEXP (tem, 1))
43c36287
EB
3003 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3004 == UINTVAL (trueop1))
0a67e02c 3005 pmop[which] = XEXP (tem, 0);
6355b2d5 3006 break;
0a67e02c
PB
3007 case IOR:
3008 case XOR:
481683e1 3009 if (CONST_INT_P (XEXP (tem, 1))
43c36287 3010 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
0a67e02c 3011 pmop[which] = XEXP (tem, 0);
6355b2d5 3012 break;
6355b2d5
JJ
3013 default:
3014 break;
3015 }
3016 }
3017
0a67e02c
PB
3018 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3019 {
3020 tem = simplify_gen_binary (GET_CODE (op0), mode,
3021 pmop[0], pmop[1]);
3022 return simplify_gen_binary (code, mode, tem, op1);
3023 }
3024 }
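 /* For example, with M == 0xff:
 (and (plus (and a (const_int 0xff)) b) (const_int 0xff))
 drops the inner mask, giving (and (plus a b) (const_int 0xff)):
 bits above M cannot influence the masked low bits of the sum. */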
f79db4f6
AP
3025
3026 /* (and X (ior (not X) Y)) -> (and X Y) */
3027 if (GET_CODE (op1) == IOR
3028 && GET_CODE (XEXP (op1, 0)) == NOT
31dd2a86 3029 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
f79db4f6
AP
3030 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3031
3032 /* (and (ior (not X) Y) X) -> (and X Y) */
3033 if (GET_CODE (op0) == IOR
3034 && GET_CODE (XEXP (op0, 0)) == NOT
31dd2a86 3035 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
f79db4f6
AP
3036 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3037
31dd2a86
SB
3038 /* (and X (ior Y (not X))) -> (and X Y) */
3039 if (GET_CODE (op1) == IOR
3040 && GET_CODE (XEXP (op1, 1)) == NOT
3041 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3042 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3043
3044 /* (and (ior Y (not X)) X) -> (and X Y) */
3045 if (GET_CODE (op0) == IOR
3046 && GET_CODE (XEXP (op0, 1)) == NOT
3047 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3048 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3049
b17c024f
EB
3050 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3051 if (tem)
3052 return tem;
3053
0a67e02c
PB
3054 tem = simplify_associative_operation (code, mode, op0, op1);
3055 if (tem)
3056 return tem;
3057 break;
762297d9 3058
0a67e02c
PB
3059 case UDIV:
3060 /* 0/x is 0 (or x&0 if x has side-effects). */
3f2960d5
RH
3061 if (trueop0 == CONST0_RTX (mode))
3062 {
3063 if (side_effects_p (op1))
3064 return simplify_gen_binary (AND, mode, op1, trueop0);
3065 return trueop0;
3066 }
3067 /* x/1 is x. */
3068 if (trueop1 == CONST1_RTX (mode))
76bd29f6
JJ
3069 {
3070 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3071 if (tem)
3072 return tem;
3073 }
3f2960d5 3074 /* Convert divide by power of two into shift. */
481683e1 3075 if (CONST_INT_P (trueop1)
43c36287 3076 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3f2960d5
RH
3077 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3078 break;
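 /* For example, (udiv x (const_int 8)) becomes
 (lshiftrt x (const_int 3)). */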
d284eb28 3079
0a67e02c
PB
3080 case DIV:
3081 /* Handle floating point and integers separately. */
3d8bf70f 3082 if (SCALAR_FLOAT_MODE_P (mode))
0a67e02c
PB
3083 {
3084 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3085 safe for modes with NaNs, since 0.0 / 0.0 will then be
3086 NaN rather than 0.0. Nor is it safe for modes with signed
3087 zeros, since dividing 0 by a negative number gives -0.0 */
3088 if (trueop0 == CONST0_RTX (mode)
3089 && !HONOR_NANS (mode)
3090 && !HONOR_SIGNED_ZEROS (mode)
3091 && ! side_effects_p (op1))
3092 return op0;
3093 /* x/1.0 is x. */
3094 if (trueop1 == CONST1_RTX (mode)
3095 && !HONOR_SNANS (mode))
3096 return op0;
0cedb36c 3097
48175537 3098 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
0a67e02c
PB
3099 && trueop1 != CONST0_RTX (mode))
3100 {
34a72c33 3101 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
0cedb36c 3102
0a67e02c 3103 /* x/-1.0 is -x. */
34a72c33 3104 if (real_equal (d1, &dconstm1)
0a67e02c
PB
3105 && !HONOR_SNANS (mode))
3106 return simplify_gen_unary (NEG, mode, op0, mode);
0cedb36c 3107
0a67e02c 3108 /* Change FP division by a constant into multiplication.
a1a82611
RE
3109 Only do this with -freciprocal-math. */
              if (flag_reciprocal_math
                  && !real_equal (d1, &dconst0))
                {
                  REAL_VALUE_TYPE d;
                  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
                  tem = const_double_from_real_value (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
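      /* For instance, (umod:SI x (const_int 8)) becomes
         (and:SI x (const_int 7)): the remainder of an unsigned division
         by 2**N is just the low N bits.  */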
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (INTVAL (op1) - 1, mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation; if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use the other direction of rotate with a
         1 .. bitsize / 2 - 1 amount instead.  */
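      /* For instance, in a 32-bit mode, (rotate:SI x (const_int 20))
         becomes (rotatert:SI x (const_int 12)), and
         (rotatert:SI x (const_int 16)) becomes (rotate:SI x (const_int 16)),
         so the canonical rotate amount never exceeds half the
         precision.  */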
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
                       GET_MODE_PRECISION (mode) - 1))
        return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                    mode, op0,
                                    GEN_INT (GET_MODE_PRECISION (mode)
                                             - INTVAL (trueop1)));
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
      /* Given:
         scalar modes M1, M2
         scalar constants c1, c2
         size (M2) > size (M1)
         c1 == size (M2) - size (M1)
         optimize:
         (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
                      <low_part>)
                      (const_int <c2>))
         to:
         (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
          <low_part>).  */
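      /* For instance, with M2 = DImode and M1 = SImode (so c1 = 32),
         (ashiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI X) (const_int 32))
                      <low_part>)
          (const_int 5))
         becomes
         (subreg:SI (ashiftrt:DI (reg:DI X) (const_int 37)) <low_part>),
         merging the two shifts into a single wide arithmetic shift.  */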
      if (code == ASHIFTRT
          && !VECTOR_MODE_P (mode)
          && SUBREG_P (op0)
          && CONST_INT_P (op1)
          && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
          && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
          && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
          && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
              > GET_MODE_BITSIZE (mode))
          && (INTVAL (XEXP (SUBREG_REG (op0), 1))
              == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
                  - GET_MODE_BITSIZE (mode)))
          && subreg_lowpart_p (op0))
        {
          rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
                             + INTVAL (op1));
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
          tmp = simplify_gen_binary (ASHIFTRT,
                                     GET_MODE (SUBREG_REG (op0)),
                                     XEXP (SUBREG_REG (op0), 0),
                                     tmp);
          return lowpart_subreg (mode, tmp, inner_mode);
        }
    canonicalize_shift:
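      /* On targets where SHIFT_COUNT_TRUNCATED is nonzero, the shift
         count is reduced modulo the precision; e.g. in a 32-bit mode a
         shift by 33 is canonicalized to a shift by 1.  */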
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
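      /* If CLZ of zero is defined as the precision (say 32 for a 32-bit
         mode), the shifted CLZ result is nonzero only for a zero input:
         (lshiftrt:SI (clz:SI x) (const_int 5)) is 1 exactly when x == 0,
         hence (eq:SI x (const_int 0)) when STORE_FLAG_VALUE is 1.  */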
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT) width)
        {
          machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;

    case SMIN:
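      /* (smin x C) is C itself when C is the sign-bit constant, i.e. the
         most negative representable value: nothing compares less than it.
         The SMAX case below is the mirror image with the most positive
         value.  */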
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0,
                                     INTVAL (XVECEXP (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with an optional nested VEC_CONCAT expression).  Some targets
             (i386) extract a scalar element from a vector using a chain of
             nested VEC_SELECT expressions.  When the input operand is a
             memory operand, this operation can be simplified to a simple
             scalar load from an offset memory address.  */
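          /* For instance,
             (vec_select:SF (vec_select:V2SF (reg:V4SF r) (parallel [2 3]))
                            (parallel [1]))
             selects element 1 of the inner selection, i.e. element 3 of R,
             and is rewritten as
             (vec_select:SF (reg:V4SF r) (parallel [3])).  */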
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_UNIT_SIZE (opmode);
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select the element pointed to by the nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when the nested VEC_SELECT wraps a
                 VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out the number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_UNIT_SIZE (mode00);
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_UNIT_SIZE (mode01);
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select the correct operand of the VEC_CONCAT
                     and adjust the selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }
          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_UNIT_SIZE (mode);
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }

          /* Recognize the identity.  */
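          /* E.g. (vec_select:V4SI x (parallel [0 1 2 3])) with X in
             V4SImode selects every element in order and is just X.  */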
          if (GET_MODE (trueop0) == mode)
            {
              bool maybe_ident = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (j) || INTVAL (j) != i)
                    {
                      maybe_ident = false;
                      break;
                    }
                }
              if (maybe_ident)
                return trueop0;
            }

          /* If we build {a,b} then permute it, build the result directly.  */
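          /* E.g. (vec_select:V2DF (vec_concat:V4DF (vec_concat:V2DF a b)
                                                    (vec_concat:V2DF c d))
                                   (parallel [3 0]))
             becomes (vec_concat:V2DF d a).  */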
          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 0)) == mode
              && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 1)) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 4 && i1 < 4);
              subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
              subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_MODE (trueop0) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 2 && i1 < 2);
              subop0 = XEXP (trueop0, i0);
              subop1 = XEXP (trueop0, i1);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          /* If we select one half of a vec_concat, return that.  */
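          /* E.g. (vec_select:V2SI (vec_concat:V4SI (reg:V2SI x)
                                                    (reg:V2SI y))
                                   (parallel [2 3]))
             is (reg:V2SI y), the second half of the concatenation.  */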
          if (GET_CODE (trueop0) == VEC_CONCAT
              && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
            {
              rtx subop0 = XEXP (trueop0, 0);
              rtx subop1 = XEXP (trueop0, 1);
              machine_mode mode0 = GET_MODE (subop0);
              machine_mode mode1 = GET_MODE (subop1);
              int li = GET_MODE_UNIT_SIZE (mode0);
              int l0 = GET_MODE_SIZE (mode0) / li;
              int l1 = GET_MODE_SIZE (mode1) / li;
              int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
                {
                  bool success = true;
                  for (int i = 1; i < l0; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop0;
                }
              if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
                {
                  bool success = true;
                  for (int i = 1; i < l1; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop1;
                }
            }
        }

      if (XVECLEN (trueop1, 0) == 1
          && CONST_INT_P (XVECEXP (trueop1, 0, 0))
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size;

              if (CONST_INT_P (XEXP (vec, 0)))
                {
                  /* A vec_concat of two const_ints doesn't make sense with
                     respect to modes.  */
                  if (CONST_INT_P (XEXP (vec, 1)))
                    return 0;

                  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
                             - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
                }
              else
                vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      /* If we select elements in a vec_merge that all come from the same
         operand, select from that operand directly.  */
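      /* E.g. with four elements and a merge mask of (const_int 3),
         elements 0 and 1 come from the first merge operand, so
         (vec_select (vec_merge a b (const_int 3)) (parallel [0 1]))
         reads only A and becomes (vec_select a (parallel [0 1])).  */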
      if (GET_CODE (op0) == VEC_MERGE)
        {
          rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
          if (CONST_INT_P (trueop02))
            {
              unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
              bool all_operand0 = true;
              bool all_operand1 = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (sel & (1 << UINTVAL (j)))
                    all_operand1 = false;
                  else
                    all_operand0 = false;
                }
              if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 0), op1);
              if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 1), op1);
            }
        }

      /* If we have two nested selects that are inverses of each
         other, replace them with the source operand.  */
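      /* E.g. (vec_select:V4SI (vec_select:V4SI x (parallel [1 0 3 2]))
                               (parallel [1 0 3 2]))
         applies the same swap twice and is just X.  */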
      if (GET_CODE (trueop0) == VEC_SELECT
          && GET_MODE (XEXP (trueop0, 0)) == mode)
        {
          rtx op0_subop1 = XEXP (trueop0, 1);
          gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

          /* Apply the outer ordering vector to the inner one.  (The inner
             ordering vector is expressly permitted to be of a different
             length than the outer one.)  If the result is { 0, 1, ..., n-1 }
             then the two VEC_SELECTs cancel.  */
          for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
            {
              rtx x = XVECEXP (trueop1, 0, i);
              if (!CONST_INT_P (x))
                return 0;
              rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
              if (!CONST_INT_P (y) || i != INTVAL (y))
                return 0;
            }
          return XEXP (trueop0, 0);
        }

      return 0;
    case VEC_CONCAT:
      {
        machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                 ? GET_MODE (trueop0)
                                 : GET_MODE_INNER (mode));
        machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                 ? GET_MODE (trueop1)
                                 : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_SCALAR_INT_P (trueop0)
             || CONST_DOUBLE_AS_FLOAT_P (trueop0))
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_SCALAR_INT_P (trueop1)
                || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
          {
            int elt_size = GET_MODE_UNIT_SIZE (mode);
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }

        /* Try to merge two VEC_SELECTs from the same vector into a single
           one.  Restrict the transformation to avoid generating a
           VEC_SELECT with a mode unrelated to its operand.  */
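        /* E.g. (vec_concat:V4SI
                  (vec_select:V2SI (reg:V4SI x) (parallel [0 1]))
                  (vec_select:V2SI (reg:V4SI x) (parallel [3 2])))
           becomes (vec_select:V4SI (reg:V4SI x) (parallel [0 1 3 2])).  */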
        if (GET_CODE (trueop0) == VEC_SELECT
            && GET_CODE (trueop1) == VEC_SELECT
            && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
            && GET_MODE (XEXP (trueop0, 0)) == mode)
          {
            rtx par0 = XEXP (trueop0, 1);
            rtx par1 = XEXP (trueop1, 1);
            int len0 = XVECLEN (par0, 0);
            int len1 = XVECLEN (par1, 0);
            rtvec vec = rtvec_alloc (len0 + len1);
            for (int i = 0; i < len0; i++)
              RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
            for (int i = 0; i < len1; i++)
              RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
            return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
                                        gen_rtx_PARALLEL (VOIDmode, vec));
          }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}

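/* Try to fold the binary operation CODE with constant operands OP0 and
   OP1 in MODE to a constant: elementwise folding of CONST_VECTORs,
   VEC_CONCAT of constants, arithmetic and bitwise logic on
   floating-point constants, and wide-int arithmetic on integer
   constants.  Returns NULL_RTX when the operation cannot be folded.  */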
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
                                 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
          || GET_CODE (op0) == CONST_FIXED
          || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
          || CONST_DOUBLE_AS_FLOAT_P (op1)
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return const_double_from_real_value (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          real_convert (&f0, mode, CONST_DOUBLE_REAL_VALUE (op0));
          real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (op1));

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && real_equal (&f1, &dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && real_equal (&f0, &dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if the
             result has overflowed and flag_trapping_math is set.  */
          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */
          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return NULL_RTX;

          return const_double_from_real_value (result, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE, but a lot of
         upstream callers expect that this function never fails to
         simplify something, so if you added this to the test above,
         the code would die later anyway.  If this assert happens, you
         just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
        {
        case MINUS:
          result = wi::sub (pop0, pop1);
          break;

        case PLUS:
          result = wi::add (pop0, pop1);
          break;

        case MULT:
          result = wi::mul (pop0, pop1);
          break;

        case DIV:
          result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case MOD:
          result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case UDIV:
          result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case UMOD:
          result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case AND:
          result = wi::bit_and (pop0, pop1);
          break;

        case IOR:
          result = wi::bit_or (pop0, pop1);
          break;

        case XOR:
          result = wi::bit_xor (pop0, pop1);
          break;

        case SMIN:
          result = wi::smin (pop0, pop1);
          break;

        case SMAX:
          result = wi::smax (pop0, pop1);
          break;

        case UMIN:
          result = wi::umin (pop0, pop1);
          break;

        case UMAX:
          result = wi::umax (pop0, pop1);
          break;

        case LSHIFTRT:
        case ASHIFTRT:
        case ASHIFT:
          {
            wide_int wop1 = pop1;
            if (SHIFT_COUNT_TRUNCATED)
              wop1 = wi::umod_trunc (wop1, width);
            else if (wi::geu_p (wop1, width))
              return NULL_RTX;

            switch (code)
              {
              case LSHIFTRT:
                result = wi::lrshift (pop0, wop1);
                break;

              case ASHIFTRT:
                result = wi::arshift (pop0, wop1);
                break;

              case ASHIFT:
                result = wi::lshift (pop0, wop1);
                break;

              default:
                gcc_unreachable ();
              }
            break;
          }
        case ROTATE:
        case ROTATERT:
          {
            if (wi::neg_p (pop1))
              return NULL_RTX;

            switch (code)
              {
              case ROTATE:
                result = wi::lrotate (pop0, pop1);
                break;

              case ROTATERT:
                result = wi::rrotate (pop0, pop1);
                break;

              default:
                gcc_unreachable ();
              }
            break;
          }
        default:
          return NULL_RTX;
        }
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}


/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}

/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */
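/* For example, (minus:SI (plus:SI x (const_int 3)) (minus:SI x y))
   expands to the operand list {+x, +3, -x, +y}; the X terms cancel in
   the pairwise simplification loop, and the remaining operands are
   rebuilt as (plus:SI y (const_int 3)).  */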

static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == ARRAY_SIZE (ops))
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              changed = 1;
              /* If this operand was negated then we will potentially
                 canonicalize the expression.  Similarly if we don't
                 place the operands adjacent we're re-ordering the
                 expression and thus might be performing a
                 canonicalization.  Ignore register re-ordering.
                 ??? It might be better to shuffle the ops array here,
                 but then (plus (plus (A, B), plus (C, D))) wouldn't
                 be seen as non-canonical.  */
              if (this_neg
                  || (i != n_ops - 2
                      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
                canonicalized = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              canonicalized = 1;
              break;

            case CONST:
              if (n_ops != ARRAY_SIZE (ops)
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != ARRAY_SIZE (ops))
                {
                  ops[n_ops].op = CONSTM1_RTX (mode);
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case CONST_INT:
              n_constants++;
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          int cmp;

          j = i - 1;
          cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
          if (cmp <= 0)
            continue;
          /* Just swapping registers doesn't count as canonicalization.  */
          if (cmp != 1)
            canonicalized = 1;

          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j--
                 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
          ops[j + 1] = save;
        }

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      std::swap (lhs, rhs);
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  std::swap (lhs, rhs);

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode, tem_lhs,
                                                     tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                if (tem)
                  {
                    /* Reject "simplifications" that just wrap the two
                       arguments in a CONST.  Failure to do so can result
                       in infinite recursion with simplify_binary_operation
                       when it calls us to simplify CONST operations.
                       Also, if we find such a simplification, don't try
                       any more combinations with this rhs:  We must have
                       something like symbol+offset, ie. one of the
                       trivial CONST expressions we handle later.  */
                    if (GET_CODE (tem) == CONST
                        && GET_CODE (XEXP (tem, 0)) == ncode
                        && XEXP (XEXP (tem, 0), 0) == lhs
                        && XEXP (XEXP (tem, 0), 1) == rhs)
                      break;
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (CONST_INT_P (tem) && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }

      if (!changed)
        break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }

  /* If nothing changed, fail.  */
  if (!canonicalized)
    return NULL_RTX;

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
                                         INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
                               machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return const_double_from_real_value (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
                                 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
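  /* E.g. the unsigned overflow test (ltu:SI (plus:SI a (const_int 4))
     (const_int 4)) becomes (geu:SI a (const_int -4)): A + 4 wraps
     below 4 exactly when A is at least the negation of the constant.  */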
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
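  /* E.g. (eq:SI (plus:SI x (const_int 3)) (const_int 10)) becomes
     (eq:SI x (const_int 7)) by moving the constant across the
     comparison.  */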
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
         simplification case between:
            A + B == C  <--->  C - B == A,
         where A, B, and C are all constants with non-simplifiable
         expressions, usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
          && CONSTANT_P (x)
          && rtx_equal_p (c, XEXP (tem, 1)))
        return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
                                    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
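  /* X & Y == X exactly when X has no bits set outside Y, i.e. when
     (X & ~Y) == 0; e.g. (eq (and x y) x) becomes
     (eq (and (not y) x) (const_int 0)).  */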
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
                                      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
                                      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_unary (BSWAP, cmp_mode,
                                                        op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}

enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};


/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */
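/* For instance, KNOWN_RESULTS == (CMP_LT | CMP_GTU) describes operands
   such as -1 and 0 that compare less as signed values but greater as
   unsigned values; GE then folds to const0_rtx while GEU folds to
   const_true_rtx.  */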

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}

/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons, as otherwise we may
     lose or introduce overflow, which we cannot disregard as undefined
     behavior because we do not know the signedness of the operation on
     either the left or the right hand side of the comparison.  */
0cedb36c 4878
e0d0c193
RG
4879 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4880 && (code == EQ || code == NE)
481683e1
SZ
4881 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4882 && (REG_P (op1) || CONST_INT_P (trueop1)))
0cedb36c 4883 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
e0d0c193
RG
4884 /* We cannot do this if tem is a nonzero address. */
4885 && ! nonzero_address_p (tem))
7ce3e360
RS
4886 return simplify_const_relational_operation (signed_condition (code),
4887 mode, tem, const0_rtx);
0cedb36c 4888
bdbb0460 4889 if (! HONOR_NANS (mode) && code == ORDERED)
1f36a2dd
JH
4890 return const_true_rtx;
4891
bdbb0460 4892 if (! HONOR_NANS (mode) && code == UNORDERED)
1f36a2dd
JH
4893 return const0_rtx;
4894
71925bc0 4895 /* For modes without NaNs, if the two operands are equal, we know the
39641489
PB
4896 result except if they have side-effects. Even with NaNs we know
4897 the result of unordered comparisons and, if signaling NaNs are
4898 irrelevant, also the result of LT/GT/LTGT. */
1b457aa4 4899 if ((! HONOR_NANS (trueop0)
39641489
PB
4900 || code == UNEQ || code == UNLE || code == UNGE
4901 || ((code == LT || code == GT || code == LTGT)
3d3dbadd 4902 && ! HONOR_SNANS (trueop0)))
8821d091
EB
4903 && rtx_equal_p (trueop0, trueop1)
4904 && ! side_effects_p (trueop0))
a567207e 4905 return comparison_result (code, CMP_EQ);
0cedb36c
JL
4906
4907 /* If the operands are floating-point constants, see if we can fold
4908 the result. */
48175537
KZ
4909 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4910 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
39641489 4911 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
0cedb36c 4912 {
34a72c33
RS
4913 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4914 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
90a74703 4915
1eeeb6a4 4916 /* Comparisons are unordered iff at least one of the values is NaN. */
34a72c33 4917 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
90a74703
JH
4918 switch (code)
4919 {
4920 case UNEQ:
4921 case UNLT:
4922 case UNGT:
4923 case UNLE:
4924 case UNGE:
4925 case NE:
4926 case UNORDERED:
4927 return const_true_rtx;
4928 case EQ:
4929 case LT:
4930 case GT:
4931 case LE:
4932 case GE:
4933 case LTGT:
4934 case ORDERED:
4935 return const0_rtx;
4936 default:
4937 return 0;
4938 }
0cedb36c 4939
39641489 4940 return comparison_result (code,
34a72c33
RS
4941 (real_equal (d0, d1) ? CMP_EQ :
4942 real_less (d0, d1) ? CMP_LT : CMP_GT));
0cedb36c 4943 }
0cedb36c
JL
4944
4945 /* Otherwise, see if the operands are both integers. */
39641489 4946 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
807e902e 4947 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
0cedb36c 4948 {
807e902e
KZ
4949 /* It would be nice if we really had a mode here. However, the
4950 largest int representable on the target is as good as
4951 infinite. */
ef4bddc2 4952 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
807e902e
KZ
4953 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4954 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4955
4956 if (wi::eq_p (ptrueop0, ptrueop1))
a567207e 4957 return comparison_result (code, CMP_EQ);
39641489
PB
4958 else
4959 {
807e902e
KZ
4960 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4961 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
a567207e 4962 return comparison_result (code, cr);
39641489 4963 }
0cedb36c
JL
4964 }
4965
39641489 4966 /* Optimize comparisons with upper and lower bounds. */
46c9550f 4967 if (HWI_COMPUTABLE_MODE_P (mode)
2d87c1d4
RL
4968 && CONST_INT_P (trueop1)
4969 && !side_effects_p (trueop0))
0cedb36c 4970 {
39641489
PB
4971 int sign;
4972 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4973 HOST_WIDE_INT val = INTVAL (trueop1);
4974 HOST_WIDE_INT mmin, mmax;
4975
4976 if (code == GEU
4977 || code == LEU
4978 || code == GTU
4979 || code == LTU)
4980 sign = 0;
4981 else
4982 sign = 1;
0aea6467 4983
39641489
PB
4984 /* Get a reduced range if the sign bit is zero. */
4985 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4986 {
4987 mmin = 0;
4988 mmax = nonzero;
4989 }
4990 else
4991 {
4992 rtx mmin_rtx, mmax_rtx;
a567207e 4993 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
39641489 4994
dc7c279e
JJ
4995 mmin = INTVAL (mmin_rtx);
4996 mmax = INTVAL (mmax_rtx);
4997 if (sign)
4998 {
4999 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5000
5001 mmin >>= (sign_copies - 1);
5002 mmax >>= (sign_copies - 1);
5003 }
0aea6467
ZD
5004 }
5005
0cedb36c
JL
5006 switch (code)
5007 {
39641489
PB
5008 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5009 case GEU:
5010 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5011 return const_true_rtx;
5012 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5013 return const0_rtx;
5014 break;
5015 case GE:
5016 if (val <= mmin)
5017 return const_true_rtx;
5018 if (val > mmax)
5019 return const0_rtx;
5020 break;
5021
5022 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5023 case LEU:
5024 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5025 return const_true_rtx;
5026 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5027 return const0_rtx;
5028 break;
5029 case LE:
5030 if (val >= mmax)
5031 return const_true_rtx;
5032 if (val < mmin)
5033 return const0_rtx;
5034 break;
5035
0cedb36c 5036 case EQ:
39641489
PB
5037 /* x == y is always false for y out of range. */
5038 if (val < mmin || val > mmax)
5039 return const0_rtx;
5040 break;
5041
5042 /* x > y is always false for y >= mmax, always true for y < mmin. */
5043 case GTU:
5044 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5045 return const0_rtx;
5046 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5047 return const_true_rtx;
5048 break;
5049 case GT:
5050 if (val >= mmax)
5051 return const0_rtx;
5052 if (val < mmin)
5053 return const_true_rtx;
5054 break;
5055
5056 /* x < y is always false for y <= mmin, always true for y > mmax. */
5057 case LTU:
5058 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5059 return const0_rtx;
5060 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5061 return const_true_rtx;
5062 break;
5063 case LT:
5064 if (val <= mmin)
0cedb36c 5065 return const0_rtx;
39641489
PB
5066 if (val > mmax)
5067 return const_true_rtx;
0cedb36c
JL
5068 break;
5069
5070 case NE:
39641489
PB
5071 /* x != y is always true for y out of range. */
5072 if (val < mmin || val > mmax)
0cedb36c
JL
5073 return const_true_rtx;
5074 break;
5075
39641489
PB
5076 default:
5077 break;
5078 }
5079 }
5080
5081 /* Optimize integer comparisons with zero. */
2d87c1d4 5082 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
39641489
PB
5083 {
5084 /* Some addresses are known to be nonzero. We don't know
a567207e 5085 their sign, but equality comparisons are known. */
39641489 5086 if (nonzero_address_p (trueop0))
a567207e 5087 {
39641489
PB
5088 if (code == EQ || code == LEU)
5089 return const0_rtx;
5090 if (code == NE || code == GTU)
5091 return const_true_rtx;
a567207e 5092 }
39641489
PB
5093
5094 /* See if the first operand is an IOR with a constant. If so, we
5095 may be able to determine the result of this comparison. */
5096 if (GET_CODE (op0) == IOR)
a567207e 5097 {
39641489 5098 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
481683e1 5099 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
39641489 5100 {
5511bc5a 5101 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
a567207e 5102 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
43c36287
EB
5103 && (UINTVAL (inner_const)
5104 & ((unsigned HOST_WIDE_INT) 1
5105 << sign_bitnum)));
a567207e
PB
5106
5107 switch (code)
5108 {
5109 case EQ:
39641489 5110 case LEU:
a567207e
PB
5111 return const0_rtx;
5112 case NE:
39641489 5113 case GTU:
a567207e
PB
5114 return const_true_rtx;
5115 case LT:
5116 case LE:
5117 if (has_sign)
5118 return const_true_rtx;
5119 break;
5120 case GT:
39641489 5121 case GE:
a567207e
PB
5122 if (has_sign)
5123 return const0_rtx;
5124 break;
5125 default:
5126 break;
5127 }
5128 }
39641489
PB
5129 }
5130 }
5131
5132 /* Optimize comparison of ABS with zero. */
2d87c1d4 5133 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
39641489
PB
5134 && (GET_CODE (trueop0) == ABS
5135 || (GET_CODE (trueop0) == FLOAT_EXTEND
5136 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5137 {
5138 switch (code)
5139 {
0da65b89
RS
5140 case LT:
5141 /* Optimize abs(x) < 0.0. */
39641489 5142 if (!HONOR_SNANS (mode)
eeef0e45
ILT
5143 && (!INTEGRAL_MODE_P (mode)
5144 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
0da65b89 5145 {
39641489
PB
5146 if (INTEGRAL_MODE_P (mode)
5147 && (issue_strict_overflow_warning
5148 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5149 warning (OPT_Wstrict_overflow,
5150 ("assuming signed overflow does not occur when "
5151 "assuming abs (x) < 0 is false"));
5152 return const0_rtx;
0da65b89
RS
5153 }
5154 break;
5155
5156 case GE:
5157 /* Optimize abs(x) >= 0.0. */
39641489 5158 if (!HONOR_NANS (mode)
eeef0e45
ILT
5159 && (!INTEGRAL_MODE_P (mode)
5160 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
0da65b89 5161 {
39641489
PB
5162 if (INTEGRAL_MODE_P (mode)
5163 && (issue_strict_overflow_warning
5164 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5165 warning (OPT_Wstrict_overflow,
5166 ("assuming signed overflow does not occur when "
5167 "assuming abs (x) >= 0 is true"));
5168 return const_true_rtx;
0da65b89
RS
5169 }
5170 break;
5171
8d90f9c0
GK
5172 case UNGE:
5173 /* Optimize ! (abs(x) < 0.0). */
39641489 5174 return const_true_rtx;
46c5ad27 5175
0cedb36c
JL
5176 default:
5177 break;
5178 }
0cedb36c
JL
5179 }
5180
39641489 5181 return 0;
0cedb36c
JL
5182}
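
/* Illustrative sketch (not part of the original file): folding a
   comparison of two constants through the function above.  GEN_INT
   builds VOIDmode CONST_INTs, so the comparison is carried out in the
   requested SImode.  Guarded out; for exposition only.  */
#if 0
static void
const_relational_example (void)
{
  /* (gtu (const_int 7) (const_int 5)) in SImode folds to true.  */
  rtx res = simplify_const_relational_operation (GTU, SImode,
						 GEN_INT (7), GEN_INT (5));
  gcc_checking_assert (res == const_true_rtx);
}
#endif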
\f
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits = CONST_VECTOR_NUNITS (cv);
	  bool ok = true;
	  for (int i = 0; i < nunits; ++i)
	    if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
	      {
		ok = false;
		break;
	      }
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else if (temp)
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
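
/* Illustrative sketch (not part of the original file): with a constant
   condition, the IF_THEN_ELSE case above selects an arm outright.
   Guarded out; for exposition only.  */
#if 0
static rtx
ternary_example (rtx a, rtx b)
{
  /* (if_then_else (const_int 1) a b) simplifies to A.  */
  return simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
				     const1_rtx, a, b);
}
#endif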

/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);

	    for (i = 0; i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
		/ HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
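
/* Illustrative sketch (not part of the original file): constant
   subregs are folded by simplify_immed_subreg above, reached through
   simplify_subreg below.  Taking the lowpart QImode subreg of an
   SImode constant extracts its low byte on either endianness, because
   subreg_lowpart_offset supplies the right BYTE.  Guarded out; for
   exposition only.  */
#if 0
static void
immed_subreg_example (void)
{
  rtx op = gen_int_mode (0x1234, SImode);
  rtx lo = simplify_subreg (QImode, op, SImode,
			    subreg_lowpart_offset (QImode, SImode));
  /* CONST_INTs are shared, so pointer equality holds.  */
  gcc_checking_assert (lo == gen_int_mode (0x34, QImode));
}
#endif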

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big endian machines,
	 this value should be negative.  For a moment, undo this
	 exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
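
/* Illustrative sketch (not part of the original file): the nested
   SUBREG case above.  A subreg of a subreg of a pseudo collapses to a
   single subreg of the innermost register; register number 100 is an
   arbitrary pseudo chosen for illustration.  Guarded out; for
   exposition only.  */
#if 0
static void
nested_subreg_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx inner = gen_rtx_SUBREG (HImode, reg,
			      subreg_lowpart_offset (HImode, SImode));
  /* (subreg:QI (subreg:HI (reg:SI 100) ...) ...) becomes
     (subreg:QI (reg:SI 100) ...).  */
  rtx folded = simplify_subreg (QImode, inner, HImode,
				subreg_lowpart_offset (QImode, HImode));
  gcc_checking_assert (folded != NULL_RTX
		       && GET_CODE (folded) == SUBREG
		       && SUBREG_REG (folded) == reg);
}
#endif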

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Generate a subreg that extracts the least significant part of EXPR
   (in mode INNER_MODE) in OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
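
/* Illustrative sketch (not part of the original file): lowpart_subreg
   is the usual way to narrow a value without worrying about
   endianness, since it computes the BYTE offset itself.  Guarded out;
   for exposition only.  */
#if 0
static rtx
narrow_to_si (rtx di_val)
{
  /* Equivalent to simplify_gen_subreg (SImode, di_val, DImode,
     subreg_lowpart_offset (SImode, DImode)).  */
  return lowpart_subreg (SImode, di_val, DImode);
}
#endif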

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}
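
/* Illustrative sketch (not part of the original file): folding a whole
   expression through the generic entry point above.  Guarded out; for
   exposition only.  */
#if 0
static void
simplify_rtx_example (void)
{
  /* (plus:SI (const_int 2) (const_int 3)) folds to (const_int 5).  */
  rtx sum = simplify_rtx (gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3)));
  /* Small CONST_INTs are shared, so pointer equality holds.  */
  gcc_checking_assert (sum == GEN_INT (5));
}
#endif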