/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "varasm.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "insn-codes.h"
#include "optabs.h"
#include "hashtab.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
#include "predict.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
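
/* For instance, if the most significant host bit of LOW is set, the
   pair (LOW, HWI_SIGN_EXTEND (LOW)) represents LOW sign-extended to
   twice HOST_BITS_PER_WIDE_INT: the high half is all ones.  Otherwise
   the high half is zero.  */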

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
\f
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
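
/* For instance, negating the most negative SImode value 0x80000000
   overflows in a signed type and wraps back to 0x80000000; doing the
   negation on the unsigned type and letting gen_int_mode truncate to
   MODE keeps the computation well defined.  */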

/* Test whether expression X is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
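
/* For SImode, for instance, this accepts only a constant whose low 32
   bits are 0x80000000, i.e. the value with just the sign bit set.  */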

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
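
/* Note the difference between the three predicates above: for SImode,
   val_signbit_p requires VAL to equal 0x80000000 after masking, while
   val_signbit_known_set_p and val_signbit_known_clear_p look only at
   bit 31 and ignore all other bits of VAL.  */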
\f
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
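
/* For instance, simplify_gen_binary (PLUS, SImode, const1_rtx, reg)
   produces the canonical (plus:SI reg (const_int 1)), with the constant
   as the second operand, assuming the addition does not fold outright.  */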
\f
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
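
/* For instance, a (mem:SF (symbol_ref)) whose symbol addresses a
   constant pool entry comes back as the CONST_DOUBLE stored in the
   pool, and a FLOAT_EXTEND of such a reference folds to the extended
   constant.  */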
\f
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
\f
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
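
/* For instance, simplify_gen_unary (NEG, SImode, const1_rtx, SImode)
   folds to (const_int -1) rather than building (neg:SI (const_int 1)).  */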

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X; if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
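
/* For instance, replacing (reg:SI 100) with (const_int 0) in
   (plus:SI (reg:SI 100) (reg:SI 101)) does not merely substitute; the
   result simplifies to (reg:SI 101).  */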
\f
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
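
/* For instance, on a 32-bit little-endian target the word-extraction
   case above turns (truncate:SI (lshiftrt:DI (reg:DI) (const_int 32)))
   into (subreg:SI (reg:DI) 4), a direct reference to the high word.  */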
\f
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);


      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
      */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                  (MULT, mode,
                   simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                   simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                  (MULT, mode,
                   simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                   simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
                                rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INTs have VOIDmode as their mode.  We assume that all
             the bits of the constant are significant, though this is a
             dangerous assumption: CONST_INTs are often created and used
             with garbage in the bits outside of the precision of the
             implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INTs have VOIDmode as their mode.  We assume that all
             the bits of the constant are significant, though this is a
             dangerous assumption: CONST_INTs are often created and used
             with garbage in the bits outside of the precision of the
             implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE.  A lot of
         upstream callers expect that this function never fails to
         simplify something, so if this condition were added to the
         test above, the code would just die later anyway.  If this
         assert happens, you just need to make the port support wide
         int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
        {
        case NOT:
807e902e 1697 result = wi::bit_not (op0);
0cedb36c
JL
1698 break;
1699
1700 case NEG:
807e902e 1701 result = wi::neg (op0);
0cedb36c
JL
1702 break;
1703
1704 case ABS:
807e902e 1705 result = wi::abs (op0);
0cedb36c
JL
1706 break;
1707
1708 case FFS:
807e902e 1709 result = wi::shwi (wi::ffs (op0), mode);
0cedb36c
JL
1710 break;
1711
2928cd7a 1712 case CLZ:
807e902e
KZ
1713 if (wi::ne_p (op0, 0))
1714 int_value = wi::clz (op0);
1715 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1716 int_value = GET_MODE_PRECISION (mode);
1717 result = wi::shwi (int_value, mode);
3801c801
BS
1718 break;
1719
1720 case CLRSB:
807e902e 1721 result = wi::shwi (wi::clrsb (op0), mode);
2928cd7a
RH
1722 break;
1723
1724 case CTZ:
807e902e
KZ
1725 if (wi::ne_p (op0, 0))
1726 int_value = wi::ctz (op0);
1727 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1728 int_value = GET_MODE_PRECISION (mode);
1729 result = wi::shwi (int_value, mode);
2928cd7a
RH
1730 break;
1731
1732 case POPCOUNT:
807e902e 1733 result = wi::shwi (wi::popcount (op0), mode);
2928cd7a
RH
1734 break;
1735
1736 case PARITY:
807e902e 1737 result = wi::shwi (wi::parity (op0), mode);
2928cd7a
RH
1738 break;
1739
167fa32c 1740 case BSWAP:
807e902e 1741 result = wide_int (op0).bswap ();
9f05adb0 1742 break;
167fa32c 1743
0cedb36c 1744 case TRUNCATE:
0cedb36c 1745 case ZERO_EXTEND:
807e902e 1746 result = wide_int::from (op0, width, UNSIGNED);
0cedb36c
JL
1747 break;
1748
1749 case SIGN_EXTEND:
807e902e 1750 result = wide_int::from (op0, width, SIGNED);
0cedb36c
JL
1751 break;
1752
1753 case SQRT:
0cedb36c
JL
1754 default:
1755 return 0;
1756 }
1757
807e902e 1758 return immed_wide_int_const (result, mode);
0cedb36c
JL
1759 }
1760
48175537 1761 else if (CONST_DOUBLE_AS_FLOAT_P (op)
6f0c9f06
JJ
1762 && SCALAR_FLOAT_MODE_P (mode)
1763 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
0cedb36c 1764 {
3c8e8595 1765 REAL_VALUE_TYPE d;
0a67e02c 1766 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
0cedb36c 1767
15e5ad76
ZW
1768 switch (code)
1769 {
1770 case SQRT:
3c8e8595 1771 return 0;
94313f35 1772 case ABS:
d49b6e1e 1773 d = real_value_abs (&d);
94313f35
RH
1774 break;
1775 case NEG:
d49b6e1e 1776 d = real_value_negate (&d);
94313f35
RH
1777 break;
1778 case FLOAT_TRUNCATE:
1779 d = real_value_truncate (mode, d);
1780 break;
1781 case FLOAT_EXTEND:
6f0c9f06
JJ
1782 /* All this does is change the mode, unless changing
1783 mode class. */
1784 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1785 real_convert (&d, mode, &d);
94313f35
RH
1786 break;
1787 case FIX:
1788 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1789 break;
79ae63b1
JH
1790 case NOT:
1791 {
1792 long tmp[4];
1793 int i;
1794
0a67e02c 1795 real_to_target (tmp, &d, GET_MODE (op));
79ae63b1
JH
1796 for (i = 0; i < 4; i++)
1797 tmp[i] = ~tmp[i];
1798 real_from_target (&d, tmp, mode);
0a67e02c 1799 break;
79ae63b1 1800 }
15e5ad76 1801 default:
41374e13 1802 gcc_unreachable ();
15e5ad76
ZW
1803 }
1804 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
0cedb36c 1805 }
48175537 1806 else if (CONST_DOUBLE_AS_FLOAT_P (op)
3d8bf70f 1807 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
0cedb36c 1808 && GET_MODE_CLASS (mode) == MODE_INT
807e902e 1809 && width > 0)
0cedb36c 1810 {
875eda9c 1811 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2067c116 1812 operators are intentionally left unspecified (to ease implementation
875eda9c
RS
1813 by target backends), for consistency, this routine implements the
1814 same semantics for constant folding as used by the middle-end. */
1815
0a67e02c
PB
1816 /* This was formerly used only for non-IEEE float.
1817 eggert@twinsun.com says it is safe for IEEE also. */
875eda9c 1818 REAL_VALUE_TYPE x, t;
0a67e02c 1819 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
807e902e
KZ
1820 wide_int wmax, wmin;
1821 /* This is part of the abi to real_to_integer, but we check
1822 things before making this call. */
1823 bool fail;
1824
15e5ad76
ZW
1825 switch (code)
1826 {
875eda9c
RS
1827 case FIX:
1828 if (REAL_VALUE_ISNAN (x))
1829 return const0_rtx;
1830
1831 /* Test against the signed upper bound. */
807e902e
KZ
1832 wmax = wi::max_value (width, SIGNED);
1833 real_from_integer (&t, VOIDmode, wmax, SIGNED);
875eda9c 1834 if (REAL_VALUES_LESS (t, x))
807e902e 1835 return immed_wide_int_const (wmax, mode);
875eda9c
RS
1836
1837 /* Test against the signed lower bound. */
807e902e
KZ
1838 wmin = wi::min_value (width, SIGNED);
1839 real_from_integer (&t, VOIDmode, wmin, SIGNED);
875eda9c 1840 if (REAL_VALUES_LESS (x, t))
807e902e
KZ
1841 return immed_wide_int_const (wmin, mode);
1842
1843 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
875eda9c
RS
1844 break;
1845
1846 case UNSIGNED_FIX:
1847 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1848 return const0_rtx;
1849
1850 /* Test against the unsigned upper bound. */
807e902e
KZ
1851 wmax = wi::max_value (width, UNSIGNED);
1852 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
875eda9c 1853 if (REAL_VALUES_LESS (t, x))
807e902e 1854 return immed_wide_int_const (wmax, mode);
875eda9c 1855
807e902e
KZ
1856 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1857 mode);
875eda9c
RS
1858 break;
1859
15e5ad76 1860 default:
41374e13 1861 gcc_unreachable ();
15e5ad76 1862 }
0cedb36c 1863 }
ba31d94e 1864
0a67e02c 1865 return NULL_RTX;
0cedb36c
JL
1866}
1867\f
b17c024f
EB
1868/* Subroutine of simplify_binary_operation to simplify a binary operation
1869 CODE that can commute with byte swapping, with result mode MODE and
1870 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1871 Return zero if no simplification or canonicalization is possible. */
1872
1873static rtx
ef4bddc2 1874simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
b17c024f
EB
1875 rtx op0, rtx op1)
1876{
1877 rtx tem;
1878
1879 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
a8c50132 1880 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
b17c024f
EB
1881 {
1882 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1883 simplify_gen_unary (BSWAP, mode, op1, mode));
1884 return simplify_gen_unary (BSWAP, mode, tem, mode);
1885 }
1886
1887 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1888 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1889 {
1890 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1891 return simplify_gen_unary (BSWAP, mode, tem, mode);
1892 }
1893
1894 return NULL_RTX;
1895}
1896
9ce79a7a
RS
1897/* Subroutine of simplify_binary_operation to simplify a commutative,
1898 associative binary operation CODE with result mode MODE, operating
1899 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1900 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1901 canonicalization is possible. */
dd61aa98 1902
dd61aa98 1903static rtx
ef4bddc2 1904simplify_associative_operation (enum rtx_code code, machine_mode mode,
dd61aa98
RS
1905 rtx op0, rtx op1)
1906{
1907 rtx tem;
1908
9ce79a7a
RS
1909 /* Linearize the operator to the left. */
1910 if (GET_CODE (op1) == code)
dd61aa98 1911 {
9ce79a7a
RS
1912 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1913 if (GET_CODE (op0) == code)
1914 {
1915 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1916 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1917 }
dd61aa98 1918
9ce79a7a
RS
1919 /* "a op (b op c)" becomes "(b op c) op a". */
1920 if (! swap_commutative_operands_p (op1, op0))
1921 return simplify_gen_binary (code, mode, op1, op0);
dd61aa98 1922
9ce79a7a
RS
1923 tem = op0;
1924 op0 = op1;
1925 op1 = tem;
dd61aa98
RS
1926 }
1927
9ce79a7a 1928 if (GET_CODE (op0) == code)
dd61aa98 1929 {
9ce79a7a
RS
1930 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1931 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1932 {
1933 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1934 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1935 }
1936
1937 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
7e0b4eae 1938 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
9ce79a7a
RS
1939 if (tem != 0)
1940 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1941
1942 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
7e0b4eae 1943 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
9ce79a7a
RS
1944 if (tem != 0)
1945 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
dd61aa98
RS
1946 }
1947
1948 return 0;
1949}
1950
0a67e02c 1951
0cedb36c
JL
1952/* Simplify a binary operation CODE with result mode MODE, operating on OP0
1953 and OP1. Return 0 if no simplification is possible.
1954
1955 Don't use this for relational operations such as EQ or LT.
1956 Use simplify_relational_operation instead. */
0cedb36c 1957rtx
ef4bddc2 1958simplify_binary_operation (enum rtx_code code, machine_mode mode,
46c5ad27 1959 rtx op0, rtx op1)
0cedb36c 1960{
9ce79a7a 1961 rtx trueop0, trueop1;
0cedb36c
JL
1962 rtx tem;
1963
1964 /* Relational operations don't work here. We must know the mode
1965 of the operands in order to do the comparison correctly.
1966 Assuming a full word can give incorrect results.
1967 Consider comparing 128 with -128 in QImode. */
41374e13
NS
1968 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1969 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
0cedb36c 1970
4ba5f925 1971 /* Make sure the constant is second. */
ec8e098d 1972 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9ce79a7a 1973 && swap_commutative_operands_p (op0, op1))
4ba5f925
JH
1974 {
1975 tem = op0, op0 = op1, op1 = tem;
4ba5f925
JH
1976 }
1977
9ce79a7a
RS
1978 trueop0 = avoid_constant_pool_reference (op0);
1979 trueop1 = avoid_constant_pool_reference (op1);
1980
0a67e02c
PB
1981 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1982 if (tem)
1983 return tem;
1984 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1985}
1986
1753331b
RS
1987/* Subroutine of simplify_binary_operation. Simplify a binary operation
1988 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1989 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1990 actual constants. */
1991
0a67e02c 1992static rtx
ef4bddc2 1993simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
0a67e02c
PB
1994 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1995{
bd1ef757 1996 rtx tem, reversed, opleft, opright;
0a67e02c 1997 HOST_WIDE_INT val;
5511bc5a 1998 unsigned int width = GET_MODE_PRECISION (mode);
0a67e02c
PB
1999
2000 /* Even if we can't compute a constant result,
2001 there are some cases worth simplifying. */
2002
2003 switch (code)
852c8ba1 2004 {
0a67e02c
PB
2005 case PLUS:
2006 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2007 when x is NaN, infinite, or finite and nonzero. They aren't
2008 when x is -0 and the rounding mode is not towards -infinity,
2009 since (-0) + 0 is then 0. */
2010 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2011 return op0;
2012
2013 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2014 transformations are safe even for IEEE. */
2015 if (GET_CODE (op0) == NEG)
2016 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2017 else if (GET_CODE (op1) == NEG)
2018 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2019
2020 /* (~a) + 1 -> -a */
2021 if (INTEGRAL_MODE_P (mode)
2022 && GET_CODE (op0) == NOT
2023 && trueop1 == const1_rtx)
2024 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2025
2026 /* Handle both-operands-constant cases. We can only add
2027 CONST_INTs to constants since the sum of relocatable symbols
2028 can't be handled by most assemblers. Don't add CONST_INT
2029 to CONST_INT since overflow won't be computed properly if wider
2030 than HOST_BITS_PER_WIDE_INT. */
2031
dd59ef13
RS
2032 if ((GET_CODE (op0) == CONST
2033 || GET_CODE (op0) == SYMBOL_REF
2034 || GET_CODE (op0) == LABEL_REF)
481683e1 2035 && CONST_INT_P (op1))
0a81f074 2036 return plus_constant (mode, op0, INTVAL (op1));
dd59ef13
RS
2037 else if ((GET_CODE (op1) == CONST
2038 || GET_CODE (op1) == SYMBOL_REF
2039 || GET_CODE (op1) == LABEL_REF)
481683e1 2040 && CONST_INT_P (op0))
0a81f074 2041 return plus_constant (mode, op1, INTVAL (op0));
0a67e02c
PB
2042
2043 /* See if this is something like X * C - X or vice versa or
2044 if the multiplication is written as a shift. If so, we can
2045 distribute and make a new multiply, shift, or maybe just
2046 have X (if C is 2 in the example above). But don't make
2047 something more expensive than we had before. */
2048
6800ea5c 2049 if (SCALAR_INT_MODE_P (mode))
0a67e02c 2050 {
0a67e02c
PB
2051 rtx lhs = op0, rhs = op1;
2052
807e902e
KZ
2053 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2054 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
54fb1ae0 2055
0a67e02c 2056 if (GET_CODE (lhs) == NEG)
fab2f52c 2057 {
807e902e 2058 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
fab2f52c
AO
2059 lhs = XEXP (lhs, 0);
2060 }
0a67e02c 2061 else if (GET_CODE (lhs) == MULT
807e902e 2062 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
fab2f52c 2063 {
807e902e 2064 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
fab2f52c
AO
2065 lhs = XEXP (lhs, 0);
2066 }
0a67e02c 2067 else if (GET_CODE (lhs) == ASHIFT
481683e1 2068 && CONST_INT_P (XEXP (lhs, 1))
54fb1ae0 2069 && INTVAL (XEXP (lhs, 1)) >= 0
807e902e 2070 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
0a67e02c 2071 {
807e902e
KZ
2072 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2073 GET_MODE_PRECISION (mode));
0a67e02c
PB
2074 lhs = XEXP (lhs, 0);
2075 }
852c8ba1 2076
0a67e02c 2077 if (GET_CODE (rhs) == NEG)
fab2f52c 2078 {
807e902e 2079 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
fab2f52c
AO
2080 rhs = XEXP (rhs, 0);
2081 }
0a67e02c 2082 else if (GET_CODE (rhs) == MULT
481683e1 2083 && CONST_INT_P (XEXP (rhs, 1)))
0a67e02c 2084 {
807e902e 2085 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
fab2f52c 2086 rhs = XEXP (rhs, 0);
0a67e02c
PB
2087 }
2088 else if (GET_CODE (rhs) == ASHIFT
481683e1 2089 && CONST_INT_P (XEXP (rhs, 1))
0a67e02c 2090 && INTVAL (XEXP (rhs, 1)) >= 0
807e902e 2091 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
0a67e02c 2092 {
807e902e
KZ
2093 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2094 GET_MODE_PRECISION (mode));
0a67e02c
PB
2095 rhs = XEXP (rhs, 0);
2096 }
2097
2098 if (rtx_equal_p (lhs, rhs))
2099 {
2100 rtx orig = gen_rtx_PLUS (mode, op0, op1);
fab2f52c 2101 rtx coeff;
f40751dd 2102 bool speed = optimize_function_for_speed_p (cfun);
fab2f52c 2103
807e902e 2104 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
fab2f52c
AO
2105
2106 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
5e8f01f4 2107 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
0a67e02c
PB
2108 ? tem : 0;
2109 }
2110 }
2111
2112 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
33ffb5c5 2113 if (CONST_SCALAR_INT_P (op1)
0a67e02c 2114 && GET_CODE (op0) == XOR
33ffb5c5 2115 && CONST_SCALAR_INT_P (XEXP (op0, 1))
0a67e02c
PB
2116 && mode_signbit_p (mode, op1))
2117 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2118 simplify_gen_binary (XOR, mode, op1,
2119 XEXP (op0, 1)));
2120
bd1ef757 2121 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
4bf371ea
RG
2122 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2123 && GET_CODE (op0) == MULT
bd1ef757
PB
2124 && GET_CODE (XEXP (op0, 0)) == NEG)
2125 {
2126 rtx in1, in2;
2127
2128 in1 = XEXP (XEXP (op0, 0), 0);
2129 in2 = XEXP (op0, 1);
2130 return simplify_gen_binary (MINUS, mode, op1,
2131 simplify_gen_binary (MULT, mode,
2132 in1, in2));
2133 }
2134
2135 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2136 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2137 is 1. */
2138 if (COMPARISON_P (op0)
2139 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2140 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2141 && (reversed = reversed_comparison (op0, mode)))
2142 return
2143 simplify_gen_unary (NEG, mode, reversed, mode);
2144
0a67e02c
PB
2145 /* If one of the operands is a PLUS or a MINUS, see if we can
2146 simplify this by the associative law.
2147 Don't use the associative law for floating point.
2148 The inaccuracy makes it nonassociative,
2149 and subtle programs can break if operations are associated. */
2150
2151 if (INTEGRAL_MODE_P (mode)
2152 && (plus_minus_operand_p (op0)
2153 || plus_minus_operand_p (op1))
1941069a 2154 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
0a67e02c
PB
2155 return tem;
2156
2157 /* Reassociate floating point addition only when the user
a1a82611 2158 specifies associative math operations. */
0a67e02c 2159 if (FLOAT_MODE_P (mode)
a1a82611 2160 && flag_associative_math)
852c8ba1 2161 {
0a67e02c
PB
2162 tem = simplify_associative_operation (code, mode, op0, op1);
2163 if (tem)
2164 return tem;
852c8ba1 2165 }
0a67e02c 2166 break;
852c8ba1 2167
0a67e02c 2168 case COMPARE:
0a67e02c
PB
2169 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2170 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2171 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2172 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
3198b947 2173 {
0a67e02c
PB
2174 rtx xop00 = XEXP (op0, 0);
2175 rtx xop10 = XEXP (op1, 0);
3198b947 2176
0a67e02c
PB
2177#ifdef HAVE_cc0
2178 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2179#else
2180 if (REG_P (xop00) && REG_P (xop10)
2181 && GET_MODE (xop00) == GET_MODE (xop10)
2182 && REGNO (xop00) == REGNO (xop10)
2183 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2184 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2185#endif
2186 return xop00;
3198b947 2187 }
0a67e02c
PB
2188 break;
2189
2190 case MINUS:
2191 /* We can't assume x-x is 0 even with non-IEEE floating point,
2192 but since it is zero except in very strange circumstances, we
81d2fb02 2193 will treat it as zero with -ffinite-math-only. */
0a67e02c
PB
2194 if (rtx_equal_p (trueop0, trueop1)
2195 && ! side_effects_p (op0)
81d2fb02 2196 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
0a67e02c
PB
2197 return CONST0_RTX (mode);
2198
2199 /* Change subtraction from zero into negation. (0 - x) is the
2200 same as -x when x is NaN, infinite, or finite and nonzero.
2201 But if the mode has signed zeros, and does not round towards
2202 -infinity, then 0 - 0 is 0, not -0. */
2203 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2204 return simplify_gen_unary (NEG, mode, op1, mode);
2205
2206 /* (-1 - a) is ~a. */
2207 if (trueop0 == constm1_rtx)
2208 return simplify_gen_unary (NOT, mode, op1, mode);
2209
2210 /* Subtracting 0 has no effect unless the mode has signed zeros
2211 and supports rounding towards -infinity. In such a case,
2212 0 - 0 is -0. */
2213 if (!(HONOR_SIGNED_ZEROS (mode)
2214 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2215 && trueop1 == CONST0_RTX (mode))
2216 return op0;
2217
2218 /* See if this is something like X * C - X or vice versa or
2219 if the multiplication is written as a shift. If so, we can
2220 distribute and make a new multiply, shift, or maybe just
2221 have X (if C is 2 in the example above). But don't make
2222 something more expensive than we had before. */
2223
6800ea5c 2224 if (SCALAR_INT_MODE_P (mode))
3198b947 2225 {
0a67e02c 2226 rtx lhs = op0, rhs = op1;
3198b947 2227
807e902e
KZ
2228 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2229 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
54fb1ae0 2230
0a67e02c 2231 if (GET_CODE (lhs) == NEG)
fab2f52c 2232 {
807e902e 2233 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
fab2f52c
AO
2234 lhs = XEXP (lhs, 0);
2235 }
0a67e02c 2236 else if (GET_CODE (lhs) == MULT
807e902e 2237 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
0a67e02c 2238 {
807e902e 2239 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
fab2f52c 2240 lhs = XEXP (lhs, 0);
0a67e02c
PB
2241 }
2242 else if (GET_CODE (lhs) == ASHIFT
481683e1 2243 && CONST_INT_P (XEXP (lhs, 1))
0a67e02c 2244 && INTVAL (XEXP (lhs, 1)) >= 0
807e902e 2245 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
0a67e02c 2246 {
807e902e
KZ
2247 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2248 GET_MODE_PRECISION (mode));
0a67e02c
PB
2249 lhs = XEXP (lhs, 0);
2250 }
3198b947 2251
0a67e02c 2252 if (GET_CODE (rhs) == NEG)
fab2f52c 2253 {
807e902e 2254 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
fab2f52c
AO
2255 rhs = XEXP (rhs, 0);
2256 }
0a67e02c 2257 else if (GET_CODE (rhs) == MULT
481683e1 2258 && CONST_INT_P (XEXP (rhs, 1)))
0a67e02c 2259 {
807e902e 2260 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
fab2f52c 2261 rhs = XEXP (rhs, 0);
0a67e02c
PB
2262 }
2263 else if (GET_CODE (rhs) == ASHIFT
481683e1 2264 && CONST_INT_P (XEXP (rhs, 1))
0a67e02c 2265 && INTVAL (XEXP (rhs, 1)) >= 0
807e902e 2266 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
0a67e02c 2267 {
807e902e
KZ
2268 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2269 GET_MODE_PRECISION (mode));
27bcd47c 2270 negcoeff1 = -negcoeff1;
0a67e02c
PB
2271 rhs = XEXP (rhs, 0);
2272 }
2273
2274 if (rtx_equal_p (lhs, rhs))
2275 {
2276 rtx orig = gen_rtx_MINUS (mode, op0, op1);
fab2f52c 2277 rtx coeff;
f40751dd 2278 bool speed = optimize_function_for_speed_p (cfun);
fab2f52c 2279
807e902e 2280 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
fab2f52c
AO
2281
2282 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
5e8f01f4 2283 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
0a67e02c
PB
2284 ? tem : 0;
2285 }
3198b947
RH
2286 }
2287
0a67e02c
PB
2288 /* (a - (-b)) -> (a + b). True even for IEEE. */
2289 if (GET_CODE (op1) == NEG)
2290 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3198b947 2291
0a67e02c
PB
2292 /* (-x - c) may be simplified as (-c - x). */
2293 if (GET_CODE (op0) == NEG
33ffb5c5 2294 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
79ae63b1 2295 {
0a67e02c
PB
2296 tem = simplify_unary_operation (NEG, mode, op1, mode);
2297 if (tem)
2298 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2299 }
79ae63b1 2300
0a67e02c 2301 /* Don't let a relocatable value get a negative coeff. */
481683e1 2302 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
0a67e02c
PB
2303 return simplify_gen_binary (PLUS, mode,
2304 op0,
2305 neg_const_int (mode, op1));
2306
2307 /* (x - (x & y)) -> (x & ~y) */
6b74529d 2308 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
0a67e02c
PB
2309 {
2310 if (rtx_equal_p (op0, XEXP (op1, 0)))
79ae63b1 2311 {
0a67e02c
PB
2312 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2313 GET_MODE (XEXP (op1, 1)));
2314 return simplify_gen_binary (AND, mode, op0, tem);
2315 }
2316 if (rtx_equal_p (op0, XEXP (op1, 1)))
2317 {
2318 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2319 GET_MODE (XEXP (op1, 0)));
2320 return simplify_gen_binary (AND, mode, op0, tem);
79ae63b1 2321 }
79ae63b1 2322 }
1941069a 2323
bd1ef757
PB
2324 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2325 by reversing the comparison code if valid. */
2326 if (STORE_FLAG_VALUE == 1
2327 && trueop0 == const1_rtx
2328 && COMPARISON_P (op1)
2329 && (reversed = reversed_comparison (op1, mode)))
2330 return reversed;
2331
2332 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
4bf371ea
RG
2333 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2334 && GET_CODE (op1) == MULT
bd1ef757
PB
2335 && GET_CODE (XEXP (op1, 0)) == NEG)
2336 {
2337 rtx in1, in2;
2338
2339 in1 = XEXP (XEXP (op1, 0), 0);
2340 in2 = XEXP (op1, 1);
2341 return simplify_gen_binary (PLUS, mode,
2342 simplify_gen_binary (MULT, mode,
2343 in1, in2),
2344 op0);
2345 }
2346
2347 /* Canonicalize (minus (neg A) (mult B C)) to
2348 (minus (mult (neg B) C) A). */
4bf371ea
RG
2349 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2350 && GET_CODE (op1) == MULT
bd1ef757
PB
2351 && GET_CODE (op0) == NEG)
2352 {
2353 rtx in1, in2;
2354
2355 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2356 in2 = XEXP (op1, 1);
2357 return simplify_gen_binary (MINUS, mode,
2358 simplify_gen_binary (MULT, mode,
2359 in1, in2),
2360 XEXP (op0, 0));
2361 }
2362
1941069a
PB
2363 /* If one of the operands is a PLUS or a MINUS, see if we can
2364 simplify this by the associative law. This will, for example,
2365 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2366 Don't use the associative law for floating point.
2367 The inaccuracy makes it nonassociative,
2368 and subtle programs can break if operations are associated. */
2369
2370 if (INTEGRAL_MODE_P (mode)
2371 && (plus_minus_operand_p (op0)
2372 || plus_minus_operand_p (op1))
2373 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2374 return tem;
0a67e02c 2375 break;
15e5ad76 2376
0a67e02c
PB
2377 case MULT:
2378 if (trueop1 == constm1_rtx)
2379 return simplify_gen_unary (NEG, mode, op0, mode);
2380
29b40d79
BS
2381 if (GET_CODE (op0) == NEG)
2382 {
2383 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
707f9919
JJ
2384 /* If op1 is a MULT as well and simplify_unary_operation
2385 just moved the NEG to the second operand, simplify_gen_binary
2386 below could through simplify_associative_operation move
2387 the NEG around again and recurse endlessly. */
2388 if (temp
2389 && GET_CODE (op1) == MULT
2390 && GET_CODE (temp) == MULT
2391 && XEXP (op1, 0) == XEXP (temp, 0)
2392 && GET_CODE (XEXP (temp, 1)) == NEG
2393 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2394 temp = NULL_RTX;
29b40d79
BS
2395 if (temp)
2396 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2397 }
2398 if (GET_CODE (op1) == NEG)
2399 {
2400 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
707f9919
JJ
2401 /* If op0 is a MULT as well and simplify_unary_operation
2402 just moved the NEG to the second operand, simplify_gen_binary
2403 below could through simplify_associative_operation move
2404 the NEG around again and recurse endlessly. */
2405 if (temp
2406 && GET_CODE (op0) == MULT
2407 && GET_CODE (temp) == MULT
2408 && XEXP (op0, 0) == XEXP (temp, 0)
2409 && GET_CODE (XEXP (temp, 1)) == NEG
2410 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2411 temp = NULL_RTX;
29b40d79
BS
2412 if (temp)
2413 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2414 }
2415
0a67e02c
PB
2416 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2417 x is NaN, since x * 0 is then also NaN. Nor is it valid
2418 when the mode has signed zeros, since multiplying a negative
2419 number by 0 will give -0, not 0. */
2420 if (!HONOR_NANS (mode)
2421 && !HONOR_SIGNED_ZEROS (mode)
2422 && trueop1 == CONST0_RTX (mode)
2423 && ! side_effects_p (op0))
2424 return op1;
2425
2426 /* In IEEE floating point, x*1 is not equivalent to x for
2427 signalling NaNs. */
2428 if (!HONOR_SNANS (mode)
2429 && trueop1 == CONST1_RTX (mode))
2430 return op0;
2431
807e902e
KZ
2432 /* Convert multiply by constant power of two into shift. */
2433 if (CONST_SCALAR_INT_P (trueop1))
2434 {
2435 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2436 if (val >= 0)
2437 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2438 }
fab2f52c 2439
0a67e02c 2440 /* x*2 is x+x and x*(-1) is -x */
48175537 2441 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3d8bf70f 2442 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
50cd60be 2443 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
0a67e02c
PB
2444 && GET_MODE (op0) == mode)
2445 {
2446 REAL_VALUE_TYPE d;
2447 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
15e5ad76 2448
0a67e02c
PB
2449 if (REAL_VALUES_EQUAL (d, dconst2))
2450 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
3e4093b6 2451
1753331b
RS
2452 if (!HONOR_SNANS (mode)
2453 && REAL_VALUES_EQUAL (d, dconstm1))
0a67e02c
PB
2454 return simplify_gen_unary (NEG, mode, op0, mode);
2455 }
15e5ad76 2456
1753331b
RS
2457 /* Optimize -x * -x as x * x. */
2458 if (FLOAT_MODE_P (mode)
2459 && GET_CODE (op0) == NEG
2460 && GET_CODE (op1) == NEG
2461 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2462 && !side_effects_p (XEXP (op0, 0)))
2463 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2464
2465 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2466 if (SCALAR_FLOAT_MODE_P (mode)
2467 && GET_CODE (op0) == ABS
2468 && GET_CODE (op1) == ABS
2469 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2470 && !side_effects_p (XEXP (op0, 0)))
2471 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2472
0a67e02c
PB
2473 /* Reassociate multiplication, but for floating point MULTs
2474 only when the user specifies unsafe math optimizations. */
2475 if (! FLOAT_MODE_P (mode)
2476 || flag_unsafe_math_optimizations)
2477 {
2478 tem = simplify_associative_operation (code, mode, op0, op1);
2479 if (tem)
2480 return tem;
2481 }
2482 break;
6355b2d5 2483
0a67e02c 2484 case IOR:
a82e045d 2485 if (trueop1 == CONST0_RTX (mode))
0a67e02c 2486 return op0;
e7160b27
JM
2487 if (INTEGRAL_MODE_P (mode)
2488 && trueop1 == CONSTM1_RTX (mode)
2489 && !side_effects_p (op0))
0a67e02c
PB
2490 return op1;
2491 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2492 return op0;
2493 /* A | (~A) -> -1 */
2494 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2495 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2496 && ! side_effects_p (op0)
3f2960d5 2497 && SCALAR_INT_MODE_P (mode))
0a67e02c 2498 return constm1_rtx;
bd1ef757
PB
2499
2500 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
481683e1 2501 if (CONST_INT_P (op1)
46c9550f 2502 && HWI_COMPUTABLE_MODE_P (mode)
e7160b27
JM
2503 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2504 && !side_effects_p (op0))
bd1ef757 2505 return op1;
b8698a0f 2506
49e7a9d4
RS
2507 /* Canonicalize (X & C1) | C2. */
2508 if (GET_CODE (op0) == AND
481683e1
SZ
2509 && CONST_INT_P (trueop1)
2510 && CONST_INT_P (XEXP (op0, 1)))
49e7a9d4
RS
2511 {
2512 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2513 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2514 HOST_WIDE_INT c2 = INTVAL (trueop1);
2515
2516 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2517 if ((c1 & c2) == c1
2518 && !side_effects_p (XEXP (op0, 0)))
2519 return trueop1;
2520
2521 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2522 if (((c1|c2) & mask) == mask)
2523 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2524
2525 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2526 if (((c1 & ~c2) & mask) != (c1 & mask))
2527 {
2528 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2529 gen_int_mode (c1 & ~c2, mode));
2530 return simplify_gen_binary (IOR, mode, tem, op1);
2531 }
2532 }
2533
bd1ef757
PB
2534 /* Convert (A & B) | A to A. */
2535 if (GET_CODE (op0) == AND
2536 && (rtx_equal_p (XEXP (op0, 0), op1)
2537 || rtx_equal_p (XEXP (op0, 1), op1))
2538 && ! side_effects_p (XEXP (op0, 0))
2539 && ! side_effects_p (XEXP (op0, 1)))
2540 return op1;
2541
2542 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2543 mode size to (rotate A CX). */
2544
2545 if (GET_CODE (op1) == ASHIFT
2546 || GET_CODE (op1) == SUBREG)
2547 {
2548 opleft = op1;
2549 opright = op0;
2550 }
2551 else
2552 {
2553 opright = op1;
2554 opleft = op0;
2555 }
2556
2557 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2558 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
481683e1
SZ
2559 && CONST_INT_P (XEXP (opleft, 1))
2560 && CONST_INT_P (XEXP (opright, 1))
bd1ef757 2561 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
5511bc5a 2562 == GET_MODE_PRECISION (mode)))
bd1ef757
PB
2563 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2564
2565 /* Same, but for ashift that has been "simplified" to a wider mode
2566 by simplify_shift_const. */
2567
2568 if (GET_CODE (opleft) == SUBREG
2569 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2570 && GET_CODE (opright) == LSHIFTRT
2571 && GET_CODE (XEXP (opright, 0)) == SUBREG
2572 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2573 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2574 && (GET_MODE_SIZE (GET_MODE (opleft))
2575 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2576 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2577 SUBREG_REG (XEXP (opright, 0)))
481683e1
SZ
2578 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2579 && CONST_INT_P (XEXP (opright, 1))
bd1ef757 2580 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
5511bc5a 2581 == GET_MODE_PRECISION (mode)))
bd1ef757 2582 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
01578564 2583 XEXP (SUBREG_REG (opleft), 1));
bd1ef757
PB
2584
2585 /* If we have (ior (and (X C1) C2)), simplify this by making
2586 C1 as small as possible if C1 actually changes. */
481683e1 2587 if (CONST_INT_P (op1)
46c9550f 2588 && (HWI_COMPUTABLE_MODE_P (mode)
bd1ef757
PB
2589 || INTVAL (op1) > 0)
2590 && GET_CODE (op0) == AND
481683e1
SZ
2591 && CONST_INT_P (XEXP (op0, 1))
2592 && CONST_INT_P (op1)
43c36287 2593 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
69a59f0f
RS
2594 {
2595 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2596 gen_int_mode (UINTVAL (XEXP (op0, 1))
2597 & ~UINTVAL (op1),
2598 mode));
2599 return simplify_gen_binary (IOR, mode, tmp, op1);
2600 }
bd1ef757
PB
2601
2602 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2603 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2604 the PLUS does not affect any of the bits in OP1: then we can do
2605 the IOR as a PLUS and we can associate. This is valid if OP1
2606 can be safely shifted left C bits. */
481683e1 2607 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
bd1ef757 2608 && GET_CODE (XEXP (op0, 0)) == PLUS
481683e1
SZ
2609 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2610 && CONST_INT_P (XEXP (op0, 1))
bd1ef757
PB
2611 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2612 {
2613 int count = INTVAL (XEXP (op0, 1));
2614 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2615
2616 if (mask >> count == INTVAL (trueop1)
046f1eee 2617 && trunc_int_for_mode (mask, mode) == mask
bd1ef757
PB
2618 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2619 return simplify_gen_binary (ASHIFTRT, mode,
0a81f074
RS
2620 plus_constant (mode, XEXP (op0, 0),
2621 mask),
bd1ef757
PB
2622 XEXP (op0, 1));
2623 }
2624
b17c024f
EB
2625 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2626 if (tem)
2627 return tem;
2628
0a67e02c
PB
2629 tem = simplify_associative_operation (code, mode, op0, op1);
2630 if (tem)
2631 return tem;
2632 break;
2633
2634 case XOR:
a82e045d 2635 if (trueop1 == CONST0_RTX (mode))
0a67e02c 2636 return op0;
e7c82a99 2637 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
0a67e02c 2638 return simplify_gen_unary (NOT, mode, op0, mode);
f5d1572a 2639 if (rtx_equal_p (trueop0, trueop1)
0a67e02c
PB
2640 && ! side_effects_p (op0)
2641 && GET_MODE_CLASS (mode) != MODE_CC)
6bd13540 2642 return CONST0_RTX (mode);
0a67e02c
PB
2643
2644 /* Canonicalize XOR of the most significant bit to PLUS. */
33ffb5c5 2645 if (CONST_SCALAR_INT_P (op1)
0a67e02c
PB
2646 && mode_signbit_p (mode, op1))
2647 return simplify_gen_binary (PLUS, mode, op0, op1);
2648 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
33ffb5c5 2649 if (CONST_SCALAR_INT_P (op1)
0a67e02c 2650 && GET_CODE (op0) == PLUS
33ffb5c5 2651 && CONST_SCALAR_INT_P (XEXP (op0, 1))
0a67e02c
PB
2652 && mode_signbit_p (mode, XEXP (op0, 1)))
2653 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2654 simplify_gen_binary (XOR, mode, op1,
2655 XEXP (op0, 1)));
bd1ef757
PB
2656
2657 /* If we are XORing two things that have no bits in common,
2658 convert them into an IOR. This helps to detect rotation encoded
2659 using those methods and possibly other simplifications. */
2660
46c9550f 2661 if (HWI_COMPUTABLE_MODE_P (mode)
bd1ef757
PB
2662 && (nonzero_bits (op0, mode)
2663 & nonzero_bits (op1, mode)) == 0)
2664 return (simplify_gen_binary (IOR, mode, op0, op1));
2665
2666 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2667 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2668 (NOT y). */
2669 {
2670 int num_negated = 0;
2671
2672 if (GET_CODE (op0) == NOT)
2673 num_negated++, op0 = XEXP (op0, 0);
2674 if (GET_CODE (op1) == NOT)
2675 num_negated++, op1 = XEXP (op1, 0);
2676
2677 if (num_negated == 2)
2678 return simplify_gen_binary (XOR, mode, op0, op1);
2679 else if (num_negated == 1)
2680 return simplify_gen_unary (NOT, mode,
2681 simplify_gen_binary (XOR, mode, op0, op1),
2682 mode);
2683 }
2684
2685 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2686 correspond to a machine insn or result in further simplifications
2687 if B is a constant. */
2688
2689 if (GET_CODE (op0) == AND
2690 && rtx_equal_p (XEXP (op0, 1), op1)
2691 && ! side_effects_p (op1))
2692 return simplify_gen_binary (AND, mode,
2693 simplify_gen_unary (NOT, mode,
2694 XEXP (op0, 0), mode),
2695 op1);
2696
2697 else if (GET_CODE (op0) == AND
2698 && rtx_equal_p (XEXP (op0, 0), op1)
2699 && ! side_effects_p (op1))
2700 return simplify_gen_binary (AND, mode,
2701 simplify_gen_unary (NOT, mode,
2702 XEXP (op0, 1), mode),
2703 op1);
2704
54833ec0
CLT
2705 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2706 we can transform like this:
2707 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2708 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2709 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2710 Attempt a few simplifications when B and C are both constants. */
2711 if (GET_CODE (op0) == AND
2712 && CONST_INT_P (op1)
2713 && CONST_INT_P (XEXP (op0, 1)))
2714 {
2715 rtx a = XEXP (op0, 0);
2716 rtx b = XEXP (op0, 1);
2717 rtx c = op1;
2718 HOST_WIDE_INT bval = INTVAL (b);
2719 HOST_WIDE_INT cval = INTVAL (c);
2720
2721 rtx na_c
2722 = simplify_binary_operation (AND, mode,
2723 simplify_gen_unary (NOT, mode, a, mode),
2724 c);
2725 if ((~cval & bval) == 0)
2726 {
2727 /* Try to simplify ~A&C | ~B&C. */
2728 if (na_c != NULL_RTX)
2729 return simplify_gen_binary (IOR, mode, na_c,
69a59f0f 2730 gen_int_mode (~bval & cval, mode));
54833ec0
CLT
2731 }
2732 else
2733 {
2734 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2735 if (na_c == const0_rtx)
2736 {
2737 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
69a59f0f
RS
2738 gen_int_mode (~cval & bval,
2739 mode));
54833ec0 2740 return simplify_gen_binary (IOR, mode, a_nc_b,
69a59f0f
RS
2741 gen_int_mode (~bval & cval,
2742 mode));
54833ec0
CLT
2743 }
2744 }
2745 }
2746
bd1ef757
PB
2747 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2748 comparison if STORE_FLAG_VALUE is 1. */
2749 if (STORE_FLAG_VALUE == 1
2750 && trueop1 == const1_rtx
2751 && COMPARISON_P (op0)
2752 && (reversed = reversed_comparison (op0, mode)))
2753 return reversed;
2754
2755 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2756 is (lt foo (const_int 0)), so we can perform the above
2757 simplification if STORE_FLAG_VALUE is 1. */
2758
2759 if (STORE_FLAG_VALUE == 1
2760 && trueop1 == const1_rtx
2761 && GET_CODE (op0) == LSHIFTRT
481683e1 2762 && CONST_INT_P (XEXP (op0, 1))
5511bc5a 2763 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
bd1ef757
PB
2764 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2765
2766 /* (xor (comparison foo bar) (const_int sign-bit))
2767 when STORE_FLAG_VALUE is the sign bit. */
2d0c270f 2768 if (val_signbit_p (mode, STORE_FLAG_VALUE)
bd1ef757
PB
2769 && trueop1 == const_true_rtx
2770 && COMPARISON_P (op0)
2771 && (reversed = reversed_comparison (op0, mode)))
2772 return reversed;
2773
b17c024f
EB
2774 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2775 if (tem)
2776 return tem;
2777
0a67e02c
PB
2778 tem = simplify_associative_operation (code, mode, op0, op1);
2779 if (tem)
2780 return tem;
2781 break;
2782
2783 case AND:
3f2960d5
RH
2784 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2785 return trueop1;
e7c82a99
JJ
2786 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2787 return op0;
46c9550f 2788 if (HWI_COMPUTABLE_MODE_P (mode))
dc5b3407
ZD
2789 {
2790 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
f5a17c43 2791 HOST_WIDE_INT nzop1;
481683e1 2792 if (CONST_INT_P (trueop1))
f5a17c43
BS
2793 {
2794 HOST_WIDE_INT val1 = INTVAL (trueop1);
2795 /* If we are turning off bits already known off in OP0, we need
2796 not do an AND. */
2797 if ((nzop0 & ~val1) == 0)
2798 return op0;
2799 }
2800 nzop1 = nonzero_bits (trueop1, mode);
dc5b3407 2801 /* If we are clearing all the nonzero bits, the result is zero. */
f5a17c43
BS
2802 if ((nzop1 & nzop0) == 0
2803 && !side_effects_p (op0) && !side_effects_p (op1))
dc5b3407
ZD
2804 return CONST0_RTX (mode);
2805 }
f5d1572a 2806 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
0a67e02c
PB
2807 && GET_MODE_CLASS (mode) != MODE_CC)
2808 return op0;
2809 /* A & (~A) -> 0 */
2810 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2811 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2812 && ! side_effects_p (op0)
2813 && GET_MODE_CLASS (mode) != MODE_CC)
3f2960d5 2814 return CONST0_RTX (mode);
0a67e02c
PB
2815
2816 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2817 there are no nonzero bits of C outside of X's mode. */
2818 if ((GET_CODE (op0) == SIGN_EXTEND
2819 || GET_CODE (op0) == ZERO_EXTEND)
481683e1 2820 && CONST_INT_P (trueop1)
46c9550f 2821 && HWI_COMPUTABLE_MODE_P (mode)
0a67e02c 2822 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
43c36287 2823 & UINTVAL (trueop1)) == 0)
0a67e02c 2824 {
ef4bddc2 2825 machine_mode imode = GET_MODE (XEXP (op0, 0));
0a67e02c
PB
2826 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2827 gen_int_mode (INTVAL (trueop1),
2828 imode));
2829 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2830 }
2831
fcaf7e12
AN
2832 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2833 we might be able to further simplify the AND with X and potentially
2834 remove the truncation altogether. */
2835 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2836 {
2837 rtx x = XEXP (op0, 0);
ef4bddc2 2838 machine_mode xmode = GET_MODE (x);
fcaf7e12
AN
2839 tem = simplify_gen_binary (AND, xmode, x,
2840 gen_int_mode (INTVAL (trueop1), xmode));
2841 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2842 }
2843
49e7a9d4
RS
2844 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2845 if (GET_CODE (op0) == IOR
481683e1
SZ
2846 && CONST_INT_P (trueop1)
2847 && CONST_INT_P (XEXP (op0, 1)))
49e7a9d4
RS
2848 {
2849 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2850 return simplify_gen_binary (IOR, mode,
2851 simplify_gen_binary (AND, mode,
2852 XEXP (op0, 0), op1),
2853 gen_int_mode (tmp, mode));
2854 }
2855
bd1ef757
PB
2856 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2857 insn (and may simplify more). */
2858 if (GET_CODE (op0) == XOR
2859 && rtx_equal_p (XEXP (op0, 0), op1)
2860 && ! side_effects_p (op1))
2861 return simplify_gen_binary (AND, mode,
2862 simplify_gen_unary (NOT, mode,
2863 XEXP (op0, 1), mode),
2864 op1);
2865
2866 if (GET_CODE (op0) == XOR
2867 && rtx_equal_p (XEXP (op0, 1), op1)
2868 && ! side_effects_p (op1))
2869 return simplify_gen_binary (AND, mode,
2870 simplify_gen_unary (NOT, mode,
2871 XEXP (op0, 0), mode),
2872 op1);
2873
2874 /* Similarly for (~(A ^ B)) & A. */
2875 if (GET_CODE (op0) == NOT
2876 && GET_CODE (XEXP (op0, 0)) == XOR
2877 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2878 && ! side_effects_p (op1))
2879 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2880
2881 if (GET_CODE (op0) == NOT
2882 && GET_CODE (XEXP (op0, 0)) == XOR
2883 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2884 && ! side_effects_p (op1))
2885 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2886
2887 /* Convert (A | B) & A to A. */
2888 if (GET_CODE (op0) == IOR
2889 && (rtx_equal_p (XEXP (op0, 0), op1)
2890 || rtx_equal_p (XEXP (op0, 1), op1))
2891 && ! side_effects_p (XEXP (op0, 0))
2892 && ! side_effects_p (XEXP (op0, 1)))
2893 return op1;
2894
0a67e02c
PB
2895 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2896 ((A & N) + B) & M -> (A + B) & M
2897 Similarly if (N & M) == 0,
2898 ((A | N) + B) & M -> (A + B) & M
dc5b3407
ZD
2899 and for - instead of + and/or ^ instead of |.
2900 Also, if (N & M) == 0, then
2901 (A +- N) & M -> A & M. */
481683e1 2902 if (CONST_INT_P (trueop1)
46c9550f 2903 && HWI_COMPUTABLE_MODE_P (mode)
43c36287
EB
2904 && ~UINTVAL (trueop1)
2905 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
0a67e02c
PB
2906 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2907 {
2908 rtx pmop[2];
2909 int which;
2910
2911 pmop[0] = XEXP (op0, 0);
2912 pmop[1] = XEXP (op0, 1);
2913
481683e1 2914 if (CONST_INT_P (pmop[1])
43c36287 2915 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
dc5b3407
ZD
2916 return simplify_gen_binary (AND, mode, pmop[0], op1);
2917
0a67e02c
PB
2918 for (which = 0; which < 2; which++)
2919 {
2920 tem = pmop[which];
2921 switch (GET_CODE (tem))
6355b2d5 2922 {
0a67e02c 2923 case AND:
481683e1 2924 if (CONST_INT_P (XEXP (tem, 1))
43c36287
EB
2925 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2926 == UINTVAL (trueop1))
0a67e02c 2927 pmop[which] = XEXP (tem, 0);
6355b2d5 2928 break;
0a67e02c
PB
2929 case IOR:
2930 case XOR:
481683e1 2931 if (CONST_INT_P (XEXP (tem, 1))
43c36287 2932 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
0a67e02c 2933 pmop[which] = XEXP (tem, 0);
6355b2d5 2934 break;
6355b2d5
JJ
2935 default:
2936 break;
2937 }
2938 }
2939
0a67e02c
PB
2940 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2941 {
2942 tem = simplify_gen_binary (GET_CODE (op0), mode,
2943 pmop[0], pmop[1]);
2944 return simplify_gen_binary (code, mode, tem, op1);
2945 }
2946 }
f79db4f6
AP
2947
2948 /* (and X (ior (not X) Y) -> (and X Y) */
2949 if (GET_CODE (op1) == IOR
2950 && GET_CODE (XEXP (op1, 0)) == NOT
31dd2a86 2951 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
f79db4f6
AP
2952 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2953
2954 /* (and (ior (not X) Y) X) -> (and X Y) */
2955 if (GET_CODE (op0) == IOR
2956 && GET_CODE (XEXP (op0, 0)) == NOT
31dd2a86 2957 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
f79db4f6
AP
2958 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2959
31dd2a86
SB
2960 /* (and X (ior Y (not X)) -> (and X Y) */
2961 if (GET_CODE (op1) == IOR
2962 && GET_CODE (XEXP (op1, 1)) == NOT
2963 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
2964 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
2965
2966 /* (and (ior Y (not X)) X) -> (and X Y) */
2967 if (GET_CODE (op0) == IOR
2968 && GET_CODE (XEXP (op0, 1)) == NOT
2969 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
2970 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
2971
b17c024f
EB
2972 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2973 if (tem)
2974 return tem;
2975
0a67e02c
PB
2976 tem = simplify_associative_operation (code, mode, op0, op1);
2977 if (tem)
2978 return tem;
2979 break;
762297d9 2980
0a67e02c
PB
2981 case UDIV:
2982 /* 0/x is 0 (or x&0 if x has side-effects). */
3f2960d5
RH
2983 if (trueop0 == CONST0_RTX (mode))
2984 {
2985 if (side_effects_p (op1))
2986 return simplify_gen_binary (AND, mode, op1, trueop0);
2987 return trueop0;
2988 }
2989 /* x/1 is x. */
2990 if (trueop1 == CONST1_RTX (mode))
76bd29f6
JJ
2991 {
2992 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2993 if (tem)
2994 return tem;
2995 }
3f2960d5 2996 /* Convert divide by power of two into shift. */
481683e1 2997 if (CONST_INT_P (trueop1)
43c36287 2998 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3f2960d5
RH
2999 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3000 break;
d284eb28 3001
0a67e02c
PB
3002 case DIV:
3003 /* Handle floating point and integers separately. */
3d8bf70f 3004 if (SCALAR_FLOAT_MODE_P (mode))
0a67e02c
PB
3005 {
3006 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3007 safe for modes with NaNs, since 0.0 / 0.0 will then be
3008 NaN rather than 0.0. Nor is it safe for modes with signed
3009 zeros, since dividing 0 by a negative number gives -0.0 */
3010 if (trueop0 == CONST0_RTX (mode)
3011 && !HONOR_NANS (mode)
3012 && !HONOR_SIGNED_ZEROS (mode)
3013 && ! side_effects_p (op1))
3014 return op0;
3015 /* x/1.0 is x. */
3016 if (trueop1 == CONST1_RTX (mode)
3017 && !HONOR_SNANS (mode))
3018 return op0;
0cedb36c 3019
48175537 3020 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
0a67e02c
PB
3021 && trueop1 != CONST0_RTX (mode))
3022 {
3023 REAL_VALUE_TYPE d;
3024 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
0cedb36c 3025
0a67e02c
PB
3026 /* x/-1.0 is -x. */
3027 if (REAL_VALUES_EQUAL (d, dconstm1)
3028 && !HONOR_SNANS (mode))
3029 return simplify_gen_unary (NEG, mode, op0, mode);
0cedb36c 3030
0a67e02c 3031 /* Change FP division by a constant into multiplication.
a1a82611
RE
3032 Only do this with -freciprocal-math. */
3033 if (flag_reciprocal_math
0a67e02c
PB
3034 && !REAL_VALUES_EQUAL (d, dconst0))
3035 {
3036 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3037 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3038 return simplify_gen_binary (MULT, mode, op0, tem);
3039 }
3040 }
3041 }
e46bf5d6 3042 else if (SCALAR_INT_MODE_P (mode))
0cedb36c 3043 {
0a67e02c 3044 /* 0/x is 0 (or x&0 if x has side-effects). */
0e1b8b10
ILT
3045 if (trueop0 == CONST0_RTX (mode)
3046 && !cfun->can_throw_non_call_exceptions)
3f2960d5
RH
3047 {
3048 if (side_effects_p (op1))
3049 return simplify_gen_binary (AND, mode, op1, trueop0);
3050 return trueop0;
3051 }
0a67e02c 3052 /* x/1 is x. */
3f2960d5 3053 if (trueop1 == CONST1_RTX (mode))
76bd29f6
JJ
3054 {
3055 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3056 if (tem)
3057 return tem;
3058 }
0a67e02c
PB
3059 /* x/-1 is -x. */
3060 if (trueop1 == constm1_rtx)
3061 {
9ce921ab 3062 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
76bd29f6
JJ
3063 if (x)
3064 return simplify_gen_unary (NEG, mode, x, mode);
0a67e02c
PB
3065 }
3066 }
3067 break;
0cedb36c 3068
0a67e02c
PB
3069 case UMOD:
3070 /* 0%x is 0 (or x&0 if x has side-effects). */
3f2960d5
RH
3071 if (trueop0 == CONST0_RTX (mode))
3072 {
3073 if (side_effects_p (op1))
3074 return simplify_gen_binary (AND, mode, op1, trueop0);
3075 return trueop0;
3076 }
3077 /* x%1 is 0 (of x&0 if x has side-effects). */
3078 if (trueop1 == CONST1_RTX (mode))
3079 {
3080 if (side_effects_p (op0))
3081 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3082 return CONST0_RTX (mode);
3083 }
3084 /* Implement modulus by power of two as AND. */
481683e1 3085 if (CONST_INT_P (trueop1)
43c36287 3086 && exact_log2 (UINTVAL (trueop1)) > 0)
3f2960d5 3087 return simplify_gen_binary (AND, mode, op0,
69a59f0f 3088 gen_int_mode (INTVAL (op1) - 1, mode));
3f2960d5 3089 break;
0cedb36c 3090
0a67e02c
PB
3091 case MOD:
3092 /* 0%x is 0 (or x&0 if x has side-effects). */
3f2960d5
RH
3093 if (trueop0 == CONST0_RTX (mode))
3094 {
3095 if (side_effects_p (op1))
3096 return simplify_gen_binary (AND, mode, op1, trueop0);
3097 return trueop0;
3098 }
3099 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3100 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3101 {
3102 if (side_effects_p (op0))
3103 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3104 return CONST0_RTX (mode);
3105 }
3106 break;
0cedb36c 3107
0a67e02c
PB
3108 case ROTATERT:
3109 case ROTATE:
75776c6d
JJ
3110 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3111 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3112 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3113 amount instead. */
4ed3092f 3114#if defined(HAVE_rotate) && defined(HAVE_rotatert)
75776c6d
JJ
3115 if (CONST_INT_P (trueop1)
3116 && IN_RANGE (INTVAL (trueop1),
50b6ee8b
DD
3117 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3118 GET_MODE_PRECISION (mode) - 1))
75776c6d 3119 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
50b6ee8b 3120 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
75776c6d 3121 - INTVAL (trueop1)));
4ed3092f 3122#endif
75776c6d 3123 /* FALLTHRU */
0a67e02c 3124 case ASHIFTRT:
70233f37
RS
3125 if (trueop1 == CONST0_RTX (mode))
3126 return op0;
3127 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3128 return op0;
0a67e02c 3129 /* Rotating ~0 always results in ~0. */
481683e1 3130 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
43c36287 3131 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
0a67e02c
PB
3132 && ! side_effects_p (op1))
3133 return op0;
96023bba 3134 /* Given:
05f9c675
JJ
3135 scalar modes M1, M2
3136 scalar constants c1, c2
3137 size (M2) > size (M1)
3138 c1 == size (M2) - size (M1)
3139 optimize:
3140 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3141 <low_part>)
3142 (const_int <c2>))
3143 to:
3144 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3145 <low_part>). */
3146 if (code == ASHIFTRT
3147 && !VECTOR_MODE_P (mode)
3148 && SUBREG_P (op0)
3149 && CONST_INT_P (op1)
3150 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3151 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3152 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3153 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3154 > GET_MODE_BITSIZE (mode))
3155 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3156 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3157 - GET_MODE_BITSIZE (mode)))
3158 && subreg_lowpart_p (op0))
3159 {
3160 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3161 + INTVAL (op1));
3162 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3163 tmp = simplify_gen_binary (ASHIFTRT,
3164 GET_MODE (SUBREG_REG (op0)),
3165 XEXP (SUBREG_REG (op0), 0),
3166 tmp);
3167 return simplify_gen_subreg (mode, tmp, inner_mode,
3168 subreg_lowpart_offset (mode,
3169 inner_mode));
3170 }
cbc9503d 3171 canonicalize_shift:
481683e1 3172 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
cbc9503d 3173 {
50b6ee8b 3174 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
cbc9503d
RS
3175 if (val != INTVAL (op1))
3176 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3177 }
70233f37 3178 break;
9d317251 3179
0a67e02c 3180 case ASHIFT:
e551ad26 3181 case SS_ASHIFT:
14c931f1 3182 case US_ASHIFT:
70233f37
RS
3183 if (trueop1 == CONST0_RTX (mode))
3184 return op0;
3185 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3186 return op0;
cbc9503d 3187 goto canonicalize_shift;
70233f37 3188
0a67e02c 3189 case LSHIFTRT:
3f2960d5 3190 if (trueop1 == CONST0_RTX (mode))
0a67e02c 3191 return op0;
3f2960d5 3192 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
0a67e02c 3193 return op0;
70233f37
RS
3194 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3195 if (GET_CODE (op0) == CLZ
481683e1 3196 && CONST_INT_P (trueop1)
70233f37 3197 && STORE_FLAG_VALUE == 1
e40122f0 3198 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
70233f37 3199 {
ef4bddc2 3200 machine_mode imode = GET_MODE (XEXP (op0, 0));
70233f37
RS
3201 unsigned HOST_WIDE_INT zero_val = 0;
3202
3203 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
5511bc5a 3204 && zero_val == GET_MODE_PRECISION (imode)
70233f37
RS
3205 && INTVAL (trueop1) == exact_log2 (zero_val))
3206 return simplify_gen_relational (EQ, mode, imode,
3207 XEXP (op0, 0), const0_rtx);
3208 }
cbc9503d 3209 goto canonicalize_shift;
9d317251 3210
0a67e02c
PB
3211 case SMIN:
3212 if (width <= HOST_BITS_PER_WIDE_INT
2d0c270f 3213 && mode_signbit_p (mode, trueop1)
0a67e02c
PB
3214 && ! side_effects_p (op0))
3215 return op1;
3216 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3217 return op0;
3218 tem = simplify_associative_operation (code, mode, op0, op1);
3219 if (tem)
3220 return tem;
3221 break;
0cedb36c 3222
0a67e02c
PB
3223 case SMAX:
3224 if (width <= HOST_BITS_PER_WIDE_INT
481683e1 3225 && CONST_INT_P (trueop1)
43c36287 3226 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
0a67e02c
PB
3227 && ! side_effects_p (op0))
3228 return op1;
3229 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3230 return op0;
3231 tem = simplify_associative_operation (code, mode, op0, op1);
3232 if (tem)
3233 return tem;
3234 break;
0cedb36c 3235
0a67e02c 3236 case UMIN:
3f2960d5 3237 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
0a67e02c
PB
3238 return op1;
3239 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3240 return op0;
3241 tem = simplify_associative_operation (code, mode, op0, op1);
3242 if (tem)
3243 return tem;
3244 break;
0cedb36c 3245
0a67e02c
PB
3246 case UMAX:
3247 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3248 return op1;
3249 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3250 return op0;
3251 tem = simplify_associative_operation (code, mode, op0, op1);
3252 if (tem)
3253 return tem;
3254 break;
0cedb36c 3255
0a67e02c
PB
3256 case SS_PLUS:
3257 case US_PLUS:
3258 case SS_MINUS:
3259 case US_MINUS:
14c931f1
CF
3260 case SS_MULT:
3261 case US_MULT:
3262 case SS_DIV:
3263 case US_DIV:
0a67e02c
PB
3264 /* ??? There are simplifications that can be done. */
3265 return 0;
0cedb36c 3266
0a67e02c
PB
3267 case VEC_SELECT:
3268 if (!VECTOR_MODE_P (mode))
3269 {
3270 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3271 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3272 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3273 gcc_assert (XVECLEN (trueop1, 0) == 1);
481683e1 3274 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
0a67e02c
PB
3275
3276 if (GET_CODE (trueop0) == CONST_VECTOR)
3277 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3278 (trueop1, 0, 0)));
7f97f938
UB
3279
3280 /* Extract a scalar element from a nested VEC_SELECT expression
3281 (with optional nested VEC_CONCAT expression). Some targets
3282 (i386) extract scalar element from a vector using chain of
3283 nested VEC_SELECT expressions. When input operand is a memory
3284 operand, this operation can be simplified to a simple scalar
3285 load from an offseted memory address. */
3286 if (GET_CODE (trueop0) == VEC_SELECT)
3287 {
3288 rtx op0 = XEXP (trueop0, 0);
3289 rtx op1 = XEXP (trueop0, 1);
3290
ef4bddc2 3291 machine_mode opmode = GET_MODE (op0);
7f97f938
UB
3292 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3293 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3294
3295 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3296 int elem;
3297
3298 rtvec vec;
3299 rtx tmp_op, tmp;
3300
3301 gcc_assert (GET_CODE (op1) == PARALLEL);
3302 gcc_assert (i < n_elts);
3303
3304 /* Select element, pointed by nested selector. */
3743c639 3305 elem = INTVAL (XVECEXP (op1, 0, i));
7f97f938
UB
3306
3307 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3308 if (GET_CODE (op0) == VEC_CONCAT)
3309 {
3310 rtx op00 = XEXP (op0, 0);
3311 rtx op01 = XEXP (op0, 1);
3312
ef4bddc2 3313 machine_mode mode00, mode01;
7f97f938
UB
3314 int n_elts00, n_elts01;
3315
3316 mode00 = GET_MODE (op00);
3317 mode01 = GET_MODE (op01);
3318
3319 /* Find out number of elements of each operand. */
3320 if (VECTOR_MODE_P (mode00))
3321 {
3322 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3323 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3324 }
3325 else
3326 n_elts00 = 1;
3327
3328 if (VECTOR_MODE_P (mode01))
3329 {
3330 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3331 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3332 }
3333 else
3334 n_elts01 = 1;
3335
3336 gcc_assert (n_elts == n_elts00 + n_elts01);
3337
3338 /* Select correct operand of VEC_CONCAT
3339 and adjust selector. */
3340 if (elem < n_elts01)
3341 tmp_op = op00;
3342 else
3343 {
3344 tmp_op = op01;
3345 elem -= n_elts00;
3346 }
3347 }
3348 else
3349 tmp_op = op0;
3350
3351 vec = rtvec_alloc (1);
3352 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3353
3354 tmp = gen_rtx_fmt_ee (code, mode,
3355 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3356 return tmp;
3357 }
0e159e0f
AP
3358 if (GET_CODE (trueop0) == VEC_DUPLICATE
3359 && GET_MODE (XEXP (trueop0, 0)) == mode)
3360 return XEXP (trueop0, 0);
0a67e02c
PB
3361 }
3362 else
3363 {
3364 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3365 gcc_assert (GET_MODE_INNER (mode)
3366 == GET_MODE_INNER (GET_MODE (trueop0)));
3367 gcc_assert (GET_CODE (trueop1) == PARALLEL);
0cedb36c 3368
0a67e02c
PB
3369 if (GET_CODE (trueop0) == CONST_VECTOR)
3370 {
3371 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3372 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3373 rtvec v = rtvec_alloc (n_elts);
3374 unsigned int i;
0cedb36c 3375
0a67e02c
PB
3376 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3377 for (i = 0; i < n_elts; i++)
3378 {
3379 rtx x = XVECEXP (trueop1, 0, i);
0cedb36c 3380
481683e1 3381 gcc_assert (CONST_INT_P (x));
0a67e02c
PB
3382 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3383 INTVAL (x));
0cedb36c
JL
3384 }
3385
0a67e02c 3386 return gen_rtx_CONST_VECTOR (mode, v);
dd61aa98 3387 }
66c540d2 3388
5f6e1c55
MG
3389 /* Recognize the identity. */
3390 if (GET_MODE (trueop0) == mode)
3391 {
3392 bool maybe_ident = true;
3393 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3394 {
3395 rtx j = XVECEXP (trueop1, 0, i);
3396 if (!CONST_INT_P (j) || INTVAL (j) != i)
3397 {
3398 maybe_ident = false;
3399 break;
3400 }
3401 }
3402 if (maybe_ident)
3403 return trueop0;
3404 }
3405
66c540d2
MG
3406 /* If we build {a,b} then permute it, build the result directly. */
3407 if (XVECLEN (trueop1, 0) == 2
3408 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3409 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3410 && GET_CODE (trueop0) == VEC_CONCAT
3411 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3412 && GET_MODE (XEXP (trueop0, 0)) == mode
3413 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3414 && GET_MODE (XEXP (trueop0, 1)) == mode)
3415 {
3416 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3417 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3418 rtx subop0, subop1;
3419
3420 gcc_assert (i0 < 4 && i1 < 4);
3421 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3422 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3423
fd9da2c8
MG
3424 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3425 }
3426
3427 if (XVECLEN (trueop1, 0) == 2
3428 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3429 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3430 && GET_CODE (trueop0) == VEC_CONCAT
3431 && GET_MODE (trueop0) == mode)
3432 {
3433 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3434 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3435 rtx subop0, subop1;
3436
3437 gcc_assert (i0 < 2 && i1 < 2);
3438 subop0 = XEXP (trueop0, i0);
3439 subop1 = XEXP (trueop0, i1);
3440
66c540d2
MG
3441 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3442 }
82e3a719
MG
3443
3444 /* If we select one half of a vec_concat, return that. */
3445 if (GET_CODE (trueop0) == VEC_CONCAT
3446 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3447 {
3448 rtx subop0 = XEXP (trueop0, 0);
3449 rtx subop1 = XEXP (trueop0, 1);
ef4bddc2
RS
3450 machine_mode mode0 = GET_MODE (subop0);
3451 machine_mode mode1 = GET_MODE (subop1);
82e3a719
MG
3452 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3453 int l0 = GET_MODE_SIZE (mode0) / li;
3454 int l1 = GET_MODE_SIZE (mode1) / li;
3455 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3456 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3457 {
3458 bool success = true;
3459 for (int i = 1; i < l0; ++i)
3460 {
3461 rtx j = XVECEXP (trueop1, 0, i);
3462 if (!CONST_INT_P (j) || INTVAL (j) != i)
3463 {
3464 success = false;
3465 break;
3466 }
3467 }
3468 if (success)
3469 return subop0;
3470 }
3471 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3472 {
3473 bool success = true;
3474 for (int i = 1; i < l1; ++i)
3475 {
3476 rtx j = XVECEXP (trueop1, 0, i);
3477 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3478 {
3479 success = false;
3480 break;
3481 }
3482 }
3483 if (success)
3484 return subop1;
3485 }
3486 }
0a67e02c 3487 }
bd1ef757
PB
3488
3489 if (XVECLEN (trueop1, 0) == 1
481683e1 3490 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
bd1ef757
PB
3491 && GET_CODE (trueop0) == VEC_CONCAT)
3492 {
3493 rtx vec = trueop0;
3494 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3495
3496 /* Try to find the element in the VEC_CONCAT. */
3497 while (GET_MODE (vec) != mode
3498 && GET_CODE (vec) == VEC_CONCAT)
3499 {
3500 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3501 if (offset < vec_size)
3502 vec = XEXP (vec, 0);
3503 else
3504 {
3505 offset -= vec_size;
3506 vec = XEXP (vec, 1);
3507 }
3508 vec = avoid_constant_pool_reference (vec);
3509 }
3510
3511 if (GET_MODE (vec) == mode)
3512 return vec;
3513 }
3514
da694a77
MG
3515 /* If we select elements in a vec_merge that all come from the same
3516 operand, select from that operand directly. */
3517 if (GET_CODE (op0) == VEC_MERGE)
3518 {
3519 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3520 if (CONST_INT_P (trueop02))
3521 {
3522 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3523 bool all_operand0 = true;
3524 bool all_operand1 = true;
3525 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3526 {
3527 rtx j = XVECEXP (trueop1, 0, i);
3528 if (sel & (1 << UINTVAL (j)))
3529 all_operand1 = false;
3530 else
3531 all_operand0 = false;
3532 }
3533 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3534 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3535 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3536 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3537 }
3538 }
3539
ec217bd8
BS
3540 /* If we have two nested selects that are inverses of each
3541 other, replace them with the source operand. */
3542 if (GET_CODE (trueop0) == VEC_SELECT
3543 && GET_MODE (XEXP (trueop0, 0)) == mode)
3544 {
3545 rtx op0_subop1 = XEXP (trueop0, 1);
3546 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3547 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3548
3549 /* Apply the outer ordering vector to the inner one. (The inner
3550 ordering vector is expressly permitted to be of a different
3551 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3552 then the two VEC_SELECTs cancel. */
3553 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3554 {
3555 rtx x = XVECEXP (trueop1, 0, i);
3556 if (!CONST_INT_P (x))
3557 return 0;
3558 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3559 if (!CONST_INT_P (y) || i != INTVAL (y))
3560 return 0;
3561 }
3562 return XEXP (trueop0, 0);
3563 }
3564
0a67e02c
PB
3565 return 0;
3566 case VEC_CONCAT:
3567 {
ef4bddc2 3568 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
0a67e02c
PB
3569 ? GET_MODE (trueop0)
3570 : GET_MODE_INNER (mode));
ef4bddc2 3571 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
0a67e02c
PB
3572 ? GET_MODE (trueop1)
3573 : GET_MODE_INNER (mode));
3574
3575 gcc_assert (VECTOR_MODE_P (mode));
3576 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3577 == GET_MODE_SIZE (mode));
3578
3579 if (VECTOR_MODE_P (op0_mode))
3580 gcc_assert (GET_MODE_INNER (mode)
3581 == GET_MODE_INNER (op0_mode));
3582 else
3583 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
0cedb36c 3584
0a67e02c
PB
3585 if (VECTOR_MODE_P (op1_mode))
3586 gcc_assert (GET_MODE_INNER (mode)
3587 == GET_MODE_INNER (op1_mode));
3588 else
3589 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3590
3591 if ((GET_CODE (trueop0) == CONST_VECTOR
33ffb5c5
KZ
3592 || CONST_SCALAR_INT_P (trueop0)
3593 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
0a67e02c 3594 && (GET_CODE (trueop1) == CONST_VECTOR
33ffb5c5
KZ
3595 || CONST_SCALAR_INT_P (trueop1)
3596 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
0a67e02c
PB
3597 {
3598 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3599 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3600 rtvec v = rtvec_alloc (n_elts);
3601 unsigned int i;
3602 unsigned in_n_elts = 1;
c877353c 3603
0a67e02c
PB
3604 if (VECTOR_MODE_P (op0_mode))
3605 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3606 for (i = 0; i < n_elts; i++)
3607 {
3608 if (i < in_n_elts)
3609 {
3610 if (!VECTOR_MODE_P (op0_mode))
3611 RTVEC_ELT (v, i) = trueop0;
3612 else
3613 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3614 }
3615 else
3616 {
3617 if (!VECTOR_MODE_P (op1_mode))
3618 RTVEC_ELT (v, i) = trueop1;
3619 else
3620 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3621 i - in_n_elts);
3622 }
3623 }
0cedb36c 3624
0a67e02c
PB
3625 return gen_rtx_CONST_VECTOR (mode, v);
3626 }
5f6e1c55 3627
d08633b4
MG
3628 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3629 Restrict the transformation to avoid generating a VEC_SELECT with a
3630 mode unrelated to its operand. */
5f6e1c55
MG
3631 if (GET_CODE (trueop0) == VEC_SELECT
3632 && GET_CODE (trueop1) == VEC_SELECT
d08633b4
MG
3633 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3634 && GET_MODE (XEXP (trueop0, 0)) == mode)
5f6e1c55
MG
3635 {
3636 rtx par0 = XEXP (trueop0, 1);
3637 rtx par1 = XEXP (trueop1, 1);
3638 int len0 = XVECLEN (par0, 0);
3639 int len1 = XVECLEN (par1, 0);
3640 rtvec vec = rtvec_alloc (len0 + len1);
3641 for (int i = 0; i < len0; i++)
3642 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3643 for (int i = 0; i < len1; i++)
3644 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3645 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3646 gen_rtx_PARALLEL (VOIDmode, vec));
3647 }
0a67e02c
PB
3648 }
3649 return 0;
0cedb36c 3650
0a67e02c
PB
3651 default:
3652 gcc_unreachable ();
3653 }
0cedb36c 3654
0a67e02c
PB
3655 return 0;
3656}
0cedb36c 3657
0a67e02c 3658rtx
ef4bddc2 3659simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
0a67e02c
PB
3660 rtx op0, rtx op1)
3661{
5511bc5a 3662 unsigned int width = GET_MODE_PRECISION (mode);
0cedb36c 3663
0a67e02c
PB
3664 if (VECTOR_MODE_P (mode)
3665 && code != VEC_CONCAT
3666 && GET_CODE (op0) == CONST_VECTOR
3667 && GET_CODE (op1) == CONST_VECTOR)
3668 {
3669 unsigned n_elts = GET_MODE_NUNITS (mode);
ef4bddc2 3670 machine_mode op0mode = GET_MODE (op0);
0a67e02c 3671 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
ef4bddc2 3672 machine_mode op1mode = GET_MODE (op1);
0a67e02c
PB
3673 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3674 rtvec v = rtvec_alloc (n_elts);
3675 unsigned int i;
0cedb36c 3676
0a67e02c
PB
3677 gcc_assert (op0_n_elts == n_elts);
3678 gcc_assert (op1_n_elts == n_elts);
3679 for (i = 0; i < n_elts; i++)
3680 {
3681 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3682 CONST_VECTOR_ELT (op0, i),
3683 CONST_VECTOR_ELT (op1, i));
3684 if (!x)
3685 return 0;
3686 RTVEC_ELT (v, i) = x;
3687 }
0cedb36c 3688
0a67e02c
PB
3689 return gen_rtx_CONST_VECTOR (mode, v);
3690 }
0cedb36c 3691
0a67e02c
PB
3692 if (VECTOR_MODE_P (mode)
3693 && code == VEC_CONCAT
33ffb5c5 3694 && (CONST_SCALAR_INT_P (op0)
48175537 3695 || GET_CODE (op0) == CONST_FIXED
33ffb5c5
KZ
3696 || CONST_DOUBLE_AS_FLOAT_P (op0))
3697 && (CONST_SCALAR_INT_P (op1)
3698 || CONST_DOUBLE_AS_FLOAT_P (op1)
d1f0728e 3699 || GET_CODE (op1) == CONST_FIXED))
0a67e02c
PB
3700 {
3701 unsigned n_elts = GET_MODE_NUNITS (mode);
3702 rtvec v = rtvec_alloc (n_elts);
0cedb36c 3703
0a67e02c
PB
3704 gcc_assert (n_elts >= 2);
3705 if (n_elts == 2)
3706 {
3707 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3708 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
dd61aa98 3709
0a67e02c
PB
3710 RTVEC_ELT (v, 0) = op0;
3711 RTVEC_ELT (v, 1) = op1;
3712 }
3713 else
3714 {
3715 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3716 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3717 unsigned i;
0cedb36c 3718
0a67e02c
PB
3719 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3720 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3721 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
0cedb36c 3722
0a67e02c
PB
3723 for (i = 0; i < op0_n_elts; ++i)
3724 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3725 for (i = 0; i < op1_n_elts; ++i)
3726 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3727 }
0b24db88 3728
0a67e02c
PB
3729 return gen_rtx_CONST_VECTOR (mode, v);
3730 }
0cedb36c 3731
3d8bf70f 3732 if (SCALAR_FLOAT_MODE_P (mode)
48175537
KZ
3733 && CONST_DOUBLE_AS_FLOAT_P (op0)
3734 && CONST_DOUBLE_AS_FLOAT_P (op1)
0a67e02c
PB
3735 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3736 {
3737 if (code == AND
3738 || code == IOR
3739 || code == XOR)
3740 {
3741 long tmp0[4];
3742 long tmp1[4];
3743 REAL_VALUE_TYPE r;
3744 int i;
a0ee8b5f 3745
0a67e02c
PB
3746 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3747 GET_MODE (op0));
3748 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3749 GET_MODE (op1));
3750 for (i = 0; i < 4; i++)
a0ee8b5f 3751 {
0a67e02c
PB
3752 switch (code)
3753 {
3754 case AND:
3755 tmp0[i] &= tmp1[i];
3756 break;
3757 case IOR:
3758 tmp0[i] |= tmp1[i];
3759 break;
3760 case XOR:
3761 tmp0[i] ^= tmp1[i];
3762 break;
3763 default:
3764 gcc_unreachable ();
3765 }
a0ee8b5f 3766 }
0a67e02c
PB
3767 real_from_target (&r, tmp0, mode);
3768 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3769 }
3770 else
3771 {
3772 REAL_VALUE_TYPE f0, f1, value, result;
3773 bool inexact;
a0ee8b5f 3774
0a67e02c
PB
3775 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3776 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3777 real_convert (&f0, mode, &f0);
3778 real_convert (&f1, mode, &f1);
df62f18a 3779
0a67e02c
PB
3780 if (HONOR_SNANS (mode)
3781 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3782 return 0;
0cedb36c 3783
0a67e02c
PB
3784 if (code == DIV
3785 && REAL_VALUES_EQUAL (f1, dconst0)
3786 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3787 return 0;
0cedb36c 3788
0a67e02c
PB
3789 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3790 && flag_trapping_math
3791 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
0cedb36c 3792 {
0a67e02c
PB
3793 int s0 = REAL_VALUE_NEGATIVE (f0);
3794 int s1 = REAL_VALUE_NEGATIVE (f1);
0cedb36c 3795
0a67e02c 3796 switch (code)
1e9b78b0 3797 {
0a67e02c
PB
3798 case PLUS:
3799 /* Inf + -Inf = NaN plus exception. */
3800 if (s0 != s1)
3801 return 0;
3802 break;
3803 case MINUS:
3804 /* Inf - Inf = NaN plus exception. */
3805 if (s0 == s1)
3806 return 0;
3807 break;
3808 case DIV:
3809 /* Inf / Inf = NaN plus exception. */
3810 return 0;
3811 default:
3812 break;
0cedb36c
JL
3813 }
3814 }
0cedb36c 3815
0a67e02c
PB
3816 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3817 && flag_trapping_math
3818 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3819 || (REAL_VALUE_ISINF (f1)
3820 && REAL_VALUES_EQUAL (f0, dconst0))))
3821 /* Inf * 0 = NaN plus exception. */
3822 return 0;
852c8ba1 3823
0a67e02c
PB
3824 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3825 &f0, &f1);
3826 real_convert (&result, mode, &value);
41374e13 3827
68328cda
EB
3828 /* Don't constant fold this floating point operation if
3829 the result has overflowed and flag_trapping_math. */
3830
3831 if (flag_trapping_math
3832 && MODE_HAS_INFINITIES (mode)
3833 && REAL_VALUE_ISINF (result)
3834 && !REAL_VALUE_ISINF (f0)
3835 && !REAL_VALUE_ISINF (f1))
3836 /* Overflow plus exception. */
3837 return 0;
3838
0a67e02c
PB
3839 /* Don't constant fold this floating point operation if the
3840 result may dependent upon the run-time rounding mode and
3841 flag_rounding_math is set, or if GCC's software emulation
3842 is unable to accurately represent the result. */
852c8ba1 3843
0a67e02c 3844 if ((flag_rounding_math
4099e2c2 3845 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
0a67e02c
PB
3846 && (inexact || !real_identical (&result, &value)))
3847 return NULL_RTX;
d9deed68 3848
0a67e02c 3849 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
0cedb36c 3850 }
0cedb36c
JL
3851 }
3852
0a67e02c 3853 /* We can fold some multi-word operations. */
50b6ee8b
DD
3854 if ((GET_MODE_CLASS (mode) == MODE_INT
3855 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
807e902e
KZ
3856 && CONST_SCALAR_INT_P (op0)
3857 && CONST_SCALAR_INT_P (op1))
0a67e02c 3858 {
807e902e 3859 wide_int result;
9be0ac8c 3860 bool overflow;
807e902e
KZ
3861 rtx_mode_t pop0 = std::make_pair (op0, mode);
3862 rtx_mode_t pop1 = std::make_pair (op1, mode);
3863
3864#if TARGET_SUPPORTS_WIDE_INT == 0
3865 /* This assert keeps the simplification from producing a result
3866 that cannot be represented in a CONST_DOUBLE but a lot of
3867 upstream callers expect that this function never fails to
3868 simplify something and so you if you added this to the test
3869 above the code would die later anyway. If this assert
3870 happens, you just need to make the port support wide int. */
3871 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3872#endif
0a67e02c
PB
3873 switch (code)
3874 {
3875 case MINUS:
807e902e
KZ
3876 result = wi::sub (pop0, pop1);
3877 break;
0cedb36c 3878
0a67e02c 3879 case PLUS:
807e902e 3880 result = wi::add (pop0, pop1);
0a67e02c 3881 break;
0cedb36c 3882
0a67e02c 3883 case MULT:
807e902e 3884 result = wi::mul (pop0, pop1);
0a67e02c 3885 break;
0cedb36c 3886
0a67e02c 3887 case DIV:
807e902e 3888 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
9be0ac8c 3889 if (overflow)
807e902e 3890 return NULL_RTX;
0a67e02c 3891 break;
0cedb36c 3892
0a67e02c 3893 case MOD:
807e902e 3894 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
9be0ac8c 3895 if (overflow)
807e902e 3896 return NULL_RTX;
0a67e02c 3897 break;
0cedb36c 3898
0a67e02c 3899 case UDIV:
807e902e 3900 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
9be0ac8c 3901 if (overflow)
807e902e 3902 return NULL_RTX;
0a67e02c 3903 break;
0cedb36c 3904
0a67e02c 3905 case UMOD:
807e902e 3906 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
9be0ac8c 3907 if (overflow)
807e902e 3908 return NULL_RTX;
0a67e02c 3909 break;
0cedb36c 3910
0a67e02c 3911 case AND:
807e902e 3912 result = wi::bit_and (pop0, pop1);
0a67e02c 3913 break;
0cedb36c 3914
0a67e02c 3915 case IOR:
807e902e 3916 result = wi::bit_or (pop0, pop1);
0a67e02c 3917 break;
0cedb36c 3918
0a67e02c 3919 case XOR:
807e902e 3920 result = wi::bit_xor (pop0, pop1);
0a67e02c 3921 break;
0cedb36c 3922
0a67e02c 3923 case SMIN:
807e902e 3924 result = wi::smin (pop0, pop1);
0a67e02c 3925 break;
0cedb36c 3926
0a67e02c 3927 case SMAX:
807e902e 3928 result = wi::smax (pop0, pop1);
0a67e02c 3929 break;
0cedb36c 3930
0a67e02c 3931 case UMIN:
807e902e 3932 result = wi::umin (pop0, pop1);
0a67e02c 3933 break;
0cedb36c 3934
0a67e02c 3935 case UMAX:
807e902e 3936 result = wi::umax (pop0, pop1);
0a67e02c 3937 break;
0cedb36c 3938
807e902e
KZ
3939 case LSHIFTRT:
3940 case ASHIFTRT:
0a67e02c 3941 case ASHIFT:
fd7de64c 3942 {
807e902e 3943 wide_int wop1 = pop1;
fd7de64c 3944 if (SHIFT_COUNT_TRUNCATED)
807e902e
KZ
3945 wop1 = wi::umod_trunc (wop1, width);
3946 else if (wi::geu_p (wop1, width))
3947 return NULL_RTX;
b8698a0f 3948
807e902e
KZ
3949 switch (code)
3950 {
3951 case LSHIFTRT:
3952 result = wi::lrshift (pop0, wop1);
3953 break;
b8698a0f 3954
807e902e
KZ
3955 case ASHIFTRT:
3956 result = wi::arshift (pop0, wop1);
3957 break;
b8698a0f 3958
807e902e
KZ
3959 case ASHIFT:
3960 result = wi::lshift (pop0, wop1);
3961 break;
b8698a0f 3962
807e902e
KZ
3963 default:
3964 gcc_unreachable ();
3965 }
3966 break;
3967 }
0a67e02c 3968 case ROTATE:
807e902e
KZ
3969 case ROTATERT:
3970 {
3971 if (wi::neg_p (pop1))
3972 return NULL_RTX;
b8698a0f 3973
807e902e
KZ
3974 switch (code)
3975 {
3976 case ROTATE:
3977 result = wi::lrotate (pop0, pop1);
3978 break;
b8698a0f 3979
807e902e
KZ
3980 case ROTATERT:
3981 result = wi::rrotate (pop0, pop1);
3982 break;
b8698a0f 3983
807e902e
KZ
3984 default:
3985 gcc_unreachable ();
3986 }
3987 break;
3988 }
0a67e02c 3989 default:
807e902e 3990 return NULL_RTX;
0a67e02c 3991 }
807e902e 3992 return immed_wide_int_const (result, mode);
0a67e02c 3993 }
0cedb36c 3994
0a67e02c 3995 return NULL_RTX;
0cedb36c 3996}
0a67e02c
PB
3997
3998
0cedb36c
JL
3999\f
4000/* Simplify a PLUS or MINUS, at least one of whose operands may be another
4001 PLUS or MINUS.
4002
4003 Rather than test for specific case, we do this by a brute-force method
4004 and do all possible simplifications until no more changes occur. Then
1941069a 4005 we rebuild the operation. */
0cedb36c 4006
9b3bd424
RH
4007struct simplify_plus_minus_op_data
4008{
4009 rtx op;
f805670f 4010 short neg;
9b3bd424
RH
4011};
4012
7e0b4eae
PB
4013static bool
4014simplify_plus_minus_op_data_cmp (rtx x, rtx y)
9b3bd424 4015{
f805670f 4016 int result;
9b3bd424 4017
7e0b4eae
PB
4018 result = (commutative_operand_precedence (y)
4019 - commutative_operand_precedence (x));
f805670f 4020 if (result)
7e0b4eae 4021 return result > 0;
d26cef13
PB
4022
4023 /* Group together equal REGs to do more simplification. */
7e0b4eae
PB
4024 if (REG_P (x) && REG_P (y))
4025 return REGNO (x) > REGNO (y);
d26cef13 4026 else
7e0b4eae 4027 return false;
9b3bd424
RH
4028}
4029
0cedb36c 4030static rtx
ef4bddc2 4031simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
1941069a 4032 rtx op1)
0cedb36c 4033{
c11c8664 4034 struct simplify_plus_minus_op_data ops[16];
0cedb36c 4035 rtx result, tem;
c11c8664
AM
4036 int n_ops = 2;
4037 int changed, n_constants, canonicalized = 0;
0cedb36c
JL
4038 int i, j;
4039
703ad42b 4040 memset (ops, 0, sizeof ops);
786de7eb 4041
0cedb36c
JL
4042 /* Set up the two operands and then expand them until nothing has been
4043 changed. If we run out of room in our array, give up; this should
4044 almost never happen. */
4045
9b3bd424
RH
4046 ops[0].op = op0;
4047 ops[0].neg = 0;
4048 ops[1].op = op1;
4049 ops[1].neg = (code == MINUS);
0cedb36c 4050
9b3bd424 4051 do
0cedb36c
JL
4052 {
4053 changed = 0;
c11c8664 4054 n_constants = 0;
0cedb36c
JL
4055
4056 for (i = 0; i < n_ops; i++)
9b3bd424
RH
4057 {
4058 rtx this_op = ops[i].op;
4059 int this_neg = ops[i].neg;
4060 enum rtx_code this_code = GET_CODE (this_op);
0cedb36c 4061
9b3bd424
RH
4062 switch (this_code)
4063 {
4064 case PLUS:
4065 case MINUS:
c11c8664 4066 if (n_ops == ARRAY_SIZE (ops))
e16e3291 4067 return NULL_RTX;
0cedb36c 4068
9b3bd424
RH
4069 ops[n_ops].op = XEXP (this_op, 1);
4070 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4071 n_ops++;
4072
4073 ops[i].op = XEXP (this_op, 0);
9b3bd424 4074 changed = 1;
f27dfe87 4075 canonicalized |= this_neg || i != n_ops - 2;
9b3bd424
RH
4076 break;
4077
4078 case NEG:
4079 ops[i].op = XEXP (this_op, 0);
4080 ops[i].neg = ! this_neg;
4081 changed = 1;
1941069a 4082 canonicalized = 1;
9b3bd424
RH
4083 break;
4084
4085 case CONST:
c11c8664 4086 if (n_ops != ARRAY_SIZE (ops)
e3c8ea67
RH
4087 && GET_CODE (XEXP (this_op, 0)) == PLUS
4088 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4089 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4090 {
4091 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4092 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4093 ops[n_ops].neg = this_neg;
4094 n_ops++;
e3c8ea67 4095 changed = 1;
1941069a 4096 canonicalized = 1;
e3c8ea67 4097 }
9b3bd424
RH
4098 break;
4099
4100 case NOT:
4101 /* ~a -> (-a - 1) */
c11c8664 4102 if (n_ops != ARRAY_SIZE (ops))
9b3bd424 4103 {
ea72cc1d 4104 ops[n_ops].op = CONSTM1_RTX (mode);
2e951384 4105 ops[n_ops++].neg = this_neg;
9b3bd424
RH
4106 ops[i].op = XEXP (this_op, 0);
4107 ops[i].neg = !this_neg;
4108 changed = 1;
1941069a 4109 canonicalized = 1;
9b3bd424
RH
4110 }
4111 break;
0cedb36c 4112
9b3bd424 4113 case CONST_INT:
d26cef13 4114 n_constants++;
9b3bd424
RH
4115 if (this_neg)
4116 {
aff8a8d5 4117 ops[i].op = neg_const_int (mode, this_op);
9b3bd424
RH
4118 ops[i].neg = 0;
4119 changed = 1;
1941069a 4120 canonicalized = 1;
9b3bd424
RH
4121 }
4122 break;
0cedb36c 4123
9b3bd424
RH
4124 default:
4125 break;
4126 }
4127 }
0cedb36c 4128 }
9b3bd424 4129 while (changed);
0cedb36c 4130
d26cef13
PB
4131 if (n_constants > 1)
4132 canonicalized = 1;
36686ad6 4133
d26cef13 4134 gcc_assert (n_ops >= 2);
0cedb36c 4135
1941069a
PB
4136 /* If we only have two operands, we can avoid the loops. */
4137 if (n_ops == 2)
4138 {
4139 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4140 rtx lhs, rhs;
4141
4142 /* Get the two operands. Be careful with the order, especially for
4143 the cases where code == MINUS. */
4144 if (ops[0].neg && ops[1].neg)
4145 {
4146 lhs = gen_rtx_NEG (mode, ops[0].op);
4147 rhs = ops[1].op;
4148 }
4149 else if (ops[0].neg)
4150 {
4151 lhs = ops[1].op;
4152 rhs = ops[0].op;
4153 }
4154 else
4155 {
4156 lhs = ops[0].op;
4157 rhs = ops[1].op;
4158 }
4159
4160 return simplify_const_binary_operation (code, mode, lhs, rhs);
4161 }
4162
d26cef13 4163 /* Now simplify each pair of operands until nothing changes. */
9b3bd424 4164 do
0cedb36c 4165 {
c11c8664 4166 /* Insertion sort is good enough for a small array. */
d26cef13
PB
4167 for (i = 1; i < n_ops; i++)
4168 {
4169 struct simplify_plus_minus_op_data save;
4170 j = i - 1;
7e0b4eae 4171 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
d26cef13
PB
4172 continue;
4173
4174 canonicalized = 1;
4175 save = ops[i];
4176 do
4177 ops[j + 1] = ops[j];
7e0b4eae 4178 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
d26cef13
PB
4179 ops[j + 1] = save;
4180 }
0cedb36c 4181
d26cef13
PB
4182 changed = 0;
4183 for (i = n_ops - 1; i > 0; i--)
4184 for (j = i - 1; j >= 0; j--)
9b3bd424 4185 {
d26cef13
PB
4186 rtx lhs = ops[j].op, rhs = ops[i].op;
4187 int lneg = ops[j].neg, rneg = ops[i].neg;
0cedb36c 4188
d26cef13 4189 if (lhs != 0 && rhs != 0)
9b3bd424
RH
4190 {
4191 enum rtx_code ncode = PLUS;
4192
4193 if (lneg != rneg)
4194 {
4195 ncode = MINUS;
4196 if (lneg)
4197 tem = lhs, lhs = rhs, rhs = tem;
4198 }
4199 else if (swap_commutative_operands_p (lhs, rhs))
4200 tem = lhs, lhs = rhs, rhs = tem;
4201
481683e1
SZ
4202 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4203 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
349f4ea1
AK
4204 {
4205 rtx tem_lhs, tem_rhs;
4206
4207 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4208 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4209 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
9b3bd424 4210
349f4ea1
AK
4211 if (tem && !CONSTANT_P (tem))
4212 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4213 }
4214 else
4215 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
b8698a0f 4216
c11c8664 4217 if (tem)
9b3bd424 4218 {
c11c8664
AM
4219 /* Reject "simplifications" that just wrap the two
4220 arguments in a CONST. Failure to do so can result
4221 in infinite recursion with simplify_binary_operation
4222 when it calls us to simplify CONST operations.
4223 Also, if we find such a simplification, don't try
4224 any more combinations with this rhs: We must have
4225 something like symbol+offset, ie. one of the
4226 trivial CONST expressions we handle later. */
4227 if (GET_CODE (tem) == CONST
4228 && GET_CODE (XEXP (tem, 0)) == ncode
4229 && XEXP (XEXP (tem, 0), 0) == lhs
4230 && XEXP (XEXP (tem, 0), 1) == rhs)
4231 break;
9b3bd424
RH
4232 lneg &= rneg;
4233 if (GET_CODE (tem) == NEG)
4234 tem = XEXP (tem, 0), lneg = !lneg;
481683e1 4235 if (CONST_INT_P (tem) && lneg)
aff8a8d5 4236 tem = neg_const_int (mode, tem), lneg = 0;
9b3bd424
RH
4237
4238 ops[i].op = tem;
4239 ops[i].neg = lneg;
4240 ops[j].op = NULL_RTX;
4241 changed = 1;
dc5b3407 4242 canonicalized = 1;
9b3bd424
RH
4243 }
4244 }
4245 }
0cedb36c 4246
dc5b3407
ZD
4247 /* If nothing changed, fail. */
4248 if (!canonicalized)
4249 return NULL_RTX;
4250
d26cef13
PB
4251 /* Pack all the operands to the lower-numbered entries. */
4252 for (i = 0, j = 0; j < n_ops; j++)
4253 if (ops[j].op)
4254 {
4255 ops[i] = ops[j];
4256 i++;
4257 }
4258 n_ops = i;
0cedb36c 4259 }
9b3bd424 4260 while (changed);
0cedb36c 4261
c877353c
RS
4262 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4263 if (n_ops == 2
481683e1 4264 && CONST_INT_P (ops[1].op)
c877353c
RS
4265 && CONSTANT_P (ops[0].op)
4266 && ops[0].neg)
4267 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
b8698a0f 4268
9b3bd424
RH
4269 /* We suppressed creation of trivial CONST expressions in the
4270 combination loop to avoid recursion. Create one manually now.
4271 The combination loop should have ensured that there is exactly
4272 one CONST_INT, and the sort will have ensured that it is last
4273 in the array and that any other constant will be next-to-last. */
0cedb36c 4274
9b3bd424 4275 if (n_ops > 1
481683e1 4276 && CONST_INT_P (ops[n_ops - 1].op)
9b3bd424
RH
4277 && CONSTANT_P (ops[n_ops - 2].op))
4278 {
aff8a8d5 4279 rtx value = ops[n_ops - 1].op;
4768dbdd 4280 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
aff8a8d5 4281 value = neg_const_int (mode, value);
0a81f074
RS
4282 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4283 INTVAL (value));
9b3bd424
RH
4284 n_ops--;
4285 }
4286
0786ca87 4287 /* Put a non-negated operand first, if possible. */
0cedb36c 4288
9b3bd424
RH
4289 for (i = 0; i < n_ops && ops[i].neg; i++)
4290 continue;
0cedb36c 4291 if (i == n_ops)
0786ca87 4292 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
0cedb36c
JL
4293 else if (i != 0)
4294 {
9b3bd424
RH
4295 tem = ops[0].op;
4296 ops[0] = ops[i];
4297 ops[i].op = tem;
4298 ops[i].neg = 1;
0cedb36c
JL
4299 }
4300
4301 /* Now make the result by performing the requested operations. */
9b3bd424 4302 result = ops[0].op;
0cedb36c 4303 for (i = 1; i < n_ops; i++)
9b3bd424
RH
4304 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4305 mode, result, ops[i].op);
0cedb36c 4306
0786ca87 4307 return result;
0cedb36c
JL
4308}
4309
5ac20c1a
RS
4310/* Check whether an operand is suitable for calling simplify_plus_minus. */
4311static bool
f7d504c2 4312plus_minus_operand_p (const_rtx x)
5ac20c1a
RS
4313{
4314 return GET_CODE (x) == PLUS
4315 || GET_CODE (x) == MINUS
4316 || (GET_CODE (x) == CONST
4317 && GET_CODE (XEXP (x, 0)) == PLUS
4318 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4319 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4320}
4321
0cedb36c 4322/* Like simplify_binary_operation except used for relational operators.
c6fb08ad 4323 MODE is the mode of the result. If MODE is VOIDmode, both operands must
fc7ca5fd 4324 not also be VOIDmode.
c6fb08ad
PB
4325
4326 CMP_MODE specifies in which mode the comparison is done in, so it is
4327 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4328 the operands or, if both are VOIDmode, the operands are compared in
4329 "infinite precision". */
4330rtx
ef4bddc2
RS
4331simplify_relational_operation (enum rtx_code code, machine_mode mode,
4332 machine_mode cmp_mode, rtx op0, rtx op1)
c6fb08ad
PB
4333{
4334 rtx tem, trueop0, trueop1;
4335
4336 if (cmp_mode == VOIDmode)
4337 cmp_mode = GET_MODE (op0);
4338 if (cmp_mode == VOIDmode)
4339 cmp_mode = GET_MODE (op1);
4340
4341 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4342 if (tem)
4343 {
3d8bf70f 4344 if (SCALAR_FLOAT_MODE_P (mode))
c6fb08ad
PB
4345 {
4346 if (tem == const0_rtx)
4347 return CONST0_RTX (mode);
fc7ca5fd
RS
4348#ifdef FLOAT_STORE_FLAG_VALUE
4349 {
4350 REAL_VALUE_TYPE val;
4351 val = FLOAT_STORE_FLAG_VALUE (mode);
4352 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4353 }
4354#else
4355 return NULL_RTX;
b8698a0f 4356#endif
c6fb08ad 4357 }
fc7ca5fd
RS
4358 if (VECTOR_MODE_P (mode))
4359 {
4360 if (tem == const0_rtx)
4361 return CONST0_RTX (mode);
4362#ifdef VECTOR_STORE_FLAG_VALUE
4363 {
4364 int i, units;
21e5076a 4365 rtvec v;
fc7ca5fd
RS
4366
4367 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4368 if (val == NULL_RTX)
4369 return NULL_RTX;
4370 if (val == const1_rtx)
4371 return CONST1_RTX (mode);
4372
4373 units = GET_MODE_NUNITS (mode);
4374 v = rtvec_alloc (units);
4375 for (i = 0; i < units; i++)
4376 RTVEC_ELT (v, i) = val;
4377 return gen_rtx_raw_CONST_VECTOR (mode, v);
4378 }
4379#else
4380 return NULL_RTX;
c6fb08ad 4381#endif
fc7ca5fd 4382 }
c6fb08ad
PB
4383
4384 return tem;
4385 }
4386
4387 /* For the following tests, ensure const0_rtx is op1. */
4388 if (swap_commutative_operands_p (op0, op1)
4389 || (op0 == const0_rtx && op1 != const0_rtx))
4390 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4391
4392 /* If op0 is a compare, extract the comparison arguments from it. */
4393 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
f90b7a5a
PB
4394 return simplify_gen_relational (code, mode, VOIDmode,
4395 XEXP (op0, 0), XEXP (op0, 1));
c6fb08ad 4396
30a440a7 4397 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
c6fb08ad
PB
4398 || CC0_P (op0))
4399 return NULL_RTX;
4400
4401 trueop0 = avoid_constant_pool_reference (op0);
4402 trueop1 = avoid_constant_pool_reference (op1);
4403 return simplify_relational_operation_1 (code, mode, cmp_mode,
4404 trueop0, trueop1);
4405}
4406
4407/* This part of simplify_relational_operation is only used when CMP_MODE
4408 is not in class MODE_CC (i.e. it is a real comparison).
4409
4410 MODE is the mode of the result, while CMP_MODE specifies in which
4411 mode the comparison is done in, so it is the mode of the operands. */
bc4ad38c
ZD
4412
4413static rtx
ef4bddc2
RS
4414simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4415 machine_mode cmp_mode, rtx op0, rtx op1)
c6fb08ad 4416{
bc4ad38c
ZD
4417 enum rtx_code op0code = GET_CODE (op0);
4418
3fa0cacd 4419 if (op1 == const0_rtx && COMPARISON_P (op0))
c6fb08ad 4420 {
3fa0cacd
RS
4421 /* If op0 is a comparison, extract the comparison arguments
4422 from it. */
4423 if (code == NE)
c6fb08ad 4424 {
3fa0cacd
RS
4425 if (GET_MODE (op0) == mode)
4426 return simplify_rtx (op0);
4427 else
4428 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4429 XEXP (op0, 0), XEXP (op0, 1));
4430 }
4431 else if (code == EQ)
4432 {
4433 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4434 if (new_code != UNKNOWN)
4435 return simplify_gen_relational (new_code, mode, VOIDmode,
4436 XEXP (op0, 0), XEXP (op0, 1));
4437 }
4438 }
4439
1d1eb80c
BS
4440 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4441 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4442 if ((code == LTU || code == GEU)
4443 && GET_CODE (op0) == PLUS
481683e1 4444 && CONST_INT_P (XEXP (op0, 1))
1d1eb80c 4445 && (rtx_equal_p (op1, XEXP (op0, 0))
5352ea68
AP
4446 || rtx_equal_p (op1, XEXP (op0, 1)))
4447 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4448 && XEXP (op0, 1) != const0_rtx)
1d1eb80c
BS
4449 {
4450 rtx new_cmp
4451 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4452 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4453 cmp_mode, XEXP (op0, 0), new_cmp);
4454 }
4455
921c4418
RIL
4456 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4457 if ((code == LTU || code == GEU)
4458 && GET_CODE (op0) == PLUS
cf369845
HPN
4459 && rtx_equal_p (op1, XEXP (op0, 1))
4460 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4461 && !rtx_equal_p (op1, XEXP (op0, 0)))
4942b76b
JJ
4462 return simplify_gen_relational (code, mode, cmp_mode, op0,
4463 copy_rtx (XEXP (op0, 0)));
921c4418 4464
3fa0cacd
RS
4465 if (op1 == const0_rtx)
4466 {
4467 /* Canonicalize (GTU x 0) as (NE x 0). */
4468 if (code == GTU)
4469 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4470 /* Canonicalize (LEU x 0) as (EQ x 0). */
4471 if (code == LEU)
4472 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4473 }
4474 else if (op1 == const1_rtx)
4475 {
4476 switch (code)
4477 {
4478 case GE:
4479 /* Canonicalize (GE x 1) as (GT x 0). */
4480 return simplify_gen_relational (GT, mode, cmp_mode,
4481 op0, const0_rtx);
4482 case GEU:
4483 /* Canonicalize (GEU x 1) as (NE x 0). */
4484 return simplify_gen_relational (NE, mode, cmp_mode,
4485 op0, const0_rtx);
4486 case LT:
4487 /* Canonicalize (LT x 1) as (LE x 0). */
4488 return simplify_gen_relational (LE, mode, cmp_mode,
4489 op0, const0_rtx);
4490 case LTU:
4491 /* Canonicalize (LTU x 1) as (EQ x 0). */
4492 return simplify_gen_relational (EQ, mode, cmp_mode,
4493 op0, const0_rtx);
4494 default:
4495 break;
c6fb08ad
PB
4496 }
4497 }
3fa0cacd
RS
4498 else if (op1 == constm1_rtx)
4499 {
4500 /* Canonicalize (LE x -1) as (LT x 0). */
4501 if (code == LE)
4502 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4503 /* Canonicalize (GT x -1) as (GE x 0). */
4504 if (code == GT)
4505 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4506 }
0cedb36c 4507
bc4ad38c
ZD
4508 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4509 if ((code == EQ || code == NE)
4510 && (op0code == PLUS || op0code == MINUS)
4511 && CONSTANT_P (op1)
551a3297
RH
4512 && CONSTANT_P (XEXP (op0, 1))
4513 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
bc4ad38c
ZD
4514 {
4515 rtx x = XEXP (op0, 0);
4516 rtx c = XEXP (op0, 1);
d303c992
CLT
4517 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4518 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4519
4520 /* Detect an infinite recursive condition, where we oscillate at this
4521 simplification case between:
4522 A + B == C <---> C - B == A,
4523 where A, B, and C are all constants with non-simplifiable expressions,
4524 usually SYMBOL_REFs. */
4525 if (GET_CODE (tem) == invcode
4526 && CONSTANT_P (x)
4527 && rtx_equal_p (c, XEXP (tem, 1)))
4528 return NULL_RTX;
4529
4530 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
bc4ad38c
ZD
4531 }
4532
1419a885
RS
4533 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4534 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4535 if (code == NE
4536 && op1 == const0_rtx
4537 && GET_MODE_CLASS (mode) == MODE_INT
4538 && cmp_mode != VOIDmode
61961eff
RS
4539 /* ??? Work-around BImode bugs in the ia64 backend. */
4540 && mode != BImode
f8eacd97 4541 && cmp_mode != BImode
1419a885
RS
4542 && nonzero_bits (op0, cmp_mode) == 1
4543 && STORE_FLAG_VALUE == 1)
f8eacd97
RS
4544 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4545 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4546 : lowpart_subreg (mode, op0, cmp_mode);
1419a885 4547
5484a3c3
RS
4548 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4549 if ((code == EQ || code == NE)
4550 && op1 == const0_rtx
4551 && op0code == XOR)
4552 return simplify_gen_relational (code, mode, cmp_mode,
4553 XEXP (op0, 0), XEXP (op0, 1));
4554
4d49d44d 4555 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5484a3c3
RS
4556 if ((code == EQ || code == NE)
4557 && op0code == XOR
4558 && rtx_equal_p (XEXP (op0, 0), op1)
4d49d44d 4559 && !side_effects_p (XEXP (op0, 0)))
9d31ea5b
RS
4560 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4561 CONST0_RTX (mode));
4d49d44d
KH
4562
4563 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5484a3c3
RS
4564 if ((code == EQ || code == NE)
4565 && op0code == XOR
4566 && rtx_equal_p (XEXP (op0, 1), op1)
4d49d44d 4567 && !side_effects_p (XEXP (op0, 1)))
9d31ea5b
RS
4568 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4569 CONST0_RTX (mode));
5484a3c3
RS
4570
4571 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4572 if ((code == EQ || code == NE)
4573 && op0code == XOR
33ffb5c5
KZ
4574 && CONST_SCALAR_INT_P (op1)
4575 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5484a3c3
RS
4576 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4577 simplify_gen_binary (XOR, cmp_mode,
4578 XEXP (op0, 1), op1));
4579
10828a01
SL
4580 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4581 can be implemented with a BICS instruction on some targets, or
4582 constant-folded if y is a constant. */
4583 if ((code == EQ || code == NE)
4584 && op0code == AND
4585 && rtx_equal_p (XEXP (op0, 0), op1)
4586 && !side_effects_p (op1))
4587 {
4588 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4589 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4590
3202dccc
JJ
4591 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4592 CONST0_RTX (cmp_mode));
10828a01
SL
4593 }
4594
4595 /* Likewise for (eq/ne (and x y) y). */
4596 if ((code == EQ || code == NE)
4597 && op0code == AND
4598 && rtx_equal_p (XEXP (op0, 1), op1)
4599 && !side_effects_p (op1))
4600 {
4601 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4602 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4603
3202dccc
JJ
4604 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4605 CONST0_RTX (cmp_mode));
10828a01
SL
4606 }
4607
b17c024f
EB
4608 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4609 if ((code == EQ || code == NE)
4610 && GET_CODE (op0) == BSWAP
a8c50132 4611 && CONST_SCALAR_INT_P (op1))
b17c024f
EB
4612 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4613 simplify_gen_unary (BSWAP, cmp_mode,
4614 op1, cmp_mode));
4615
4616 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4617 if ((code == EQ || code == NE)
4618 && GET_CODE (op0) == BSWAP
4619 && GET_CODE (op1) == BSWAP)
4620 return simplify_gen_relational (code, mode, cmp_mode,
4621 XEXP (op0, 0), XEXP (op1, 0));
4622
69fce32f
RS
4623 if (op0code == POPCOUNT && op1 == const0_rtx)
4624 switch (code)
4625 {
4626 case EQ:
4627 case LE:
4628 case LEU:
4629 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4630 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4631 XEXP (op0, 0), const0_rtx);
4632
4633 case NE:
4634 case GT:
4635 case GTU:
4636 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
2376c58f 4637 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
69fce32f
RS
4638 XEXP (op0, 0), const0_rtx);
4639
4640 default:
4641 break;
4642 }
4643
c6fb08ad
PB
4644 return NULL_RTX;
4645}
4646
b8698a0f 4647enum
39641489 4648{
a567207e
PB
4649 CMP_EQ = 1,
4650 CMP_LT = 2,
4651 CMP_GT = 4,
4652 CMP_LTU = 8,
4653 CMP_GTU = 16
39641489
PB
4654};
4655
4656
4657/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4658 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
b8698a0f 4659 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
a567207e 4660 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
39641489
PB
4661 For floating-point comparisons, assume that the operands were ordered. */
4662
4663static rtx
4664comparison_result (enum rtx_code code, int known_results)
4665{
39641489
PB
4666 switch (code)
4667 {
4668 case EQ:
4669 case UNEQ:
a567207e 4670 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
39641489
PB
4671 case NE:
4672 case LTGT:
a567207e 4673 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
39641489
PB
4674
4675 case LT:
4676 case UNLT:
a567207e 4677 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
39641489
PB
4678 case GE:
4679 case UNGE:
a567207e 4680 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
39641489
PB
4681
4682 case GT:
4683 case UNGT:
a567207e 4684 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
39641489
PB
4685 case LE:
4686 case UNLE:
a567207e 4687 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
39641489
PB
4688
4689 case LTU:
a567207e 4690 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
39641489 4691 case GEU:
a567207e 4692 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
39641489
PB
4693
4694 case GTU:
a567207e 4695 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
39641489 4696 case LEU:
a567207e 4697 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
39641489
PB
4698
4699 case ORDERED:
4700 return const_true_rtx;
4701 case UNORDERED:
4702 return const0_rtx;
4703 default:
4704 gcc_unreachable ();
4705 }
4706}
4707
807e902e
KZ
4708/* Check if the given comparison (done in the given MODE) is actually
4709 a tautology or a contradiction. If the mode is VOID_mode, the
4710 comparison is done in "infinite precision". If no simplification
4711 is possible, this function returns zero. Otherwise, it returns
4712 either const_true_rtx or const0_rtx. */
0cedb36c
JL
4713
4714rtx
7ce3e360 4715simplify_const_relational_operation (enum rtx_code code,
ef4bddc2 4716 machine_mode mode,
7ce3e360 4717 rtx op0, rtx op1)
0cedb36c 4718{
0cedb36c 4719 rtx tem;
4ba5f925
JH
4720 rtx trueop0;
4721 rtx trueop1;
0cedb36c 4722
41374e13
NS
4723 gcc_assert (mode != VOIDmode
4724 || (GET_MODE (op0) == VOIDmode
4725 && GET_MODE (op1) == VOIDmode));
47b1e19b 4726
0cedb36c
JL
4727 /* If op0 is a compare, extract the comparison arguments from it. */
4728 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5b5dc475
UW
4729 {
4730 op1 = XEXP (op0, 1);
4731 op0 = XEXP (op0, 0);
4732
4733 if (GET_MODE (op0) != VOIDmode)
4734 mode = GET_MODE (op0);
4735 else if (GET_MODE (op1) != VOIDmode)
4736 mode = GET_MODE (op1);
4737 else
4738 return 0;
4739 }
0cedb36c
JL
4740
4741 /* We can't simplify MODE_CC values since we don't know what the
4742 actual comparison is. */
8beccec8 4743 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
0cedb36c
JL
4744 return 0;
4745
52a75c3c 4746 /* Make sure the constant is second. */
9ce79a7a 4747 if (swap_commutative_operands_p (op0, op1))
52a75c3c
RH
4748 {
4749 tem = op0, op0 = op1, op1 = tem;
4750 code = swap_condition (code);
4751 }
4752
9ce79a7a
RS
4753 trueop0 = avoid_constant_pool_reference (op0);
4754 trueop1 = avoid_constant_pool_reference (op1);
4755
0cedb36c
JL
4756 /* For integer comparisons of A and B maybe we can simplify A - B and can
4757 then simplify a comparison of that with zero. If A and B are both either
4758 a register or a CONST_INT, this can't help; testing for these cases will
4759 prevent infinite recursion here and speed things up.
4760
e0d0c193
RG
4761 We can only do this for EQ and NE comparisons as otherwise we may
4762 lose or introduce overflow which we cannot disregard as undefined as
4763 we do not know the signedness of the operation on either the left or
4764 the right hand side of the comparison. */
0cedb36c 4765
e0d0c193
RG
4766 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4767 && (code == EQ || code == NE)
481683e1
SZ
4768 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4769 && (REG_P (op1) || CONST_INT_P (trueop1)))
0cedb36c 4770 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
e0d0c193
RG
4771 /* We cannot do this if tem is a nonzero address. */
4772 && ! nonzero_address_p (tem))
7ce3e360
RS
4773 return simplify_const_relational_operation (signed_condition (code),
4774 mode, tem, const0_rtx);
0cedb36c 4775
bdbb0460 4776 if (! HONOR_NANS (mode) && code == ORDERED)
1f36a2dd
JH
4777 return const_true_rtx;
4778
bdbb0460 4779 if (! HONOR_NANS (mode) && code == UNORDERED)
1f36a2dd
JH
4780 return const0_rtx;
4781
71925bc0 4782 /* For modes without NaNs, if the two operands are equal, we know the
39641489
PB
4783 result except if they have side-effects. Even with NaNs we know
4784 the result of unordered comparisons and, if signaling NaNs are
4785 irrelevant, also the result of LT/GT/LTGT. */
1b457aa4 4786 if ((! HONOR_NANS (trueop0)
39641489
PB
4787 || code == UNEQ || code == UNLE || code == UNGE
4788 || ((code == LT || code == GT || code == LTGT)
3d3dbadd 4789 && ! HONOR_SNANS (trueop0)))
8821d091
EB
4790 && rtx_equal_p (trueop0, trueop1)
4791 && ! side_effects_p (trueop0))
a567207e 4792 return comparison_result (code, CMP_EQ);
0cedb36c
JL
4793
4794 /* If the operands are floating-point constants, see if we can fold
4795 the result. */
48175537
KZ
4796 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4797 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
39641489 4798 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
0cedb36c 4799 {
15e5ad76 4800 REAL_VALUE_TYPE d0, d1;
0cedb36c 4801
15e5ad76
ZW
4802 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4803 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
90a74703 4804
1eeeb6a4 4805 /* Comparisons are unordered iff at least one of the values is NaN. */
15e5ad76 4806 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
90a74703
JH
4807 switch (code)
4808 {
4809 case UNEQ:
4810 case UNLT:
4811 case UNGT:
4812 case UNLE:
4813 case UNGE:
4814 case NE:
4815 case UNORDERED:
4816 return const_true_rtx;
4817 case EQ:
4818 case LT:
4819 case GT:
4820 case LE:
4821 case GE:
4822 case LTGT:
4823 case ORDERED:
4824 return const0_rtx;
4825 default:
4826 return 0;
4827 }
0cedb36c 4828
39641489 4829 return comparison_result (code,
a567207e
PB
4830 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4831 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
0cedb36c 4832 }
0cedb36c
JL
4833
4834 /* Otherwise, see if the operands are both integers. */
39641489 4835 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
807e902e 4836 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
0cedb36c 4837 {
807e902e
KZ
4838 /* It would be nice if we really had a mode here. However, the
4839 largest int representable on the target is as good as
4840 infinite. */
ef4bddc2 4841 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
807e902e
KZ
4842 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4843 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4844
4845 if (wi::eq_p (ptrueop0, ptrueop1))
a567207e 4846 return comparison_result (code, CMP_EQ);
39641489
PB
4847 else
4848 {
807e902e
KZ
4849 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4850 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
a567207e 4851 return comparison_result (code, cr);
39641489 4852 }
0cedb36c
JL
4853 }
4854
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }

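  /* Example: if nonzero_bits proves TRUEOP0 fits in 8 bits, then
     mmin = 0 and mmax = 255, so (leu x 255) folds to const_true_rtx via
     the LEU case (val >= mmax) and (gtu x 255) folds to const0_rtx.  */
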
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }

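  /* Example for the IOR case in SImode: (lt (ior x 0x80000000) 0) folds
     to const_true_rtx because the constant forces the sign bit on,
     while (ge (ior x 0x80000000) 0) folds to const0_rtx.  */
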
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
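/* Note on the ABS folds above: abs (x) >= 0.0 is not a tautology once
   NaNs are honored, because abs (NaN) is still a NaN and compares
   unordered; only the unordered form ! (abs (x) < 0.0) is always true,
   hence the unconditional UNGE fold.  */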

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
                            machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

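      /* Example of the folds above: (fma (neg a) (neg b) c) becomes
         (fma a b c), and (fma a (neg b) c) is canonicalized to
         (fma (neg b) a c) by the commutative-operand swap.  */
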
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
      break;

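      /* Worked example with little-endian bit numbering:
         (zero_extract (const_int 0xab) (const_int 4) (const_int 0))
         shifts right by op2 (0) and masks to 4 bits, giving
         (const_int 0xb); sign_extract would additionally propagate
         bit 3, giving (const_int -5).  */
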
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                   ? GET_MODE (XEXP (op0, 1))
                                   : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              else if (temp)
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;

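      /* Example of the STORE_FLAG_VALUE trick above: on a target where
         STORE_FLAG_VALUE is 1, (if_then_else (lt a b) (const_int 1)
         (const_int 0)) becomes the comparison (lt a b) itself, and with
         the arms swapped the reversed comparison (ge a b) is used.  */
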
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
          unsigned HOST_WIDE_INT mask;
          if (n_elts == HOST_BITS_PER_WIDE_INT)
            mask = -1;
          else
            mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

          if (!(sel & mask) && !side_effects_p (op0))
            return op1;
          if ((sel & mask) == mask && !side_effects_p (op1))
            return op0;

          rtx trueop0 = avoid_constant_pool_reference (op0);
          rtx trueop1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (trueop0) == CONST_VECTOR
              && GET_CODE (trueop1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
                                    ? CONST_VECTOR_ELT (trueop0, i)
                                    : CONST_VECTOR_ELT (trueop1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }

          /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
             if no element from a appears in the result.  */
          if (GET_CODE (op0) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
                  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 1), op1, op2);
                  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 0), op1, op2);
                }
            }
          if (GET_CODE (op1) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op1, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
                  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 1), op2);
                  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 0), op2);
                }
            }

          /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
             with a.  */
          if (GET_CODE (op0) == VEC_DUPLICATE
              && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
              && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
              && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
            {
              tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
              if (CONST_INT_P (tem) && CONST_INT_P (op2))
                {
                  if (XEXP (XEXP (op0, 0), 0) == op1
                      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
                    return op1;
                }
            }
        }

      if (rtx_equal_p (op0, op1)
          && !side_effects_p (op2) && !side_effects_p (op1))
        return op0;

      break;

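      /* Example of the selector arithmetic above: in V4SImode, n_elts
         is 4 and mask is 0xf.  sel = 0x5 takes elements 0 and 2 from
         op0 and elements 1 and 3 from op1; sel = 0 yields op1 outright
         and sel = 0xf yields op0.  */
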
    default:
      gcc_unreachable ();
    }

  return 0;
}

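/* A minimal usage sketch (hypothetical operands, for illustration):

     rtx x = simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
                                         const1_rtx, a, b);

   returns A, because a constant nonzero condition selects the first
   arm; a NULL return means no simplification was found.  */
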
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
                       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
                     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_WIDE_INT:
          {
            rtx_mode_t val = std::make_pair (el, innermode);
            unsigned char extend = wi::sign_mask (val);

            for (i = 0; i < elem_bitsize; i += value_bit)
              *vp++ = wi::extract_uhwi (val, i, value_bit);
            for (; i < elem_bitsize; i += value_bit)
              *vp++ = extend;
          }
          break;

        case CONST_DOUBLE:
          if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              /* This is big enough for anything on the platform.  */
              long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

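  /* At this point VALUE holds the constant as little-endian value_bit
     (i.e. 8-bit) chunks.  For example, an SImode CONST_INT 0x12345678
     unpacks to value[0..3] = {0x78, 0x56, 0x34, 0x12}, sign-extended
     from there on.  */
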
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            int u;
            int base = 0;
            int units
              = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
                / HOST_BITS_PER_WIDE_INT;
            HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
            wide_int r;

            if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
              return NULL_RTX;
            for (u = 0; u < units; u++)
              {
                unsigned HOST_WIDE_INT buf = 0;
                for (i = 0;
                     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
                     i += value_bit)
                  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

                tmp[u] = buf;
                base += HOST_BITS_PER_WIDE_INT;
              }
            r = wide_int::from_array (tmp, units,
                                      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
            /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
            if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
              return NULL_RTX;
#endif
            elems[elem] = immed_wide_int_const (r, outer_submode);
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}

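/* For instance, simplify_immed_subreg (HImode, GEN_INT (0x12345678),
   SImode, 0) selects the low-order half after the endian renumbering
   above, yielding (const_int 0x5678) on a little-endian target; on a
   big-endian target byte 0 names the most significant part, giving
   (const_int 0x1234).  */
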
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
                 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of op.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_SIGN (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
            }
          return newx;
        }
      return NULL_RTX;
    }

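  /* Example of the nested-SUBREG fold above:
     (subreg:QI (subreg:HI (reg:SI r) 0) 0) collapses to
     (subreg:QI (reg:SI r) 0); the two byte offsets are combined into
     FINAL_OFFSET and a single SUBREG is rebuilt.  */
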
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis,
             which cannot grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}

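/* A minimal usage sketch (illustrative values): on a little-endian
   target, simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0) takes
   the constant path through simplify_immed_subreg and returns
   (const_int 0x34).  */
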
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
                     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

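/* Unlike simplify_subreg, this usually does not return NULL just
   because no fold applies: e.g. simplify_gen_subreg (HImode, reg,
   SImode, 0) for a pseudo REG falls through to gen_rtx_SUBREG and
   hands back a fresh (subreg:HI (reg:SI ...) 0) once the subreg
   validates.  */
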
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
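
/* A minimal caller sketch (hypothetical rtx X):

     rtx folded = simplify_rtx (x);
     if (folded)
       x = folded;

   A NULL return means "no simplification found", not an error, so
   callers keep the original expression in that case.  */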