/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
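/* For example, HWI_SIGN_EXTEND of a low word whose value is -5 when
   read as a signed HOST_WIDE_INT yields a high word of -1 (all ones),
   while a low word of 7 yields a high word of 0.  */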

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
\f
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
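/* The cast to unsigned above sidesteps signed-overflow undefined
   behavior: e.g. for a 64-bit HOST_WIDE_INT, negating the most negative
   value 0x8000000000000000 as a signed number would overflow, whereas
   unsigned negation wraps back to the same bit pattern, which
   gen_int_mode then canonicalizes for MODE.  */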

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
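/* For instance, in SImode (32 bits) the only accepted value is
   1 << 31: a constant whose masked value is 0x80000000.  */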

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
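/* E.g. in SImode, val_signbit_known_set_p holds for 0x80000001 and
   val_signbit_known_clear_p holds for 0x7fffffff, while val_signbit_p
   holds only for exactly 0x80000000 (after masking).  */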
\f
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
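/* A minimal usage sketch (the pseudo register number is illustrative):

     rtx x = gen_rtx_REG (SImode, 100);
     rtx sum = simplify_gen_binary (PLUS, SImode, x, const0_rtx);

   Here (plus x 0) folds, so SUM is X itself rather than a new PLUS.  */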
\f
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
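/* For instance, a (mem:DF (symbol_ref .LC0)) whose pool entry holds a
   DFmode constant comes back as the CONST_DOUBLE itself, letting
   callers fold arithmetic on it.  (.LC0 is just an illustrative pool
   label here.)  */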
\f
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
\f
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
\f
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
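/* For example, replacing (reg R) with (const_int 8) in
   (plus:SI (reg R) (const_int 4)) yields (const_int 12), because the
   rebuilt PLUS is fed back through simplify_gen_binary.  */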
\f
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }
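  /* Concretely: (truncate:QI (zero_extend:SI (x:QI))) is x itself;
     (truncate:QI (zero_extend:SI (x:HI))) becomes (truncate:QI (x:HI));
     and (truncate:HI (zero_extend:SI (x:QI))) becomes
     (zero_extend:HI (x:QI)).  */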

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer truncation is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer truncation is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer truncation is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
\f
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
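/* E.g. a 32-bit int fits in the 53-bit significand of a double, so
   (float:DF (x:SI)) is always exact, whereas (float:SF (x:SI)) is exact
   only when X is known to need no more significant bits than SFmode's
   24-bit significand provides.  */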

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If comparison is not reversible use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
              || (GET_CODE (false_rtx) == NEG
                  && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>).  */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent, i.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                  (MULT, mode,
                   simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                   simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }
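      /* For example, (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
         (sign_extend:SI y:HI))) becomes (mult:DI (sign_extend:DI x:HI)
         (sign_extend:DI y:HI)): a 16x16->32 multiply can never overflow
         32 bits, so widening the product directly is equivalent.  */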

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        return convert_memory_address (Pmode, op);
#endif
      break;
1488
1489 case ZERO_EXTEND:
1490 /* Check for a zero extension of a subreg of a promoted
1491 variable, where the promotion is zero-extended, and the
1492 target mode is the same as the variable's promotion. */
1493 if (GET_CODE (op) == SUBREG
1494 && SUBREG_PROMOTED_VAR_P (op)
362d42dc 1495 && SUBREG_PROMOTED_UNSIGNED_P (op)
4613543f 1496 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
76bd29f6
JJ
1497 {
1498 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1499 if (temp)
1500 return temp;
1501 }
0a67e02c 1502
c536876e
AS
1503 /* Extending a widening multiplication should be canonicalized to
1504 a wider widening multiplication. */
1505 if (GET_CODE (op) == MULT)
1506 {
1507 rtx lhs = XEXP (op, 0);
1508 rtx rhs = XEXP (op, 1);
1509 enum rtx_code lcode = GET_CODE (lhs);
1510 enum rtx_code rcode = GET_CODE (rhs);
1511
1512 /* Widening multiplies usually extend both operands, but sometimes
1513 they use a shift to extract a portion of a register. */
1514 if ((lcode == ZERO_EXTEND
1515 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1516 && (rcode == ZERO_EXTEND
1517 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1518 {
ef4bddc2
RS
1519 machine_mode lmode = GET_MODE (lhs);
1520 machine_mode rmode = GET_MODE (rhs);
c536876e
AS
1521 int bits;
1522
1523 if (lcode == LSHIFTRT)
1524 /* Number of bits not shifted off the end. */
1525 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1526 else /* lcode == ZERO_EXTEND */
1527 /* Size of inner mode. */
1528 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1529
1530 if (rcode == LSHIFTRT)
1531 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1532 else /* rcode == ZERO_EXTEND */
1533 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1534
1535 /* We can only widen multiplies if the result is mathematiclly
1536 equivalent. I.e. if overflow was impossible. */
1537 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1538 return simplify_gen_binary
1539 (MULT, mode,
1540 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1541 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1542 }
1543 }
1544
a5d8253f
JJ
1545 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1546 if (GET_CODE (op) == ZERO_EXTEND)
1547 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1548 GET_MODE (XEXP (op, 0)));
1549
561da6bc
JJ
1550 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1551 is (zero_extend:M (subreg:O <X>)) if there is mode with
50b6ee8b 1552 GET_MODE_PRECISION (N) - I bits. */
561da6bc
JJ
1553 if (GET_CODE (op) == LSHIFTRT
1554 && GET_CODE (XEXP (op, 0)) == ASHIFT
1555 && CONST_INT_P (XEXP (op, 1))
1556 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
50b6ee8b 1557 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
561da6bc 1558 {
ef4bddc2 1559 machine_mode tmode
50b6ee8b 1560 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
561da6bc
JJ
1561 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1562 if (tmode != BLKmode)
1563 {
1564 rtx inner =
1565 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
76bd29f6
JJ
1566 if (inner)
1567 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
561da6bc
JJ
1568 }
1569 }
1570
8140c065
JJ
1571 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1572 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1573 of mode N. E.g.
1574 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1575 (and:SI (reg:SI) (const_int 63)). */
1576 if (GET_CODE (op) == SUBREG
1577 && GET_MODE_PRECISION (GET_MODE (op))
1578 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1579 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1580 <= HOST_BITS_PER_WIDE_INT
1581 && GET_MODE_PRECISION (mode)
1582 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1583 && subreg_lowpart_p (op)
1584 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1585 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1586 {
1587 if (GET_MODE_PRECISION (mode)
1588 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1589 return SUBREG_REG (op);
1590 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1591 GET_MODE (SUBREG_REG (op)));
1592 }
1593
1594#if defined(POINTERS_EXTEND_UNSIGNED)
1595 /* As we do not know which address space the pointer is referring to,
1596 we can do this only if the target does not support different pointer
1597 or address modes depending on the address space. */
1598 if (target_default_pointer_address_modes_p ()
1599 && POINTERS_EXTEND_UNSIGNED > 0
1600 && mode == Pmode && GET_MODE (op) == ptr_mode
1601 && (CONSTANT_P (op)
1602 || (GET_CODE (op) == SUBREG
1603 && REG_P (SUBREG_REG (op))
1604 && REG_POINTER (SUBREG_REG (op))
1605 && GET_MODE (SUBREG_REG (op)) == Pmode))
1606 && !targetm.have_ptr_extend ())
1607 return convert_memory_address (Pmode, op);
1608#endif
1609 break;
1610
1611 default:
1612 break;
1613 }
1614
1615 return 0;
1616}
1617
1618/* Try to compute the value of a unary operation CODE whose output mode is to
1619 be MODE with input operand OP whose mode was originally OP_MODE.
1620 Return zero if the value cannot be computed. */
1621rtx
1622simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1623 rtx op, machine_mode op_mode)
1624{
1625 unsigned int width = GET_MODE_PRECISION (mode);
1626
1627 if (code == VEC_DUPLICATE)
1628 {
1629 gcc_assert (VECTOR_MODE_P (mode));
1630 if (GET_MODE (op) != VOIDmode)
1631 {
1632 if (!VECTOR_MODE_P (GET_MODE (op)))
1633 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1634 else
1635 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1636 (GET_MODE (op)));
1637 }
1638 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1639 || GET_CODE (op) == CONST_VECTOR)
1640 {
1641 int elt_size = GET_MODE_UNIT_SIZE (mode);
1642 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1643 rtvec v = rtvec_alloc (n_elts);
1644 unsigned int i;
1645
1646 if (GET_CODE (op) != CONST_VECTOR)
1647 for (i = 0; i < n_elts; i++)
1648 RTVEC_ELT (v, i) = op;
1649 else
1650 {
1651 machine_mode inmode = GET_MODE (op);
1652 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1653 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1654
1655 gcc_assert (in_n_elts < n_elts);
1656 gcc_assert ((n_elts % in_n_elts) == 0);
1657 for (i = 0; i < n_elts; i++)
1658 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1659 }
1660 return gen_rtx_CONST_VECTOR (mode, v);
1661 }
1662 }
1663
1664 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1665 {
1666 int elt_size = GET_MODE_UNIT_SIZE (mode);
1667 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1668 machine_mode opmode = GET_MODE (op);
1669 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1670 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1671 rtvec v = rtvec_alloc (n_elts);
1672 unsigned int i;
1673
1674 gcc_assert (op_n_elts == n_elts);
1675 for (i = 0; i < n_elts; i++)
1676 {
1677 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1678 CONST_VECTOR_ELT (op, i),
1679 GET_MODE_INNER (opmode));
1680 if (!x)
1681 return 0;
1682 RTVEC_ELT (v, i) = x;
1683 }
1684 return gen_rtx_CONST_VECTOR (mode, v);
1685 }
1686
1687 /* The order of these tests is critical so that, for example, we don't
1688 check the wrong mode (input vs. output) for a conversion operation,
1689 such as FIX. At some point, this should be simplified. */
1690
1691 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1692 {
1693 REAL_VALUE_TYPE d;
1694
1695 if (op_mode == VOIDmode)
1696 {
1697 /* CONST_INTs have VOIDmode as their mode. We assume that all
1698 the bits of the constant are significant; this is a dangerous
1699 assumption, though, as CONST_INTs are often created and used
1700 with garbage in the bits outside of the precision of the
1701 implied mode of the const_int. */
1702 op_mode = MAX_MODE_INT;
1703 }
1704
1705 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1706
1707 /* Avoid the folding if flag_signaling_nans is on and
1708 operand is a signaling NaN. */
1709 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1710 return 0;
1711
1712 d = real_value_truncate (mode, d);
1713 return const_double_from_real_value (d, mode);
1714 }
1715 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1716 {
1717 REAL_VALUE_TYPE d;
1718
1719 if (op_mode == VOIDmode)
1720 {
1721 /* CONST_INTs have VOIDmode as their mode. We assume that all
1722 the bits of the constant are significant; this is a dangerous
1723 assumption, though, as CONST_INTs are often created and used
1724 with garbage in the bits outside of the precision of the
1725 implied mode of the const_int. */
1726 op_mode = MAX_MODE_INT;
1727 }
1728
1729 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1730
1731 /* Avoid the folding if flag_signaling_nans is on and
1732 operand is a signaling NaN. */
1733 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1734 return 0;
1735
1736 d = real_value_truncate (mode, d);
1737 return const_double_from_real_value (d, mode);
1738 }
1739
1740 if (CONST_SCALAR_INT_P (op) && width > 0)
1741 {
1742 wide_int result;
1743 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1744 rtx_mode_t op0 = std::make_pair (op, imode);
1745 int int_value;
1746
1747#if TARGET_SUPPORTS_WIDE_INT == 0
1748 /* This assert keeps the simplification from producing a result
1749 that cannot be represented in a CONST_DOUBLE, but a lot of
1750 upstream callers expect that this function never fails to
1751 simplify something, so if you added this to the test
1752 above the code would die later anyway. If this assert
1753 fires, you just need to make the port support wide int. */
1754 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1755#endif
1756
1757 switch (code)
1758 {
1759 case NOT:
1760 result = wi::bit_not (op0);
1761 break;
1762
1763 case NEG:
1764 result = wi::neg (op0);
1765 break;
1766
1767 case ABS:
1768 result = wi::abs (op0);
1769 break;
1770
1771 case FFS:
1772 result = wi::shwi (wi::ffs (op0), mode);
1773 break;
1774
2928cd7a 1775 case CLZ:
1776 if (wi::ne_p (op0, 0))
1777 int_value = wi::clz (op0);
1778 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1779 int_value = GET_MODE_PRECISION (mode);
1780 result = wi::shwi (int_value, mode);
1781 break;
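 /* E.g. CLZ of (const_int 1) in SImode is 31. For a zero operand the
    value comes from CLZ_DEFINED_VALUE_AT_ZERO when the target defines
    one, and otherwise falls back to the mode precision; CTZ below is
    handled symmetrically. */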
1782
1783 case CLRSB:
1784 result = wi::shwi (wi::clrsb (op0), mode);
1785 break;
1786
1787 case CTZ:
1788 if (wi::ne_p (op0, 0))
1789 int_value = wi::ctz (op0);
1790 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1791 int_value = GET_MODE_PRECISION (mode);
1792 result = wi::shwi (int_value, mode);
1793 break;
1794
1795 case POPCOUNT:
1796 result = wi::shwi (wi::popcount (op0), mode);
1797 break;
1798
1799 case PARITY:
1800 result = wi::shwi (wi::parity (op0), mode);
1801 break;
1802
167fa32c 1803 case BSWAP:
1804 result = wide_int (op0).bswap ();
1805 break;
1806
0cedb36c 1807 case TRUNCATE:
0cedb36c 1808 case ZERO_EXTEND:
1809 result = wide_int::from (op0, width, UNSIGNED);
1810 break;
1811
1812 case SIGN_EXTEND:
1813 result = wide_int::from (op0, width, SIGNED);
1814 break;
1815
1816 case SQRT:
1817 default:
1818 return 0;
1819 }
1820
1821 return immed_wide_int_const (result, mode);
1822 }
1823
1824 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1825 && SCALAR_FLOAT_MODE_P (mode)
1826 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1827 {
1828 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1829 switch (code)
1830 {
1831 case SQRT:
1832 return 0;
1833 case ABS:
1834 d = real_value_abs (&d);
1835 break;
1836 case NEG:
1837 d = real_value_negate (&d);
1838 break;
1839 case FLOAT_TRUNCATE:
1840 /* Don't perform the operation if flag_signaling_nans is on
1841 and the operand is a signaling NaN. */
1842 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1843 d = real_value_truncate (mode, d);
1844 break;
1845 case FLOAT_EXTEND:
1846 /* All this does is change the mode, unless changing
1847 mode class. */
1848 /* Don't perform the operation if flag_signaling_nans is on
1849 and the operand is a signaling NaN. */
1850 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))
1851 && !(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1852 real_convert (&d, mode, &d);
1853 break;
1854 case FIX:
1855 /* Don't perform the operation if flag_signaling_nans is on
1856 and the operand is a signaling NaN. */
1857 if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1858 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1859 break;
1860 case NOT:
1861 {
1862 long tmp[4];
1863 int i;
1864
1865 real_to_target (tmp, &d, GET_MODE (op));
1866 for (i = 0; i < 4; i++)
1867 tmp[i] = ~tmp[i];
1868 real_from_target (&d, tmp, mode);
1869 break;
1870 }
1871 default:
1872 gcc_unreachable ();
1873 }
1874 return const_double_from_real_value (d, mode);
1875 }
1876 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1877 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1878 && GET_MODE_CLASS (mode) == MODE_INT
1879 && width > 0)
1880 {
1881 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1882 operators are intentionally left unspecified (to ease implementation
1883 by target backends), for consistency, this routine implements the
1884 same semantics for constant folding as used by the middle-end. */
1885
1886 /* This was formerly used only for non-IEEE float.
1887 eggert@twinsun.com says it is safe for IEEE also. */
1888 REAL_VALUE_TYPE t;
1889 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1890 wide_int wmax, wmin;
1891 /* This is part of the ABI of real_to_integer, but we check
1892 things before making this call. */
1893 bool fail;
1894
1895 switch (code)
1896 {
1897 case FIX:
1898 if (REAL_VALUE_ISNAN (*x))
1899 return const0_rtx;
1900
1901 /* Test against the signed upper bound. */
1902 wmax = wi::max_value (width, SIGNED);
1903 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1904 if (real_less (&t, x))
1905 return immed_wide_int_const (wmax, mode);
1906
1907 /* Test against the signed lower bound. */
1908 wmin = wi::min_value (width, SIGNED);
1909 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1910 if (real_less (x, &t))
1911 return immed_wide_int_const (wmin, mode);
1912
1913 return immed_wide_int_const (real_to_integer (x, &fail, width),
1914 mode);
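 /* E.g. a DFmode 1e30 exceeds the SImode bound, so (fix:SI ...) of it
    folds to the saturated value wmax == 0x7fffffff here, while a NaN
    operand folds to 0 above; per the comment at the top of this block,
    this matches the middle-end's constant-folding semantics. */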
1915
1916 case UNSIGNED_FIX:
1917 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1918 return const0_rtx;
1919
1920 /* Test against the unsigned upper bound. */
1921 wmax = wi::max_value (width, UNSIGNED);
1922 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1923 if (real_less (&t, x))
1924 return immed_wide_int_const (wmax, mode);
1925
1926 return immed_wide_int_const (real_to_integer (x, &fail, width),
1927 mode);
1928
1929 default:
1930 gcc_unreachable ();
1931 }
1932 }
1933
1934 return NULL_RTX;
1935}
1936\f
1937/* Subroutine of simplify_binary_operation to simplify a binary operation
1938 CODE that can commute with byte swapping, with result mode MODE and
1939 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1940 Return zero if no simplification or canonicalization is possible. */
1941
1942static rtx
1943simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1944 rtx op0, rtx op1)
1945{
1946 rtx tem;
1947
1948 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1949 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1950 {
1951 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1952 simplify_gen_unary (BSWAP, mode, op1, mode));
1953 return simplify_gen_unary (BSWAP, mode, tem, mode);
1954 }
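 /* E.g. in SImode, (and (bswap x) (const_int 0xff)) becomes
    (bswap (and x (const_int 0xff000000))), since byte-swapping the
    constant commutes with the bitwise operation and exposes the AND
    to further simplification against x. */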
1955
1956 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1957 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1958 {
1959 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1960 return simplify_gen_unary (BSWAP, mode, tem, mode);
1961 }
1962
1963 return NULL_RTX;
1964}
1965
1966/* Subroutine of simplify_binary_operation to simplify a commutative,
1967 associative binary operation CODE with result mode MODE, operating
1968 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1969 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1970 canonicalization is possible. */
1971
1972static rtx
1973simplify_associative_operation (enum rtx_code code, machine_mode mode,
1974 rtx op0, rtx op1)
1975{
1976 rtx tem;
1977
1978 /* Linearize the operator to the left. */
1979 if (GET_CODE (op1) == code)
1980 {
1981 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1982 if (GET_CODE (op0) == code)
1983 {
1984 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1985 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1986 }
1987
1988 /* "a op (b op c)" becomes "(b op c) op a". */
1989 if (! swap_commutative_operands_p (op1, op0))
1990 return simplify_gen_binary (code, mode, op1, op0);
1991
1992 std::swap (op0, op1);
1993 }
1994
1995 if (GET_CODE (op0) == code)
1996 {
1997 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1998 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1999 {
2000 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2001 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2002 }
2003
2004 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2005 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2006 if (tem != 0)
2007 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2008
2009 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2010 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2011 if (tem != 0)
2012 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2013 }
2014
2015 return 0;
2016}
2017
2018
2019/* Simplify a binary operation CODE with result mode MODE, operating on OP0
2020 and OP1. Return 0 if no simplification is possible.
2021
2022 Don't use this for relational operations such as EQ or LT.
2023 Use simplify_relational_operation instead. */
2024rtx
2025simplify_binary_operation (enum rtx_code code, machine_mode mode,
2026 rtx op0, rtx op1)
2027{
2028 rtx trueop0, trueop1;
2029 rtx tem;
2030
2031 /* Relational operations don't work here. We must know the mode
2032 of the operands in order to do the comparison correctly.
2033 Assuming a full word can give incorrect results.
2034 Consider comparing 128 with -128 in QImode. */
2035 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2036 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2037
2038 /* Make sure the constant is second. */
2039 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2040 && swap_commutative_operands_p (op0, op1))
2041 std::swap (op0, op1);
2042
2043 trueop0 = avoid_constant_pool_reference (op0);
2044 trueop1 = avoid_constant_pool_reference (op1);
2045
2046 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2047 if (tem)
2048 return tem;
2049 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2050
2051 if (tem)
2052 return tem;
2053
2054 /* If the above steps did not result in a simplification and op0 or op1
2055 were constant pool references, use the referenced constants directly. */
2056 if (trueop0 != op0 || trueop1 != op1)
2057 return simplify_gen_binary (code, mode, trueop0, trueop1);
2058
2059 return NULL_RTX;
2060}
2061
2062/* Subroutine of simplify_binary_operation. Simplify a binary operation
2063 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2064 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2065 actual constants. */
2066
2067static rtx
2068simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2069 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2070{
2071 rtx tem, reversed, opleft, opright;
2072 HOST_WIDE_INT val;
2073 unsigned int width = GET_MODE_PRECISION (mode);
2074
2075 /* Even if we can't compute a constant result,
2076 there are some cases worth simplifying. */
2077
2078 switch (code)
2079 {
2080 case PLUS:
2081 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2082 when x is NaN, infinite, or finite and nonzero. They aren't
2083 when x is -0 and the rounding mode is not towards -infinity,
2084 since (-0) + 0 is then 0. */
2085 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2086 return op0;
2087
2088 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2089 transformations are safe even for IEEE. */
2090 if (GET_CODE (op0) == NEG)
2091 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2092 else if (GET_CODE (op1) == NEG)
2093 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2094
2095 /* (~a) + 1 -> -a */
2096 if (INTEGRAL_MODE_P (mode)
2097 && GET_CODE (op0) == NOT
2098 && trueop1 == const1_rtx)
2099 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
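 /* This is the two's-complement identity -a == ~a + 1; e.g. for
    QImode a == 3, ~3 + 1 == 0xfc + 1 == 0xfd == -3 (mod 256). */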
2100
2101 /* Handle both-operands-constant cases. We can only add
2102 CONST_INTs to constants since the sum of relocatable symbols
2103 can't be handled by most assemblers. Don't add CONST_INT
2104 to CONST_INT since overflow won't be computed properly if wider
2105 than HOST_BITS_PER_WIDE_INT. */
2106
2107 if ((GET_CODE (op0) == CONST
2108 || GET_CODE (op0) == SYMBOL_REF
2109 || GET_CODE (op0) == LABEL_REF)
2110 && CONST_INT_P (op1))
2111 return plus_constant (mode, op0, INTVAL (op1));
2112 else if ((GET_CODE (op1) == CONST
2113 || GET_CODE (op1) == SYMBOL_REF
2114 || GET_CODE (op1) == LABEL_REF)
2115 && CONST_INT_P (op0))
2116 return plus_constant (mode, op1, INTVAL (op0));
2117
2118 /* See if this is something like X * C - X or vice versa or
2119 if the multiplication is written as a shift. If so, we can
2120 distribute and make a new multiply, shift, or maybe just
2121 have X (if C is 2 in the example above). But don't make
2122 something more expensive than we had before. */
2123
2124 if (SCALAR_INT_MODE_P (mode))
2125 {
2126 rtx lhs = op0, rhs = op1;
2127
2128 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2129 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2130
2131 if (GET_CODE (lhs) == NEG)
2132 {
2133 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2134 lhs = XEXP (lhs, 0);
2135 }
2136 else if (GET_CODE (lhs) == MULT
2137 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2138 {
2139 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2140 lhs = XEXP (lhs, 0);
2141 }
2142 else if (GET_CODE (lhs) == ASHIFT
2143 && CONST_INT_P (XEXP (lhs, 1))
2144 && INTVAL (XEXP (lhs, 1)) >= 0
2145 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2146 {
2147 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2148 GET_MODE_PRECISION (mode));
2149 lhs = XEXP (lhs, 0);
2150 }
2151
2152 if (GET_CODE (rhs) == NEG)
2153 {
2154 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2155 rhs = XEXP (rhs, 0);
2156 }
2157 else if (GET_CODE (rhs) == MULT
2158 && CONST_INT_P (XEXP (rhs, 1)))
2159 {
2160 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2161 rhs = XEXP (rhs, 0);
2162 }
2163 else if (GET_CODE (rhs) == ASHIFT
481683e1 2164 && CONST_INT_P (XEXP (rhs, 1))
2165 && INTVAL (XEXP (rhs, 1)) >= 0
2166 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2167 {
2168 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2169 GET_MODE_PRECISION (mode));
2170 rhs = XEXP (rhs, 0);
2171 }
2172
2173 if (rtx_equal_p (lhs, rhs))
2174 {
2175 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2176 rtx coeff;
2177 bool speed = optimize_function_for_speed_p (cfun);
2178
2179 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2180
2181 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2182 return (set_src_cost (tem, mode, speed)
2183 <= set_src_cost (orig, mode, speed) ? tem : 0);
2184 }
2185 }
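 /* E.g. (plus (mult x 3) x) has coeff0 == 3 and coeff1 == 1, so it
    folds to (mult x 4), and (plus (ashift x 2) x) likewise becomes
    (mult x 5); the set_src_cost comparison then keeps whichever form
    is cheaper for the target. */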
2186
2187 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2188 if (CONST_SCALAR_INT_P (op1)
2189 && GET_CODE (op0) == XOR
2190 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2191 && mode_signbit_p (mode, op1))
2192 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2193 simplify_gen_binary (XOR, mode, op1,
2194 XEXP (op0, 1)));
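 /* Adding the sign bit is the same as flipping it, because the carry
    out of the most significant bit is discarded; e.g. in QImode,
    (plus (xor x 0x12) 0x80) becomes (xor x 0x92). */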
2195
2196 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2197 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2198 && GET_CODE (op0) == MULT
2199 && GET_CODE (XEXP (op0, 0)) == NEG)
2200 {
2201 rtx in1, in2;
2202
2203 in1 = XEXP (XEXP (op0, 0), 0);
2204 in2 = XEXP (op0, 1);
2205 return simplify_gen_binary (MINUS, mode, op1,
2206 simplify_gen_binary (MULT, mode,
2207 in1, in2));
2208 }
2209
2210 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2211 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2212 is 1. */
2213 if (COMPARISON_P (op0)
2214 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2215 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2216 && (reversed = reversed_comparison (op0, mode)))
2217 return
2218 simplify_gen_unary (NEG, mode, reversed, mode);
2219
2220 /* If one of the operands is a PLUS or a MINUS, see if we can
2221 simplify this by the associative law.
2222 Don't use the associative law for floating point.
2223 The inaccuracy makes it nonassociative,
2224 and subtle programs can break if operations are associated. */
2225
2226 if (INTEGRAL_MODE_P (mode)
2227 && (plus_minus_operand_p (op0)
2228 || plus_minus_operand_p (op1))
2229 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2230 return tem;
2231
2232 /* Reassociate floating point addition only when the user
2233 specifies associative math operations. */
2234 if (FLOAT_MODE_P (mode)
2235 && flag_associative_math)
2236 {
2237 tem = simplify_associative_operation (code, mode, op0, op1);
2238 if (tem)
2239 return tem;
2240 }
2241 break;
2242
2243 case COMPARE:
2244 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2245 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2246 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2247 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2248 {
2249 rtx xop00 = XEXP (op0, 0);
2250 rtx xop10 = XEXP (op1, 0);
2251
2252 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2253 return xop00;
2254
2255 if (REG_P (xop00) && REG_P (xop10)
2256 && GET_MODE (xop00) == GET_MODE (xop10)
2257 && REGNO (xop00) == REGNO (xop10)
2258 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2259 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2260 return xop00;
2261 }
2262 break;
2263
2264 case MINUS:
2265 /* We can't assume x-x is 0 even with non-IEEE floating point,
2266 but since it is zero except in very strange circumstances, we
2267 will treat it as zero with -ffinite-math-only. */
2268 if (rtx_equal_p (trueop0, trueop1)
2269 && ! side_effects_p (op0)
2270 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2271 return CONST0_RTX (mode);
2272
2273 /* Change subtraction from zero into negation. (0 - x) is the
2274 same as -x when x is NaN, infinite, or finite and nonzero.
2275 But if the mode has signed zeros, and does not round towards
2276 -infinity, then 0 - 0 is 0, not -0. */
2277 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2278 return simplify_gen_unary (NEG, mode, op1, mode);
2279
2280 /* (-1 - a) is ~a, unless the expression contains symbolic
2281 constants, in which case not retaining additions and
2282 subtractions could cause invalid assembly to be produced. */
2283 if (trueop0 == constm1_rtx
2284 && !contains_symbolic_reference_p (op1))
2285 return simplify_gen_unary (NOT, mode, op1, mode);
2286
2287 /* Subtracting 0 has no effect unless the mode has signed zeros
2288 and supports rounding towards -infinity. In such a case,
2289 0 - 0 is -0. */
2290 if (!(HONOR_SIGNED_ZEROS (mode)
2291 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2292 && trueop1 == CONST0_RTX (mode))
2293 return op0;
2294
2295 /* See if this is something like X * C - X or vice versa or
2296 if the multiplication is written as a shift. If so, we can
2297 distribute and make a new multiply, shift, or maybe just
2298 have X (if C is 2 in the example above). But don't make
2299 something more expensive than we had before. */
2300
2301 if (SCALAR_INT_MODE_P (mode))
2302 {
2303 rtx lhs = op0, rhs = op1;
2304
2305 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2306 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2307
2308 if (GET_CODE (lhs) == NEG)
2309 {
2310 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2311 lhs = XEXP (lhs, 0);
2312 }
2313 else if (GET_CODE (lhs) == MULT
2314 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2315 {
2316 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2317 lhs = XEXP (lhs, 0);
2318 }
2319 else if (GET_CODE (lhs) == ASHIFT
2320 && CONST_INT_P (XEXP (lhs, 1))
2321 && INTVAL (XEXP (lhs, 1)) >= 0
2322 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2323 {
2324 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2325 GET_MODE_PRECISION (mode));
2326 lhs = XEXP (lhs, 0);
2327 }
2328
2329 if (GET_CODE (rhs) == NEG)
2330 {
2331 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2332 rhs = XEXP (rhs, 0);
2333 }
2334 else if (GET_CODE (rhs) == MULT
2335 && CONST_INT_P (XEXP (rhs, 1)))
2336 {
2337 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2338 rhs = XEXP (rhs, 0);
2339 }
2340 else if (GET_CODE (rhs) == ASHIFT
2341 && CONST_INT_P (XEXP (rhs, 1))
2342 && INTVAL (XEXP (rhs, 1)) >= 0
2343 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2344 {
2345 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2346 GET_MODE_PRECISION (mode));
2347 negcoeff1 = -negcoeff1;
2348 rhs = XEXP (rhs, 0);
2349 }
2350
2351 if (rtx_equal_p (lhs, rhs))
2352 {
2353 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2354 rtx coeff;
2355 bool speed = optimize_function_for_speed_p (cfun);
2356
2357 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2358
2359 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2360 return (set_src_cost (tem, mode, speed)
2361 <= set_src_cost (orig, mode, speed) ? tem : 0);
2362 }
2363 }
2364
2365 /* (a - (-b)) -> (a + b). True even for IEEE. */
2366 if (GET_CODE (op1) == NEG)
2367 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2368
2369 /* (-x - c) may be simplified as (-c - x). */
2370 if (GET_CODE (op0) == NEG
2371 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2372 {
2373 tem = simplify_unary_operation (NEG, mode, op1, mode);
2374 if (tem)
2375 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2376 }
2377
2378 /* Don't let a relocatable value get a negative coeff. */
2379 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2380 return simplify_gen_binary (PLUS, mode,
PB
2380 return simplify_gen_binary (PLUS, mode,
2381 op0,
2382 neg_const_int (mode, op1));
2383
2384 /* (x - (x & y)) -> (x & ~y) */
6b74529d 2385 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
0a67e02c
PB
2386 {
2387 if (rtx_equal_p (op0, XEXP (op1, 0)))
79ae63b1 2388 {
0a67e02c
PB
2389 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2390 GET_MODE (XEXP (op1, 1)));
2391 return simplify_gen_binary (AND, mode, op0, tem);
2392 }
2393 if (rtx_equal_p (op0, XEXP (op1, 1)))
2394 {
2395 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2396 GET_MODE (XEXP (op1, 0)));
2397 return simplify_gen_binary (AND, mode, op0, tem);
79ae63b1 2398 }
79ae63b1 2399 }
1941069a 2400
bd1ef757
PB
2401 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2402 by reversing the comparison code if valid. */
2403 if (STORE_FLAG_VALUE == 1
2404 && trueop0 == const1_rtx
2405 && COMPARISON_P (op1)
2406 && (reversed = reversed_comparison (op1, mode)))
2407 return reversed;
2408
2409 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2410 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2411 && GET_CODE (op1) == MULT
2412 && GET_CODE (XEXP (op1, 0)) == NEG)
2413 {
2414 rtx in1, in2;
2415
2416 in1 = XEXP (XEXP (op1, 0), 0);
2417 in2 = XEXP (op1, 1);
2418 return simplify_gen_binary (PLUS, mode,
2419 simplify_gen_binary (MULT, mode,
2420 in1, in2),
2421 op0);
2422 }
2423
2424 /* Canonicalize (minus (neg A) (mult B C)) to
2425 (minus (mult (neg B) C) A). */
2426 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2427 && GET_CODE (op1) == MULT
2428 && GET_CODE (op0) == NEG)
2429 {
2430 rtx in1, in2;
2431
2432 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2433 in2 = XEXP (op1, 1);
2434 return simplify_gen_binary (MINUS, mode,
2435 simplify_gen_binary (MULT, mode,
2436 in1, in2),
2437 XEXP (op0, 0));
2438 }
2439
2440 /* If one of the operands is a PLUS or a MINUS, see if we can
2441 simplify this by the associative law. This will, for example,
2442 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2443 Don't use the associative law for floating point.
2444 The inaccuracy makes it nonassociative,
2445 and subtle programs can break if operations are associated. */
2446
2447 if (INTEGRAL_MODE_P (mode)
2448 && (plus_minus_operand_p (op0)
2449 || plus_minus_operand_p (op1))
2450 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2451 return tem;
2452 break;
2453
2454 case MULT:
2455 if (trueop1 == constm1_rtx)
2456 return simplify_gen_unary (NEG, mode, op0, mode);
2457
2458 if (GET_CODE (op0) == NEG)
2459 {
2460 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2461 /* If op1 is a MULT as well and simplify_unary_operation
2462 just moved the NEG to the second operand, simplify_gen_binary
2463 below could, through simplify_associative_operation, move
2464 the NEG around again and recurse endlessly. */
2465 if (temp
2466 && GET_CODE (op1) == MULT
2467 && GET_CODE (temp) == MULT
2468 && XEXP (op1, 0) == XEXP (temp, 0)
2469 && GET_CODE (XEXP (temp, 1)) == NEG
2470 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2471 temp = NULL_RTX;
2472 if (temp)
2473 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2474 }
2475 if (GET_CODE (op1) == NEG)
2476 {
2477 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2478 /* If op0 is a MULT as well and simplify_unary_operation
2479 just moved the NEG to the second operand, simplify_gen_binary
2480 below could, through simplify_associative_operation, move
2481 the NEG around again and recurse endlessly. */
2482 if (temp
2483 && GET_CODE (op0) == MULT
2484 && GET_CODE (temp) == MULT
2485 && XEXP (op0, 0) == XEXP (temp, 0)
2486 && GET_CODE (XEXP (temp, 1)) == NEG
2487 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2488 temp = NULL_RTX;
2489 if (temp)
2490 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2491 }
2492
2493 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2494 x is NaN, since x * 0 is then also NaN. Nor is it valid
2495 when the mode has signed zeros, since multiplying a negative
2496 number by 0 will give -0, not 0. */
2497 if (!HONOR_NANS (mode)
2498 && !HONOR_SIGNED_ZEROS (mode)
2499 && trueop1 == CONST0_RTX (mode)
2500 && ! side_effects_p (op0))
2501 return op1;
2502
2503 /* In IEEE floating point, x*1 is not equivalent to x for
2504 signalling NaNs. */
2505 if (!HONOR_SNANS (mode)
2506 && trueop1 == CONST1_RTX (mode))
2507 return op0;
2508
2509 /* Convert multiply by constant power of two into shift. */
2510 if (CONST_SCALAR_INT_P (trueop1))
2511 {
2512 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2513 if (val >= 0)
2514 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2515 }
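 /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)),
    since 8 == 1 << 3; wi::exact_log2 is negative for non-powers of
    two, so those fall through unchanged. */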
2516
2517 /* x*2 is x+x and x*(-1) is -x */
2518 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2519 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2520 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2521 && GET_MODE (op0) == mode)
2522 {
2523 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2524
2525 if (real_equal (d1, &dconst2))
2526 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2527
2528 if (!HONOR_SNANS (mode)
2529 && real_equal (d1, &dconstm1))
2530 return simplify_gen_unary (NEG, mode, op0, mode);
2531 }
2532
2533 /* Optimize -x * -x as x * x. */
2534 if (FLOAT_MODE_P (mode)
2535 && GET_CODE (op0) == NEG
2536 && GET_CODE (op1) == NEG
2537 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2538 && !side_effects_p (XEXP (op0, 0)))
2539 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2540
2541 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2542 if (SCALAR_FLOAT_MODE_P (mode)
2543 && GET_CODE (op0) == ABS
2544 && GET_CODE (op1) == ABS
2545 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2546 && !side_effects_p (XEXP (op0, 0)))
2547 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2548
2549 /* Reassociate multiplication, but for floating point MULTs
2550 only when the user specifies unsafe math optimizations. */
2551 if (! FLOAT_MODE_P (mode)
2552 || flag_unsafe_math_optimizations)
2553 {
2554 tem = simplify_associative_operation (code, mode, op0, op1);
2555 if (tem)
2556 return tem;
2557 }
2558 break;
2559
2560 case IOR:
2561 if (trueop1 == CONST0_RTX (mode))
2562 return op0;
2563 if (INTEGRAL_MODE_P (mode)
2564 && trueop1 == CONSTM1_RTX (mode)
2565 && !side_effects_p (op0))
2566 return op1;
2567 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2568 return op0;
2569 /* A | (~A) -> -1 */
2570 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2571 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2572 && ! side_effects_p (op0)
2573 && SCALAR_INT_MODE_P (mode))
2574 return constm1_rtx;
2575
2576 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2577 if (CONST_INT_P (op1)
2578 && HWI_COMPUTABLE_MODE_P (mode)
2579 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2580 && !side_effects_p (op0))
2581 return op1;
2582
2583 /* Canonicalize (X & C1) | C2. */
2584 if (GET_CODE (op0) == AND
2585 && CONST_INT_P (trueop1)
2586 && CONST_INT_P (XEXP (op0, 1)))
2587 {
2588 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2589 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2590 HOST_WIDE_INT c2 = INTVAL (trueop1);
2591
2592 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2593 if ((c1 & c2) == c1
2594 && !side_effects_p (XEXP (op0, 0)))
2595 return trueop1;
2596
2597 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2598 if (((c1|c2) & mask) == mask)
2599 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2600
2601 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2602 if (((c1 & ~c2) & mask) != (c1 & mask))
2603 {
2604 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2605 gen_int_mode (c1 & ~c2, mode));
2606 return simplify_gen_binary (IOR, mode, tem, op1);
2607 }
2608 }
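 /* E.g. in QImode: with C1 == 0x0f and C2 == 0xff, (X & 0x0f) | 0xff
    folds to 0xff; with C1 == 0x0f and C2 == 0xf0, C1|C2 covers the
    mask so it becomes (ior X 0xf0); and with C1 == 0x3f, C2 == 0x0f
    the AND mask shrinks to 0x30. */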
2609
2610 /* Convert (A & B) | A to A. */
2611 if (GET_CODE (op0) == AND
2612 && (rtx_equal_p (XEXP (op0, 0), op1)
2613 || rtx_equal_p (XEXP (op0, 1), op1))
2614 && ! side_effects_p (XEXP (op0, 0))
2615 && ! side_effects_p (XEXP (op0, 1)))
2616 return op1;
2617
2618 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2619 mode size to (rotate A CX). */
2620
2621 if (GET_CODE (op1) == ASHIFT
2622 || GET_CODE (op1) == SUBREG)
2623 {
2624 opleft = op1;
2625 opright = op0;
2626 }
2627 else
2628 {
2629 opright = op1;
2630 opleft = op0;
2631 }
2632
2633 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2634 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2635 && CONST_INT_P (XEXP (opleft, 1))
2636 && CONST_INT_P (XEXP (opright, 1))
2637 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2638 == GET_MODE_PRECISION (mode)))
2639 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
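 /* E.g. in SImode, (ior (ashift x (const_int 8))
    (lshiftrt x (const_int 24))) has shift counts summing to the
    32-bit precision, so it is recognized as (rotate x (const_int 8)). */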
2640
2641 /* Same, but for ashift that has been "simplified" to a wider mode
2642 by simplify_shift_const. */
2643
2644 if (GET_CODE (opleft) == SUBREG
2645 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2646 && GET_CODE (opright) == LSHIFTRT
2647 && GET_CODE (XEXP (opright, 0)) == SUBREG
2648 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2649 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2650 && (GET_MODE_SIZE (GET_MODE (opleft))
2651 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2652 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2653 SUBREG_REG (XEXP (opright, 0)))
2654 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2655 && CONST_INT_P (XEXP (opright, 1))
2656 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2657 == GET_MODE_PRECISION (mode)))
2658 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2659 XEXP (SUBREG_REG (opleft), 1));
2660
2660
2661 /* If we have (ior (and X C1) C2), simplify this by making
2662 C1 as small as possible if C1 actually changes. */
2663 if (CONST_INT_P (op1)
2664 && (HWI_COMPUTABLE_MODE_P (mode)
2665 || INTVAL (op1) > 0)
2666 && GET_CODE (op0) == AND
2667 && CONST_INT_P (XEXP (op0, 1))
2668 && CONST_INT_P (op1)
2669 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2670 {
2671 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2672 gen_int_mode (UINTVAL (XEXP (op0, 1))
2673 & ~UINTVAL (op1),
2674 mode));
2675 return simplify_gen_binary (IOR, mode, tmp, op1);
2676 }
2677
2678 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2679 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2680 the PLUS does not affect any of the bits in OP1: then we can do
2681 the IOR as a PLUS and we can associate. This is valid if OP1
2682 can be safely shifted left C bits. */
2683 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2684 && GET_CODE (XEXP (op0, 0)) == PLUS
2685 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2686 && CONST_INT_P (XEXP (op0, 1))
2687 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2688 {
2689 int count = INTVAL (XEXP (op0, 1));
2690 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2691
2692 if (mask >> count == INTVAL (trueop1)
2693 && trunc_int_for_mode (mask, mode) == mask
2694 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2695 return simplify_gen_binary (ASHIFTRT, mode,
2696 plus_constant (mode, XEXP (op0, 0),
2697 mask),
2698 XEXP (op0, 1));
2699 }
2700
2701 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2702 if (tem)
2703 return tem;
2704
2705 tem = simplify_associative_operation (code, mode, op0, op1);
2706 if (tem)
2707 return tem;
2708 break;
2709
2710 case XOR:
2711 if (trueop1 == CONST0_RTX (mode))
2712 return op0;
2713 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2714 return simplify_gen_unary (NOT, mode, op0, mode);
2715 if (rtx_equal_p (trueop0, trueop1)
2716 && ! side_effects_p (op0)
2717 && GET_MODE_CLASS (mode) != MODE_CC)
2718 return CONST0_RTX (mode);
2719
2720 /* Canonicalize XOR of the most significant bit to PLUS. */
2721 if (CONST_SCALAR_INT_P (op1)
2722 && mode_signbit_p (mode, op1))
2723 return simplify_gen_binary (PLUS, mode, op0, op1);
2724 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2725 if (CONST_SCALAR_INT_P (op1)
2726 && GET_CODE (op0) == PLUS
2727 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2728 && mode_signbit_p (mode, XEXP (op0, 1)))
2729 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2730 simplify_gen_binary (XOR, mode, op1,
2731 XEXP (op0, 1)));
2732
2733 /* If we are XORing two things that have no bits in common,
2734 convert them into an IOR. This helps to detect rotation encoded
2735 using those methods and possibly other simplifications. */
2736
2737 if (HWI_COMPUTABLE_MODE_P (mode)
2738 && (nonzero_bits (op0, mode)
2739 & nonzero_bits (op1, mode)) == 0)
2740 return (simplify_gen_binary (IOR, mode, op0, op1));
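 /* E.g. if nonzero_bits shows op0 can only set 0x0f and op1 only
    0xf0, then XOR and IOR agree on every bit of the result, so the
    IOR form is used as the canonical one. */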
2741
2742 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2743 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2744 (NOT y). */
2745 {
2746 int num_negated = 0;
2747
2748 if (GET_CODE (op0) == NOT)
2749 num_negated++, op0 = XEXP (op0, 0);
2750 if (GET_CODE (op1) == NOT)
2751 num_negated++, op1 = XEXP (op1, 0);
2752
2753 if (num_negated == 2)
2754 return simplify_gen_binary (XOR, mode, op0, op1);
2755 else if (num_negated == 1)
2756 return simplify_gen_unary (NOT, mode,
2757 simplify_gen_binary (XOR, mode, op0, op1),
2758 mode);
2759 }
2760
2761 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2762 correspond to a machine insn or result in further simplifications
2763 if B is a constant. */
2764
2765 if (GET_CODE (op0) == AND
2766 && rtx_equal_p (XEXP (op0, 1), op1)
2767 && ! side_effects_p (op1))
2768 return simplify_gen_binary (AND, mode,
2769 simplify_gen_unary (NOT, mode,
2770 XEXP (op0, 0), mode),
2771 op1);
2772
2773 else if (GET_CODE (op0) == AND
2774 && rtx_equal_p (XEXP (op0, 0), op1)
2775 && ! side_effects_p (op1))
2776 return simplify_gen_binary (AND, mode,
2777 simplify_gen_unary (NOT, mode,
2778 XEXP (op0, 1), mode),
2779 op1);
2780
2781 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2782 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2783 out bits inverted twice and not set by C. Similarly, given
2784 (xor (and (xor A B) C) D), simplify without inverting C in
2785 the xor operand: (xor (and A C) (B&C)^D).
2786 */
2787 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2788 && GET_CODE (XEXP (op0, 0)) == XOR
2789 && CONST_INT_P (op1)
2790 && CONST_INT_P (XEXP (op0, 1))
2791 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2792 {
2793 enum rtx_code op = GET_CODE (op0);
2794 rtx a = XEXP (XEXP (op0, 0), 0);
2795 rtx b = XEXP (XEXP (op0, 0), 1);
2796 rtx c = XEXP (op0, 1);
2797 rtx d = op1;
2798 HOST_WIDE_INT bval = INTVAL (b);
2799 HOST_WIDE_INT cval = INTVAL (c);
2800 HOST_WIDE_INT dval = INTVAL (d);
2801 HOST_WIDE_INT xcval;
2802
2803 if (op == IOR)
2804 xcval = ~cval;
2805 else
2806 xcval = cval;
2807
2808 return simplify_gen_binary (XOR, mode,
2809 simplify_gen_binary (op, mode, a, c),
2810 gen_int_mode ((bval & xcval) ^ dval,
2811 mode));
2812 }
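 /* Worked QImode example: (xor (ior (xor A 0x0f) 0x3c) 0x55) has
    b == 0x0f, c == 0x3c, d == 0x55 and xcval == ~0x3c == 0xc3, so
    (bval & xcval) ^ dval == 0x03 ^ 0x55 == 0x56, giving
    (xor (ior A 0x3c) 0x56). */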
2813
2814 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2815 we can transform like this:
2816 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2817 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2818 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2819 Attempt a few simplifications when B and C are both constants. */
2820 if (GET_CODE (op0) == AND
2821 && CONST_INT_P (op1)
2822 && CONST_INT_P (XEXP (op0, 1)))
2823 {
2824 rtx a = XEXP (op0, 0);
2825 rtx b = XEXP (op0, 1);
2826 rtx c = op1;
2827 HOST_WIDE_INT bval = INTVAL (b);
2828 HOST_WIDE_INT cval = INTVAL (c);
2829
2830 /* Instead of computing ~A&C, we compute its negated value,
2831 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2832 optimize for sure. If it does not simplify, we still try
2833 to compute ~A&C below, but since that always allocates
2834 RTL, we don't try that before committing to returning a
2835 simplified expression. */
2836 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2837 GEN_INT (~cval));
2838
2839 if ((~cval & bval) == 0)
2840 {
2841 rtx na_c = NULL_RTX;
2842 if (n_na_c)
2843 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2844 else
2845 {
2846 /* If ~A does not simplify, don't bother: we don't
2847 want to simplify 2 operations into 3, and if na_c
2848 were to simplify with na, n_na_c would have
2849 simplified as well. */
2850 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2851 if (na)
2852 na_c = simplify_gen_binary (AND, mode, na, c);
2853 }
2854
2855 /* Try to simplify ~A&C | ~B&C. */
2856 if (na_c != NULL_RTX)
2857 return simplify_gen_binary (IOR, mode, na_c,
2858 gen_int_mode (~bval & cval, mode));
2859 }
2860 else
2861 {
2862 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2863 if (n_na_c == CONSTM1_RTX (mode))
2864 {
2865 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2866 gen_int_mode (~cval & bval,
2867 mode));
2868 return simplify_gen_binary (IOR, mode, a_nc_b,
2869 gen_int_mode (~bval & cval,
2870 mode));
2871 }
2872 }
2873 }
2874
2875 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2876 comparison if STORE_FLAG_VALUE is 1. */
2877 if (STORE_FLAG_VALUE == 1
2878 && trueop1 == const1_rtx
2879 && COMPARISON_P (op0)
2880 && (reversed = reversed_comparison (op0, mode)))
2881 return reversed;
2882
2883 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2884 is (lt foo (const_int 0)), so we can perform the above
2885 simplification if STORE_FLAG_VALUE is 1. */
2886
2887 if (STORE_FLAG_VALUE == 1
2888 && trueop1 == const1_rtx
2889 && GET_CODE (op0) == LSHIFTRT
2890 && CONST_INT_P (XEXP (op0, 1))
2891 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2892 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2893
2894 /* (xor (comparison foo bar) (const_int sign-bit))
2895 when STORE_FLAG_VALUE is the sign bit. */
2896 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2897 && trueop1 == const_true_rtx
2898 && COMPARISON_P (op0)
2899 && (reversed = reversed_comparison (op0, mode)))
2900 return reversed;
2901
2902 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2903 if (tem)
2904 return tem;
2905
2906 tem = simplify_associative_operation (code, mode, op0, op1);
2907 if (tem)
2908 return tem;
2909 break;
2910
2911 case AND:
2912 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2913 return trueop1;
2914 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2915 return op0;
2916 if (HWI_COMPUTABLE_MODE_P (mode))
2917 {
2918 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2919 HOST_WIDE_INT nzop1;
2920 if (CONST_INT_P (trueop1))
2921 {
2922 HOST_WIDE_INT val1 = INTVAL (trueop1);
2923 /* If we are turning off bits already known off in OP0, we need
2924 not do an AND. */
2925 if ((nzop0 & ~val1) == 0)
2926 return op0;
2927 }
2928 nzop1 = nonzero_bits (trueop1, mode);
2929 /* If we are clearing all the nonzero bits, the result is zero. */
2930 if ((nzop1 & nzop0) == 0
2931 && !side_effects_p (op0) && !side_effects_p (op1))
2932 return CONST0_RTX (mode);
2933 }
2934 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2935 && GET_MODE_CLASS (mode) != MODE_CC)
2936 return op0;
2937 /* A & (~A) -> 0 */
2938 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2939 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2940 && ! side_effects_p (op0)
2941 && GET_MODE_CLASS (mode) != MODE_CC)
2942 return CONST0_RTX (mode);
2943
2944 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2945 there are no nonzero bits of C outside of X's mode. */
2946 if ((GET_CODE (op0) == SIGN_EXTEND
2947 || GET_CODE (op0) == ZERO_EXTEND)
2948 && CONST_INT_P (trueop1)
2949 && HWI_COMPUTABLE_MODE_P (mode)
2950 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2951 & UINTVAL (trueop1)) == 0)
2952 {
2953 machine_mode imode = GET_MODE (XEXP (op0, 0));
2954 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2955 gen_int_mode (INTVAL (trueop1),
2956 imode));
2957 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2958 }
2959
2960 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2961 we might be able to further simplify the AND with X and potentially
2962 remove the truncation altogether. */
2963 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2964 {
2965 rtx x = XEXP (op0, 0);
2966 machine_mode xmode = GET_MODE (x);
2967 tem = simplify_gen_binary (AND, xmode, x,
2968 gen_int_mode (INTVAL (trueop1), xmode));
2969 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2970 }
2971
2972 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2973 if (GET_CODE (op0) == IOR
2974 && CONST_INT_P (trueop1)
2975 && CONST_INT_P (XEXP (op0, 1)))
2976 {
2977 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2978 return simplify_gen_binary (IOR, mode,
2979 simplify_gen_binary (AND, mode,
2980 XEXP (op0, 0), op1),
2981 gen_int_mode (tmp, mode));
2982 }
2983
2984 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2985 insn (and may simplify more). */
2986 if (GET_CODE (op0) == XOR
2987 && rtx_equal_p (XEXP (op0, 0), op1)
2988 && ! side_effects_p (op1))
2989 return simplify_gen_binary (AND, mode,
2990 simplify_gen_unary (NOT, mode,
2991 XEXP (op0, 1), mode),
2992 op1);
2993
2994 if (GET_CODE (op0) == XOR
2995 && rtx_equal_p (XEXP (op0, 1), op1)
2996 && ! side_effects_p (op1))
2997 return simplify_gen_binary (AND, mode,
2998 simplify_gen_unary (NOT, mode,
2999 XEXP (op0, 0), mode),
3000 op1);
3001
3002 /* Similarly for (~(A ^ B)) & A. */
3003 if (GET_CODE (op0) == NOT
3004 && GET_CODE (XEXP (op0, 0)) == XOR
3005 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3006 && ! side_effects_p (op1))
3007 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3008
3009 if (GET_CODE (op0) == NOT
3010 && GET_CODE (XEXP (op0, 0)) == XOR
3011 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3012 && ! side_effects_p (op1))
3013 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3014
3015 /* Convert (A | B) & A to A. */
3016 if (GET_CODE (op0) == IOR
3017 && (rtx_equal_p (XEXP (op0, 0), op1)
3018 || rtx_equal_p (XEXP (op0, 1), op1))
3019 && ! side_effects_p (XEXP (op0, 0))
3020 && ! side_effects_p (XEXP (op0, 1)))
3021 return op1;
3022
3023 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3024 ((A & N) + B) & M -> (A + B) & M
3025 Similarly if (N & M) == 0,
3026 ((A | N) + B) & M -> (A + B) & M
3027 and for - instead of + and/or ^ instead of |.
3028 Also, if (N & M) == 0, then
3029 (A +- N) & M -> A & M. */
3030 if (CONST_INT_P (trueop1)
3031 && HWI_COMPUTABLE_MODE_P (mode)
3032 && ~UINTVAL (trueop1)
3033 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3034 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3035 {
3036 rtx pmop[2];
3037 int which;
3038
3039 pmop[0] = XEXP (op0, 0);
3040 pmop[1] = XEXP (op0, 1);
3041
3042 if (CONST_INT_P (pmop[1])
3043 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3044 return simplify_gen_binary (AND, mode, pmop[0], op1);
3045
3046 for (which = 0; which < 2; which++)
3047 {
3048 tem = pmop[which];
3049 switch (GET_CODE (tem))
3050 {
3051 case AND:
3052 if (CONST_INT_P (XEXP (tem, 1))
3053 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3054 == UINTVAL (trueop1))
3055 pmop[which] = XEXP (tem, 0);
3056 break;
3057 case IOR:
3058 case XOR:
3059 if (CONST_INT_P (XEXP (tem, 1))
3060 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3061 pmop[which] = XEXP (tem, 0);
3062 break;
3063 default:
3064 break;
3065 }
3066 }
3067
3068 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3069 {
3070 tem = simplify_gen_binary (GET_CODE (op0), mode,
3071 pmop[0], pmop[1]);
3072 return simplify_gen_binary (code, mode, tem, op1);
3073 }
3074 }
3075
3076 /* (and X (ior (not X) Y)) -> (and X Y) */
3077 if (GET_CODE (op1) == IOR
3078 && GET_CODE (XEXP (op1, 0)) == NOT
3079 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3080 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3081
3082 /* (and (ior (not X) Y) X) -> (and X Y) */
3083 if (GET_CODE (op0) == IOR
3084 && GET_CODE (XEXP (op0, 0)) == NOT
3085 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3086 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3087
3088 /* (and X (ior Y (not X))) -> (and X Y) */
3089 if (GET_CODE (op1) == IOR
3090 && GET_CODE (XEXP (op1, 1)) == NOT
3091 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3092 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3093
3094 /* (and (ior Y (not X)) X) -> (and X Y) */
3095 if (GET_CODE (op0) == IOR
3096 && GET_CODE (XEXP (op0, 1)) == NOT
3097 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3098 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3099
3100 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3101 if (tem)
3102 return tem;
3103
3104 tem = simplify_associative_operation (code, mode, op0, op1);
3105 if (tem)
3106 return tem;
3107 break;
3108
3109 case UDIV:
3110 /* 0/x is 0 (or x&0 if x has side-effects). */
3111 if (trueop0 == CONST0_RTX (mode))
3112 {
3113 if (side_effects_p (op1))
3114 return simplify_gen_binary (AND, mode, op1, trueop0);
3115 return trueop0;
3116 }
3117 /* x/1 is x. */
3118 if (trueop1 == CONST1_RTX (mode))
76bd29f6
JJ
3119 {
3120 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3121 if (tem)
3122 return tem;
3123 }
3f2960d5 3124 /* Convert divide by power of two into shift. */
481683e1 3125 if (CONST_INT_P (trueop1)
43c36287 3126 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3f2960d5
RH
3127 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3128 break;
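      /* Added illustrative commentary, not part of the original source,
         for the UDIV power-of-two conversion above: exact_log2 returns
         the exponent for an exact power of two, so e.g.
         (udiv:SI x (const_int 8)) becomes (lshiftrt:SI x (const_int 3)),
         since 8 == 1 << 3.  */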

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
              && trueop1 != CONST0_RTX (mode))
            {
              const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

              /* x/-1.0 is -x.  */
              if (real_equal (d1, &dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !real_equal (d1, &dconst0))
                {
                  REAL_VALUE_TYPE d;
                  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
                  tem = const_double_from_real_value (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
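      /* Added illustrative commentary, not part of the original source,
         for the -freciprocal-math transformation above: a division such
         as (div:DF x (const_double 4.0)) is rewritten as
         (mult:DF x (const_double 0.25)); in this example 1/4.0 is
         exactly representable, so no accuracy is lost.  */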

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (INTVAL (op1) - 1, mode));
      break;
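      /* Added illustrative commentary, not part of the original source,
         for the UMOD power-of-two rule above: e.g.
         (umod:SI x (const_int 16)) becomes (and:SI x (const_int 15)),
         because an unsigned remainder by a power of two just keeps the
         low bits.  */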

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
                       GET_MODE_PRECISION (mode) - 1))
        return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                    mode, op0,
                                    GEN_INT (GET_MODE_PRECISION (mode)
                                             - INTVAL (trueop1)));
#endif
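      /* Added illustrative commentary, not part of the original source:
         in SImode (precision 32), (rotate:SI x (const_int 24)) is
         canonicalized to (rotatert:SI x (const_int 8)), since rotating
         left by 24 equals rotating right by 32 - 24 = 8.  */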
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
      /* Given:
         scalar modes M1, M2
         scalar constants c1, c2
         size (M2) > size (M1)
         c1 == size (M2) - size (M1)
         optimize:
         (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
                                 <low_part>)
                      (const_int <c2>))
         to:
         (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
                    <low_part>).  */
      if (code == ASHIFTRT
          && !VECTOR_MODE_P (mode)
          && SUBREG_P (op0)
          && CONST_INT_P (op1)
          && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
          && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
          && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
          && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
              > GET_MODE_BITSIZE (mode))
          && (INTVAL (XEXP (SUBREG_REG (op0), 1))
              == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
                  - GET_MODE_BITSIZE (mode)))
          && subreg_lowpart_p (op0))
        {
          rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
                             + INTVAL (op1));
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
          tmp = simplify_gen_binary (ASHIFTRT,
                                     GET_MODE (SUBREG_REG (op0)),
                                     XEXP (SUBREG_REG (op0), 0),
                                     tmp);
          return lowpart_subreg (mode, tmp, inner_mode);
        }
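      /* Added illustrative commentary, not part of the original source:
         with M1 = SImode, M2 = DImode and thus c1 = 32,
         (ashiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 32)) 0)
                      (const_int 5))
         becomes
         (subreg:SI (ashiftrt:DI (reg:DI r) (const_int 37)) 0),
         merging the two right shifts into one.  */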
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;
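      /* Added illustrative commentary, not part of the original source,
         for the canonicalize_shift code above: on a SHIFT_COUNT_TRUNCATED
         target, (ashift:SI x (const_int 33)) is rewritten as
         (ashift:SI x (const_int 1)), since 33 & (32 - 1) == 1.  */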

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT) width)
        {
          machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
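      /* Added illustrative commentary, not part of the original source,
         for the CLZ optimization above: if SImode CLZ is defined to be
         32 at zero, then (clz:SI x) is 32 (the only value with bit 5
         set) exactly when x == 0, so
         (lshiftrt:SI (clz:SI x) (const_int 5)) simplifies to
         (eq:SI x (const_int 0)).  */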
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with an optional nested VEC_CONCAT expression).  Some targets
             (i386) extract a scalar element from a vector using a chain of
             nested VEC_SELECT expressions.  When the input operand is a
             memory operand, this operation can be simplified to a simple
             scalar load from an offset memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_UNIT_SIZE (opmode);
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select the element pointed to by the nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when the nested VEC_SELECT wraps a
                 VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out the number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_UNIT_SIZE (mode00);
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_UNIT_SIZE (mode01);
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select the correct operand of the VEC_CONCAT
                     and adjust the selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }
          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_UNIT_SIZE (mode);
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }

          /* Recognize the identity.  */
          if (GET_MODE (trueop0) == mode)
            {
              bool maybe_ident = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (j) || INTVAL (j) != i)
                    {
                      maybe_ident = false;
                      break;
                    }
                }
              if (maybe_ident)
                return trueop0;
            }
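          /* Added illustrative commentary, not part of the original
             source: the identity case turns
             (vec_select:V4SI x (parallel [(const_int 0) (const_int 1)
                                           (const_int 2) (const_int 3)]))
             back into x, since the selector reads every element of x
             in its original order.  */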

          /* If we build {a,b} then permute it, build the result directly.  */
          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 0)) == mode
              && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 1)) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 4 && i1 < 4);
              subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
              subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_MODE (trueop0) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 2 && i1 < 2);
              subop0 = XEXP (trueop0, i0);
              subop1 = XEXP (trueop0, i1);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          /* If we select one half of a vec_concat, return that.  */
          if (GET_CODE (trueop0) == VEC_CONCAT
              && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
            {
              rtx subop0 = XEXP (trueop0, 0);
              rtx subop1 = XEXP (trueop0, 1);
              machine_mode mode0 = GET_MODE (subop0);
              machine_mode mode1 = GET_MODE (subop1);
              int li = GET_MODE_UNIT_SIZE (mode0);
              int l0 = GET_MODE_SIZE (mode0) / li;
              int l1 = GET_MODE_SIZE (mode1) / li;
              int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
                {
                  bool success = true;
                  for (int i = 1; i < l0; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop0;
                }
              if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
                {
                  bool success = true;
                  for (int i = 1; i < l1; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop1;
                }
            }
        }

      if (XVECLEN (trueop1, 0) == 1
          && CONST_INT_P (XVECEXP (trueop1, 0, 0))
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size;

              if (CONST_INT_P (XEXP (vec, 0)))
                {
                  /* vec_concat of two const_ints doesn't make sense with
                     respect to modes.  */
                  if (CONST_INT_P (XEXP (vec, 1)))
                    return 0;

                  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
                             - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
                }
              else
                vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      /* If we select elements in a vec_merge that all come from the same
         operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
        {
          rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
          if (CONST_INT_P (trueop02))
            {
              unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
              bool all_operand0 = true;
              bool all_operand1 = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (sel & (1 << UINTVAL (j)))
                    all_operand1 = false;
                  else
                    all_operand0 = false;
                }
              if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 0), op1);
              if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 1), op1);
            }
        }
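      /* Added illustrative commentary, not part of the original source:
         with sel == 5 (binary 0101), elements 0 and 2 of the vec_merge
         come from its first operand, so
         (vec_select:V2SI (vec_merge:V4SI a b (const_int 5))
                          (parallel [(const_int 0) (const_int 2)]))
         simplifies to
         (vec_select:V2SI a (parallel [(const_int 0) (const_int 2)])).  */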

      /* If we have two nested selects that are inverses of each
         other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
          && GET_MODE (XEXP (trueop0, 0)) == mode)
        {
          rtx op0_subop1 = XEXP (trueop0, 1);
          gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

          /* Apply the outer ordering vector to the inner one.  (The inner
             ordering vector is expressly permitted to be of a different
             length than the outer one.)  If the result is { 0, 1, ..., n-1 }
             then the two VEC_SELECTs cancel.  */
          for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
            {
              rtx x = XVECEXP (trueop1, 0, i);
              if (!CONST_INT_P (x))
                return 0;
              rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
              if (!CONST_INT_P (y) || i != INTVAL (y))
                return 0;
            }
          return XEXP (trueop0, 0);
        }

      return 0;
    case VEC_CONCAT:
      {
        machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                 ? GET_MODE (trueop0)
                                 : GET_MODE_INNER (mode));
        machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                 ? GET_MODE (trueop1)
                                 : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_SCALAR_INT_P (trueop0)
             || CONST_DOUBLE_AS_FLOAT_P (trueop0))
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_SCALAR_INT_P (trueop1)
                || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
          {
            int elt_size = GET_MODE_UNIT_SIZE (mode);
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }

        /* Try to merge two VEC_SELECTs from the same vector into a single
           one.  Restrict the transformation to avoid generating a
           VEC_SELECT with a mode unrelated to its operand.  */
        if (GET_CODE (trueop0) == VEC_SELECT
            && GET_CODE (trueop1) == VEC_SELECT
            && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
            && GET_MODE (XEXP (trueop0, 0)) == mode)
          {
            rtx par0 = XEXP (trueop0, 1);
            rtx par1 = XEXP (trueop1, 1);
            int len0 = XVECLEN (par0, 0);
            int len1 = XVECLEN (par1, 0);
            rtvec vec = rtvec_alloc (len0 + len1);
            for (int i = 0; i < len0; i++)
              RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
            for (int i = 0; i < len1; i++)
              RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
            return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
                                        gen_rtx_PARALLEL (VOIDmode, vec));
          }
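        /* Added illustrative commentary, not part of the original source:
           with x in V4SImode,
           (vec_concat:V4SI (vec_select:V2SI x (parallel [0 1]))
                            (vec_select:V2SI x (parallel [2 3])))
           is merged into (vec_select:V4SI x (parallel [0 1 2 3])),
           which the identity rule above then folds to x itself.  */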
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}

rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
                                 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
          || GET_CODE (op0) == CONST_FIXED
          || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
          || CONST_DOUBLE_AS_FLOAT_P (op1)
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return const_double_from_real_value (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          const REAL_VALUE_TYPE *opr0, *opr1;
          bool inexact;

          opr0 = CONST_DOUBLE_REAL_VALUE (op0);
          opr1 = CONST_DOUBLE_REAL_VALUE (op1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
                  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
            return 0;

          real_convert (&f0, mode, opr0);
          real_convert (&f1, mode, opr1);

          if (code == DIV
              && real_equal (&f1, &dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && real_equal (&f0, &dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return NULL_RTX;

          return const_double_from_real_value (result, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE, but a lot of
         upstream callers expect that this function never fails to
         simplify something, so if you added this to the test above,
         the code would die later anyway.  If this assert happens,
         you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
        {
        case MINUS:
          result = wi::sub (pop0, pop1);
          break;

        case PLUS:
          result = wi::add (pop0, pop1);
          break;

        case MULT:
          result = wi::mul (pop0, pop1);
          break;

        case DIV:
          result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case MOD:
          result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case UDIV:
          result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case UMOD:
          result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
          if (overflow)
            return NULL_RTX;
          break;

        case AND:
          result = wi::bit_and (pop0, pop1);
          break;

        case IOR:
          result = wi::bit_or (pop0, pop1);
          break;

        case XOR:
          result = wi::bit_xor (pop0, pop1);
          break;

        case SMIN:
          result = wi::smin (pop0, pop1);
          break;

        case SMAX:
          result = wi::smax (pop0, pop1);
          break;

        case UMIN:
          result = wi::umin (pop0, pop1);
          break;

        case UMAX:
          result = wi::umax (pop0, pop1);
          break;

        case LSHIFTRT:
        case ASHIFTRT:
        case ASHIFT:
          {
            wide_int wop1 = pop1;
            if (SHIFT_COUNT_TRUNCATED)
              wop1 = wi::umod_trunc (wop1, width);
            else if (wi::geu_p (wop1, width))
              return NULL_RTX;

            switch (code)
              {
              case LSHIFTRT:
                result = wi::lrshift (pop0, wop1);
                break;

              case ASHIFTRT:
                result = wi::arshift (pop0, wop1);
                break;

              case ASHIFT:
                result = wi::lshift (pop0, wop1);
                break;

              default:
                gcc_unreachable ();
              }
            break;
          }
        case ROTATE:
        case ROTATERT:
          {
            if (wi::neg_p (pop1))
              return NULL_RTX;

            switch (code)
              {
              case ROTATE:
                result = wi::lrotate (pop0, pop1);
                break;

              case ROTATERT:
                result = wi::rrotate (pop0, pop1);
                break;

              default:
                gcc_unreachable ();
              }
            break;
          }
        default:
          return NULL_RTX;
        }
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
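/* Added illustrative commentary, not part of the original source: the
   wide-int path above folds e.g.
     simplify_const_binary_operation (PLUS, SImode,
                                      GEN_INT (2), GEN_INT (3))
   to (const_int 5), and
     simplify_const_binary_operation (ASHIFT, SImode,
                                      GEN_INT (1), GEN_INT (4))
   to (const_int 16); out-of-range shift counts yield NULL_RTX unless
   SHIFT_COUNT_TRUNCATED masks them first.  */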


\f
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}

/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */

static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == ARRAY_SIZE (ops))
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              changed = 1;
              /* If this operand was negated then we will potentially
                 canonicalize the expression.  Similarly if we don't
                 place the operands adjacent we're re-ordering the
                 expression and thus might be performing a
                 canonicalization.  Ignore register re-ordering.
                 ??? It might be better to shuffle the ops array here,
                 but then (plus (plus (A, B), plus (C, D))) wouldn't
                 be seen as non-canonical.  */
              if (this_neg
                  || (i != n_ops - 2
                      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
                canonicalized = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              canonicalized = 1;
              break;

            case CONST:
              if (n_ops != ARRAY_SIZE (ops)
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != ARRAY_SIZE (ops))
                {
                  ops[n_ops].op = CONSTM1_RTX (mode);
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case CONST_INT:
              n_constants++;
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          int cmp;

          j = i - 1;
          cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
          if (cmp <= 0)
            continue;
          /* Just swapping registers doesn't count as canonicalization.  */
          if (cmp != 1)
            canonicalized = 1;

          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j--
                 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
          ops[j + 1] = save;
        }

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      std::swap (lhs, rhs);
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  std::swap (lhs, rhs);

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode, tem_lhs,
                                                     tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                if (tem)
                  {
                    /* Reject "simplifications" that just wrap the two
                       arguments in a CONST.  Failure to do so can result
                       in infinite recursion with simplify_binary_operation
                       when it calls us to simplify CONST operations.
                       Also, if we find such a simplification, don't try
                       any more combinations with this rhs:  We must have
                       something like symbol+offset, ie. one of the
                       trivial CONST expressions we handle later.  */
                    if (GET_CODE (tem) == CONST
                        && GET_CODE (XEXP (tem, 0)) == ncode
                        && XEXP (XEXP (tem, 0), 0) == lhs
                        && XEXP (XEXP (tem, 0), 1) == rhs)
                      break;
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (CONST_INT_P (tem) && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }

      if (!changed)
        break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }

  /* If nothing changed, fail.  */
  if (!canonicalized)
    return NULL_RTX;

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
                                         INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
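/* Added illustrative commentary, not part of the original source: given
   (plus:SI (minus:SI x y) y), the expansion loop flattens the expression
   into the ops array as { x(+), y(-), y(+) }; the pairwise loop then
   cancels y(-) against y(+) and folds away the resulting zero, leaving
   just x as the simplified result.  */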

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
                               machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return const_double_from_real_value (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
                                 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }
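  /* Added illustrative commentary, not part of the original source:
     e.g. (ltu:SI (plus:SI a (const_int 4)) (const_int 4)) becomes
     (geu:SI a (const_int -4)); the unsigned addition wraps below 4
     exactly when a >= -4 viewed as an unsigned value.  */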

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
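  /* Added illustrative commentary, not part of the original source:
     the canonicalizations above shift comparisons against 1 and -1 to
     comparisons against zero, e.g. (geu:SI x (const_int 1)) becomes
     (ne:SI x (const_int 0)) and (gt:SI x (const_int -1)) becomes
     (ge:SI x (const_int 0)), so later passes only need to recognize
     the zero-based forms.  */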

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
         simplification case between:
         A + B == C  <--->  C - B == A,
         where A, B, and C are all constants with non-simplifiable
         expressions, usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
          && CONSTANT_P (x)
          && rtx_equal_p (c, XEXP (tem, 1)))
        return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
                                    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));
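  /* Added illustrative commentary, not part of the original source, for
     the constant form above: (eq (xor x (const_int 3)) (const_int 5))
     folds to (eq x (const_int 6)), because x ^ 3 == 5 is equivalent to
     x == 3 ^ 5 == 6.  */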

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
                                      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
                                      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_unary (BSWAP, cmp_mode,
                                                        op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }
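  /* Added illustrative commentary, not part of the original source: a
     popcount is zero exactly when its operand is zero, so e.g.
     (gtu (popcount:SI x) (const_int 0)) becomes (ne x (const_int 0))
     without computing the population count.  */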
4811
c6fb08ad
PB
4812 return NULL_RTX;
4813}
4814
b8698a0f 4815enum
39641489 4816{
a567207e
PB
4817 CMP_EQ = 1,
4818 CMP_LT = 2,
4819 CMP_GT = 4,
4820 CMP_LTU = 8,
4821 CMP_GTU = 16
39641489
PB
4822};
4823
4824
4825/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4826 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
b8698a0f 4827 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
a567207e 4828 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
39641489
PB
4829 For floating-point comparisons, assume that the operands were ordered. */
4830
4831static rtx
4832comparison_result (enum rtx_code code, int known_results)
4833{
39641489
PB
4834 switch (code)
4835 {
4836 case EQ:
4837 case UNEQ:
a567207e 4838 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
39641489
PB
4839 case NE:
4840 case LTGT:
a567207e 4841 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
39641489
PB
4842
4843 case LT:
4844 case UNLT:
a567207e 4845 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
39641489
PB
4846 case GE:
4847 case UNGE:
a567207e 4848 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
39641489
PB
4849
4850 case GT:
4851 case UNGT:
a567207e 4852 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
39641489
PB
4853 case LE:
4854 case UNLE:
a567207e 4855 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
39641489
PB
4856
4857 case LTU:
a567207e 4858 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
39641489 4859 case GEU:
a567207e 4860 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
39641489
PB
4861
4862 case GTU:
a567207e 4863 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
39641489 4864 case LEU:
a567207e 4865 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
39641489
PB
4866
4867 case ORDERED:
4868 return const_true_rtx;
4869 case UNORDERED:
4870 return const0_rtx;
4871 default:
4872 gcc_unreachable ();
4873 }
4874}
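/* A standalone sketch (function name ours) of how a KNOWN_RESULT mask
   for two known integers would be built for comparison_result above:
   equality yields CMP_EQ; otherwise one signed flag is OR'ed with one
   unsigned flag, mirroring the wide-int code later in this file.  */

static int
known_result_sketch (int a, int b)
{
  if (a == b)
    return CMP_EQ;
  return ((a < b) ? CMP_LT : CMP_GT)
	 | (((unsigned int) a < (unsigned int) b) ? CMP_LTU : CMP_GTU);
}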
4875
807e902e
KZ
4876/* Check if the given comparison (done in the given MODE) is actually
4877 a tautology or a contradiction. If the mode is VOIDmode, the
4878 comparison is done in "infinite precision". If no simplification
4879 is possible, this function returns zero. Otherwise, it returns
4880 either const_true_rtx or const0_rtx. */
0cedb36c
JL
4881
4882rtx
7ce3e360 4883simplify_const_relational_operation (enum rtx_code code,
ef4bddc2 4884 machine_mode mode,
7ce3e360 4885 rtx op0, rtx op1)
0cedb36c 4886{
0cedb36c 4887 rtx tem;
4ba5f925
JH
4888 rtx trueop0;
4889 rtx trueop1;
0cedb36c 4890
41374e13
NS
4891 gcc_assert (mode != VOIDmode
4892 || (GET_MODE (op0) == VOIDmode
4893 && GET_MODE (op1) == VOIDmode));
47b1e19b 4894
0cedb36c
JL
4895 /* If op0 is a compare, extract the comparison arguments from it. */
4896 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5b5dc475
UW
4897 {
4898 op1 = XEXP (op0, 1);
4899 op0 = XEXP (op0, 0);
4900
4901 if (GET_MODE (op0) != VOIDmode)
4902 mode = GET_MODE (op0);
4903 else if (GET_MODE (op1) != VOIDmode)
4904 mode = GET_MODE (op1);
4905 else
4906 return 0;
4907 }
0cedb36c
JL
4908
4909 /* We can't simplify MODE_CC values since we don't know what the
4910 actual comparison is. */
8beccec8 4911 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
0cedb36c
JL
4912 return 0;
4913
52a75c3c 4914 /* Make sure the constant is second. */
9ce79a7a 4915 if (swap_commutative_operands_p (op0, op1))
52a75c3c 4916 {
e2be0590 4917 std::swap (op0, op1);
52a75c3c
RH
4918 code = swap_condition (code);
4919 }
4920
9ce79a7a
RS
4921 trueop0 = avoid_constant_pool_reference (op0);
4922 trueop1 = avoid_constant_pool_reference (op1);
4923
0cedb36c
JL
4924 /* For integer comparisons of A and B maybe we can simplify A - B and can
4925 then simplify a comparison of that with zero. If A and B are both either
4926 a register or a CONST_INT, this can't help; testing for these cases will
4927 prevent infinite recursion here and speed things up.
4928
e0d0c193
RG
4929 We can only do this for EQ and NE comparisons as otherwise we may
4930 lose or introduce overflow which we cannot disregard as undefined as
4931 we do not know the signedness of the operation on either the left or
4932 the right hand side of the comparison. */
0cedb36c 4933
e0d0c193
RG
4934 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4935 && (code == EQ || code == NE)
481683e1
SZ
4936 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4937 && (REG_P (op1) || CONST_INT_P (trueop1)))
0cedb36c 4938 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
e0d0c193
RG
4939 /* We cannot do this if tem is a nonzero address. */
4940 && ! nonzero_address_p (tem))
7ce3e360
RS
4941 return simplify_const_relational_operation (signed_condition (code),
4942 mode, tem, const0_rtx);
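/* A standalone sketch (helper name ours) of why the rewrite above is
   restricted to EQ/NE: in wrap-around arithmetic a == b exactly when
   a - b == 0, but ordered comparisons may be changed by overflow.  */
#include <assert.h>

static void
check_minus_rewrite (unsigned int a, unsigned int b)
{
  assert ((a == b) == (a - b == 0));
}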
0cedb36c 4943
bdbb0460 4944 if (! HONOR_NANS (mode) && code == ORDERED)
1f36a2dd
JH
4945 return const_true_rtx;
4946
bdbb0460 4947 if (! HONOR_NANS (mode) && code == UNORDERED)
1f36a2dd
JH
4948 return const0_rtx;
4949
71925bc0 4950 /* For modes without NaNs, if the two operands are equal, we know the
39641489
PB
4951 result except if they have side-effects. Even with NaNs we know
4952 the result of unordered comparisons and, if signaling NaNs are
4953 irrelevant, also the result of LT/GT/LTGT. */
1b457aa4 4954 if ((! HONOR_NANS (trueop0)
39641489
PB
4955 || code == UNEQ || code == UNLE || code == UNGE
4956 || ((code == LT || code == GT || code == LTGT)
3d3dbadd 4957 && ! HONOR_SNANS (trueop0)))
8821d091
EB
4958 && rtx_equal_p (trueop0, trueop1)
4959 && ! side_effects_p (trueop0))
a567207e 4960 return comparison_result (code, CMP_EQ);
0cedb36c
JL
4961
4962 /* If the operands are floating-point constants, see if we can fold
4963 the result. */
48175537
KZ
4964 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4965 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
39641489 4966 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
0cedb36c 4967 {
34a72c33
RS
4968 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4969 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
90a74703 4970
1eeeb6a4 4971 /* Comparisons are unordered iff at least one of the values is NaN. */
34a72c33 4972 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
90a74703
JH
4973 switch (code)
4974 {
4975 case UNEQ:
4976 case UNLT:
4977 case UNGT:
4978 case UNLE:
4979 case UNGE:
4980 case NE:
4981 case UNORDERED:
4982 return const_true_rtx;
4983 case EQ:
4984 case LT:
4985 case GT:
4986 case LE:
4987 case GE:
4988 case LTGT:
4989 case ORDERED:
4990 return const0_rtx;
4991 default:
4992 return 0;
4993 }
0cedb36c 4994
39641489 4995 return comparison_result (code,
34a72c33
RS
4996 (real_equal (d0, d1) ? CMP_EQ :
4997 real_less (d0, d1) ? CMP_LT : CMP_GT));
0cedb36c 4998 }
0cedb36c
JL
4999
5000 /* Otherwise, see if the operands are both integers. */
39641489 5001 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
807e902e 5002 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
0cedb36c 5003 {
807e902e
KZ
5004 /* It would be nice if we really had a mode here. However, the
5005 largest int representable on the target is as good as
5006 infinite. */
ef4bddc2 5007 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
807e902e
KZ
5008 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
5009 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
5010
5011 if (wi::eq_p (ptrueop0, ptrueop1))
a567207e 5012 return comparison_result (code, CMP_EQ);
39641489
PB
5013 else
5014 {
807e902e
KZ
5015 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5016 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
a567207e 5017 return comparison_result (code, cr);
39641489 5018 }
0cedb36c
JL
5019 }
5020
39641489 5021 /* Optimize comparisons with upper and lower bounds. */
46c9550f 5022 if (HWI_COMPUTABLE_MODE_P (mode)
2d87c1d4
RL
5023 && CONST_INT_P (trueop1)
5024 && !side_effects_p (trueop0))
0cedb36c 5025 {
39641489
PB
5026 int sign;
5027 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5028 HOST_WIDE_INT val = INTVAL (trueop1);
5029 HOST_WIDE_INT mmin, mmax;
5030
5031 if (code == GEU
5032 || code == LEU
5033 || code == GTU
5034 || code == LTU)
5035 sign = 0;
5036 else
5037 sign = 1;
0aea6467 5038
39641489
PB
5039 /* Get a reduced range if the sign bit is zero. */
5040 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5041 {
5042 mmin = 0;
5043 mmax = nonzero;
5044 }
5045 else
5046 {
5047 rtx mmin_rtx, mmax_rtx;
a567207e 5048 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
39641489 5049
dc7c279e
JJ
5050 mmin = INTVAL (mmin_rtx);
5051 mmax = INTVAL (mmax_rtx);
5052 if (sign)
5053 {
5054 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5055
5056 mmin >>= (sign_copies - 1);
5057 mmax >>= (sign_copies - 1);
5058 }
0aea6467
ZD
5059 }
5060
0cedb36c
JL
5061 switch (code)
5062 {
39641489
PB
5063 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5064 case GEU:
5065 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5066 return const_true_rtx;
5067 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5068 return const0_rtx;
5069 break;
5070 case GE:
5071 if (val <= mmin)
5072 return const_true_rtx;
5073 if (val > mmax)
5074 return const0_rtx;
5075 break;
5076
5077 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5078 case LEU:
5079 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5080 return const_true_rtx;
5081 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5082 return const0_rtx;
5083 break;
5084 case LE:
5085 if (val >= mmax)
5086 return const_true_rtx;
5087 if (val < mmin)
5088 return const0_rtx;
5089 break;
5090
0cedb36c 5091 case EQ:
39641489
PB
5092 /* x == y is always false for y out of range. */
5093 if (val < mmin || val > mmax)
5094 return const0_rtx;
5095 break;
5096
5097 /* x > y is always false for y >= mmax, always true for y < mmin. */
5098 case GTU:
5099 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5100 return const0_rtx;
5101 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5102 return const_true_rtx;
5103 break;
5104 case GT:
5105 if (val >= mmax)
5106 return const0_rtx;
5107 if (val < mmin)
5108 return const_true_rtx;
5109 break;
5110
5111 /* x < y is always false for y <= mmin, always true for y > mmax. */
5112 case LTU:
5113 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5114 return const0_rtx;
5115 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5116 return const_true_rtx;
5117 break;
5118 case LT:
5119 if (val <= mmin)
0cedb36c 5120 return const0_rtx;
39641489
PB
5121 if (val > mmax)
5122 return const_true_rtx;
0cedb36c
JL
5123 break;
5124
5125 case NE:
39641489
PB
5126 /* x != y is always true for y out of range. */
5127 if (val < mmin || val > mmax)
0cedb36c
JL
5128 return const_true_rtx;
5129 break;
5130
39641489
PB
5131 default:
5132 break;
5133 }
5134 }
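/* A standalone sketch (helper name ours) of the bounds reasoning
   above: when nonzero_bits confines a value to [mmin, mmax],
   comparisons against constants outside that range fold outright.  */
#include <assert.h>

static void
check_bounds_folding (unsigned int y)
{
  unsigned int x = y & 0xF;	/* nonzero bits confine x to 0..15.  */
  assert (x < 16);		/* LTU with val > mmax folds to true.  */
  assert (!(x > 15));		/* GTU with val >= mmax folds to false.  */
}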
5135
5136 /* Optimize integer comparisons with zero. */
2d87c1d4 5137 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
39641489
PB
5138 {
5139 /* Some addresses are known to be nonzero. We don't know
a567207e 5140 their sign, but equality comparisons are known. */
39641489 5141 if (nonzero_address_p (trueop0))
a567207e 5142 {
39641489
PB
5143 if (code == EQ || code == LEU)
5144 return const0_rtx;
5145 if (code == NE || code == GTU)
5146 return const_true_rtx;
a567207e 5147 }
39641489
PB
5148
5149 /* See if the first operand is an IOR with a constant. If so, we
5150 may be able to determine the result of this comparison. */
5151 if (GET_CODE (op0) == IOR)
a567207e 5152 {
39641489 5153 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
481683e1 5154 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
39641489 5155 {
5511bc5a 5156 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
a567207e 5157 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
43c36287
EB
5158 && (UINTVAL (inner_const)
5159 & ((unsigned HOST_WIDE_INT) 1
5160 << sign_bitnum)));
a567207e
PB
5161
5162 switch (code)
5163 {
5164 case EQ:
39641489 5165 case LEU:
a567207e
PB
5166 return const0_rtx;
5167 case NE:
39641489 5168 case GTU:
a567207e
PB
5169 return const_true_rtx;
5170 case LT:
5171 case LE:
5172 if (has_sign)
5173 return const_true_rtx;
5174 break;
5175 case GT:
39641489 5176 case GE:
a567207e
PB
5177 if (has_sign)
5178 return const0_rtx;
5179 break;
5180 default:
5181 break;
5182 }
5183 }
39641489
PB
5184 }
5185 }
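/* A standalone sketch (helper name ours, constants illustrative) of
   the IOR reasoning above: x | C with C nonzero can never be zero,
   and if C contributes the sign bit the signed result is negative.  */
#include <assert.h>
#include <limits.h>

static void
check_ior_folding (int x)
{
  assert ((x | 0x10) != 0);	/* EQ 0 folds to false, NE 0 to true.  */
  assert ((x | INT_MIN) < 0);	/* Sign bit set: LT 0 folds to true.  */
}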
5186
5187 /* Optimize comparison of ABS with zero. */
2d87c1d4 5188 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
39641489
PB
5189 && (GET_CODE (trueop0) == ABS
5190 || (GET_CODE (trueop0) == FLOAT_EXTEND
5191 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5192 {
5193 switch (code)
5194 {
0da65b89
RS
5195 case LT:
5196 /* Optimize abs(x) < 0.0. */
39641489 5197 if (!HONOR_SNANS (mode)
eeef0e45
ILT
5198 && (!INTEGRAL_MODE_P (mode)
5199 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
0da65b89 5200 {
39641489
PB
5201 if (INTEGRAL_MODE_P (mode)
5202 && (issue_strict_overflow_warning
5203 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5204 warning (OPT_Wstrict_overflow,
5205 ("assuming signed overflow does not occur when "
5206 "assuming abs (x) < 0 is false"));
5207 return const0_rtx;
0da65b89
RS
5208 }
5209 break;
5210
5211 case GE:
5212 /* Optimize abs(x) >= 0.0. */
39641489 5213 if (!HONOR_NANS (mode)
eeef0e45
ILT
5214 && (!INTEGRAL_MODE_P (mode)
5215 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
0da65b89 5216 {
39641489
PB
5217 if (INTEGRAL_MODE_P (mode)
5218 && (issue_strict_overflow_warning
5219 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5220 warning (OPT_Wstrict_overflow,
5221 ("assuming signed overflow does not occur when "
5222 "assuming abs (x) >= 0 is true"));
5223 return const_true_rtx;
0da65b89
RS
5224 }
5225 break;
5226
8d90f9c0
GK
5227 case UNGE:
5228 /* Optimize ! (abs(x) < 0.0). */
39641489 5229 return const_true_rtx;
46c5ad27 5230
0cedb36c
JL
5231 default:
5232 break;
5233 }
0cedb36c
JL
5234 }
5235
39641489 5236 return 0;
0cedb36c
JL
5237}
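/* A standalone sketch (helper name ours) of the ABS folds that close
   the function above: with NaNs excluded, as the !HONOR_NANS and
   !HONOR_SNANS guards require, |x| is never below zero and always at
   least zero.  */
#include <assert.h>
#include <math.h>

static void
check_abs_folding (double x)
{
  if (!isnan (x))
    {
      assert (!(fabs (x) < 0.0));	/* abs(x) < 0.0 folds to false.  */
      assert (fabs (x) >= 0.0);		/* abs(x) >= 0.0 folds to true.  */
    }
}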
5238\f
5239/* Simplify CODE, an operation with result mode MODE and three operands,
5240 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5241 a constant. Return 0 if no simplification is possible. */
5242
5243rtx
ef4bddc2
RS
5244simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5245 machine_mode op0_mode, rtx op0, rtx op1,
46c5ad27 5246 rtx op2)
0cedb36c 5247{
5511bc5a 5248 unsigned int width = GET_MODE_PRECISION (mode);
5c822194 5249 bool any_change = false;
da694a77 5250 rtx tem, trueop2;
0cedb36c
JL
5251
5252 /* VOIDmode means "infinite" precision. */
5253 if (width == 0)
5254 width = HOST_BITS_PER_WIDE_INT;
5255
5256 switch (code)
5257 {
1b1562a5 5258 case FMA:
5c822194
RH
5259 /* Simplify negations around the multiplication. */
5260 /* -a * -b + c => a * b + c. */
5261 if (GET_CODE (op0) == NEG)
5262 {
5263 tem = simplify_unary_operation (NEG, mode, op1, mode);
5264 if (tem)
5265 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5266 }
5267 else if (GET_CODE (op1) == NEG)
5268 {
5269 tem = simplify_unary_operation (NEG, mode, op0, mode);
5270 if (tem)
5271 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5272 }
5273
5274 /* Canonicalize the two multiplication operands. */
5275 /* a * -b + c => -b * a + c. */
5276 if (swap_commutative_operands_p (op0, op1))
e2be0590 5277 std::swap (op0, op1), any_change = true;
5c822194
RH
5278
5279 if (any_change)
5280 return gen_rtx_FMA (mode, op0, op1, op2);
1b1562a5
MM
5281 return NULL_RTX;
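/* A standalone sketch (helper name ours) of the FMA canonicalization
   above: negating both multiplication operands leaves the fused
   product, and hence the fused result, unchanged.  Uses C99 fma().  */
#include <assert.h>
#include <math.h>

static void
check_fma_identity (double a, double b, double c)
{
  double r = fma (a, b, c);
  if (!isnan (r))	/* NaN never compares equal to itself.  */
    assert (fma (-a, -b, c) == r);
}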
5282
0cedb36c
JL
5283 case SIGN_EXTRACT:
5284 case ZERO_EXTRACT:
481683e1
SZ
5285 if (CONST_INT_P (op0)
5286 && CONST_INT_P (op1)
5287 && CONST_INT_P (op2)
d882fe51 5288 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
f9e158c3 5289 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
0cedb36c
JL
5290 {
5291 /* Extracting a bit-field from a constant */
43c36287 5292 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5511bc5a
BS
5293 HOST_WIDE_INT op1val = INTVAL (op1);
5294 HOST_WIDE_INT op2val = INTVAL (op2);
0cedb36c 5295 if (BITS_BIG_ENDIAN)
5511bc5a 5296 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
0cedb36c 5297 else
5511bc5a 5298 val >>= op2val;
0cedb36c 5299
5511bc5a 5300 if (HOST_BITS_PER_WIDE_INT != op1val)
0cedb36c
JL
5301 {
5302 /* First zero-extend. */
5511bc5a 5303 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
0cedb36c
JL
5304 /* If desired, propagate sign bit. */
5305 if (code == SIGN_EXTRACT
5511bc5a 5306 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
43c36287 5307 != 0)
5511bc5a 5308 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
0cedb36c
JL
5309 }
5310
449ecb09 5311 return gen_int_mode (val, mode);
0cedb36c
JL
5312 }
5313 break;
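/* A standalone sketch (helper name ours) of the constant bit-field
   extraction above, ignoring the BITS_BIG_ENDIAN repositioning:
   shift down to the field, mask to its width, and for SIGN_EXTRACT
   propagate the field's top bit.  Assumes 0 < len < 64 and
   pos + len <= 64, mirroring the width guards above.  */

static unsigned long long
extract_field_sketch (unsigned long long val, int pos, int len,
		      int want_sign)
{
  unsigned long long field = (val >> pos) & ((1ULL << len) - 1);
  if (want_sign && (field >> (len - 1)) != 0)
    field |= ~((1ULL << len) - 1);	/* Propagate the sign bit.  */
  return field;
}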
5314
5315 case IF_THEN_ELSE:
481683e1 5316 if (CONST_INT_P (op0))
0cedb36c
JL
5317 return op0 != const0_rtx ? op1 : op2;
5318
31f0f571
RS
5319 /* Convert c ? a : a into "a". */
5320 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
0cedb36c 5321 return op1;
31f0f571
RS
5322
5323 /* Convert a != b ? a : b into "a". */
5324 if (GET_CODE (op0) == NE
5325 && ! side_effects_p (op0)
5326 && ! HONOR_NANS (mode)
5327 && ! HONOR_SIGNED_ZEROS (mode)
5328 && ((rtx_equal_p (XEXP (op0, 0), op1)
5329 && rtx_equal_p (XEXP (op0, 1), op2))
5330 || (rtx_equal_p (XEXP (op0, 0), op2)
5331 && rtx_equal_p (XEXP (op0, 1), op1))))
5332 return op1;
5333
5334 /* Convert a == b ? a : b into "b". */
5335 if (GET_CODE (op0) == EQ
5336 && ! side_effects_p (op0)
5337 && ! HONOR_NANS (mode)
5338 && ! HONOR_SIGNED_ZEROS (mode)
5339 && ((rtx_equal_p (XEXP (op0, 0), op1)
5340 && rtx_equal_p (XEXP (op0, 1), op2))
5341 || (rtx_equal_p (XEXP (op0, 0), op2)
5342 && rtx_equal_p (XEXP (op0, 1), op1))))
0cedb36c 5343 return op2;
31f0f571 5344
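/* A standalone sketch (helper name ours) of the two conversions
   above, for integers, where NaNs and signed zeros cannot occur:
   a != b ? a : b always yields a, and a == b ? a : b always yields b.  */
#include <assert.h>

static void
check_cond_identity (int a, int b)
{
  assert ((a != b ? a : b) == a);
  assert ((a == b ? a : b) == b);
}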
34222cd6
BS
5345 /* Convert (!c) != {0,...,0} ? a : b into
5346 c != {0,...,0} ? b : a for vector modes. */
5347 if (VECTOR_MODE_P (GET_MODE (op1))
5348 && GET_CODE (op0) == NE
5349 && GET_CODE (XEXP (op0, 0)) == NOT
5350 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5351 {
5352 rtx cv = XEXP (op0, 1);
5353 int nunits = CONST_VECTOR_NUNITS (cv);
5354 bool ok = true;
5355 for (int i = 0; i < nunits; ++i)
5356 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5357 {
5358 ok = false;
5359 break;
5360 }
5361 if (ok)
5362 {
5363 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5364 XEXP (XEXP (op0, 0), 0),
5365 XEXP (op0, 1));
5366 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5367 return retval;
5368 }
5369 }
5370
ec8e098d 5371 if (COMPARISON_P (op0) && ! side_effects_p (op0))
0cedb36c 5372 {
ef4bddc2 5373 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
47b1e19b
JH
5374 ? GET_MODE (XEXP (op0, 1))
5375 : GET_MODE (XEXP (op0, 0)));
3e882897 5376 rtx temp;
a774e06e
RH
5377
5378 /* Look for happy constants in op1 and op2. */
481683e1 5379 if (CONST_INT_P (op1) && CONST_INT_P (op2))
a774e06e
RH
5380 {
5381 HOST_WIDE_INT t = INTVAL (op1);
5382 HOST_WIDE_INT f = INTVAL (op2);
786de7eb 5383
a774e06e
RH
5384 if (t == STORE_FLAG_VALUE && f == 0)
5385 code = GET_CODE (op0);
261efdef
JH
5386 else if (t == 0 && f == STORE_FLAG_VALUE)
5387 {
5388 enum rtx_code tmp;
5389 tmp = reversed_comparison_code (op0, NULL_RTX);
5390 if (tmp == UNKNOWN)
5391 break;
5392 code = tmp;
5393 }
a774e06e
RH
5394 else
5395 break;
5396
77306e3e 5397 return simplify_gen_relational (code, mode, cmp_mode,
c6fb08ad
PB
5398 XEXP (op0, 0), XEXP (op0, 1));
5399 }
5400
5401 if (cmp_mode == VOIDmode)
5402 cmp_mode = op0_mode;
5403 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5404 cmp_mode, XEXP (op0, 0),
5405 XEXP (op0, 1));
5406
5407 /* See if any simplifications were possible. */
5408 if (temp)
5409 {
481683e1 5410 if (CONST_INT_P (temp))
c6fb08ad
PB
5411 return temp == const0_rtx ? op2 : op1;
5412 else if (temp)
5413 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
a774e06e 5414 }
0cedb36c
JL
5415 }
5416 break;
31f0f571 5417
d9deed68 5418 case VEC_MERGE:
41374e13
NS
5419 gcc_assert (GET_MODE (op0) == mode);
5420 gcc_assert (GET_MODE (op1) == mode);
5421 gcc_assert (VECTOR_MODE_P (mode));
da694a77
MG
5422 trueop2 = avoid_constant_pool_reference (op2);
5423 if (CONST_INT_P (trueop2))
d9deed68 5424 {
cb5ca315 5425 int elt_size = GET_MODE_UNIT_SIZE (mode);
d9deed68 5426 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
da694a77
MG
5427 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5428 unsigned HOST_WIDE_INT mask;
5429 if (n_elts == HOST_BITS_PER_WIDE_INT)
5430 mask = -1;
5431 else
5432 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
d9deed68 5433
da694a77 5434 if (!(sel & mask) && !side_effects_p (op0))
852c8ba1 5435 return op1;
da694a77 5436 if ((sel & mask) == mask && !side_effects_p (op1))
852c8ba1
JH
5437 return op0;
5438
da694a77
MG
5439 rtx trueop0 = avoid_constant_pool_reference (op0);
5440 rtx trueop1 = avoid_constant_pool_reference (op1);
5441 if (GET_CODE (trueop0) == CONST_VECTOR
5442 && GET_CODE (trueop1) == CONST_VECTOR)
852c8ba1
JH
5443 {
5444 rtvec v = rtvec_alloc (n_elts);
5445 unsigned int i;
5446
5447 for (i = 0; i < n_elts; i++)
da694a77
MG
5448 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5449 ? CONST_VECTOR_ELT (trueop0, i)
5450 : CONST_VECTOR_ELT (trueop1, i));
852c8ba1
JH
5451 return gen_rtx_CONST_VECTOR (mode, v);
5452 }
da694a77
MG
5453
5454 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5455 if no element from a appears in the result. */
5456 if (GET_CODE (op0) == VEC_MERGE)
5457 {
5458 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5459 if (CONST_INT_P (tem))
5460 {
5461 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5462 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5463 return simplify_gen_ternary (code, mode, mode,
5464 XEXP (op0, 1), op1, op2);
5465 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5466 return simplify_gen_ternary (code, mode, mode,
5467 XEXP (op0, 0), op1, op2);
5468 }
5469 }
5470 if (GET_CODE (op1) == VEC_MERGE)
5471 {
5472 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5473 if (CONST_INT_P (tem))
5474 {
5475 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5476 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5477 return simplify_gen_ternary (code, mode, mode,
5478 op0, XEXP (op1, 1), op2);
5479 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5480 return simplify_gen_ternary (code, mode, mode,
5481 op0, XEXP (op1, 0), op2);
5482 }
5483 }
691b9fb7
PM
5484
5485 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5486 with a. */
5487 if (GET_CODE (op0) == VEC_DUPLICATE
5488 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5489 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5490 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5491 {
5492 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5493 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5494 {
5495 if (XEXP (XEXP (op0, 0), 0) == op1
5496 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5497 return op1;
5498 }
5499 }
d9deed68 5500 }
da694a77
MG
5501
5502 if (rtx_equal_p (op0, op1)
5503 && !side_effects_p (op2) && !side_effects_p (op1))
5504 return op0;
5505
d9deed68 5506 break;
0cedb36c
JL
5507
5508 default:
41374e13 5509 gcc_unreachable ();
0cedb36c
JL
5510 }
5511
5512 return 0;
5513}
5514
807e902e
KZ
5515/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5516 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5517 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
eea50aa0 5518
550d1387
GK
5519 Works by unpacking OP into a collection of 8-bit values
5520 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5521 and then repacking them again for OUTERMODE. */
eea50aa0 5522
550d1387 5523static rtx
ef4bddc2
RS
5524simplify_immed_subreg (machine_mode outermode, rtx op,
5525 machine_mode innermode, unsigned int byte)
550d1387 5526{
550d1387 5527 enum {
550d1387
GK
5528 value_bit = 8,
5529 value_mask = (1 << value_bit) - 1
5530 };
807e902e 5531 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
550d1387
GK
5532 int value_start;
5533 int i;
5534 int elem;
5535
5536 int num_elem;
5537 rtx * elems;
5538 int elem_bitsize;
5539 rtx result_s;
5540 rtvec result_v = NULL;
5541 enum mode_class outer_class;
ef4bddc2 5542 machine_mode outer_submode;
807e902e 5543 int max_bitsize;
550d1387
GK
5544
5545 /* Some ports misuse CCmode. */
481683e1 5546 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
e5c56fd9
JH
5547 return op;
5548
6e4b5aaf
RH
5549 /* We have no way to represent a complex constant at the rtl level. */
5550 if (COMPLEX_MODE_P (outermode))
5551 return NULL_RTX;
5552
807e902e
KZ
5553 /* We support any size mode. */
5554 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5555 GET_MODE_BITSIZE (innermode));
5556
550d1387
GK
5557 /* Unpack the value. */
5558
cb2a532e
AH
5559 if (GET_CODE (op) == CONST_VECTOR)
5560 {
550d1387
GK
5561 num_elem = CONST_VECTOR_NUNITS (op);
5562 elems = &CONST_VECTOR_ELT (op, 0);
6c825cd4 5563 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
550d1387
GK
5564 }
5565 else
5566 {
5567 num_elem = 1;
5568 elems = &op;
5569 elem_bitsize = max_bitsize;
5570 }
41374e13
NS
5571 /* If this asserts, it is too complicated; reducing value_bit may help. */
5572 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5573 /* I don't know how to handle endianness of sub-units. */
5574 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
b8698a0f 5575
550d1387
GK
5576 for (elem = 0; elem < num_elem; elem++)
5577 {
5578 unsigned char * vp;
5579 rtx el = elems[elem];
b8698a0f 5580
550d1387
GK
5581 /* Vectors are kept in target memory order. (This is probably
5582 a mistake.) */
5583 {
5584 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
558c51c5 5585 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
550d1387
GK
5586 / BITS_PER_UNIT);
5587 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5588 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5589 unsigned bytele = (subword_byte % UNITS_PER_WORD
5590 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5591 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5592 }
b8698a0f 5593
550d1387 5594 switch (GET_CODE (el))
34a80643 5595 {
550d1387
GK
5596 case CONST_INT:
5597 for (i = 0;
b8698a0f 5598 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
550d1387
GK
5599 i += value_bit)
5600 *vp++ = INTVAL (el) >> i;
5601 /* CONST_INTs are always logically sign-extended. */
5602 for (; i < elem_bitsize; i += value_bit)
5603 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5604 break;
b8698a0f 5605
807e902e
KZ
5606 case CONST_WIDE_INT:
5607 {
5608 rtx_mode_t val = std::make_pair (el, innermode);
5609 unsigned char extend = wi::sign_mask (val);
5610
5611 for (i = 0; i < elem_bitsize; i += value_bit)
5612 *vp++ = wi::extract_uhwi (val, i, value_bit);
5613 for (; i < elem_bitsize; i += value_bit)
5614 *vp++ = extend;
5615 }
5616 break;
5617
550d1387 5618 case CONST_DOUBLE:
807e902e 5619 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
550d1387 5620 {
929e10f4 5621 unsigned char extend = 0;
550d1387
GK
5622 /* If this triggers, someone should have generated a
5623 CONST_INT instead. */
41374e13 5624 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
cb2a532e 5625
550d1387
GK
5626 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5627 *vp++ = CONST_DOUBLE_LOW (el) >> i;
49ab6098 5628 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
550d1387 5629 {
8064d930
RE
5630 *vp++
5631 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
550d1387
GK
5632 i += value_bit;
5633 }
929e10f4
MS
5634
5635 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5636 extend = -1;
1125164c 5637 for (; i < elem_bitsize; i += value_bit)
929e10f4 5638 *vp++ = extend;
550d1387 5639 }
41374e13 5640 else
34a80643 5641 {
807e902e
KZ
5642 /* This is big enough for anything on the platform. */
5643 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
550d1387 5644 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
41374e13 5645
3d8bf70f 5646 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
41374e13
NS
5647 gcc_assert (bitsize <= elem_bitsize);
5648 gcc_assert (bitsize % value_bit == 0);
550d1387
GK
5649
5650 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5651 GET_MODE (el));
5652
5653 /* real_to_target produces its result in words affected by
5654 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5655 and use WORDS_BIG_ENDIAN instead; see the documentation
5656 of SUBREG in rtl.texi. */
5657 for (i = 0; i < bitsize; i += value_bit)
226cfe61 5658 {
550d1387
GK
5659 int ibase;
5660 if (WORDS_BIG_ENDIAN)
5661 ibase = bitsize - 1 - i;
5662 else
5663 ibase = i;
5664 *vp++ = tmp[ibase / 32] >> i % 32;
226cfe61 5665 }
b8698a0f 5666
550d1387
GK
5667 /* It shouldn't matter what's done here, so fill it with
5668 zero. */
5669 for (; i < elem_bitsize; i += value_bit)
5670 *vp++ = 0;
34a80643 5671 }
550d1387 5672 break;
14c931f1
CF
5673
5674 case CONST_FIXED:
5675 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5676 {
5677 for (i = 0; i < elem_bitsize; i += value_bit)
5678 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5679 }
5680 else
5681 {
5682 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5683 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
49ab6098 5684 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
14c931f1
CF
5685 i += value_bit)
5686 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5687 >> (i - HOST_BITS_PER_WIDE_INT);
5688 for (; i < elem_bitsize; i += value_bit)
5689 *vp++ = 0;
5690 }
5691 break;
b8698a0f 5692
550d1387 5693 default:
41374e13 5694 gcc_unreachable ();
226cfe61 5695 }
cb2a532e
AH
5696 }
5697
550d1387
GK
5698 /* Now, pick the right byte to start with. */
5699 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5700 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5701 will already have offset 0. */
5702 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
eea50aa0 5703 {
558c51c5 5704 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
550d1387
GK
5705 - byte);
5706 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5707 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5708 byte = (subword_byte % UNITS_PER_WORD
5709 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5710 }
eea50aa0 5711
550d1387
GK
5712 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5713 so if it's become negative it will instead be very large.) */
41374e13 5714 gcc_assert (byte < GET_MODE_SIZE (innermode));
3767c0fd 5715
550d1387
GK
5716 /* Convert from bytes to chunks of size value_bit. */
5717 value_start = byte * (BITS_PER_UNIT / value_bit);
eea50aa0 5718
550d1387 5719 /* Re-pack the value. */
1c0e448f 5720 num_elem = GET_MODE_NUNITS (outermode);
b8698a0f 5721
550d1387
GK
5722 if (VECTOR_MODE_P (outermode))
5723 {
550d1387
GK
5724 result_v = rtvec_alloc (num_elem);
5725 elems = &RTVEC_ELT (result_v, 0);
550d1387
GK
5726 }
5727 else
1c0e448f 5728 elems = &result_s;
eea50aa0 5729
1c0e448f 5730 outer_submode = GET_MODE_INNER (outermode);
550d1387
GK
5731 outer_class = GET_MODE_CLASS (outer_submode);
5732 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
451f86fd 5733
41374e13
NS
5734 gcc_assert (elem_bitsize % value_bit == 0);
5735 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
451f86fd 5736
550d1387
GK
5737 for (elem = 0; elem < num_elem; elem++)
5738 {
5739 unsigned char *vp;
b8698a0f 5740
550d1387
GK
5741 /* Vectors are stored in target memory order. (This is probably
5742 a mistake.) */
5743 {
5744 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
558c51c5 5745 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
550d1387
GK
5746 / BITS_PER_UNIT);
5747 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5748 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5749 unsigned bytele = (subword_byte % UNITS_PER_WORD
5750 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5751 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5752 }
5753
5754 switch (outer_class)
eea50aa0 5755 {
550d1387
GK
5756 case MODE_INT:
5757 case MODE_PARTIAL_INT:
5758 {
807e902e
KZ
5759 int u;
5760 int base = 0;
5761 int units
5762 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5763 / HOST_BITS_PER_WIDE_INT;
5764 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5765 wide_int r;
5766
0daaf8aa
JJ
5767 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5768 return NULL_RTX;
807e902e
KZ
5769 for (u = 0; u < units; u++)
5770 {
5771 unsigned HOST_WIDE_INT buf = 0;
5772 for (i = 0;
5773 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5774 i += value_bit)
5775 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5776
5777 tmp[u] = buf;
5778 base += HOST_BITS_PER_WIDE_INT;
5779 }
807e902e
KZ
5780 r = wide_int::from_array (tmp, units,
5781 GET_MODE_PRECISION (outer_submode));
0daaf8aa
JJ
5782#if TARGET_SUPPORTS_WIDE_INT == 0
5783 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5784 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5785 return NULL_RTX;
5786#endif
807e902e 5787 elems[elem] = immed_wide_int_const (r, outer_submode);
550d1387
GK
5788 }
5789 break;
b8698a0f 5790
550d1387 5791 case MODE_FLOAT:
15ed7b52 5792 case MODE_DECIMAL_FLOAT:
550d1387
GK
5793 {
5794 REAL_VALUE_TYPE r;
807e902e 5795 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
b8698a0f 5796
550d1387
GK
5797 /* real_from_target wants its input in words affected by
5798 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5799 and use WORDS_BIG_ENDIAN instead; see the documentation
5800 of SUBREG in rtl.texi. */
5801 for (i = 0; i < max_bitsize / 32; i++)
5802 tmp[i] = 0;
5803 for (i = 0; i < elem_bitsize; i += value_bit)
5804 {
5805 int ibase;
5806 if (WORDS_BIG_ENDIAN)
5807 ibase = elem_bitsize - 1 - i;
5808 else
5809 ibase = i;
effdb493 5810 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
550d1387 5811 }
eea50aa0 5812
550d1387 5813 real_from_target (&r, tmp, outer_submode);
555affd7 5814 elems[elem] = const_double_from_real_value (r, outer_submode);
550d1387
GK
5815 }
5816 break;
14c931f1
CF
5817
5818 case MODE_FRACT:
5819 case MODE_UFRACT:
5820 case MODE_ACCUM:
5821 case MODE_UACCUM:
5822 {
5823 FIXED_VALUE_TYPE f;
5824 f.data.low = 0;
5825 f.data.high = 0;
5826 f.mode = outer_submode;
5827
5828 for (i = 0;
5829 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5830 i += value_bit)
43c36287 5831 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
14c931f1 5832 for (; i < elem_bitsize; i += value_bit)
43c36287 5833 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
14c931f1
CF
5834 << (i - HOST_BITS_PER_WIDE_INT));
5835
5836 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5837 }
5838 break;
b8698a0f 5839
550d1387 5840 default:
41374e13 5841 gcc_unreachable ();
550d1387
GK
5842 }
5843 }
5844 if (VECTOR_MODE_P (outermode))
5845 return gen_rtx_CONST_VECTOR (outermode, result_v);
5846 else
5847 return result_s;
5848}
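/* A standalone sketch (helper name ours) of the byte-level strategy
   of simplify_immed_subreg above, for the easy little-endian case:
   reading an OUTERMODE-sized value at offset BYTE of an INNERMODE
   constant is just a byte copy at that offset.  */
#include <string.h>

static unsigned short
immed_subreg_sketch (unsigned int op, unsigned int byte)
{
  unsigned short out;
  /* Assumes a little-endian host and byte + sizeof out <= sizeof op;
     the real code also handles big-endian byte and word ordering.  */
  memcpy (&out, (const char *) &op + byte, sizeof out);
  return out;
}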
eea50aa0 5849
550d1387
GK
5850/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5851 Return 0 if no simplifications are possible. */
5852rtx
ef4bddc2
RS
5853simplify_subreg (machine_mode outermode, rtx op,
5854 machine_mode innermode, unsigned int byte)
550d1387
GK
5855{
5856 /* Little bit of sanity checking. */
41374e13
NS
5857 gcc_assert (innermode != VOIDmode);
5858 gcc_assert (outermode != VOIDmode);
5859 gcc_assert (innermode != BLKmode);
5860 gcc_assert (outermode != BLKmode);
eea50aa0 5861
41374e13
NS
5862 gcc_assert (GET_MODE (op) == innermode
5863 || GET_MODE (op) == VOIDmode);
eea50aa0 5864
0343822b
RS
5865 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5866 return NULL_RTX;
5867
5868 if (byte >= GET_MODE_SIZE (innermode))
5869 return NULL_RTX;
eea50aa0 5870
550d1387
GK
5871 if (outermode == innermode && !byte)
5872 return op;
eea50aa0 5873
33ffb5c5
KZ
5874 if (CONST_SCALAR_INT_P (op)
5875 || CONST_DOUBLE_AS_FLOAT_P (op)
14c931f1 5876 || GET_CODE (op) == CONST_FIXED
550d1387
GK
5877 || GET_CODE (op) == CONST_VECTOR)
5878 return simplify_immed_subreg (outermode, op, innermode, byte);
eea50aa0
JH
5879
5880 /* Changing mode twice with SUBREG => just change it once,
5881 or not at all if changing back op starting mode. */
5882 if (GET_CODE (op) == SUBREG)
5883 {
ef4bddc2 5884 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
1ffb3f9a 5885 int final_offset = byte + SUBREG_BYTE (op);
53ed1a12 5886 rtx newx;
eea50aa0
JH
5887
5888 if (outermode == innermostmode
5889 && byte == 0 && SUBREG_BYTE (op) == 0)
5890 return SUBREG_REG (op);
5891
1ffb3f9a
JH
5892 /* The SUBREG_BYTE represents offset, as if the value were stored
5893 in memory. Irritating exception is paradoxical subreg, where
5894 we define SUBREG_BYTE to be 0. On big endian machines, this
2d76cb1a 5895 value should be negative. For a moment, undo this exception. */
1ffb3f9a 5896 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
eea50aa0 5897 {
1ffb3f9a
JH
5898 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5899 if (WORDS_BIG_ENDIAN)
5900 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5901 if (BYTES_BIG_ENDIAN)
5902 final_offset += difference % UNITS_PER_WORD;
5903 }
5904 if (SUBREG_BYTE (op) == 0
5905 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5906 {
5907 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5908 if (WORDS_BIG_ENDIAN)
5909 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5910 if (BYTES_BIG_ENDIAN)
5911 final_offset += difference % UNITS_PER_WORD;
5912 }
5913
5914 /* See whether resulting subreg will be paradoxical. */
2fe7bb35 5915 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
1ffb3f9a
JH
5916 {
5917 /* In nonparadoxical subregs we can't handle negative offsets. */
5918 if (final_offset < 0)
5919 return NULL_RTX;
5920 /* Bail out in case resulting subreg would be incorrect. */
5921 if (final_offset % GET_MODE_SIZE (outermode)
ae0ed63a
JM
5922 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5923 return NULL_RTX;
1ffb3f9a
JH
5924 }
5925 else
5926 {
5927 int offset = 0;
5928 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5929
5930 /* In a paradoxical subreg, see if we are still looking at the lower part.
5931 If so, our SUBREG_BYTE will be 0. */
5932 if (WORDS_BIG_ENDIAN)
5933 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5934 if (BYTES_BIG_ENDIAN)
5935 offset += difference % UNITS_PER_WORD;
5936 if (offset == final_offset)
5937 final_offset = 0;
eea50aa0 5938 else
ae0ed63a 5939 return NULL_RTX;
eea50aa0
JH
5940 }
5941
4d6922ee 5942 /* Recurse for further possible simplifications. */
beb72684
RH
5943 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5944 final_offset);
53ed1a12
BI
5945 if (newx)
5946 return newx;
beb72684
RH
5947 if (validate_subreg (outermode, innermostmode,
5948 SUBREG_REG (op), final_offset))
4613543f
RS
5949 {
5950 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5951 if (SUBREG_PROMOTED_VAR_P (op)
362d42dc 5952 && SUBREG_PROMOTED_SIGN (op) >= 0
4613543f
RS
5953 && GET_MODE_CLASS (outermode) == MODE_INT
5954 && IN_RANGE (GET_MODE_SIZE (outermode),
5955 GET_MODE_SIZE (innermode),
5956 GET_MODE_SIZE (innermostmode))
5957 && subreg_lowpart_p (newx))
5958 {
5959 SUBREG_PROMOTED_VAR_P (newx) = 1;
362d42dc 5960 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
4613543f
RS
5961 }
5962 return newx;
5963 }
beb72684 5964 return NULL_RTX;
eea50aa0
JH
5965 }
5966
5967 /* SUBREG of a hard register => just change the register number
5968 and/or mode. If the hard register is not valid in that mode,
5969 suppress this simplification. If the hard register is the stack,
5970 frame, or argument pointer, leave this as a SUBREG. */
5971
eef302d2 5972 if (REG_P (op) && HARD_REGISTER_P (op))
eea50aa0 5973 {
eef302d2
RS
5974 unsigned int regno, final_regno;
5975
5976 regno = REGNO (op);
5977 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5978 if (HARD_REGISTER_NUM_P (final_regno))
49d801d3 5979 {
dedc1e6d
AO
5980 rtx x;
5981 int final_offset = byte;
5982
5983 /* Adjust offset for paradoxical subregs. */
5984 if (byte == 0
5985 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5986 {
5987 int difference = (GET_MODE_SIZE (innermode)
5988 - GET_MODE_SIZE (outermode));
5989 if (WORDS_BIG_ENDIAN)
5990 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5991 if (BYTES_BIG_ENDIAN)
5992 final_offset += difference % UNITS_PER_WORD;
5993 }
5994
5995 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
49d801d3
JH
5996
5997 /* Propagate original regno. We don't have any way to specify
14b493d6 5998 the offset inside original regno, so do so only for lowpart.
49d801d3
JH
5999 The information is used only by alias analysis, which cannot
6000 grok partial registers anyway. */
6001
6002 if (subreg_lowpart_offset (outermode, innermode) == byte)
6003 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6004 return x;
6005 }
eea50aa0
JH
6006 }
6007
6008 /* If we have a SUBREG of a register that we are replacing and we are
6009 replacing it with a MEM, make a new MEM and try replacing the
6010 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6011 or if we would be widening it. */
6012
3c0cb5de 6013 if (MEM_P (op)
5bfed9a9 6014 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
04864a46
JH
6015 /* Allow splitting of volatile memory references in case we don't
6016 have instruction to move the whole thing. */
6017 && (! MEM_VOLATILE_P (op)
ef89d648 6018 || ! have_insn_for (SET, innermode))
eea50aa0 6019 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
f1ec5147 6020 return adjust_address_nv (op, outermode, byte);
e5c56fd9
JH
6021
6022 /* Handle complex values represented as CONCAT
6023 of real and imaginary part. */
6024 if (GET_CODE (op) == CONCAT)
6025 {
a957d77f 6026 unsigned int part_size, final_offset;
4f1da2e9
RS
6027 rtx part, res;
6028
a957d77f
RS
6029 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6030 if (byte < part_size)
6031 {
6032 part = XEXP (op, 0);
6033 final_offset = byte;
6034 }
6035 else
6036 {
6037 part = XEXP (op, 1);
6038 final_offset = byte - part_size;
6039 }
6040
6041 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4f1da2e9 6042 return NULL_RTX;
e5c56fd9 6043
9199d62b
DD
6044 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6045 if (res)
6046 return res;
beb72684 6047 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4f1da2e9 6048 return gen_rtx_SUBREG (outermode, part, final_offset);
beb72684 6049 return NULL_RTX;
e5c56fd9
JH
6050 }
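/* A standalone sketch (helper name ours) of the CONCAT case above: a
   complex value is two equal-sized parts laid out back to back, so an
   offset below the part size selects the first (real) part and
   anything past it selects the second at a reduced offset.  */

static double
concat_part_sketch (const double parts[2], unsigned int byte)
{
  /* With OUTERMODE as wide as one part, only a final offset of zero
     survives the range check above.  */
  return byte < sizeof (double) ? parts[0] : parts[1];
}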
6051
40c5ed5b
RS
6052 /* A SUBREG resulting from a zero extension may fold to zero if
6053 it extracts higher bits than the ZERO_EXTEND's source bits. */
373b9e78 6054 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
bb51e270
RS
6055 {
6056 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
40c5ed5b 6057 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
bb51e270
RS
6058 return CONST0_RTX (outermode);
6059 }
6060
40c5ed5b 6061 if (SCALAR_INT_MODE_P (outermode)
992103ad
UB
6062 && SCALAR_INT_MODE_P (innermode)
6063 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6064 && byte == subreg_lowpart_offset (outermode, innermode))
6065 {
40c5ed5b
RS
6066 rtx tem = simplify_truncation (outermode, op, innermode);
6067 if (tem)
6068 return tem;
509dd380
JJ
6069 }
6070
eea50aa0
JH
6071 return NULL_RTX;
6072}
550d1387 6073
949c5d62
JH
6074/* Make a SUBREG operation or equivalent if it folds. */
6075
6076rtx
ef4bddc2
RS
6077simplify_gen_subreg (machine_mode outermode, rtx op,
6078 machine_mode innermode, unsigned int byte)
949c5d62 6079{
53ed1a12 6080 rtx newx;
949c5d62 6081
53ed1a12
BI
6082 newx = simplify_subreg (outermode, op, innermode, byte);
6083 if (newx)
6084 return newx;
949c5d62 6085
4f1da2e9
RS
6086 if (GET_CODE (op) == SUBREG
6087 || GET_CODE (op) == CONCAT
6088 || GET_MODE (op) == VOIDmode)
949c5d62
JH
6089 return NULL_RTX;
6090
beb72684
RH
6091 if (validate_subreg (outermode, innermode, op, byte))
6092 return gen_rtx_SUBREG (outermode, op, byte);
6093
6094 return NULL_RTX;
949c5d62 6095}
beb72684 6096
3403a1a9
AS
6097/* Generates a subreg to get the least significant part of EXPR (in mode
6098 INNER_MODE) to OUTER_MODE. */
6099
6100rtx
6101lowpart_subreg (machine_mode outer_mode, rtx expr,
6102 machine_mode inner_mode)
6103{
6104 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6105 subreg_lowpart_offset (outer_mode, inner_mode));
6106}
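/* A standalone sketch (helper name ours) of the lowpart offset used
   above, ignoring the separate WORDS_BIG_ENDIAN word adjustment: the
   least significant OUTER_SIZE bytes sit at offset 0 on little-endian
   targets and at the size difference on big-endian ones.  */

static unsigned int
lowpart_offset_sketch (unsigned int inner_size, unsigned int outer_size,
		       int bytes_big_endian)
{
  return bytes_big_endian ? inner_size - outer_size : 0;
}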
6107
0cedb36c
JL
6108/* Simplify X, an rtx expression.
6109
6110 Return the simplified expression or NULL if no simplifications
6111 were possible.
6112
6113 This is the preferred entry point into the simplification routines;
6114 however, we still allow passes to call the more specific routines.
6115
14b493d6 6116 Right now GCC has three (yes, three) major bodies of RTL simplification
0cedb36c
JL
6117 code that need to be unified.
6118
6119 1. fold_rtx in cse.c. This code uses various CSE specific
6120 information to aid in RTL simplification.
6121
6122 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6123 it uses combine specific information to aid in RTL
6124 simplification.
6125
6126 3. The routines in this file.
6127
6128
6129 Long term we want to only have one body of simplification code; to
6130 get to that state I recommend the following steps:
6131
6132 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6133 which are not pass dependent state into these routines.
6134
6135 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6136 use this routine whenever possible.
6137
6138 3. Allow for pass dependent state to be provided to these
6139 routines and add simplifications based on the pass dependent
6140 state. Remove code from cse.c & combine.c that becomes
6141 redundant/dead.
6142
6143 It will take time, but ultimately the compiler will be easier to
6144 maintain and improve. It's totally silly that when we add a
6145 simplification it needs to be added to 4 places (3 for RTL
6146 simplification and 1 for tree simplification). */
786de7eb 6147
0cedb36c 6148rtx
58f9752a 6149simplify_rtx (const_rtx x)
0cedb36c 6150{
58f9752a 6151 const enum rtx_code code = GET_CODE (x);
ef4bddc2 6152 const machine_mode mode = GET_MODE (x);
0cedb36c
JL
6153
6154 switch (GET_RTX_CLASS (code))
6155 {
ec8e098d 6156 case RTX_UNARY:
0cedb36c
JL
6157 return simplify_unary_operation (code, mode,
6158 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
ec8e098d 6159 case RTX_COMM_ARITH:
df0afdbe 6160 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
cf6bcbd0 6161 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
b42abad8 6162
2b72593e 6163 /* Fall through.... */
b42abad8 6164
ec8e098d 6165 case RTX_BIN_ARITH:
0cedb36c
JL
6166 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6167
ec8e098d
PB
6168 case RTX_TERNARY:
6169 case RTX_BITFIELD_OPS:
0cedb36c 6170 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
d9c695ff
RK
6171 XEXP (x, 0), XEXP (x, 1),
6172 XEXP (x, 2));
0cedb36c 6173
ec8e098d
PB
6174 case RTX_COMPARE:
6175 case RTX_COMM_COMPARE:
c6fb08ad
PB
6176 return simplify_relational_operation (code, mode,
6177 ((GET_MODE (XEXP (x, 0))
6178 != VOIDmode)
6179 ? GET_MODE (XEXP (x, 0))
6180 : GET_MODE (XEXP (x, 1))),
6181 XEXP (x, 0),
6182 XEXP (x, 1));
d41ba56f 6183
ec8e098d 6184 case RTX_EXTRA:
949c5d62 6185 if (code == SUBREG)
e2561558
RS
6186 return simplify_subreg (mode, SUBREG_REG (x),
6187 GET_MODE (SUBREG_REG (x)),
6188 SUBREG_BYTE (x));
d41ba56f
RS
6189 break;
6190
ec8e098d 6191 case RTX_OBJ:
d41ba56f
RS
6192 if (code == LO_SUM)
6193 {
6194 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6195 if (GET_CODE (XEXP (x, 0)) == HIGH
6196 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6197 return XEXP (x, 1);
6198 }
6199 break;
6200
0cedb36c 6201 default:
d41ba56f 6202 break;
0cedb36c 6203 }
d41ba56f 6204 return NULL;
0cedb36c 6205}