gcc/simplify-rtx.c (thirdparty/gcc.git)
Commit: Add gen_(const_)vec_duplicate helpers
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36
37 /* Simplification and canonicalization of RTL. */
38
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
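/* For instance, if the low half holds the bit pattern of -5 (negative
   when read as a signed HOST_WIDE_INT), the matching high half is
   HWI_SIGN_EXTEND (low) == HOST_WIDE_INT_M1; for a low half of 5 it
   is HOST_WIDE_INT_0.  */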
45
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
58 \f
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
62 {
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
64
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
70 }
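/* For example, negating a (const_int 5) in SImode yields
   (const_int -5) via gen_int_mode.  For modes wider than a
   HOST_WIDE_INT, the check above defers to
   simplify_const_unary_operation whenever the negation has the same
   HOST_WIDE_INT representation as the original (e.g. negating the
   most negative HOST_WIDE_INT), where a plain gen_int_mode result
   could be wrong.  */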
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
81
82 if (!is_int_mode (mode, &int_mode))
83 return false;
84
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
88
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
94 {
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
106 }
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
111 {
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
114 }
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
119
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
123 }
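/* For example, with a 32-bit SImode the only value accepted is the
   sign-bit constant: after masking to the mode's precision the
   immediate must equal 1 << 31, i.e. 0x80000000.  Wider constants are
   checked piecewise through their CONST_WIDE_INT or CONST_DOUBLE
   representation above.  */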
124
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
128
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133 scalar_int_mode int_mode;
134
135 if (!is_int_mode (mode, &int_mode))
136 return false;
137
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
141
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
144 }
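/* E.g. val_signbit_p (QImode, 0x80) is true: the QImode mask leaves
   0x80, which equals 1 << (8 - 1).  val_signbit_p (QImode, 0x180) is
   also true for the same reason, while val_signbit_p (QImode, 0x40)
   is false.  */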
145
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
150 {
151 unsigned int width;
152
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
156
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
160
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
163 }
164
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
169 {
170 unsigned int width;
171
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
175
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
179
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
182 }
183 \f
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
186
187 rtx
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
190 {
191 rtx tem;
192
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
197
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
202
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
204 }
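/* A typical call from elsewhere in the compiler might be

     rtx sum = simplify_gen_binary (PLUS, SImode, x, GEN_INT (4));

   If the addition folds (for instance when X is itself a constant),
   the folded rtx is returned; otherwise a fresh (plus:SI ...) is
   generated, with any constant operand canonicalized into the second
   position by the swap above.  */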
205 \f
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
208 rtx
209 avoid_constant_pool_reference (rtx x)
210 {
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
214
215 switch (GET_CODE (x))
216 {
217 case MEM:
218 break;
219
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
228
229 default:
230 return x;
231 }
232
233 if (GET_MODE (x) == BLKmode)
234 return x;
235
236 addr = XEXP (x, 0);
237
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
240
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
245 {
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
248 }
249
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
252
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
257 {
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
260
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
267 {
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
271 }
272 }
273
274 return x;
275 }
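/* For instance, a (mem:SF (symbol_ref)) whose address is a
   CONSTANT_POOL_ADDRESS_P symbol for a pooled (const_double:SF 1.0)
   is replaced by that CONST_DOUBLE, letting later simplifications see
   the constant directly.  */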
276 \f
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
280
281 rtx
282 delegitimize_mem_from_attrs (rtx x)
283 {
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
289 {
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
293
294 switch (TREE_CODE (decl))
295 {
296 default:
297 decl = NULL;
298 break;
299
300 case VAR_DECL:
301 break;
302
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
310 {
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
314
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
323 {
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
327 }
328 break;
329 }
330 }
331
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
339 {
340 rtx newx;
341
342 offset += MEM_OFFSET (x);
343
344 newx = DECL_RTL (decl);
345
346 if (MEM_P (newx))
347 {
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
349
350 /* Avoid creating a new MEM needlessly if we already had
351 the same address. We do if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
368 }
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
372 }
373 }
374
375 return x;
376 }
377 \f
378 /* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
380
381 rtx
382 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
384 {
385 rtx tem;
386
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
390
391 return gen_rtx_fmt_e (code, mode, op);
392 }
393
394 /* Likewise for ternary operations. */
395
396 rtx
397 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
399 {
400 rtx tem;
401
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
406
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
408 }
409
410 /* Likewise, for relational operations.
411 CMP_MODE specifies mode comparison is done in. */
412
413 rtx
414 simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
416 {
417 rtx tem;
418
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
422
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
424 }
425 \f
426 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
427 and simplify the result. If FN is non-NULL, call this callback on each
 428 X; if it returns non-NULL, replace X with its return value and simplify the
429 result. */
430
431 rtx
432 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
434 {
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
442
443 if (__builtin_expect (fn != NULL, 0))
444 {
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
448 }
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
451
452 switch (GET_RTX_CLASS (code))
453 {
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
461
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
469
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
480
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
493
494 case RTX_EXTRA:
495 if (code == SUBREG)
496 {
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
504 }
505 break;
506
507 case RTX_OBJ:
508 if (code == MEM)
509 {
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
514 }
515 else if (code == LO_SUM)
516 {
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
519
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
522 {
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
528 }
529
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
533 }
534 break;
535
536 default:
537 break;
538 }
539
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
544 {
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
549 {
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
553 {
554 if (newvec == vec)
555 {
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
560 }
561 RTVEC_ELT (newvec, j) = op;
562 }
563 }
564 break;
565
566 case 'e':
567 if (XEXP (x, i))
568 {
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
571 {
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
575 }
576 }
577 break;
578 }
579 return newx;
580 }
581
582 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
584
585 rtx
586 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
587 {
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
589 }
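/* For instance, given pseudo registers REG_A and REG_B,

     simplify_replace_rtx (gen_rtx_PLUS (SImode, reg_a, const1_rtx),
                           reg_a, reg_b)

   returns (plus:SI reg_b (const_int 1)), re-simplifying the PLUS in
   case the substitution enables further folding.  */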
590 \f
591 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
593
594 RTL provides two ways of truncating a value:
595
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
600
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
606
607 2. a TRUNCATE. This form handles both scalar and compound integers.
608
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
613
614 simplify_gen_unary (TRUNCATE, ...)
615
616 and leave simplify_unary_operation to work out which representation
617 should be used.
618
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
624
625 (and:DI X Y)
626
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
631
632 (and:DI (reg:DI X) (const_int 63))
633
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
637
638 static rtx
639 simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
641 {
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 scalar_int_mode int_mode, int_op_mode, subreg_mode;
645
646 gcc_assert (precision <= op_precision);
647
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
651 {
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
667 }
668
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 if (1
673 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
674 && (GET_CODE (op) == PLUS
675 || GET_CODE (op) == MINUS
676 || GET_CODE (op) == MULT))
677 {
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
680 {
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
684 }
685 }
686
687 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 688 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
703
704 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 705 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
715
716 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 717 (ashift:QI (x:QI) C), where C is a suitable small constant and
718 the outer subreg is effectively a truncation to the original mode. */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
727
728 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
729 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
730 and C2. */
731 if (GET_CODE (op) == AND
732 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
733 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
734 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
735 && CONST_INT_P (XEXP (op, 1)))
736 {
737 rtx op0 = (XEXP (XEXP (op, 0), 0));
738 rtx shift_op = XEXP (XEXP (op, 0), 1);
739 rtx mask_op = XEXP (op, 1);
740 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
741 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
742
743 if (shift < precision
744 /* If doing this transform works for an X with all bits set,
745 it works for any X. */
746 && ((GET_MODE_MASK (mode) >> shift) & mask)
747 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
748 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
749 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
750 {
751 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
752 return simplify_gen_binary (AND, mode, op0, mask_op);
753 }
754 }
755
756 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
757 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
758 changing len. */
759 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
760 && REG_P (XEXP (op, 0))
761 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
762 && CONST_INT_P (XEXP (op, 1))
763 && CONST_INT_P (XEXP (op, 2)))
764 {
765 rtx op0 = XEXP (op, 0);
766 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
767 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
768 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
769 {
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
772 {
773 pos -= op_precision - precision;
774 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
775 XEXP (op, 1), GEN_INT (pos));
776 }
777 }
778 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
779 {
780 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
781 if (op0)
782 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
783 XEXP (op, 1), XEXP (op, 2));
784 }
785 }
786
787 /* Recognize a word extraction from a multi-word subreg. */
788 if ((GET_CODE (op) == LSHIFTRT
789 || GET_CODE (op) == ASHIFTRT)
790 && SCALAR_INT_MODE_P (mode)
791 && SCALAR_INT_MODE_P (op_mode)
792 && precision >= BITS_PER_WORD
793 && 2 * precision <= op_precision
794 && CONST_INT_P (XEXP (op, 1))
795 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
796 && UINTVAL (XEXP (op, 1)) < op_precision)
797 {
798 int byte = subreg_lowpart_offset (mode, op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
804 }
805
806 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
807 and try replacing the TRUNCATE and shift with it. Don't do this
808 if the MEM has a mode-dependent address. */
809 if ((GET_CODE (op) == LSHIFTRT
810 || GET_CODE (op) == ASHIFTRT)
811 && is_a <scalar_int_mode> (mode, &int_mode)
812 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
813 && MEM_P (XEXP (op, 0))
814 && CONST_INT_P (XEXP (op, 1))
815 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
816 && INTVAL (XEXP (op, 1)) > 0
817 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
818 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
819 MEM_ADDR_SPACE (XEXP (op, 0)))
820 && ! MEM_VOLATILE_P (XEXP (op, 0))
821 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
822 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
823 {
824 int byte = subreg_lowpart_offset (int_mode, int_op_mode);
825 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
826 return adjust_address_nv (XEXP (op, 0), int_mode,
827 (WORDS_BIG_ENDIAN
828 ? byte - shifted_bytes
829 : byte + shifted_bytes));
830 }
831
832 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
833 (OP:SI foo:SI) if OP is NEG or ABS. */
834 if ((GET_CODE (op) == ABS
835 || GET_CODE (op) == NEG)
836 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
837 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
838 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
839 return simplify_gen_unary (GET_CODE (op), mode,
840 XEXP (XEXP (op, 0), 0), mode);
841
842 /* (truncate:A (subreg:B (truncate:C X) 0)) is
843 (truncate:A X). */
844 if (GET_CODE (op) == SUBREG
845 && is_a <scalar_int_mode> (mode, &int_mode)
846 && SCALAR_INT_MODE_P (op_mode)
847 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
848 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
849 && subreg_lowpart_p (op))
850 {
851 rtx inner = XEXP (SUBREG_REG (op), 0);
852 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
853 return simplify_gen_unary (TRUNCATE, int_mode, inner,
854 GET_MODE (inner));
855 else
856 /* If subreg above is paradoxical and C is narrower
857 than A, return (subreg:A (truncate:C X) 0). */
858 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
859 }
860
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op) == TRUNCATE)
863 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 GET_MODE (XEXP (op, 0)));
865
 866 /* (truncate:A (ior X C)) is (const_int -1) if C is already -1
 867 when truncated to mode A. */
868 if (GET_CODE (op) == IOR
869 && SCALAR_INT_MODE_P (mode)
870 && SCALAR_INT_MODE_P (op_mode)
871 && CONST_INT_P (XEXP (op, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 return constm1_rtx;
874
875 return NULL_RTX;
876 }
877 \f
878 /* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
881 rtx
882 simplify_unary_operation (enum rtx_code code, machine_mode mode,
883 rtx op, machine_mode op_mode)
884 {
885 rtx trueop, tem;
886
887 trueop = avoid_constant_pool_reference (op);
888
889 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 if (tem)
891 return tem;
892
893 return simplify_unary_operation_1 (code, mode, op);
894 }
895
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 to be exact. */
898
899 static bool
900 exact_int_to_float_conversion_p (const_rtx op)
901 {
902 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
903 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode != VOIDmode);
906 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
907 int in_bits = in_prec;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode))
909 {
910 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
911 if (GET_CODE (op) == FLOAT)
912 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
913 else if (GET_CODE (op) == UNSIGNED_FLOAT)
914 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
915 else
916 gcc_unreachable ();
917 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
918 }
919 return in_bits <= out_bits;
920 }
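/* As an illustration, assuming IEEE single precision for SFmode
   (a 24-bit significand), (float:SF (reg:SI X)) is treated as exact
   only when nonzero_bits and num_sign_bit_copies prove that X needs
   at most 24 significant bits; trailing zero bits in the nonzero mask
   are not counted against that budget.  */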
921
922 /* Perform some simplifications we can do even if the operands
923 aren't constant. */
924 static rtx
925 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
926 {
927 enum rtx_code reversed;
928 rtx temp;
929 scalar_int_mode inner, int_mode, op_mode, op0_mode;
930
931 switch (code)
932 {
933 case NOT:
934 /* (not (not X)) == X. */
935 if (GET_CODE (op) == NOT)
936 return XEXP (op, 0);
937
938 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
939 comparison is all ones. */
940 if (COMPARISON_P (op)
941 && (mode == BImode || STORE_FLAG_VALUE == -1)
942 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
943 return simplify_gen_relational (reversed, mode, VOIDmode,
944 XEXP (op, 0), XEXP (op, 1));
945
946 /* (not (plus X -1)) can become (neg X). */
947 if (GET_CODE (op) == PLUS
948 && XEXP (op, 1) == constm1_rtx)
949 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
950
951 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
952 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
953 and MODE_VECTOR_INT. */
954 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
955 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
956 CONSTM1_RTX (mode));
957
958 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
959 if (GET_CODE (op) == XOR
960 && CONST_INT_P (XEXP (op, 1))
961 && (temp = simplify_unary_operation (NOT, mode,
962 XEXP (op, 1), mode)) != 0)
963 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
964
965 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
966 if (GET_CODE (op) == PLUS
967 && CONST_INT_P (XEXP (op, 1))
968 && mode_signbit_p (mode, XEXP (op, 1))
969 && (temp = simplify_unary_operation (NOT, mode,
970 XEXP (op, 1), mode)) != 0)
971 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
972
973
974 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
975 operands other than 1, but that is not valid. We could do a
976 similar simplification for (not (lshiftrt C X)) where C is
977 just the sign bit, but this doesn't seem common enough to
978 bother with. */
979 if (GET_CODE (op) == ASHIFT
980 && XEXP (op, 0) == const1_rtx)
981 {
982 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
983 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
984 }
985
986 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
987 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
988 so we can perform the above simplification. */
989 if (STORE_FLAG_VALUE == -1
990 && is_a <scalar_int_mode> (mode, &int_mode)
991 && GET_CODE (op) == ASHIFTRT
992 && CONST_INT_P (XEXP (op, 1))
993 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
994 return simplify_gen_relational (GE, int_mode, VOIDmode,
995 XEXP (op, 0), const0_rtx);
996
997
998 if (partial_subreg_p (op)
999 && subreg_lowpart_p (op)
1000 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1001 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1002 {
1003 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1004 rtx x;
1005
1006 x = gen_rtx_ROTATE (inner_mode,
1007 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1008 inner_mode),
1009 XEXP (SUBREG_REG (op), 1));
1010 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1011 if (temp)
1012 return temp;
1013 }
1014
1015 /* Apply De Morgan's laws to reduce number of patterns for machines
1016 with negating logical insns (and-not, nand, etc.). If result has
1017 only one NOT, put it first, since that is how the patterns are
1018 coded. */
1019 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1020 {
1021 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1022 machine_mode op_mode;
1023
1024 op_mode = GET_MODE (in1);
1025 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1026
1027 op_mode = GET_MODE (in2);
1028 if (op_mode == VOIDmode)
1029 op_mode = mode;
1030 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1031
1032 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1033 std::swap (in1, in2);
1034
1035 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1036 mode, in1, in2);
1037 }
1038
1039 /* (not (bswap x)) -> (bswap (not x)). */
1040 if (GET_CODE (op) == BSWAP)
1041 {
1042 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1043 return simplify_gen_unary (BSWAP, mode, x, mode);
1044 }
1045 break;
1046
1047 case NEG:
1048 /* (neg (neg X)) == X. */
1049 if (GET_CODE (op) == NEG)
1050 return XEXP (op, 0);
1051
1052 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
 1053 If the comparison is not reversible, use
1054 x ? y : (neg y). */
1055 if (GET_CODE (op) == IF_THEN_ELSE)
1056 {
1057 rtx cond = XEXP (op, 0);
1058 rtx true_rtx = XEXP (op, 1);
1059 rtx false_rtx = XEXP (op, 2);
1060
1061 if ((GET_CODE (true_rtx) == NEG
1062 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1063 || (GET_CODE (false_rtx) == NEG
1064 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1065 {
1066 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1067 temp = reversed_comparison (cond, mode);
1068 else
1069 {
1070 temp = cond;
1071 std::swap (true_rtx, false_rtx);
1072 }
1073 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1074 mode, temp, true_rtx, false_rtx);
1075 }
1076 }
1077
1078 /* (neg (plus X 1)) can become (not X). */
1079 if (GET_CODE (op) == PLUS
1080 && XEXP (op, 1) == const1_rtx)
1081 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1082
1083 /* Similarly, (neg (not X)) is (plus X 1). */
1084 if (GET_CODE (op) == NOT)
1085 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1086 CONST1_RTX (mode));
1087
1088 /* (neg (minus X Y)) can become (minus Y X). This transformation
1089 isn't safe for modes with signed zeros, since if X and Y are
1090 both +0, (minus Y X) is the same as (minus X Y). If the
1091 rounding mode is towards +infinity (or -infinity) then the two
1092 expressions will be rounded differently. */
1093 if (GET_CODE (op) == MINUS
1094 && !HONOR_SIGNED_ZEROS (mode)
1095 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1096 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1097
1098 if (GET_CODE (op) == PLUS
1099 && !HONOR_SIGNED_ZEROS (mode)
1100 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1101 {
1102 /* (neg (plus A C)) is simplified to (minus -C A). */
1103 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1104 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1105 {
1106 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1107 if (temp)
1108 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1109 }
1110
1111 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1112 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1113 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1114 }
1115
1116 /* (neg (mult A B)) becomes (mult A (neg B)).
1117 This works even for floating-point values. */
1118 if (GET_CODE (op) == MULT
1119 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1120 {
1121 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1122 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1123 }
1124
1125 /* NEG commutes with ASHIFT since it is multiplication. Only do
1126 this if we can then eliminate the NEG (e.g., if the operand
1127 is a constant). */
1128 if (GET_CODE (op) == ASHIFT)
1129 {
1130 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1131 if (temp)
1132 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1133 }
1134
1135 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1136 C is equal to the width of MODE minus 1. */
1137 if (GET_CODE (op) == ASHIFTRT
1138 && CONST_INT_P (XEXP (op, 1))
1139 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1140 return simplify_gen_binary (LSHIFTRT, mode,
1141 XEXP (op, 0), XEXP (op, 1));
1142
1143 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1144 C is equal to the width of MODE minus 1. */
1145 if (GET_CODE (op) == LSHIFTRT
1146 && CONST_INT_P (XEXP (op, 1))
1147 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1148 return simplify_gen_binary (ASHIFTRT, mode,
1149 XEXP (op, 0), XEXP (op, 1));
1150
1151 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1152 if (GET_CODE (op) == XOR
1153 && XEXP (op, 1) == const1_rtx
1154 && nonzero_bits (XEXP (op, 0), mode) == 1)
1155 return plus_constant (mode, XEXP (op, 0), -1);
1156
1157 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1158 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1159 if (GET_CODE (op) == LT
1160 && XEXP (op, 1) == const0_rtx
1161 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1162 {
1163 int_mode = as_a <scalar_int_mode> (mode);
1164 int isize = GET_MODE_PRECISION (inner);
1165 if (STORE_FLAG_VALUE == 1)
1166 {
1167 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1168 GEN_INT (isize - 1));
1169 if (int_mode == inner)
1170 return temp;
1171 if (GET_MODE_PRECISION (int_mode) > isize)
1172 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1173 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1174 }
1175 else if (STORE_FLAG_VALUE == -1)
1176 {
1177 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1178 GEN_INT (isize - 1));
1179 if (int_mode == inner)
1180 return temp;
1181 if (GET_MODE_PRECISION (int_mode) > isize)
1182 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1183 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1184 }
1185 }
1186 break;
1187
1188 case TRUNCATE:
1189 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1190 with the umulXi3_highpart patterns. */
1191 if (GET_CODE (op) == LSHIFTRT
1192 && GET_CODE (XEXP (op, 0)) == MULT)
1193 break;
1194
1195 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1196 {
1197 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1198 {
1199 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1200 if (temp)
1201 return temp;
1202 }
1203 /* We can't handle truncation to a partial integer mode here
1204 because we don't know the real bitsize of the partial
1205 integer mode. */
1206 break;
1207 }
1208
1209 if (GET_MODE (op) != VOIDmode)
1210 {
1211 temp = simplify_truncation (mode, op, GET_MODE (op));
1212 if (temp)
1213 return temp;
1214 }
1215
1216 /* If we know that the value is already truncated, we can
1217 replace the TRUNCATE with a SUBREG. */
1218 if (GET_MODE_NUNITS (mode) == 1
1219 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1220 || truncated_to_mode (mode, op)))
1221 {
1222 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1223 if (temp)
1224 return temp;
1225 }
1226
1227 /* A truncate of a comparison can be replaced with a subreg if
1228 STORE_FLAG_VALUE permits. This is like the previous test,
1229 but it works even if the comparison is done in a mode larger
1230 than HOST_BITS_PER_WIDE_INT. */
1231 if (HWI_COMPUTABLE_MODE_P (mode)
1232 && COMPARISON_P (op)
1233 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1234 {
1235 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1236 if (temp)
1237 return temp;
1238 }
1239
1240 /* A truncate of a memory is just loading the low part of the memory
1241 if we are not changing the meaning of the address. */
1242 if (GET_CODE (op) == MEM
1243 && !VECTOR_MODE_P (mode)
1244 && !MEM_VOLATILE_P (op)
1245 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1246 {
1247 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1248 if (temp)
1249 return temp;
1250 }
1251
1252 break;
1253
1254 case FLOAT_TRUNCATE:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
1257
1258 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1259 if (GET_CODE (op) == FLOAT_EXTEND
1260 && GET_MODE (XEXP (op, 0)) == mode)
1261 return XEXP (op, 0);
1262
1263 /* (float_truncate:SF (float_truncate:DF foo:XF))
1264 = (float_truncate:SF foo:XF).
1265 This may eliminate double rounding, so it is unsafe.
1266
1267 (float_truncate:SF (float_extend:XF foo:DF))
1268 = (float_truncate:SF foo:DF).
1269
1270 (float_truncate:DF (float_extend:XF foo:SF))
1271 = (float_extend:DF foo:SF). */
1272 if ((GET_CODE (op) == FLOAT_TRUNCATE
1273 && flag_unsafe_math_optimizations)
1274 || GET_CODE (op) == FLOAT_EXTEND)
1275 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1276 > GET_MODE_UNIT_SIZE (mode)
1277 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1278 mode,
1279 XEXP (op, 0), mode);
1280
1281 /* (float_truncate (float x)) is (float x) */
1282 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1283 && (flag_unsafe_math_optimizations
1284 || exact_int_to_float_conversion_p (op)))
1285 return simplify_gen_unary (GET_CODE (op), mode,
1286 XEXP (op, 0),
1287 GET_MODE (XEXP (op, 0)));
1288
1289 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1290 (OP:SF foo:SF) if OP is NEG or ABS. */
1291 if ((GET_CODE (op) == ABS
1292 || GET_CODE (op) == NEG)
1293 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1294 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1295 return simplify_gen_unary (GET_CODE (op), mode,
1296 XEXP (XEXP (op, 0), 0), mode);
1297
1298 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1299 is (float_truncate:SF x). */
1300 if (GET_CODE (op) == SUBREG
1301 && subreg_lowpart_p (op)
1302 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1303 return SUBREG_REG (op);
1304 break;
1305
1306 case FLOAT_EXTEND:
1307 if (DECIMAL_FLOAT_MODE_P (mode))
1308 break;
1309
1310 /* (float_extend (float_extend x)) is (float_extend x)
1311
1312 (float_extend (float x)) is (float x) assuming that double
1313 rounding can't happen.
1314 */
1315 if (GET_CODE (op) == FLOAT_EXTEND
1316 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1317 && exact_int_to_float_conversion_p (op)))
1318 return simplify_gen_unary (GET_CODE (op), mode,
1319 XEXP (op, 0),
1320 GET_MODE (XEXP (op, 0)));
1321
1322 break;
1323
1324 case ABS:
1325 /* (abs (neg <foo>)) -> (abs <foo>) */
1326 if (GET_CODE (op) == NEG)
1327 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1328 GET_MODE (XEXP (op, 0)));
1329
1330 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1331 do nothing. */
1332 if (GET_MODE (op) == VOIDmode)
1333 break;
1334
1335 /* If operand is something known to be positive, ignore the ABS. */
1336 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1337 || val_signbit_known_clear_p (GET_MODE (op),
1338 nonzero_bits (op, GET_MODE (op))))
1339 return op;
1340
1341 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1342 if (is_a <scalar_int_mode> (mode, &int_mode)
1343 && (num_sign_bit_copies (op, int_mode)
1344 == GET_MODE_PRECISION (int_mode)))
1345 return gen_rtx_NEG (int_mode, op);
1346
1347 break;
1348
1349 case FFS:
1350 /* (ffs (*_extend <X>)) = (ffs <X>) */
1351 if (GET_CODE (op) == SIGN_EXTEND
1352 || GET_CODE (op) == ZERO_EXTEND)
1353 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1354 GET_MODE (XEXP (op, 0)));
1355 break;
1356
1357 case POPCOUNT:
1358 switch (GET_CODE (op))
1359 {
1360 case BSWAP:
1361 case ZERO_EXTEND:
1362 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1363 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1364 GET_MODE (XEXP (op, 0)));
1365
1366 case ROTATE:
1367 case ROTATERT:
1368 /* Rotations don't affect popcount. */
1369 if (!side_effects_p (XEXP (op, 1)))
1370 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1371 GET_MODE (XEXP (op, 0)));
1372 break;
1373
1374 default:
1375 break;
1376 }
1377 break;
1378
1379 case PARITY:
1380 switch (GET_CODE (op))
1381 {
1382 case NOT:
1383 case BSWAP:
1384 case ZERO_EXTEND:
1385 case SIGN_EXTEND:
1386 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1387 GET_MODE (XEXP (op, 0)));
1388
1389 case ROTATE:
1390 case ROTATERT:
1391 /* Rotations don't affect parity. */
1392 if (!side_effects_p (XEXP (op, 1)))
1393 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1394 GET_MODE (XEXP (op, 0)));
1395 break;
1396
1397 default:
1398 break;
1399 }
1400 break;
1401
1402 case BSWAP:
1403 /* (bswap (bswap x)) -> x. */
1404 if (GET_CODE (op) == BSWAP)
1405 return XEXP (op, 0);
1406 break;
1407
1408 case FLOAT:
1409 /* (float (sign_extend <X>)) = (float <X>). */
1410 if (GET_CODE (op) == SIGN_EXTEND)
1411 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1412 GET_MODE (XEXP (op, 0)));
1413 break;
1414
1415 case SIGN_EXTEND:
1416 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1417 becomes just the MINUS if its mode is MODE. This allows
1418 folding switch statements on machines using casesi (such as
1419 the VAX). */
1420 if (GET_CODE (op) == TRUNCATE
1421 && GET_MODE (XEXP (op, 0)) == mode
1422 && GET_CODE (XEXP (op, 0)) == MINUS
1423 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1424 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1425 return XEXP (op, 0);
1426
1427 /* Extending a widening multiplication should be canonicalized to
1428 a wider widening multiplication. */
1429 if (GET_CODE (op) == MULT)
1430 {
1431 rtx lhs = XEXP (op, 0);
1432 rtx rhs = XEXP (op, 1);
1433 enum rtx_code lcode = GET_CODE (lhs);
1434 enum rtx_code rcode = GET_CODE (rhs);
1435
1436 /* Widening multiplies usually extend both operands, but sometimes
1437 they use a shift to extract a portion of a register. */
1438 if ((lcode == SIGN_EXTEND
1439 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1440 && (rcode == SIGN_EXTEND
1441 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1442 {
1443 machine_mode lmode = GET_MODE (lhs);
1444 machine_mode rmode = GET_MODE (rhs);
1445 int bits;
1446
1447 if (lcode == ASHIFTRT)
1448 /* Number of bits not shifted off the end. */
1449 bits = (GET_MODE_UNIT_PRECISION (lmode)
1450 - INTVAL (XEXP (lhs, 1)));
1451 else /* lcode == SIGN_EXTEND */
1452 /* Size of inner mode. */
1453 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1454
1455 if (rcode == ASHIFTRT)
1456 bits += (GET_MODE_UNIT_PRECISION (rmode)
1457 - INTVAL (XEXP (rhs, 1)));
1458 else /* rcode == SIGN_EXTEND */
1459 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1460
 1461 /* We can only widen multiplies if the result is mathematically
1462 equivalent. I.e. if overflow was impossible. */
1463 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1464 return simplify_gen_binary
1465 (MULT, mode,
1466 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1467 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1468 }
1469 }
1470
1471 /* Check for a sign extension of a subreg of a promoted
1472 variable, where the promotion is sign-extended, and the
1473 target mode is the same as the variable's promotion. */
1474 if (GET_CODE (op) == SUBREG
1475 && SUBREG_PROMOTED_VAR_P (op)
1476 && SUBREG_PROMOTED_SIGNED_P (op)
1477 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1478 {
1479 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1480 if (temp)
1481 return temp;
1482 }
1483
1484 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1485 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1486 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1487 {
1488 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1489 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1490 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1491 GET_MODE (XEXP (op, 0)));
1492 }
1493
1494 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1495 is (sign_extend:M (subreg:O <X>)) if there is mode with
1496 GET_MODE_BITSIZE (N) - I bits.
1497 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1498 is similarly (zero_extend:M (subreg:O <X>)). */
1499 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1500 && GET_CODE (XEXP (op, 0)) == ASHIFT
1501 && is_a <scalar_int_mode> (mode, &int_mode)
1502 && CONST_INT_P (XEXP (op, 1))
1503 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1504 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1505 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1506 {
1507 scalar_int_mode tmode;
1508 gcc_assert (GET_MODE_BITSIZE (int_mode)
1509 > GET_MODE_BITSIZE (op_mode));
1510 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1511 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1512 {
1513 rtx inner =
1514 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1515 if (inner)
1516 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1517 ? SIGN_EXTEND : ZERO_EXTEND,
1518 int_mode, inner, tmode);
1519 }
1520 }
1521
1522 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1523 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1524 if (GET_CODE (op) == LSHIFTRT
1525 && CONST_INT_P (XEXP (op, 1))
1526 && XEXP (op, 1) != const0_rtx)
1527 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1528
1529 #if defined(POINTERS_EXTEND_UNSIGNED)
1530 /* As we do not know which address space the pointer is referring to,
1531 we can do this only if the target does not support different pointer
1532 or address modes depending on the address space. */
1533 if (target_default_pointer_address_modes_p ()
1534 && ! POINTERS_EXTEND_UNSIGNED
1535 && mode == Pmode && GET_MODE (op) == ptr_mode
1536 && (CONSTANT_P (op)
1537 || (GET_CODE (op) == SUBREG
1538 && REG_P (SUBREG_REG (op))
1539 && REG_POINTER (SUBREG_REG (op))
1540 && GET_MODE (SUBREG_REG (op)) == Pmode))
1541 && !targetm.have_ptr_extend ())
1542 {
1543 temp
1544 = convert_memory_address_addr_space_1 (Pmode, op,
1545 ADDR_SPACE_GENERIC, false,
1546 true);
1547 if (temp)
1548 return temp;
1549 }
1550 #endif
1551 break;
1552
1553 case ZERO_EXTEND:
1554 /* Check for a zero extension of a subreg of a promoted
1555 variable, where the promotion is zero-extended, and the
1556 target mode is the same as the variable's promotion. */
1557 if (GET_CODE (op) == SUBREG
1558 && SUBREG_PROMOTED_VAR_P (op)
1559 && SUBREG_PROMOTED_UNSIGNED_P (op)
1560 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1561 {
1562 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1563 if (temp)
1564 return temp;
1565 }
1566
1567 /* Extending a widening multiplication should be canonicalized to
1568 a wider widening multiplication. */
1569 if (GET_CODE (op) == MULT)
1570 {
1571 rtx lhs = XEXP (op, 0);
1572 rtx rhs = XEXP (op, 1);
1573 enum rtx_code lcode = GET_CODE (lhs);
1574 enum rtx_code rcode = GET_CODE (rhs);
1575
1576 /* Widening multiplies usually extend both operands, but sometimes
1577 they use a shift to extract a portion of a register. */
1578 if ((lcode == ZERO_EXTEND
1579 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1580 && (rcode == ZERO_EXTEND
1581 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1582 {
1583 machine_mode lmode = GET_MODE (lhs);
1584 machine_mode rmode = GET_MODE (rhs);
1585 int bits;
1586
1587 if (lcode == LSHIFTRT)
1588 /* Number of bits not shifted off the end. */
1589 bits = (GET_MODE_UNIT_PRECISION (lmode)
1590 - INTVAL (XEXP (lhs, 1)));
1591 else /* lcode == ZERO_EXTEND */
1592 /* Size of inner mode. */
1593 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1594
1595 if (rcode == LSHIFTRT)
1596 bits += (GET_MODE_UNIT_PRECISION (rmode)
1597 - INTVAL (XEXP (rhs, 1)));
1598 else /* rcode == ZERO_EXTEND */
1599 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1600
 1601 /* We can only widen multiplies if the result is mathematically
1602 equivalent. I.e. if overflow was impossible. */
1603 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1604 return simplify_gen_binary
1605 (MULT, mode,
1606 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1607 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1608 }
1609 }
1610
1611 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1612 if (GET_CODE (op) == ZERO_EXTEND)
1613 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1614 GET_MODE (XEXP (op, 0)));
1615
1616 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1617 is (zero_extend:M (subreg:O <X>)) if there is mode with
1618 GET_MODE_PRECISION (N) - I bits. */
1619 if (GET_CODE (op) == LSHIFTRT
1620 && GET_CODE (XEXP (op, 0)) == ASHIFT
1621 && is_a <scalar_int_mode> (mode, &int_mode)
1622 && CONST_INT_P (XEXP (op, 1))
1623 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1624 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1625 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1626 {
1627 scalar_int_mode tmode;
1628 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1629 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1630 {
1631 rtx inner =
1632 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1633 if (inner)
1634 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1635 inner, tmode);
1636 }
1637 }
1638
1639 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1640 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1641 of mode N. E.g.
1642 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1643 (and:SI (reg:SI) (const_int 63)). */
1644 if (partial_subreg_p (op)
1645 && is_a <scalar_int_mode> (mode, &int_mode)
1646 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1647 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1648 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1649 && subreg_lowpart_p (op)
1650 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1651 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1652 {
1653 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1654 return SUBREG_REG (op);
1655 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1656 op0_mode);
1657 }
1658
1659 #if defined(POINTERS_EXTEND_UNSIGNED)
1660 /* As we do not know which address space the pointer is referring to,
1661 we can do this only if the target does not support different pointer
1662 or address modes depending on the address space. */
1663 if (target_default_pointer_address_modes_p ()
1664 && POINTERS_EXTEND_UNSIGNED > 0
1665 && mode == Pmode && GET_MODE (op) == ptr_mode
1666 && (CONSTANT_P (op)
1667 || (GET_CODE (op) == SUBREG
1668 && REG_P (SUBREG_REG (op))
1669 && REG_POINTER (SUBREG_REG (op))
1670 && GET_MODE (SUBREG_REG (op)) == Pmode))
1671 && !targetm.have_ptr_extend ())
1672 {
1673 temp
1674 = convert_memory_address_addr_space_1 (Pmode, op,
1675 ADDR_SPACE_GENERIC, false,
1676 true);
1677 if (temp)
1678 return temp;
1679 }
1680 #endif
1681 break;
1682
1683 default:
1684 break;
1685 }
1686
1687 return 0;
1688 }
1689
1690 /* Try to compute the value of a unary operation CODE whose output mode is to
1691 be MODE with input operand OP whose mode was originally OP_MODE.
1692 Return zero if the value cannot be computed. */
1693 rtx
1694 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1695 rtx op, machine_mode op_mode)
1696 {
1697 scalar_int_mode result_mode;
1698
1699 if (code == VEC_DUPLICATE)
1700 {
1701 gcc_assert (VECTOR_MODE_P (mode));
1702 if (GET_MODE (op) != VOIDmode)
1703 {
1704 if (!VECTOR_MODE_P (GET_MODE (op)))
1705 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1706 else
1707 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1708 (GET_MODE (op)));
1709 }
1710 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1711 return gen_const_vec_duplicate (mode, op);
1712 if (GET_CODE (op) == CONST_VECTOR)
1713 {
1714 int elt_size = GET_MODE_UNIT_SIZE (mode);
1715 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1716 rtvec v = rtvec_alloc (n_elts);
1717 unsigned int i;
1718
1719 machine_mode inmode = GET_MODE (op);
1720 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1721 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1722
1723 gcc_assert (in_n_elts < n_elts);
1724 gcc_assert ((n_elts % in_n_elts) == 0);
1725 for (i = 0; i < n_elts; i++)
1726 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1727 return gen_rtx_CONST_VECTOR (mode, v);
1728 }
1729 }
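/* For instance, (vec_duplicate:V4SI (const_int 5)) folds to the
   CONST_VECTOR [5, 5, 5, 5], and duplicating a two-element CONST_VECTOR
   into a four-element mode repeats its elements twice.  */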
1730
1731 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1732 {
1733 int elt_size = GET_MODE_UNIT_SIZE (mode);
1734 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1735 machine_mode opmode = GET_MODE (op);
1736 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1737 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1738 rtvec v = rtvec_alloc (n_elts);
1739 unsigned int i;
1740
1741 gcc_assert (op_n_elts == n_elts);
1742 for (i = 0; i < n_elts; i++)
1743 {
1744 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1745 CONST_VECTOR_ELT (op, i),
1746 GET_MODE_INNER (opmode));
1747 if (!x)
1748 return 0;
1749 RTVEC_ELT (v, i) = x;
1750 }
1751 return gen_rtx_CONST_VECTOR (mode, v);
1752 }
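/* E.g. folding (neg:V2SI) of the CONST_VECTOR [1, -2] applies NEG to each
   element separately and yields the CONST_VECTOR [-1, 2].  */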
1753
1754 /* The order of these tests is critical so that, for example, we don't
1755 check the wrong mode (input vs. output) for a conversion operation,
1756 such as FIX. At some point, this should be simplified. */
1757
1758 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1759 {
1760 REAL_VALUE_TYPE d;
1761
1762 if (op_mode == VOIDmode)
1763 {
1764 /* CONST_INTs have VOIDmode as their mode. We assume that all
1765 the bits of the constant are significant, though this is
1766 a dangerous assumption: CONST_INTs are often created and
1767 used with garbage in the bits outside of the precision of
1768 the implied mode of the const_int. */
1769 op_mode = MAX_MODE_INT;
1770 }
1771
1772 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1773
1774 /* Avoid the folding if flag_signaling_nans is on and
1775 operand is a signaling NaN. */
1776 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1777 return 0;
1778
1779 d = real_value_truncate (mode, d);
1780 return const_double_from_real_value (d, mode);
1781 }
1782 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1783 {
1784 REAL_VALUE_TYPE d;
1785
1786 if (op_mode == VOIDmode)
1787 {
1788 /* CONST_INTs have VOIDmode as their mode. We assume that all
1789 the bits of the constant are significant, though this is
1790 a dangerous assumption: CONST_INTs are often created and
1791 used with garbage in the bits outside of the precision of
1792 the implied mode of the const_int. */
1793 op_mode = MAX_MODE_INT;
1794 }
1795
1796 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1797
1798 /* Avoid the folding if flag_signaling_nans is on and
1799 operand is a signaling NaN. */
1800 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1801 return 0;
1802
1803 d = real_value_truncate (mode, d);
1804 return const_double_from_real_value (d, mode);
1805 }
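/* For example, (float:SF (const_int -3)) folds to -3.0, while
   (unsigned_float:SF ...) would interpret the same bits as a large
   unsigned value instead.  */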
1806
1807 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1808 {
1809 unsigned int width = GET_MODE_PRECISION (result_mode);
1810 wide_int result;
1811 scalar_int_mode imode = (op_mode == VOIDmode
1812 ? result_mode
1813 : as_a <scalar_int_mode> (op_mode));
1814 rtx_mode_t op0 = rtx_mode_t (op, imode);
1815 int int_value;
1816
1817 #if TARGET_SUPPORTS_WIDE_INT == 0
1818 /* This assert keeps the simplification from producing a result
1819 that cannot be represented in a CONST_DOUBLE. A lot of
1820 upstream callers expect that this function never fails to
1821 simplify something, so if you added this check to the test
1822 above, the code would die later anyway. If this assert
1823 triggers, you just need to make the port support wide int. */
1824 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1825 #endif
1826
1827 switch (code)
1828 {
1829 case NOT:
1830 result = wi::bit_not (op0);
1831 break;
1832
1833 case NEG:
1834 result = wi::neg (op0);
1835 break;
1836
1837 case ABS:
1838 result = wi::abs (op0);
1839 break;
1840
1841 case FFS:
1842 result = wi::shwi (wi::ffs (op0), result_mode);
1843 break;
1844
1845 case CLZ:
1846 if (wi::ne_p (op0, 0))
1847 int_value = wi::clz (op0);
1848 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1849 int_value = GET_MODE_PRECISION (imode);
1850 result = wi::shwi (int_value, result_mode);
1851 break;
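/* For instance, (clz:SI (const_int 1)) folds to 31; at zero the value
   comes from CLZ_DEFINED_VALUE_AT_ZERO if the target defines one, and
   otherwise defaults to the precision of the mode.  */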
1852
1853 case CLRSB:
1854 result = wi::shwi (wi::clrsb (op0), result_mode);
1855 break;
1856
1857 case CTZ:
1858 if (wi::ne_p (op0, 0))
1859 int_value = wi::ctz (op0);
1860 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1861 int_value = GET_MODE_PRECISION (imode);
1862 result = wi::shwi (int_value, result_mode);
1863 break;
1864
1865 case POPCOUNT:
1866 result = wi::shwi (wi::popcount (op0), result_mode);
1867 break;
1868
1869 case PARITY:
1870 result = wi::shwi (wi::parity (op0), result_mode);
1871 break;
1872
1873 case BSWAP:
1874 result = wide_int (op0).bswap ();
1875 break;
1876
1877 case TRUNCATE:
1878 case ZERO_EXTEND:
1879 result = wide_int::from (op0, width, UNSIGNED);
1880 break;
1881
1882 case SIGN_EXTEND:
1883 result = wide_int::from (op0, width, SIGNED);
1884 break;
1885
1886 case SQRT:
1887 default:
1888 return 0;
1889 }
1890
1891 return immed_wide_int_const (result, result_mode);
1892 }
1893
1894 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1895 && SCALAR_FLOAT_MODE_P (mode)
1896 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1897 {
1898 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1899 switch (code)
1900 {
1901 case SQRT:
1902 return 0;
1903 case ABS:
1904 d = real_value_abs (&d);
1905 break;
1906 case NEG:
1907 d = real_value_negate (&d);
1908 break;
1909 case FLOAT_TRUNCATE:
1910 /* Don't perform the operation if flag_signaling_nans is on
1911 and the operand is a signaling NaN. */
1912 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1913 return NULL_RTX;
1914 d = real_value_truncate (mode, d);
1915 break;
1916 case FLOAT_EXTEND:
1917 /* Don't perform the operation if flag_signaling_nans is on
1918 and the operand is a signaling NaN. */
1919 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1920 return NULL_RTX;
1921 /* All this does is change the mode, unless we are changing
1922 the mode class. */
1923 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1924 real_convert (&d, mode, &d);
1925 break;
1926 case FIX:
1927 /* Don't perform the operation if flag_signaling_nans is on
1928 and the operand is a signaling NaN. */
1929 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1930 return NULL_RTX;
1931 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1932 break;
1933 case NOT:
1934 {
1935 long tmp[4];
1936 int i;
1937
1938 real_to_target (tmp, &d, GET_MODE (op));
1939 for (i = 0; i < 4; i++)
1940 tmp[i] = ~tmp[i];
1941 real_from_target (&d, tmp, mode);
1942 break;
1943 }
1944 default:
1945 gcc_unreachable ();
1946 }
1947 return const_double_from_real_value (d, mode);
1948 }
1949 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1950 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1951 && is_int_mode (mode, &result_mode))
1952 {
1953 unsigned int width = GET_MODE_PRECISION (result_mode);
1954 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1955 operators are intentionally left unspecified (to ease implementation
1956 by target backends), for consistency, this routine implements the
1957 same semantics for constant folding as used by the middle-end. */
1958
1959 /* This was formerly used only for non-IEEE float.
1960 eggert@twinsun.com says it is safe for IEEE also. */
1961 REAL_VALUE_TYPE t;
1962 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1963 wide_int wmax, wmin;
1964 /* The fail flag is part of the interface to real_to_integer, but
1965 we check the bounds ourselves before making that call. */
1966 bool fail;
1967
1968 switch (code)
1969 {
1970 case FIX:
1971 if (REAL_VALUE_ISNAN (*x))
1972 return const0_rtx;
1973
1974 /* Test against the signed upper bound. */
1975 wmax = wi::max_value (width, SIGNED);
1976 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1977 if (real_less (&t, x))
1978 return immed_wide_int_const (wmax, mode);
1979
1980 /* Test against the signed lower bound. */
1981 wmin = wi::min_value (width, SIGNED);
1982 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1983 if (real_less (x, &t))
1984 return immed_wide_int_const (wmin, mode);
1985
1986 return immed_wide_int_const (real_to_integer (x, &fail, width),
1987 mode);
1988
1989 case UNSIGNED_FIX:
1990 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1991 return const0_rtx;
1992
1993 /* Test against the unsigned upper bound. */
1994 wmax = wi::max_value (width, UNSIGNED);
1995 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1996 if (real_less (&t, x))
1997 return immed_wide_int_const (wmax, mode);
1998
1999 return immed_wide_int_const (real_to_integer (x, &fail, width),
2000 mode);
2001
2002 default:
2003 gcc_unreachable ();
2004 }
2005 }
2006
2007 return NULL_RTX;
2008 }
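/* A minimal usage sketch (hypothetical caller): folding (neg:SI (const_int 5))
   might be written as
     rtx x = simplify_const_unary_operation (NEG, SImode, GEN_INT (5), SImode);
   which is expected to return (const_int -5).  */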
2009 \f
2010 /* Subroutine of simplify_binary_operation to simplify a binary operation
2011 CODE that can commute with byte swapping, with result mode MODE and
2012 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2013 Return zero if no simplification or canonicalization is possible. */
2014
2015 static rtx
2016 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2017 rtx op0, rtx op1)
2018 {
2019 rtx tem;
2020
2021 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 the byte-swapped C1. */
2022 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2023 {
2024 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2025 simplify_gen_unary (BSWAP, mode, op1, mode));
2026 return simplify_gen_unary (BSWAP, mode, tem, mode);
2027 }
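/* For example, in SImode, (and (bswap X) (const_int 0xff)) becomes
   (bswap (and X (const_int 0xff000000))): masking the low byte of the
   byte-swapped value is the same as masking the high byte first.  */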
2028
2029 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2030 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2031 {
2032 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2033 return simplify_gen_unary (BSWAP, mode, tem, mode);
2034 }
2035
2036 return NULL_RTX;
2037 }
2038
2039 /* Subroutine of simplify_binary_operation to simplify a commutative,
2040 associative binary operation CODE with result mode MODE, operating
2041 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2042 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2043 canonicalization is possible. */
2044
2045 static rtx
2046 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2047 rtx op0, rtx op1)
2048 {
2049 rtx tem;
2050
2051 /* Linearize the operator to the left. */
2052 if (GET_CODE (op1) == code)
2053 {
2054 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
2055 if (GET_CODE (op0) == code)
2056 {
2057 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2058 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2059 }
2060
2061 /* "a op (b op c)" becomes "(b op c) op a". */
2062 if (! swap_commutative_operands_p (op1, op0))
2063 return simplify_gen_binary (code, mode, op1, op0);
2064
2065 std::swap (op0, op1);
2066 }
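/* E.g. for PLUS this turns "(a + b) + (c + d)" into "((a + b) + c) + d"
   before the canonicalization and folding attempts below.  */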
2067
2068 if (GET_CODE (op0) == code)
2069 {
2070 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2071 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2072 {
2073 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2074 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2075 }
2076
2077 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2078 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2079 if (tem != 0)
2080 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2081
2082 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2083 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2084 if (tem != 0)
2085 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2086 }
2087
2088 return 0;
2089 }
2090
2091
2092 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2093 and OP1. Return 0 if no simplification is possible.
2094
2095 Don't use this for relational operations such as EQ or LT.
2096 Use simplify_relational_operation instead. */
2097 rtx
2098 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2099 rtx op0, rtx op1)
2100 {
2101 rtx trueop0, trueop1;
2102 rtx tem;
2103
2104 /* Relational operations don't work here. We must know the mode
2105 of the operands in order to do the comparison correctly.
2106 Assuming a full word can give incorrect results.
2107 Consider comparing 128 with -128 in QImode. */
2108 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2109 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2110
2111 /* Make sure the constant is second. */
2112 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2113 && swap_commutative_operands_p (op0, op1))
2114 std::swap (op0, op1);
2115
2116 trueop0 = avoid_constant_pool_reference (op0);
2117 trueop1 = avoid_constant_pool_reference (op1);
2118
2119 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2120 if (tem)
2121 return tem;
2122 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2123
2124 if (tem)
2125 return tem;
2126
2127 /* If the above steps did not result in a simplification and op0 or op1
2128 were constant pool references, use the referenced constants directly. */
2129 if (trueop0 != op0 || trueop1 != op1)
2130 return simplify_gen_binary (code, mode, trueop0, trueop1);
2131
2132 return NULL_RTX;
2133 }
2134
2135 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2136 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2137 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2138 actual constants. */
2139
2140 static rtx
2141 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2142 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2143 {
2144 rtx tem, reversed, opleft, opright;
2145 HOST_WIDE_INT val;
2146 scalar_int_mode int_mode, inner_mode;
2147
2148 /* Even if we can't compute a constant result,
2149 there are some cases worth simplifying. */
2150
2151 switch (code)
2152 {
2153 case PLUS:
2154 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2155 when x is NaN, infinite, or finite and nonzero. They aren't
2156 when x is -0 and the rounding mode is not towards -infinity,
2157 since (-0) + 0 is then 0. */
2158 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2159 return op0;
2160
2161 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2162 transformations are safe even for IEEE. */
2163 if (GET_CODE (op0) == NEG)
2164 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2165 else if (GET_CODE (op1) == NEG)
2166 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2167
2168 /* (~a) + 1 -> -a */
2169 if (INTEGRAL_MODE_P (mode)
2170 && GET_CODE (op0) == NOT
2171 && trueop1 == const1_rtx)
2172 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2173
2174 /* Handle both-operands-constant cases. We can only add
2175 CONST_INTs to constants since the sum of relocatable symbols
2176 can't be handled by most assemblers. Don't add CONST_INT
2177 to CONST_INT since overflow won't be computed properly if wider
2178 than HOST_BITS_PER_WIDE_INT. */
2179
2180 if ((GET_CODE (op0) == CONST
2181 || GET_CODE (op0) == SYMBOL_REF
2182 || GET_CODE (op0) == LABEL_REF)
2183 && CONST_INT_P (op1))
2184 return plus_constant (mode, op0, INTVAL (op1));
2185 else if ((GET_CODE (op1) == CONST
2186 || GET_CODE (op1) == SYMBOL_REF
2187 || GET_CODE (op1) == LABEL_REF)
2188 && CONST_INT_P (op0))
2189 return plus_constant (mode, op1, INTVAL (op0));
2190
2191 /* See if this is something like X * C - X or vice versa, or
2192 if the multiplication is written as a shift. If so, we can
2193 distribute and make a new multiply, shift, or maybe just
2194 have X (if C is 2 in the example above). But don't make
2195 something more expensive than we had before. */
2196
2197 if (is_a <scalar_int_mode> (mode, &int_mode))
2198 {
2199 rtx lhs = op0, rhs = op1;
2200
2201 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2202 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2203
2204 if (GET_CODE (lhs) == NEG)
2205 {
2206 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2207 lhs = XEXP (lhs, 0);
2208 }
2209 else if (GET_CODE (lhs) == MULT
2210 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2211 {
2212 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2213 lhs = XEXP (lhs, 0);
2214 }
2215 else if (GET_CODE (lhs) == ASHIFT
2216 && CONST_INT_P (XEXP (lhs, 1))
2217 && INTVAL (XEXP (lhs, 1)) >= 0
2218 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2219 {
2220 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2221 GET_MODE_PRECISION (int_mode));
2222 lhs = XEXP (lhs, 0);
2223 }
2224
2225 if (GET_CODE (rhs) == NEG)
2226 {
2227 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2228 rhs = XEXP (rhs, 0);
2229 }
2230 else if (GET_CODE (rhs) == MULT
2231 && CONST_INT_P (XEXP (rhs, 1)))
2232 {
2233 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2234 rhs = XEXP (rhs, 0);
2235 }
2236 else if (GET_CODE (rhs) == ASHIFT
2237 && CONST_INT_P (XEXP (rhs, 1))
2238 && INTVAL (XEXP (rhs, 1)) >= 0
2239 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2240 {
2241 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2242 GET_MODE_PRECISION (int_mode));
2243 rhs = XEXP (rhs, 0);
2244 }
2245
2246 if (rtx_equal_p (lhs, rhs))
2247 {
2248 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2249 rtx coeff;
2250 bool speed = optimize_function_for_speed_p (cfun);
2251
2252 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2253
2254 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2255 return (set_src_cost (tem, int_mode, speed)
2256 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2257 }
2258 }
2259
2260 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2261 if (CONST_SCALAR_INT_P (op1)
2262 && GET_CODE (op0) == XOR
2263 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2264 && mode_signbit_p (mode, op1))
2265 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2266 simplify_gen_binary (XOR, mode, op1,
2267 XEXP (op0, 1)));
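/* For example, with C1 == 0x90 and C2 == 0x80 (the QImode sign bit),
   (plus (xor X C1) C2) becomes (xor X (const_int 0x10)), because
   C1 ^ C2 == 0x10 and adding the sign bit is the same as XORing it in.  */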
2268
2269 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2270 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2271 && GET_CODE (op0) == MULT
2272 && GET_CODE (XEXP (op0, 0)) == NEG)
2273 {
2274 rtx in1, in2;
2275
2276 in1 = XEXP (XEXP (op0, 0), 0);
2277 in2 = XEXP (op0, 1);
2278 return simplify_gen_binary (MINUS, mode, op1,
2279 simplify_gen_binary (MULT, mode,
2280 in1, in2));
2281 }
2282
2283 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2284 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2285 is 1. */
2286 if (COMPARISON_P (op0)
2287 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2288 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2289 && (reversed = reversed_comparison (op0, mode)))
2290 return
2291 simplify_gen_unary (NEG, mode, reversed, mode);
2292
2293 /* If one of the operands is a PLUS or a MINUS, see if we can
2294 simplify this by the associative law.
2295 Don't use the associative law for floating point.
2296 The inaccuracy makes it nonassociative,
2297 and subtle programs can break if operations are associated. */
2298
2299 if (INTEGRAL_MODE_P (mode)
2300 && (plus_minus_operand_p (op0)
2301 || plus_minus_operand_p (op1))
2302 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2303 return tem;
2304
2305 /* Reassociate floating point addition only when the user
2306 specifies associative math operations. */
2307 if (FLOAT_MODE_P (mode)
2308 && flag_associative_math)
2309 {
2310 tem = simplify_associative_operation (code, mode, op0, op1);
2311 if (tem)
2312 return tem;
2313 }
2314 break;
2315
2316 case COMPARE:
2317 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2318 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2319 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2320 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2321 {
2322 rtx xop00 = XEXP (op0, 0);
2323 rtx xop10 = XEXP (op1, 0);
2324
2325 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2326 return xop00;
2327
2328 if (REG_P (xop00) && REG_P (xop10)
2329 && REGNO (xop00) == REGNO (xop10)
2330 && GET_MODE (xop00) == mode
2331 && GET_MODE (xop10) == mode
2332 && GET_MODE_CLASS (mode) == MODE_CC)
2333 return xop00;
2334 }
2335 break;
2336
2337 case MINUS:
2338 /* We can't assume x-x is 0 even with non-IEEE floating point,
2339 but since it is zero except in very strange circumstances, we
2340 will treat it as zero with -ffinite-math-only. */
2341 if (rtx_equal_p (trueop0, trueop1)
2342 && ! side_effects_p (op0)
2343 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2344 return CONST0_RTX (mode);
2345
2346 /* Change subtraction from zero into negation. (0 - x) is the
2347 same as -x when x is NaN, infinite, or finite and nonzero.
2348 But if the mode has signed zeros, and does not round towards
2349 -infinity, then 0 - 0 is 0, not -0. */
2350 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2351 return simplify_gen_unary (NEG, mode, op1, mode);
2352
2353 /* (-1 - a) is ~a, unless the expression contains symbolic
2354 constants, in which case not retaining additions and
2355 subtractions could cause invalid assembly to be produced. */
2356 if (trueop0 == constm1_rtx
2357 && !contains_symbolic_reference_p (op1))
2358 return simplify_gen_unary (NOT, mode, op1, mode);
2359
2360 /* Subtracting 0 has no effect unless the mode has signed zeros
2361 and supports rounding towards -infinity. In such a case,
2362 0 - 0 is -0. */
2363 if (!(HONOR_SIGNED_ZEROS (mode)
2364 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2365 && trueop1 == CONST0_RTX (mode))
2366 return op0;
2367
2368 /* See if this is something like X * C - X or vice versa, or
2369 if the multiplication is written as a shift. If so, we can
2370 distribute and make a new multiply, shift, or maybe just
2371 have X (if C is 2 in the example above). But don't make
2372 something more expensive than we had before. */
2373
2374 if (is_a <scalar_int_mode> (mode, &int_mode))
2375 {
2376 rtx lhs = op0, rhs = op1;
2377
2378 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2379 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2380
2381 if (GET_CODE (lhs) == NEG)
2382 {
2383 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2384 lhs = XEXP (lhs, 0);
2385 }
2386 else if (GET_CODE (lhs) == MULT
2387 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2388 {
2389 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2390 lhs = XEXP (lhs, 0);
2391 }
2392 else if (GET_CODE (lhs) == ASHIFT
2393 && CONST_INT_P (XEXP (lhs, 1))
2394 && INTVAL (XEXP (lhs, 1)) >= 0
2395 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2396 {
2397 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2398 GET_MODE_PRECISION (int_mode));
2399 lhs = XEXP (lhs, 0);
2400 }
2401
2402 if (GET_CODE (rhs) == NEG)
2403 {
2404 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2405 rhs = XEXP (rhs, 0);
2406 }
2407 else if (GET_CODE (rhs) == MULT
2408 && CONST_INT_P (XEXP (rhs, 1)))
2409 {
2410 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2411 rhs = XEXP (rhs, 0);
2412 }
2413 else if (GET_CODE (rhs) == ASHIFT
2414 && CONST_INT_P (XEXP (rhs, 1))
2415 && INTVAL (XEXP (rhs, 1)) >= 0
2416 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2417 {
2418 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2419 GET_MODE_PRECISION (int_mode));
2420 negcoeff1 = -negcoeff1;
2421 rhs = XEXP (rhs, 0);
2422 }
2423
2424 if (rtx_equal_p (lhs, rhs))
2425 {
2426 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2427 rtx coeff;
2428 bool speed = optimize_function_for_speed_p (cfun);
2429
2430 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2431
2432 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2433 return (set_src_cost (tem, int_mode, speed)
2434 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2435 }
2436 }
2437
2438 /* (a - (-b)) -> (a + b). True even for IEEE. */
2439 if (GET_CODE (op1) == NEG)
2440 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2441
2442 /* (-x - c) may be simplified as (-c - x). */
2443 if (GET_CODE (op0) == NEG
2444 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2445 {
2446 tem = simplify_unary_operation (NEG, mode, op1, mode);
2447 if (tem)
2448 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2449 }
2450
2451 /* Don't let a relocatable value get a negative coeff. */
2452 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2453 return simplify_gen_binary (PLUS, mode,
2454 op0,
2455 neg_const_int (mode, op1));
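/* E.g. (minus:SI (reg:SI R) (const_int 12)) is rewritten as
   (plus:SI (reg:SI R) (const_int -12)).  */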
2456
2457 /* (x - (x & y)) -> (x & ~y) */
2458 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2459 {
2460 if (rtx_equal_p (op0, XEXP (op1, 0)))
2461 {
2462 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2463 GET_MODE (XEXP (op1, 1)));
2464 return simplify_gen_binary (AND, mode, op0, tem);
2465 }
2466 if (rtx_equal_p (op0, XEXP (op1, 1)))
2467 {
2468 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2469 GET_MODE (XEXP (op1, 0)));
2470 return simplify_gen_binary (AND, mode, op0, tem);
2471 }
2472 }
2473
2474 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2475 by reversing the comparison code if valid. */
2476 if (STORE_FLAG_VALUE == 1
2477 && trueop0 == const1_rtx
2478 && COMPARISON_P (op1)
2479 && (reversed = reversed_comparison (op1, mode)))
2480 return reversed;
2481
2482 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2483 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2484 && GET_CODE (op1) == MULT
2485 && GET_CODE (XEXP (op1, 0)) == NEG)
2486 {
2487 rtx in1, in2;
2488
2489 in1 = XEXP (XEXP (op1, 0), 0);
2490 in2 = XEXP (op1, 1);
2491 return simplify_gen_binary (PLUS, mode,
2492 simplify_gen_binary (MULT, mode,
2493 in1, in2),
2494 op0);
2495 }
2496
2497 /* Canonicalize (minus (neg A) (mult B C)) to
2498 (minus (mult (neg B) C) A). */
2499 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2500 && GET_CODE (op1) == MULT
2501 && GET_CODE (op0) == NEG)
2502 {
2503 rtx in1, in2;
2504
2505 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2506 in2 = XEXP (op1, 1);
2507 return simplify_gen_binary (MINUS, mode,
2508 simplify_gen_binary (MULT, mode,
2509 in1, in2),
2510 XEXP (op0, 0));
2511 }
2512
2513 /* If one of the operands is a PLUS or a MINUS, see if we can
2514 simplify this by the associative law. This will, for example,
2515 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2516 Don't use the associative law for floating point.
2517 The inaccuracy makes it nonassociative,
2518 and subtle programs can break if operations are associated. */
2519
2520 if (INTEGRAL_MODE_P (mode)
2521 && (plus_minus_operand_p (op0)
2522 || plus_minus_operand_p (op1))
2523 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2524 return tem;
2525 break;
2526
2527 case MULT:
2528 if (trueop1 == constm1_rtx)
2529 return simplify_gen_unary (NEG, mode, op0, mode);
2530
2531 if (GET_CODE (op0) == NEG)
2532 {
2533 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2534 /* If op1 is a MULT as well and simplify_unary_operation
2535 just moved the NEG to the second operand, simplify_gen_binary
2536 below could, through simplify_associative_operation, move
2537 the NEG around again and recurse endlessly. */
2538 if (temp
2539 && GET_CODE (op1) == MULT
2540 && GET_CODE (temp) == MULT
2541 && XEXP (op1, 0) == XEXP (temp, 0)
2542 && GET_CODE (XEXP (temp, 1)) == NEG
2543 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2544 temp = NULL_RTX;
2545 if (temp)
2546 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2547 }
2548 if (GET_CODE (op1) == NEG)
2549 {
2550 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2551 /* If op0 is a MULT as well and simplify_unary_operation
2552 just moved the NEG to the second operand, simplify_gen_binary
2553 below could, through simplify_associative_operation, move
2554 the NEG around again and recurse endlessly. */
2555 if (temp
2556 && GET_CODE (op0) == MULT
2557 && GET_CODE (temp) == MULT
2558 && XEXP (op0, 0) == XEXP (temp, 0)
2559 && GET_CODE (XEXP (temp, 1)) == NEG
2560 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2561 temp = NULL_RTX;
2562 if (temp)
2563 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2564 }
2565
2566 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2567 x is NaN, since x * 0 is then also NaN. Nor is it valid
2568 when the mode has signed zeros, since multiplying a negative
2569 number by 0 will give -0, not 0. */
2570 if (!HONOR_NANS (mode)
2571 && !HONOR_SIGNED_ZEROS (mode)
2572 && trueop1 == CONST0_RTX (mode)
2573 && ! side_effects_p (op0))
2574 return op1;
2575
2576 /* In IEEE floating point, x*1 is not equivalent to x for
2577 signalling NaNs. */
2578 if (!HONOR_SNANS (mode)
2579 && trueop1 == CONST1_RTX (mode))
2580 return op0;
2581
2582 /* Convert multiply by constant power of two into shift. */
2583 if (CONST_SCALAR_INT_P (trueop1))
2584 {
2585 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2586 if (val >= 0)
2587 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2588 }
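/* For instance, (mult:SI X (const_int 8)) becomes
   (ashift:SI X (const_int 3)).  */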
2589
2590 /* x*2 is x+x and x*(-1) is -x */
2591 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2592 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2593 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2594 && GET_MODE (op0) == mode)
2595 {
2596 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2597
2598 if (real_equal (d1, &dconst2))
2599 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2600
2601 if (!HONOR_SNANS (mode)
2602 && real_equal (d1, &dconstm1))
2603 return simplify_gen_unary (NEG, mode, op0, mode);
2604 }
2605
2606 /* Optimize -x * -x as x * x. */
2607 if (FLOAT_MODE_P (mode)
2608 && GET_CODE (op0) == NEG
2609 && GET_CODE (op1) == NEG
2610 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2611 && !side_effects_p (XEXP (op0, 0)))
2612 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2613
2614 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2615 if (SCALAR_FLOAT_MODE_P (mode)
2616 && GET_CODE (op0) == ABS
2617 && GET_CODE (op1) == ABS
2618 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2619 && !side_effects_p (XEXP (op0, 0)))
2620 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2621
2622 /* Reassociate multiplication, but for floating point MULTs
2623 only when the user specifies unsafe math optimizations. */
2624 if (! FLOAT_MODE_P (mode)
2625 || flag_unsafe_math_optimizations)
2626 {
2627 tem = simplify_associative_operation (code, mode, op0, op1);
2628 if (tem)
2629 return tem;
2630 }
2631 break;
2632
2633 case IOR:
2634 if (trueop1 == CONST0_RTX (mode))
2635 return op0;
2636 if (INTEGRAL_MODE_P (mode)
2637 && trueop1 == CONSTM1_RTX (mode)
2638 && !side_effects_p (op0))
2639 return op1;
2640 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2641 return op0;
2642 /* A | (~A) -> -1 */
2643 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2644 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2645 && ! side_effects_p (op0)
2646 && SCALAR_INT_MODE_P (mode))
2647 return constm1_rtx;
2648
2649 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2650 if (CONST_INT_P (op1)
2651 && HWI_COMPUTABLE_MODE_P (mode)
2652 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2653 && !side_effects_p (op0))
2654 return op1;
2655
2656 /* Canonicalize (X & C1) | C2. */
2657 if (GET_CODE (op0) == AND
2658 && CONST_INT_P (trueop1)
2659 && CONST_INT_P (XEXP (op0, 1)))
2660 {
2661 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2662 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2663 HOST_WIDE_INT c2 = INTVAL (trueop1);
2664
2665 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2666 if ((c1 & c2) == c1
2667 && !side_effects_p (XEXP (op0, 0)))
2668 return trueop1;
2669
2670 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2671 if (((c1|c2) & mask) == mask)
2672 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2673 }
2674
2675 /* Convert (A & B) | A to A. */
2676 if (GET_CODE (op0) == AND
2677 && (rtx_equal_p (XEXP (op0, 0), op1)
2678 || rtx_equal_p (XEXP (op0, 1), op1))
2679 && ! side_effects_p (XEXP (op0, 0))
2680 && ! side_effects_p (XEXP (op0, 1)))
2681 return op1;
2682
2683 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2684 mode size to (rotate A CX). */
2685
2686 if (GET_CODE (op1) == ASHIFT
2687 || GET_CODE (op1) == SUBREG)
2688 {
2689 opleft = op1;
2690 opright = op0;
2691 }
2692 else
2693 {
2694 opright = op1;
2695 opleft = op0;
2696 }
2697
2698 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2699 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2700 && CONST_INT_P (XEXP (opleft, 1))
2701 && CONST_INT_P (XEXP (opright, 1))
2702 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2703 == GET_MODE_UNIT_PRECISION (mode)))
2704 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
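/* For example, in SImode, (ior (ashift A (const_int 3))
   (lshiftrt A (const_int 29))) becomes (rotate A (const_int 3)).  */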
2705
2706 /* Same, but for ashift that has been "simplified" to a wider mode
2707 by simplify_shift_const. */
2708
2709 if (GET_CODE (opleft) == SUBREG
2710 && is_a <scalar_int_mode> (mode, &int_mode)
2711 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2712 &inner_mode)
2713 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2714 && GET_CODE (opright) == LSHIFTRT
2715 && GET_CODE (XEXP (opright, 0)) == SUBREG
2716 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2717 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2718 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2719 SUBREG_REG (XEXP (opright, 0)))
2720 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2721 && CONST_INT_P (XEXP (opright, 1))
2722 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2723 + INTVAL (XEXP (opright, 1))
2724 == GET_MODE_PRECISION (int_mode)))
2725 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2726 XEXP (SUBREG_REG (opleft), 1));
2727
2728 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2729 a (sign_extend (plus ...)). In that case, check whether OP1 is a
2730 CONST_INT and the PLUS does not affect any of the bits in OP1; if so,
2731 we can do the IOR as a PLUS and we can associate. This is valid if
2732 OP1 can be safely shifted left C bits. */
2733 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2734 && GET_CODE (XEXP (op0, 0)) == PLUS
2735 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2736 && CONST_INT_P (XEXP (op0, 1))
2737 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2738 {
2739 int count = INTVAL (XEXP (op0, 1));
2740 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2741
2742 if (mask >> count == INTVAL (trueop1)
2743 && trunc_int_for_mode (mask, mode) == mask
2744 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2745 return simplify_gen_binary (ASHIFTRT, mode,
2746 plus_constant (mode, XEXP (op0, 0),
2747 mask),
2748 XEXP (op0, 1));
2749 }
2750
2751 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2752 if (tem)
2753 return tem;
2754
2755 tem = simplify_associative_operation (code, mode, op0, op1);
2756 if (tem)
2757 return tem;
2758 break;
2759
2760 case XOR:
2761 if (trueop1 == CONST0_RTX (mode))
2762 return op0;
2763 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2764 return simplify_gen_unary (NOT, mode, op0, mode);
2765 if (rtx_equal_p (trueop0, trueop1)
2766 && ! side_effects_p (op0)
2767 && GET_MODE_CLASS (mode) != MODE_CC)
2768 return CONST0_RTX (mode);
2769
2770 /* Canonicalize XOR of the most significant bit to PLUS. */
2771 if (CONST_SCALAR_INT_P (op1)
2772 && mode_signbit_p (mode, op1))
2773 return simplify_gen_binary (PLUS, mode, op0, op1);
2774 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2775 if (CONST_SCALAR_INT_P (op1)
2776 && GET_CODE (op0) == PLUS
2777 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2778 && mode_signbit_p (mode, XEXP (op0, 1)))
2779 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2780 simplify_gen_binary (XOR, mode, op1,
2781 XEXP (op0, 1)));
2782
2783 /* If we are XORing two things that have no bits in common,
2784 convert them into an IOR. This helps to detect rotations encoded
2785 with shifts and ORs, and enables further simplifications. */
2786
2787 if (HWI_COMPUTABLE_MODE_P (mode)
2788 && (nonzero_bits (op0, mode)
2789 & nonzero_bits (op1, mode)) == 0)
2790 return (simplify_gen_binary (IOR, mode, op0, op1));
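/* E.g. if nonzero_bits shows OP0 can only have bits in the low nibble and
   OP1 only in the high nibble, the XOR and the IOR compute the same value.  */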
2791
2792 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2793 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2794 (NOT y). */
2795 {
2796 int num_negated = 0;
2797
2798 if (GET_CODE (op0) == NOT)
2799 num_negated++, op0 = XEXP (op0, 0);
2800 if (GET_CODE (op1) == NOT)
2801 num_negated++, op1 = XEXP (op1, 0);
2802
2803 if (num_negated == 2)
2804 return simplify_gen_binary (XOR, mode, op0, op1);
2805 else if (num_negated == 1)
2806 return simplify_gen_unary (NOT, mode,
2807 simplify_gen_binary (XOR, mode, op0, op1),
2808 mode);
2809 }
2810
2811 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2812 correspond to a machine insn or result in further simplifications
2813 if B is a constant. */
2814
2815 if (GET_CODE (op0) == AND
2816 && rtx_equal_p (XEXP (op0, 1), op1)
2817 && ! side_effects_p (op1))
2818 return simplify_gen_binary (AND, mode,
2819 simplify_gen_unary (NOT, mode,
2820 XEXP (op0, 0), mode),
2821 op1);
2822
2823 else if (GET_CODE (op0) == AND
2824 && rtx_equal_p (XEXP (op0, 0), op1)
2825 && ! side_effects_p (op1))
2826 return simplify_gen_binary (AND, mode,
2827 simplify_gen_unary (NOT, mode,
2828 XEXP (op0, 1), mode),
2829 op1);
2830
2831 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2832 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2833 out bits inverted twice and not set by C. Similarly, given
2834 (xor (and (xor A B) C) D), simplify without inverting C in
2835 the xor operand: (xor (and A C) (B&C)^D).
2836 */
2837 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2838 && GET_CODE (XEXP (op0, 0)) == XOR
2839 && CONST_INT_P (op1)
2840 && CONST_INT_P (XEXP (op0, 1))
2841 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2842 {
2843 enum rtx_code op = GET_CODE (op0);
2844 rtx a = XEXP (XEXP (op0, 0), 0);
2845 rtx b = XEXP (XEXP (op0, 0), 1);
2846 rtx c = XEXP (op0, 1);
2847 rtx d = op1;
2848 HOST_WIDE_INT bval = INTVAL (b);
2849 HOST_WIDE_INT cval = INTVAL (c);
2850 HOST_WIDE_INT dval = INTVAL (d);
2851 HOST_WIDE_INT xcval;
2852
2853 if (op == IOR)
2854 xcval = ~cval;
2855 else
2856 xcval = cval;
2857
2858 return simplify_gen_binary (XOR, mode,
2859 simplify_gen_binary (op, mode, a, c),
2860 gen_int_mode ((bval & xcval) ^ dval,
2861 mode));
2862 }
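/* A worked instance in QImode: with B == 0x0f, C == 0xf0 and D == 0x33,
   (xor (ior (xor A (const_int 0x0f)) (const_int 0xf0)) (const_int 0x33))
   becomes (xor (ior A (const_int 0xf0)) (const_int 0x3c)), since
   (B & ~C) ^ D == 0x0f ^ 0x33 == 0x3c.  */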
2863
2864 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2865 we can transform like this:
2866 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2867 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2868 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2869 Attempt a few simplifications when B and C are both constants. */
2870 if (GET_CODE (op0) == AND
2871 && CONST_INT_P (op1)
2872 && CONST_INT_P (XEXP (op0, 1)))
2873 {
2874 rtx a = XEXP (op0, 0);
2875 rtx b = XEXP (op0, 1);
2876 rtx c = op1;
2877 HOST_WIDE_INT bval = INTVAL (b);
2878 HOST_WIDE_INT cval = INTVAL (c);
2879
2880 /* Instead of computing ~A&C, we compute its negation,
2881 (A|~C). If that yields -1, ~A&C is zero, so we can
2882 optimize for sure. If it does not simplify, we still try
2883 to compute ~A&C below, but since that always allocates
2884 RTL, we don't try that before committing to returning a
2885 simplified expression. */
2886 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2887 GEN_INT (~cval));
2888
2889 if ((~cval & bval) == 0)
2890 {
2891 rtx na_c = NULL_RTX;
2892 if (n_na_c)
2893 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2894 else
2895 {
2896 /* If ~A does not simplify, don't bother: we don't
2897 want to simplify 2 operations into 3, and if na_c
2898 were to simplify with na, n_na_c would have
2899 simplified as well. */
2900 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2901 if (na)
2902 na_c = simplify_gen_binary (AND, mode, na, c);
2903 }
2904
2905 /* Try to simplify ~A&C | ~B&C. */
2906 if (na_c != NULL_RTX)
2907 return simplify_gen_binary (IOR, mode, na_c,
2908 gen_int_mode (~bval & cval, mode));
2909 }
2910 else
2911 {
2912 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2913 if (n_na_c == CONSTM1_RTX (mode))
2914 {
2915 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2916 gen_int_mode (~cval & bval,
2917 mode));
2918 return simplify_gen_binary (IOR, mode, a_nc_b,
2919 gen_int_mode (~bval & cval,
2920 mode));
2921 }
2922 }
2923 }
2924
2925 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
2926 do (ior (and A ~C) (and B C)), which matches a machine instruction on some
2927 machines and also has a shorter instruction path length. */
2928 if (GET_CODE (op0) == AND
2929 && GET_CODE (XEXP (op0, 0)) == XOR
2930 && CONST_INT_P (XEXP (op0, 1))
2931 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2932 {
2933 rtx a = trueop1;
2934 rtx b = XEXP (XEXP (op0, 0), 1);
2935 rtx c = XEXP (op0, 1);
2936 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2937 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2938 rtx bc = simplify_gen_binary (AND, mode, b, c);
2939 return simplify_gen_binary (IOR, mode, a_nc, bc);
2940 }
2941 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2942 else if (GET_CODE (op0) == AND
2943 && GET_CODE (XEXP (op0, 0)) == XOR
2944 && CONST_INT_P (XEXP (op0, 1))
2945 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2946 {
2947 rtx a = XEXP (XEXP (op0, 0), 0);
2948 rtx b = trueop1;
2949 rtx c = XEXP (op0, 1);
2950 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2951 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2952 rtx ac = simplify_gen_binary (AND, mode, a, c);
2953 return simplify_gen_binary (IOR, mode, ac, b_nc);
2954 }
2955
2956 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2957 comparison if STORE_FLAG_VALUE is 1. */
2958 if (STORE_FLAG_VALUE == 1
2959 && trueop1 == const1_rtx
2960 && COMPARISON_P (op0)
2961 && (reversed = reversed_comparison (op0, mode)))
2962 return reversed;
2963
2964 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2965 is (lt foo (const_int 0)), so we can perform the above
2966 simplification if STORE_FLAG_VALUE is 1. */
2967
2968 if (is_a <scalar_int_mode> (mode, &int_mode)
2969 && STORE_FLAG_VALUE == 1
2970 && trueop1 == const1_rtx
2971 && GET_CODE (op0) == LSHIFTRT
2972 && CONST_INT_P (XEXP (op0, 1))
2973 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
2974 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
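/* For example, in SImode with STORE_FLAG_VALUE == 1,
   (xor (lshiftrt X (const_int 31)) (const_int 1)) becomes
   (ge:SI X (const_int 0)).  */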
2975
2976 /* (xor (comparison foo bar) (const_int sign-bit))
2977 when STORE_FLAG_VALUE is the sign bit. */
2978 if (is_a <scalar_int_mode> (mode, &int_mode)
2979 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
2980 && trueop1 == const_true_rtx
2981 && COMPARISON_P (op0)
2982 && (reversed = reversed_comparison (op0, int_mode)))
2983 return reversed;
2984
2985 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2986 if (tem)
2987 return tem;
2988
2989 tem = simplify_associative_operation (code, mode, op0, op1);
2990 if (tem)
2991 return tem;
2992 break;
2993
2994 case AND:
2995 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2996 return trueop1;
2997 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2998 return op0;
2999 if (HWI_COMPUTABLE_MODE_P (mode))
3000 {
3001 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3002 HOST_WIDE_INT nzop1;
3003 if (CONST_INT_P (trueop1))
3004 {
3005 HOST_WIDE_INT val1 = INTVAL (trueop1);
3006 /* If we are turning off bits already known off in OP0, we need
3007 not do an AND. */
3008 if ((nzop0 & ~val1) == 0)
3009 return op0;
3010 }
3011 nzop1 = nonzero_bits (trueop1, mode);
3012 /* If we are clearing all the nonzero bits, the result is zero. */
3013 if ((nzop1 & nzop0) == 0
3014 && !side_effects_p (op0) && !side_effects_p (op1))
3015 return CONST0_RTX (mode);
3016 }
3017 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3018 && GET_MODE_CLASS (mode) != MODE_CC)
3019 return op0;
3020 /* A & (~A) -> 0 */
3021 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3022 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3023 && ! side_effects_p (op0)
3024 && GET_MODE_CLASS (mode) != MODE_CC)
3025 return CONST0_RTX (mode);
3026
3027 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3028 there are no nonzero bits of C outside of X's mode. */
3029 if ((GET_CODE (op0) == SIGN_EXTEND
3030 || GET_CODE (op0) == ZERO_EXTEND)
3031 && CONST_INT_P (trueop1)
3032 && HWI_COMPUTABLE_MODE_P (mode)
3033 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3034 & UINTVAL (trueop1)) == 0)
3035 {
3036 machine_mode imode = GET_MODE (XEXP (op0, 0));
3037 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3038 gen_int_mode (INTVAL (trueop1),
3039 imode));
3040 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3041 }
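/* For instance, (and:SI (sign_extend:SI (reg:QI R)) (const_int 0x7c))
   becomes (zero_extend:SI (and:QI (reg:QI R) (const_int 0x7c))), because
   0x7c has no nonzero bits outside QImode.  */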
3042
3043 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3044 we might be able to further simplify the AND with X and potentially
3045 remove the truncation altogether. */
3046 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3047 {
3048 rtx x = XEXP (op0, 0);
3049 machine_mode xmode = GET_MODE (x);
3050 tem = simplify_gen_binary (AND, xmode, x,
3051 gen_int_mode (INTVAL (trueop1), xmode));
3052 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3053 }
3054
3055 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3056 if (GET_CODE (op0) == IOR
3057 && CONST_INT_P (trueop1)
3058 && CONST_INT_P (XEXP (op0, 1)))
3059 {
3060 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3061 return simplify_gen_binary (IOR, mode,
3062 simplify_gen_binary (AND, mode,
3063 XEXP (op0, 0), op1),
3064 gen_int_mode (tmp, mode));
3065 }
3066
3067 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3068 insn (and may simplify more). */
3069 if (GET_CODE (op0) == XOR
3070 && rtx_equal_p (XEXP (op0, 0), op1)
3071 && ! side_effects_p (op1))
3072 return simplify_gen_binary (AND, mode,
3073 simplify_gen_unary (NOT, mode,
3074 XEXP (op0, 1), mode),
3075 op1);
3076
3077 if (GET_CODE (op0) == XOR
3078 && rtx_equal_p (XEXP (op0, 1), op1)
3079 && ! side_effects_p (op1))
3080 return simplify_gen_binary (AND, mode,
3081 simplify_gen_unary (NOT, mode,
3082 XEXP (op0, 0), mode),
3083 op1);
3084
3085 /* Similarly for (~(A ^ B)) & A. */
3086 if (GET_CODE (op0) == NOT
3087 && GET_CODE (XEXP (op0, 0)) == XOR
3088 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3089 && ! side_effects_p (op1))
3090 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3091
3092 if (GET_CODE (op0) == NOT
3093 && GET_CODE (XEXP (op0, 0)) == XOR
3094 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3095 && ! side_effects_p (op1))
3096 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3097
3098 /* Convert (A | B) & A to A. */
3099 if (GET_CODE (op0) == IOR
3100 && (rtx_equal_p (XEXP (op0, 0), op1)
3101 || rtx_equal_p (XEXP (op0, 1), op1))
3102 && ! side_effects_p (XEXP (op0, 0))
3103 && ! side_effects_p (XEXP (op0, 1)))
3104 return op1;
3105
3106 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3107 ((A & N) + B) & M -> (A + B) & M
3108 Similarly if (N & M) == 0,
3109 ((A | N) + B) & M -> (A + B) & M
3110 and for - instead of + and/or ^ instead of |.
3111 Also, if (N & M) == 0, then
3112 (A +- N) & M -> A & M. */
3113 if (CONST_INT_P (trueop1)
3114 && HWI_COMPUTABLE_MODE_P (mode)
3115 && ~UINTVAL (trueop1)
3116 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3117 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3118 {
3119 rtx pmop[2];
3120 int which;
3121
3122 pmop[0] = XEXP (op0, 0);
3123 pmop[1] = XEXP (op0, 1);
3124
3125 if (CONST_INT_P (pmop[1])
3126 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3127 return simplify_gen_binary (AND, mode, pmop[0], op1);
3128
3129 for (which = 0; which < 2; which++)
3130 {
3131 tem = pmop[which];
3132 switch (GET_CODE (tem))
3133 {
3134 case AND:
3135 if (CONST_INT_P (XEXP (tem, 1))
3136 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3137 == UINTVAL (trueop1))
3138 pmop[which] = XEXP (tem, 0);
3139 break;
3140 case IOR:
3141 case XOR:
3142 if (CONST_INT_P (XEXP (tem, 1))
3143 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3144 pmop[which] = XEXP (tem, 0);
3145 break;
3146 default:
3147 break;
3148 }
3149 }
3150
3151 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3152 {
3153 tem = simplify_gen_binary (GET_CODE (op0), mode,
3154 pmop[0], pmop[1]);
3155 return simplify_gen_binary (code, mode, tem, op1);
3156 }
3157 }
3158
3159 /* (and X (ior (not X) Y)) -> (and X Y) */
3160 if (GET_CODE (op1) == IOR
3161 && GET_CODE (XEXP (op1, 0)) == NOT
3162 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3163 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3164
3165 /* (and (ior (not X) Y) X) -> (and X Y) */
3166 if (GET_CODE (op0) == IOR
3167 && GET_CODE (XEXP (op0, 0)) == NOT
3168 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3169 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3170
3171 /* (and X (ior Y (not X))) -> (and X Y) */
3172 if (GET_CODE (op1) == IOR
3173 && GET_CODE (XEXP (op1, 1)) == NOT
3174 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3175 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3176
3177 /* (and (ior Y (not X)) X) -> (and X Y) */
3178 if (GET_CODE (op0) == IOR
3179 && GET_CODE (XEXP (op0, 1)) == NOT
3180 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3181 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3182
3183 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3184 if (tem)
3185 return tem;
3186
3187 tem = simplify_associative_operation (code, mode, op0, op1);
3188 if (tem)
3189 return tem;
3190 break;
3191
3192 case UDIV:
3193 /* 0/x is 0 (or x&0 if x has side-effects). */
3194 if (trueop0 == CONST0_RTX (mode)
3195 && !cfun->can_throw_non_call_exceptions)
3196 {
3197 if (side_effects_p (op1))
3198 return simplify_gen_binary (AND, mode, op1, trueop0);
3199 return trueop0;
3200 }
3201 /* x/1 is x. */
3202 if (trueop1 == CONST1_RTX (mode))
3203 {
3204 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3205 if (tem)
3206 return tem;
3207 }
3208 /* Convert divide by power of two into shift. */
3209 if (CONST_INT_P (trueop1)
3210 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3211 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3212 break;
3213
3214 case DIV:
3215 /* Handle floating point and integers separately. */
3216 if (SCALAR_FLOAT_MODE_P (mode))
3217 {
3218 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3219 safe for modes with NaNs, since 0.0 / 0.0 will then be
3220 NaN rather than 0.0. Nor is it safe for modes with signed
3221 zeros, since dividing 0 by a negative number gives -0.0. */
3222 if (trueop0 == CONST0_RTX (mode)
3223 && !HONOR_NANS (mode)
3224 && !HONOR_SIGNED_ZEROS (mode)
3225 && ! side_effects_p (op1))
3226 return op0;
3227 /* x/1.0 is x. */
3228 if (trueop1 == CONST1_RTX (mode)
3229 && !HONOR_SNANS (mode))
3230 return op0;
3231
3232 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3233 && trueop1 != CONST0_RTX (mode))
3234 {
3235 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3236
3237 /* x/-1.0 is -x. */
3238 if (real_equal (d1, &dconstm1)
3239 && !HONOR_SNANS (mode))
3240 return simplify_gen_unary (NEG, mode, op0, mode);
3241
3242 /* Change FP division by a constant into multiplication.
3243 Only do this with -freciprocal-math. */
3244 if (flag_reciprocal_math
3245 && !real_equal (d1, &dconst0))
3246 {
3247 REAL_VALUE_TYPE d;
3248 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3249 tem = const_double_from_real_value (d, mode);
3250 return simplify_gen_binary (MULT, mode, op0, tem);
3251 }
3252 }
3253 }
3254 else if (SCALAR_INT_MODE_P (mode))
3255 {
3256 /* 0/x is 0 (or x&0 if x has side-effects). */
3257 if (trueop0 == CONST0_RTX (mode)
3258 && !cfun->can_throw_non_call_exceptions)
3259 {
3260 if (side_effects_p (op1))
3261 return simplify_gen_binary (AND, mode, op1, trueop0);
3262 return trueop0;
3263 }
3264 /* x/1 is x. */
3265 if (trueop1 == CONST1_RTX (mode))
3266 {
3267 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3268 if (tem)
3269 return tem;
3270 }
3271 /* x/-1 is -x. */
3272 if (trueop1 == constm1_rtx)
3273 {
3274 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3275 if (x)
3276 return simplify_gen_unary (NEG, mode, x, mode);
3277 }
3278 }
3279 break;
3280
3281 case UMOD:
3282 /* 0%x is 0 (or x&0 if x has side-effects). */
3283 if (trueop0 == CONST0_RTX (mode))
3284 {
3285 if (side_effects_p (op1))
3286 return simplify_gen_binary (AND, mode, op1, trueop0);
3287 return trueop0;
3288 }
3289 /* x%1 is 0 (or x&0 if x has side-effects). */
3290 if (trueop1 == CONST1_RTX (mode))
3291 {
3292 if (side_effects_p (op0))
3293 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3294 return CONST0_RTX (mode);
3295 }
3296 /* Implement modulus by power of two as AND. */
3297 if (CONST_INT_P (trueop1)
3298 && exact_log2 (UINTVAL (trueop1)) > 0)
3299 return simplify_gen_binary (AND, mode, op0,
3300 gen_int_mode (INTVAL (op1) - 1, mode));
3301 break;
3302
3303 case MOD:
3304 /* 0%x is 0 (or x&0 if x has side-effects). */
3305 if (trueop0 == CONST0_RTX (mode))
3306 {
3307 if (side_effects_p (op1))
3308 return simplify_gen_binary (AND, mode, op1, trueop0);
3309 return trueop0;
3310 }
3311 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3312 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3313 {
3314 if (side_effects_p (op0))
3315 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3316 return CONST0_RTX (mode);
3317 }
3318 break;
3319
3320 case ROTATERT:
3321 case ROTATE:
3322 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3323 prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3324 bitsize - 1, rotate in the other direction by an amount of
3325 1 .. bitsize / 2 - 1 instead. */
3326 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3327 if (CONST_INT_P (trueop1)
3328 && IN_RANGE (INTVAL (trueop1),
3329 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3330 GET_MODE_UNIT_PRECISION (mode) - 1))
3331 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3332 mode, op0,
3333 GEN_INT (GET_MODE_UNIT_PRECISION (mode)
3334 - INTVAL (trueop1)));
3335 #endif
3336 /* FALLTHRU */
3337 case ASHIFTRT:
3338 if (trueop1 == CONST0_RTX (mode))
3339 return op0;
3340 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3341 return op0;
3342 /* Rotating ~0 always results in ~0. */
3343 if (CONST_INT_P (trueop0)
3344 && HWI_COMPUTABLE_MODE_P (mode)
3345 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3346 && ! side_effects_p (op1))
3347 return op0;
3348
3349 canonicalize_shift:
3350 /* Given:
3351 scalar modes M1, M2
3352 scalar constants c1, c2
3353 size (M2) > size (M1)
3354 c1 == size (M2) - size (M1)
3355 optimize:
3356 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3357 <low_part>)
3358 (const_int <c2>))
3359 to:
3360 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3361 <low_part>). */
3362 if ((code == ASHIFTRT || code == LSHIFTRT)
3363 && is_a <scalar_int_mode> (mode, &int_mode)
3364 && SUBREG_P (op0)
3365 && CONST_INT_P (op1)
3366 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3367 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3368 &inner_mode)
3369 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3370 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3371 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3372 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3373 && subreg_lowpart_p (op0))
3374 {
3375 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3376 + INTVAL (op1));
3377 tmp = simplify_gen_binary (code, inner_mode,
3378 XEXP (SUBREG_REG (op0), 0),
3379 tmp);
3380 return lowpart_subreg (int_mode, tmp, inner_mode);
3381 }
3382
3383 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3384 {
3385 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3386 if (val != INTVAL (op1))
3387 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3388 }
3389 break;
3390
3391 case ASHIFT:
3392 case SS_ASHIFT:
3393 case US_ASHIFT:
3394 if (trueop1 == CONST0_RTX (mode))
3395 return op0;
3396 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3397 return op0;
3398 goto canonicalize_shift;
3399
3400 case LSHIFTRT:
3401 if (trueop1 == CONST0_RTX (mode))
3402 return op0;
3403 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3404 return op0;
3405 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3406 if (GET_CODE (op0) == CLZ
3407 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3408 && CONST_INT_P (trueop1)
3409 && STORE_FLAG_VALUE == 1
3410 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3411 {
3412 unsigned HOST_WIDE_INT zero_val = 0;
3413
3414 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3415 && zero_val == GET_MODE_PRECISION (inner_mode)
3416 && INTVAL (trueop1) == exact_log2 (zero_val))
3417 return simplify_gen_relational (EQ, mode, inner_mode,
3418 XEXP (op0, 0), const0_rtx);
3419 }
3420 goto canonicalize_shift;
3421
3422 case SMIN:
3423 if (HWI_COMPUTABLE_MODE_P (mode)
3424 && mode_signbit_p (mode, trueop1)
3425 && ! side_effects_p (op0))
3426 return op1;
3427 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3428 return op0;
3429 tem = simplify_associative_operation (code, mode, op0, op1);
3430 if (tem)
3431 return tem;
3432 break;
3433
3434 case SMAX:
3435 if (HWI_COMPUTABLE_MODE_P (mode)
3436 && CONST_INT_P (trueop1)
3437 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3438 && ! side_effects_p (op0))
3439 return op1;
3440 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3441 return op0;
3442 tem = simplify_associative_operation (code, mode, op0, op1);
3443 if (tem)
3444 return tem;
3445 break;
3446
3447 case UMIN:
3448 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3449 return op1;
3450 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3451 return op0;
3452 tem = simplify_associative_operation (code, mode, op0, op1);
3453 if (tem)
3454 return tem;
3455 break;
3456
3457 case UMAX:
3458 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3459 return op1;
3460 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3461 return op0;
3462 tem = simplify_associative_operation (code, mode, op0, op1);
3463 if (tem)
3464 return tem;
3465 break;
3466
3467 case SS_PLUS:
3468 case US_PLUS:
3469 case SS_MINUS:
3470 case US_MINUS:
3471 case SS_MULT:
3472 case US_MULT:
3473 case SS_DIV:
3474 case US_DIV:
3475 /* ??? There are simplifications that can be done. */
3476 return 0;
3477
3478 case VEC_SELECT:
3479 if (!VECTOR_MODE_P (mode))
3480 {
3481 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3482 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3483 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3484 gcc_assert (XVECLEN (trueop1, 0) == 1);
3485 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3486
3487 if (GET_CODE (trueop0) == CONST_VECTOR)
3488 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3489 (trueop1, 0, 0)));
3490
3491 /* Extract a scalar element from a nested VEC_SELECT expression
3492 (with an optional nested VEC_CONCAT expression). Some targets
3493 (i386) extract a scalar element from a vector using a chain of
3494 nested VEC_SELECT expressions. When the input operand is a memory
3495 operand, this operation can be simplified to a simple scalar
3496 load from an offsetted memory address. */
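 /* For example (a sketch, assuming V4SImode and V2SImode operands):
 (vec_select:SI
 (vec_select:V2SI (reg:V4SI x) (parallel [(const_int 2) (const_int 3)]))
 (parallel [(const_int 1)]))
 picks element 3 of x and is rewritten as
 (vec_select:SI (reg:V4SI x) (parallel [(const_int 3)])).  */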
3497 if (GET_CODE (trueop0) == VEC_SELECT)
3498 {
3499 rtx op0 = XEXP (trueop0, 0);
3500 rtx op1 = XEXP (trueop0, 1);
3501
3502 machine_mode opmode = GET_MODE (op0);
3503 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3504 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3505
3506 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3507 int elem;
3508
3509 rtvec vec;
3510 rtx tmp_op, tmp;
3511
3512 gcc_assert (GET_CODE (op1) == PARALLEL);
3513 gcc_assert (i < n_elts);
3514
3515 /* Select the element pointed to by the nested selector. */
3516 elem = INTVAL (XVECEXP (op1, 0, i));
3517
3518 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3519 if (GET_CODE (op0) == VEC_CONCAT)
3520 {
3521 rtx op00 = XEXP (op0, 0);
3522 rtx op01 = XEXP (op0, 1);
3523
3524 machine_mode mode00, mode01;
3525 int n_elts00, n_elts01;
3526
3527 mode00 = GET_MODE (op00);
3528 mode01 = GET_MODE (op01);
3529
3530 /* Find out number of elements of each operand. */
3531 if (VECTOR_MODE_P (mode00))
3532 {
3533 elt_size = GET_MODE_UNIT_SIZE (mode00);
3534 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3535 }
3536 else
3537 n_elts00 = 1;
3538
3539 if (VECTOR_MODE_P (mode01))
3540 {
3541 elt_size = GET_MODE_UNIT_SIZE (mode01);
3542 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3543 }
3544 else
3545 n_elts01 = 1;
3546
3547 gcc_assert (n_elts == n_elts00 + n_elts01);
3548
3549 /* Select correct operand of VEC_CONCAT
3550 and adjust selector. */
3551 if (elem < n_elts01)
3552 tmp_op = op00;
3553 else
3554 {
3555 tmp_op = op01;
3556 elem -= n_elts00;
3557 }
3558 }
3559 else
3560 tmp_op = op0;
3561
3562 vec = rtvec_alloc (1);
3563 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3564
3565 tmp = gen_rtx_fmt_ee (code, mode,
3566 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3567 return tmp;
3568 }
3569 if (GET_CODE (trueop0) == VEC_DUPLICATE
3570 && GET_MODE (XEXP (trueop0, 0)) == mode)
3571 return XEXP (trueop0, 0);
3572 }
3573 else
3574 {
3575 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3576 gcc_assert (GET_MODE_INNER (mode)
3577 == GET_MODE_INNER (GET_MODE (trueop0)));
3578 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3579
3580 if (GET_CODE (trueop0) == CONST_VECTOR)
3581 {
3582 int elt_size = GET_MODE_UNIT_SIZE (mode);
3583 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3584 rtvec v = rtvec_alloc (n_elts);
3585 unsigned int i;
3586
3587 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3588 for (i = 0; i < n_elts; i++)
3589 {
3590 rtx x = XVECEXP (trueop1, 0, i);
3591
3592 gcc_assert (CONST_INT_P (x));
3593 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3594 INTVAL (x));
3595 }
3596
3597 return gen_rtx_CONST_VECTOR (mode, v);
3598 }
3599
3600 /* Recognize the identity. */
3601 if (GET_MODE (trueop0) == mode)
3602 {
3603 bool maybe_ident = true;
3604 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3605 {
3606 rtx j = XVECEXP (trueop1, 0, i);
3607 if (!CONST_INT_P (j) || INTVAL (j) != i)
3608 {
3609 maybe_ident = false;
3610 break;
3611 }
3612 }
3613 if (maybe_ident)
3614 return trueop0;
3615 }
3616
3617 /* If we build {a,b} then permute it, build the result directly. */
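 /* E.g. (a sketch for a two-element vector mode such as V2DF):
 (vec_select:V2DF
 (vec_concat:V4DF (vec_concat:V2DF a b) (vec_concat:V2DF c d))
 (parallel [(const_int 3) (const_int 0)]))
 is rebuilt directly as (vec_concat:V2DF d a).  */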
3618 if (XVECLEN (trueop1, 0) == 2
3619 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3620 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3621 && GET_CODE (trueop0) == VEC_CONCAT
3622 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3623 && GET_MODE (XEXP (trueop0, 0)) == mode
3624 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3625 && GET_MODE (XEXP (trueop0, 1)) == mode)
3626 {
3627 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3628 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3629 rtx subop0, subop1;
3630
3631 gcc_assert (i0 < 4 && i1 < 4);
3632 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3633 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3634
3635 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3636 }
3637
3638 if (XVECLEN (trueop1, 0) == 2
3639 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3640 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3641 && GET_CODE (trueop0) == VEC_CONCAT
3642 && GET_MODE (trueop0) == mode)
3643 {
3644 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3645 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3646 rtx subop0, subop1;
3647
3648 gcc_assert (i0 < 2 && i1 < 2);
3649 subop0 = XEXP (trueop0, i0);
3650 subop1 = XEXP (trueop0, i1);
3651
3652 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3653 }
3654
3655 /* If we select one half of a vec_concat, return that. */
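 /* E.g. (a sketch with V2SI halves of a V4SI concatenation):
 (vec_select:V2SI (vec_concat:V4SI (reg:V2SI a) (reg:V2SI b))
 (parallel [(const_int 2) (const_int 3)]))
 simplifies to b.  */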
3656 if (GET_CODE (trueop0) == VEC_CONCAT
3657 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3658 {
3659 rtx subop0 = XEXP (trueop0, 0);
3660 rtx subop1 = XEXP (trueop0, 1);
3661 machine_mode mode0 = GET_MODE (subop0);
3662 machine_mode mode1 = GET_MODE (subop1);
3663 int li = GET_MODE_UNIT_SIZE (mode0);
3664 int l0 = GET_MODE_SIZE (mode0) / li;
3665 int l1 = GET_MODE_SIZE (mode1) / li;
3666 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3667 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3668 {
3669 bool success = true;
3670 for (int i = 1; i < l0; ++i)
3671 {
3672 rtx j = XVECEXP (trueop1, 0, i);
3673 if (!CONST_INT_P (j) || INTVAL (j) != i)
3674 {
3675 success = false;
3676 break;
3677 }
3678 }
3679 if (success)
3680 return subop0;
3681 }
3682 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3683 {
3684 bool success = true;
3685 for (int i = 1; i < l1; ++i)
3686 {
3687 rtx j = XVECEXP (trueop1, 0, i);
3688 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3689 {
3690 success = false;
3691 break;
3692 }
3693 }
3694 if (success)
3695 return subop1;
3696 }
3697 }
3698 }
3699
3700 if (XVECLEN (trueop1, 0) == 1
3701 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3702 && GET_CODE (trueop0) == VEC_CONCAT)
3703 {
3704 rtx vec = trueop0;
3705 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3706
3707 /* Try to find the element in the VEC_CONCAT. */
3708 while (GET_MODE (vec) != mode
3709 && GET_CODE (vec) == VEC_CONCAT)
3710 {
3711 HOST_WIDE_INT vec_size;
3712
3713 if (CONST_INT_P (XEXP (vec, 0)))
3714 {
3715 /* vec_concat of two const_ints doesn't make sense with
3716 respect to modes. */
3717 if (CONST_INT_P (XEXP (vec, 1)))
3718 return 0;
3719
3720 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3721 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3722 }
3723 else
3724 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3725
3726 if (offset < vec_size)
3727 vec = XEXP (vec, 0);
3728 else
3729 {
3730 offset -= vec_size;
3731 vec = XEXP (vec, 1);
3732 }
3733 vec = avoid_constant_pool_reference (vec);
3734 }
3735
3736 if (GET_MODE (vec) == mode)
3737 return vec;
3738 }
3739
3740 /* If we select elements in a vec_merge that all come from the same
3741 operand, select from that operand directly. */
3742 if (GET_CODE (op0) == VEC_MERGE)
3743 {
3744 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3745 if (CONST_INT_P (trueop02))
3746 {
3747 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3748 bool all_operand0 = true;
3749 bool all_operand1 = true;
3750 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3751 {
3752 rtx j = XVECEXP (trueop1, 0, i);
3753 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3754 all_operand1 = false;
3755 else
3756 all_operand0 = false;
3757 }
3758 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3759 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3760 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3761 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3762 }
3763 }
3764
3765 /* If we have two nested selects that are inverses of each
3766 other, replace them with the source operand. */
3767 if (GET_CODE (trueop0) == VEC_SELECT
3768 && GET_MODE (XEXP (trueop0, 0)) == mode)
3769 {
3770 rtx op0_subop1 = XEXP (trueop0, 1);
3771 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3772 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3773
3774 /* Apply the outer ordering vector to the inner one. (The inner
3775 ordering vector is expressly permitted to be of a different
3776 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3777 then the two VEC_SELECTs cancel. */
3778 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3779 {
3780 rtx x = XVECEXP (trueop1, 0, i);
3781 if (!CONST_INT_P (x))
3782 return 0;
3783 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3784 if (!CONST_INT_P (y) || i != INTVAL (y))
3785 return 0;
3786 }
3787 return XEXP (trueop0, 0);
3788 }
3789
3790 return 0;
3791 case VEC_CONCAT:
3792 {
3793 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3794 ? GET_MODE (trueop0)
3795 : GET_MODE_INNER (mode));
3796 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3797 ? GET_MODE (trueop1)
3798 : GET_MODE_INNER (mode));
3799
3800 gcc_assert (VECTOR_MODE_P (mode));
3801 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3802 == GET_MODE_SIZE (mode));
3803
3804 if (VECTOR_MODE_P (op0_mode))
3805 gcc_assert (GET_MODE_INNER (mode)
3806 == GET_MODE_INNER (op0_mode));
3807 else
3808 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3809
3810 if (VECTOR_MODE_P (op1_mode))
3811 gcc_assert (GET_MODE_INNER (mode)
3812 == GET_MODE_INNER (op1_mode));
3813 else
3814 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3815
3816 if ((GET_CODE (trueop0) == CONST_VECTOR
3817 || CONST_SCALAR_INT_P (trueop0)
3818 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3819 && (GET_CODE (trueop1) == CONST_VECTOR
3820 || CONST_SCALAR_INT_P (trueop1)
3821 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3822 {
3823 int elt_size = GET_MODE_UNIT_SIZE (mode);
3824 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3825 rtvec v = rtvec_alloc (n_elts);
3826 unsigned int i;
3827 unsigned in_n_elts = 1;
3828
3829 if (VECTOR_MODE_P (op0_mode))
3830 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3831 for (i = 0; i < n_elts; i++)
3832 {
3833 if (i < in_n_elts)
3834 {
3835 if (!VECTOR_MODE_P (op0_mode))
3836 RTVEC_ELT (v, i) = trueop0;
3837 else
3838 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3839 }
3840 else
3841 {
3842 if (!VECTOR_MODE_P (op1_mode))
3843 RTVEC_ELT (v, i) = trueop1;
3844 else
3845 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3846 i - in_n_elts);
3847 }
3848 }
3849
3850 return gen_rtx_CONST_VECTOR (mode, v);
3851 }
3852
3853 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3854 Restrict the transformation to avoid generating a VEC_SELECT with a
3855 mode unrelated to its operand. */
3856 if (GET_CODE (trueop0) == VEC_SELECT
3857 && GET_CODE (trueop1) == VEC_SELECT
3858 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3859 && GET_MODE (XEXP (trueop0, 0)) == mode)
3860 {
3861 rtx par0 = XEXP (trueop0, 1);
3862 rtx par1 = XEXP (trueop1, 1);
3863 int len0 = XVECLEN (par0, 0);
3864 int len1 = XVECLEN (par1, 0);
3865 rtvec vec = rtvec_alloc (len0 + len1);
3866 for (int i = 0; i < len0; i++)
3867 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3868 for (int i = 0; i < len1; i++)
3869 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3870 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3871 gen_rtx_PARALLEL (VOIDmode, vec));
3872 }
3873 }
3874 return 0;
3875
3876 default:
3877 gcc_unreachable ();
3878 }
3879
3880 return 0;
3881 }
3882
3883 rtx
3884 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3885 rtx op0, rtx op1)
3886 {
3887 if (VECTOR_MODE_P (mode)
3888 && code != VEC_CONCAT
3889 && GET_CODE (op0) == CONST_VECTOR
3890 && GET_CODE (op1) == CONST_VECTOR)
3891 {
3892 unsigned n_elts = GET_MODE_NUNITS (mode);
3893 machine_mode op0mode = GET_MODE (op0);
3894 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3895 machine_mode op1mode = GET_MODE (op1);
3896 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3897 rtvec v = rtvec_alloc (n_elts);
3898 unsigned int i;
3899
3900 gcc_assert (op0_n_elts == n_elts);
3901 gcc_assert (op1_n_elts == n_elts);
3902 for (i = 0; i < n_elts; i++)
3903 {
3904 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3905 CONST_VECTOR_ELT (op0, i),
3906 CONST_VECTOR_ELT (op1, i));
3907 if (!x)
3908 return 0;
3909 RTVEC_ELT (v, i) = x;
3910 }
3911
3912 return gen_rtx_CONST_VECTOR (mode, v);
3913 }
3914
3915 if (VECTOR_MODE_P (mode)
3916 && code == VEC_CONCAT
3917 && (CONST_SCALAR_INT_P (op0)
3918 || GET_CODE (op0) == CONST_FIXED
3919 || CONST_DOUBLE_AS_FLOAT_P (op0))
3920 && (CONST_SCALAR_INT_P (op1)
3921 || CONST_DOUBLE_AS_FLOAT_P (op1)
3922 || GET_CODE (op1) == CONST_FIXED))
3923 {
3924 unsigned n_elts = GET_MODE_NUNITS (mode);
3925 rtvec v = rtvec_alloc (n_elts);
3926
3927 gcc_assert (n_elts >= 2);
3928 if (n_elts == 2)
3929 {
3930 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3931 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3932
3933 RTVEC_ELT (v, 0) = op0;
3934 RTVEC_ELT (v, 1) = op1;
3935 }
3936 else
3937 {
3938 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3939 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3940 unsigned i;
3941
3942 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3943 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3944 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3945
3946 for (i = 0; i < op0_n_elts; ++i)
3947 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3948 for (i = 0; i < op1_n_elts; ++i)
3949 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3950 }
3951
3952 return gen_rtx_CONST_VECTOR (mode, v);
3953 }
3954
3955 if (SCALAR_FLOAT_MODE_P (mode)
3956 && CONST_DOUBLE_AS_FLOAT_P (op0)
3957 && CONST_DOUBLE_AS_FLOAT_P (op1)
3958 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3959 {
3960 if (code == AND
3961 || code == IOR
3962 || code == XOR)
3963 {
3964 long tmp0[4];
3965 long tmp1[4];
3966 REAL_VALUE_TYPE r;
3967 int i;
3968
3969 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3970 GET_MODE (op0));
3971 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3972 GET_MODE (op1));
3973 for (i = 0; i < 4; i++)
3974 {
3975 switch (code)
3976 {
3977 case AND:
3978 tmp0[i] &= tmp1[i];
3979 break;
3980 case IOR:
3981 tmp0[i] |= tmp1[i];
3982 break;
3983 case XOR:
3984 tmp0[i] ^= tmp1[i];
3985 break;
3986 default:
3987 gcc_unreachable ();
3988 }
3989 }
3990 real_from_target (&r, tmp0, mode);
3991 return const_double_from_real_value (r, mode);
3992 }
3993 else
3994 {
3995 REAL_VALUE_TYPE f0, f1, value, result;
3996 const REAL_VALUE_TYPE *opr0, *opr1;
3997 bool inexact;
3998
3999 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4000 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4001
4002 if (HONOR_SNANS (mode)
4003 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4004 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4005 return 0;
4006
4007 real_convert (&f0, mode, opr0);
4008 real_convert (&f1, mode, opr1);
4009
4010 if (code == DIV
4011 && real_equal (&f1, &dconst0)
4012 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4013 return 0;
4014
4015 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4016 && flag_trapping_math
4017 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4018 {
4019 int s0 = REAL_VALUE_NEGATIVE (f0);
4020 int s1 = REAL_VALUE_NEGATIVE (f1);
4021
4022 switch (code)
4023 {
4024 case PLUS:
4025 /* Inf + -Inf = NaN plus exception. */
4026 if (s0 != s1)
4027 return 0;
4028 break;
4029 case MINUS:
4030 /* Inf - Inf = NaN plus exception. */
4031 if (s0 == s1)
4032 return 0;
4033 break;
4034 case DIV:
4035 /* Inf / Inf = NaN plus exception. */
4036 return 0;
4037 default:
4038 break;
4039 }
4040 }
4041
4042 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4043 && flag_trapping_math
4044 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4045 || (REAL_VALUE_ISINF (f1)
4046 && real_equal (&f0, &dconst0))))
4047 /* Inf * 0 = NaN plus exception. */
4048 return 0;
4049
4050 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4051 &f0, &f1);
4052 real_convert (&result, mode, &value);
4053
4054 /* Don't constant fold this floating point operation if
4055 the result has overflowed and flag_trapping_math is set. */
4056
4057 if (flag_trapping_math
4058 && MODE_HAS_INFINITIES (mode)
4059 && REAL_VALUE_ISINF (result)
4060 && !REAL_VALUE_ISINF (f0)
4061 && !REAL_VALUE_ISINF (f1))
4062 /* Overflow plus exception. */
4063 return 0;
4064
4065 /* Don't constant fold this floating point operation if the
4066 result may depend upon the run-time rounding mode and
4067 flag_rounding_math is set, or if GCC's software emulation
4068 is unable to accurately represent the result. */
4069
4070 if ((flag_rounding_math
4071 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4072 && (inexact || !real_identical (&result, &value)))
4073 return NULL_RTX;
4074
4075 return const_double_from_real_value (result, mode);
4076 }
4077 }
4078
4079 /* We can fold some multi-word operations. */
4080 scalar_int_mode int_mode;
4081 if (is_a <scalar_int_mode> (mode, &int_mode)
4082 && CONST_SCALAR_INT_P (op0)
4083 && CONST_SCALAR_INT_P (op1))
4084 {
4085 wide_int result;
4086 bool overflow;
4087 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4088 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4089
4090 #if TARGET_SUPPORTS_WIDE_INT == 0
4091 /* This assert keeps the simplification from producing a result
4092 that cannot be represented in a CONST_DOUBLE, but a lot of
4093 upstream callers expect that this function never fails to
4094 simplify something, so if you added this to the test
4095 above, the code would die later anyway. If this assert
4096 triggers, you just need to make the port support wide int. */
4097 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4098 #endif
4099 switch (code)
4100 {
4101 case MINUS:
4102 result = wi::sub (pop0, pop1);
4103 break;
4104
4105 case PLUS:
4106 result = wi::add (pop0, pop1);
4107 break;
4108
4109 case MULT:
4110 result = wi::mul (pop0, pop1);
4111 break;
4112
4113 case DIV:
4114 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4115 if (overflow)
4116 return NULL_RTX;
4117 break;
4118
4119 case MOD:
4120 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4121 if (overflow)
4122 return NULL_RTX;
4123 break;
4124
4125 case UDIV:
4126 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4127 if (overflow)
4128 return NULL_RTX;
4129 break;
4130
4131 case UMOD:
4132 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4133 if (overflow)
4134 return NULL_RTX;
4135 break;
4136
4137 case AND:
4138 result = wi::bit_and (pop0, pop1);
4139 break;
4140
4141 case IOR:
4142 result = wi::bit_or (pop0, pop1);
4143 break;
4144
4145 case XOR:
4146 result = wi::bit_xor (pop0, pop1);
4147 break;
4148
4149 case SMIN:
4150 result = wi::smin (pop0, pop1);
4151 break;
4152
4153 case SMAX:
4154 result = wi::smax (pop0, pop1);
4155 break;
4156
4157 case UMIN:
4158 result = wi::umin (pop0, pop1);
4159 break;
4160
4161 case UMAX:
4162 result = wi::umax (pop0, pop1);
4163 break;
4164
4165 case LSHIFTRT:
4166 case ASHIFTRT:
4167 case ASHIFT:
4168 {
4169 wide_int wop1 = pop1;
4170 if (SHIFT_COUNT_TRUNCATED)
4171 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4172 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4173 return NULL_RTX;
4174
4175 switch (code)
4176 {
4177 case LSHIFTRT:
4178 result = wi::lrshift (pop0, wop1);
4179 break;
4180
4181 case ASHIFTRT:
4182 result = wi::arshift (pop0, wop1);
4183 break;
4184
4185 case ASHIFT:
4186 result = wi::lshift (pop0, wop1);
4187 break;
4188
4189 default:
4190 gcc_unreachable ();
4191 }
4192 break;
4193 }
4194 case ROTATE:
4195 case ROTATERT:
4196 {
4197 if (wi::neg_p (pop1))
4198 return NULL_RTX;
4199
4200 switch (code)
4201 {
4202 case ROTATE:
4203 result = wi::lrotate (pop0, pop1);
4204 break;
4205
4206 case ROTATERT:
4207 result = wi::rrotate (pop0, pop1);
4208 break;
4209
4210 default:
4211 gcc_unreachable ();
4212 }
4213 break;
4214 }
4215 default:
4216 return NULL_RTX;
4217 }
4218 return immed_wide_int_const (result, int_mode);
4219 }
4220
4221 return NULL_RTX;
4222 }
4223
4224
4225 \f
4226 /* Return a positive integer if X should sort after Y. The value
4227 returned is 1 if and only if X and Y are both regs. */
4228
4229 static int
4230 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4231 {
4232 int result;
4233
4234 result = (commutative_operand_precedence (y)
4235 - commutative_operand_precedence (x));
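 /* Double the precedence difference so that a nonzero value returned
 here can never be 1; the caller treats a return of exactly 1 (two
 REGs, see below) as mere register re-ordering rather than
 canonicalization.  */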
4236 if (result)
4237 return result + result;
4238
4239 /* Group together equal REGs to do more simplification. */
4240 if (REG_P (x) && REG_P (y))
4241 return REGNO (x) > REGNO (y);
4242
4243 return 0;
4244 }
4245
4246 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4247 operands may be another PLUS or MINUS.
4248
4249 Rather than test for specific cases, we do this by a brute-force method
4250 and do all possible simplifications until no more changes occur. Then
4251 we rebuild the operation.
4252
4253 May return NULL_RTX when no changes were made. */
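 /* For example (an illustrative sketch), simplify_plus_minus (PLUS, mode,
 (minus a b), b) flattens the input into the terms { a, -b, b },
 cancels -b against b, and returns just a.  */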
4254
4255 static rtx
4256 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4257 rtx op1)
4258 {
4259 struct simplify_plus_minus_op_data
4260 {
4261 rtx op;
4262 short neg;
4263 } ops[16];
4264 rtx result, tem;
4265 int n_ops = 2;
4266 int changed, n_constants, canonicalized = 0;
4267 int i, j;
4268
4269 memset (ops, 0, sizeof ops);
4270
4271 /* Set up the two operands and then expand them until nothing has been
4272 changed. If we run out of room in our array, give up; this should
4273 almost never happen. */
4274
4275 ops[0].op = op0;
4276 ops[0].neg = 0;
4277 ops[1].op = op1;
4278 ops[1].neg = (code == MINUS);
4279
4280 do
4281 {
4282 changed = 0;
4283 n_constants = 0;
4284
4285 for (i = 0; i < n_ops; i++)
4286 {
4287 rtx this_op = ops[i].op;
4288 int this_neg = ops[i].neg;
4289 enum rtx_code this_code = GET_CODE (this_op);
4290
4291 switch (this_code)
4292 {
4293 case PLUS:
4294 case MINUS:
4295 if (n_ops == ARRAY_SIZE (ops))
4296 return NULL_RTX;
4297
4298 ops[n_ops].op = XEXP (this_op, 1);
4299 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4300 n_ops++;
4301
4302 ops[i].op = XEXP (this_op, 0);
4303 changed = 1;
4304 /* If this operand was negated then we will potentially
4305 canonicalize the expression. Similarly, if we don't
4306 place the operands adjacent, we're re-ordering the
4307 expression and thus might be performing a
4308 canonicalization. Ignore register re-ordering.
4309 ??? It might be better to shuffle the ops array here,
4310 but then (plus (plus (A, B), plus (C, D))) wouldn't
4311 be seen as non-canonical. */
4312 if (this_neg
4313 || (i != n_ops - 2
4314 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4315 canonicalized = 1;
4316 break;
4317
4318 case NEG:
4319 ops[i].op = XEXP (this_op, 0);
4320 ops[i].neg = ! this_neg;
4321 changed = 1;
4322 canonicalized = 1;
4323 break;
4324
4325 case CONST:
4326 if (n_ops != ARRAY_SIZE (ops)
4327 && GET_CODE (XEXP (this_op, 0)) == PLUS
4328 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4329 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4330 {
4331 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4332 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4333 ops[n_ops].neg = this_neg;
4334 n_ops++;
4335 changed = 1;
4336 canonicalized = 1;
4337 }
4338 break;
4339
4340 case NOT:
4341 /* ~a -> (-a - 1) */
4342 if (n_ops != ARRAY_SIZE (ops))
4343 {
4344 ops[n_ops].op = CONSTM1_RTX (mode);
4345 ops[n_ops++].neg = this_neg;
4346 ops[i].op = XEXP (this_op, 0);
4347 ops[i].neg = !this_neg;
4348 changed = 1;
4349 canonicalized = 1;
4350 }
4351 break;
4352
4353 case CONST_INT:
4354 n_constants++;
4355 if (this_neg)
4356 {
4357 ops[i].op = neg_const_int (mode, this_op);
4358 ops[i].neg = 0;
4359 changed = 1;
4360 canonicalized = 1;
4361 }
4362 break;
4363
4364 default:
4365 break;
4366 }
4367 }
4368 }
4369 while (changed);
4370
4371 if (n_constants > 1)
4372 canonicalized = 1;
4373
4374 gcc_assert (n_ops >= 2);
4375
4376 /* If we only have two operands, we can avoid the loops. */
4377 if (n_ops == 2)
4378 {
4379 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4380 rtx lhs, rhs;
4381
4382 /* Get the two operands. Be careful with the order, especially for
4383 the cases where code == MINUS. */
4384 if (ops[0].neg && ops[1].neg)
4385 {
4386 lhs = gen_rtx_NEG (mode, ops[0].op);
4387 rhs = ops[1].op;
4388 }
4389 else if (ops[0].neg)
4390 {
4391 lhs = ops[1].op;
4392 rhs = ops[0].op;
4393 }
4394 else
4395 {
4396 lhs = ops[0].op;
4397 rhs = ops[1].op;
4398 }
4399
4400 return simplify_const_binary_operation (code, mode, lhs, rhs);
4401 }
4402
4403 /* Now simplify each pair of operands until nothing changes. */
4404 while (1)
4405 {
4406 /* Insertion sort is good enough for a small array. */
4407 for (i = 1; i < n_ops; i++)
4408 {
4409 struct simplify_plus_minus_op_data save;
4410 int cmp;
4411
4412 j = i - 1;
4413 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4414 if (cmp <= 0)
4415 continue;
4416 /* Just swapping registers doesn't count as canonicalization. */
4417 if (cmp != 1)
4418 canonicalized = 1;
4419
4420 save = ops[i];
4421 do
4422 ops[j + 1] = ops[j];
4423 while (j--
4424 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4425 ops[j + 1] = save;
4426 }
4427
4428 changed = 0;
4429 for (i = n_ops - 1; i > 0; i--)
4430 for (j = i - 1; j >= 0; j--)
4431 {
4432 rtx lhs = ops[j].op, rhs = ops[i].op;
4433 int lneg = ops[j].neg, rneg = ops[i].neg;
4434
4435 if (lhs != 0 && rhs != 0)
4436 {
4437 enum rtx_code ncode = PLUS;
4438
4439 if (lneg != rneg)
4440 {
4441 ncode = MINUS;
4442 if (lneg)
4443 std::swap (lhs, rhs);
4444 }
4445 else if (swap_commutative_operands_p (lhs, rhs))
4446 std::swap (lhs, rhs);
4447
4448 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4449 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4450 {
4451 rtx tem_lhs, tem_rhs;
4452
4453 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4454 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4455 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4456 tem_rhs);
4457
4458 if (tem && !CONSTANT_P (tem))
4459 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4460 }
4461 else
4462 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4463
4464 if (tem)
4465 {
4466 /* Reject "simplifications" that just wrap the two
4467 arguments in a CONST. Failure to do so can result
4468 in infinite recursion with simplify_binary_operation
4469 when it calls us to simplify CONST operations.
4470 Also, if we find such a simplification, don't try
4471 any more combinations with this rhs: We must have
4472 something like symbol+offset, i.e. one of the
4473 trivial CONST expressions we handle later. */
4474 if (GET_CODE (tem) == CONST
4475 && GET_CODE (XEXP (tem, 0)) == ncode
4476 && XEXP (XEXP (tem, 0), 0) == lhs
4477 && XEXP (XEXP (tem, 0), 1) == rhs)
4478 break;
4479 lneg &= rneg;
4480 if (GET_CODE (tem) == NEG)
4481 tem = XEXP (tem, 0), lneg = !lneg;
4482 if (CONST_INT_P (tem) && lneg)
4483 tem = neg_const_int (mode, tem), lneg = 0;
4484
4485 ops[i].op = tem;
4486 ops[i].neg = lneg;
4487 ops[j].op = NULL_RTX;
4488 changed = 1;
4489 canonicalized = 1;
4490 }
4491 }
4492 }
4493
4494 if (!changed)
4495 break;
4496
4497 /* Pack all the operands to the lower-numbered entries. */
4498 for (i = 0, j = 0; j < n_ops; j++)
4499 if (ops[j].op)
4500 {
4501 ops[i] = ops[j];
4502 i++;
4503 }
4504 n_ops = i;
4505 }
4506
4507 /* If nothing changed, check that rematerialization of rtl instructions
4508 is still required. */
4509 if (!canonicalized)
4510 {
4511 /* Perform rematerialization only if all operands are registers and
4512 all operations are PLUS. */
4513 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4514 around rs6000 and how it uses the CA register. See PR67145. */
4515 for (i = 0; i < n_ops; i++)
4516 if (ops[i].neg
4517 || !REG_P (ops[i].op)
4518 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4519 && fixed_regs[REGNO (ops[i].op)]
4520 && !global_regs[REGNO (ops[i].op)]
4521 && ops[i].op != frame_pointer_rtx
4522 && ops[i].op != arg_pointer_rtx
4523 && ops[i].op != stack_pointer_rtx))
4524 return NULL_RTX;
4525 goto gen_result;
4526 }
4527
4528 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4529 if (n_ops == 2
4530 && CONST_INT_P (ops[1].op)
4531 && CONSTANT_P (ops[0].op)
4532 && ops[0].neg)
4533 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4534
4535 /* We suppressed creation of trivial CONST expressions in the
4536 combination loop to avoid recursion. Create one manually now.
4537 The combination loop should have ensured that there is exactly
4538 one CONST_INT, and the sort will have ensured that it is last
4539 in the array and that any other constant will be next-to-last. */
4540
4541 if (n_ops > 1
4542 && CONST_INT_P (ops[n_ops - 1].op)
4543 && CONSTANT_P (ops[n_ops - 2].op))
4544 {
4545 rtx value = ops[n_ops - 1].op;
4546 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4547 value = neg_const_int (mode, value);
4548 if (CONST_INT_P (value))
4549 {
4550 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4551 INTVAL (value));
4552 n_ops--;
4553 }
4554 }
4555
4556 /* Put a non-negated operand first, if possible. */
4557
4558 for (i = 0; i < n_ops && ops[i].neg; i++)
4559 continue;
4560 if (i == n_ops)
4561 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4562 else if (i != 0)
4563 {
4564 tem = ops[0].op;
4565 ops[0] = ops[i];
4566 ops[i].op = tem;
4567 ops[i].neg = 1;
4568 }
4569
4570 /* Now make the result by performing the requested operations. */
4571 gen_result:
4572 result = ops[0].op;
4573 for (i = 1; i < n_ops; i++)
4574 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4575 mode, result, ops[i].op);
4576
4577 return result;
4578 }
4579
4580 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4581 static bool
4582 plus_minus_operand_p (const_rtx x)
4583 {
4584 return GET_CODE (x) == PLUS
4585 || GET_CODE (x) == MINUS
4586 || (GET_CODE (x) == CONST
4587 && GET_CODE (XEXP (x, 0)) == PLUS
4588 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4589 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4590 }
4591
4592 /* Like simplify_binary_operation except used for relational operators.
4593 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4594 not also be VOIDmode.
4595
4596 CMP_MODE specifies the mode in which the comparison is done, so it is
4597 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4598 the operands or, if both are VOIDmode, the operands are compared in
4599 "infinite precision". */
4600 rtx
4601 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4602 machine_mode cmp_mode, rtx op0, rtx op1)
4603 {
4604 rtx tem, trueop0, trueop1;
4605
4606 if (cmp_mode == VOIDmode)
4607 cmp_mode = GET_MODE (op0);
4608 if (cmp_mode == VOIDmode)
4609 cmp_mode = GET_MODE (op1);
4610
4611 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4612 if (tem)
4613 {
4614 if (SCALAR_FLOAT_MODE_P (mode))
4615 {
4616 if (tem == const0_rtx)
4617 return CONST0_RTX (mode);
4618 #ifdef FLOAT_STORE_FLAG_VALUE
4619 {
4620 REAL_VALUE_TYPE val;
4621 val = FLOAT_STORE_FLAG_VALUE (mode);
4622 return const_double_from_real_value (val, mode);
4623 }
4624 #else
4625 return NULL_RTX;
4626 #endif
4627 }
4628 if (VECTOR_MODE_P (mode))
4629 {
4630 if (tem == const0_rtx)
4631 return CONST0_RTX (mode);
4632 #ifdef VECTOR_STORE_FLAG_VALUE
4633 {
4634 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4635 if (val == NULL_RTX)
4636 return NULL_RTX;
4637 if (val == const1_rtx)
4638 return CONST1_RTX (mode);
4639
4640 return gen_const_vec_duplicate (mode, val);
4641 }
4642 #else
4643 return NULL_RTX;
4644 #endif
4645 }
4646
4647 return tem;
4648 }
4649
4650 /* For the following tests, ensure const0_rtx is op1. */
4651 if (swap_commutative_operands_p (op0, op1)
4652 || (op0 == const0_rtx && op1 != const0_rtx))
4653 std::swap (op0, op1), code = swap_condition (code);
4654
4655 /* If op0 is a compare, extract the comparison arguments from it. */
4656 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4657 return simplify_gen_relational (code, mode, VOIDmode,
4658 XEXP (op0, 0), XEXP (op0, 1));
4659
4660 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4661 || CC0_P (op0))
4662 return NULL_RTX;
4663
4664 trueop0 = avoid_constant_pool_reference (op0);
4665 trueop1 = avoid_constant_pool_reference (op1);
4666 return simplify_relational_operation_1 (code, mode, cmp_mode,
4667 trueop0, trueop1);
4668 }
4669
4670 /* This part of simplify_relational_operation is only used when CMP_MODE
4671 is not in class MODE_CC (i.e. it is a real comparison).
4672
4673 MODE is the mode of the result, while CMP_MODE specifies the mode
4674 in which the comparison is done, so it is the mode of the operands. */
4675
4676 static rtx
4677 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4678 machine_mode cmp_mode, rtx op0, rtx op1)
4679 {
4680 enum rtx_code op0code = GET_CODE (op0);
4681
4682 if (op1 == const0_rtx && COMPARISON_P (op0))
4683 {
4684 /* If op0 is a comparison, extract the comparison arguments
4685 from it. */
4686 if (code == NE)
4687 {
4688 if (GET_MODE (op0) == mode)
4689 return simplify_rtx (op0);
4690 else
4691 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4692 XEXP (op0, 0), XEXP (op0, 1));
4693 }
4694 else if (code == EQ)
4695 {
4696 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4697 if (new_code != UNKNOWN)
4698 return simplify_gen_relational (new_code, mode, VOIDmode,
4699 XEXP (op0, 0), XEXP (op0, 1));
4700 }
4701 }
4702
4703 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4704 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4705 if ((code == LTU || code == GEU)
4706 && GET_CODE (op0) == PLUS
4707 && CONST_INT_P (XEXP (op0, 1))
4708 && (rtx_equal_p (op1, XEXP (op0, 0))
4709 || rtx_equal_p (op1, XEXP (op0, 1)))
4710 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4711 && XEXP (op0, 1) != const0_rtx)
4712 {
4713 rtx new_cmp
4714 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4715 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4716 cmp_mode, XEXP (op0, 0), new_cmp);
4717 }
4718
4719 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4720 transformed into (LTU a -C). */
4721 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4722 && CONST_INT_P (XEXP (op0, 1))
4723 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4724 && XEXP (op0, 1) != const0_rtx)
4725 {
4726 rtx new_cmp
4727 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4728 return simplify_gen_relational (LTU, mode, cmp_mode,
4729 XEXP (op0, 0), new_cmp);
4730 }
4731
4732 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4733 if ((code == LTU || code == GEU)
4734 && GET_CODE (op0) == PLUS
4735 && rtx_equal_p (op1, XEXP (op0, 1))
4736 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4737 && !rtx_equal_p (op1, XEXP (op0, 0)))
4738 return simplify_gen_relational (code, mode, cmp_mode, op0,
4739 copy_rtx (XEXP (op0, 0)));
4740
4741 if (op1 == const0_rtx)
4742 {
4743 /* Canonicalize (GTU x 0) as (NE x 0). */
4744 if (code == GTU)
4745 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4746 /* Canonicalize (LEU x 0) as (EQ x 0). */
4747 if (code == LEU)
4748 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4749 }
4750 else if (op1 == const1_rtx)
4751 {
4752 switch (code)
4753 {
4754 case GE:
4755 /* Canonicalize (GE x 1) as (GT x 0). */
4756 return simplify_gen_relational (GT, mode, cmp_mode,
4757 op0, const0_rtx);
4758 case GEU:
4759 /* Canonicalize (GEU x 1) as (NE x 0). */
4760 return simplify_gen_relational (NE, mode, cmp_mode,
4761 op0, const0_rtx);
4762 case LT:
4763 /* Canonicalize (LT x 1) as (LE x 0). */
4764 return simplify_gen_relational (LE, mode, cmp_mode,
4765 op0, const0_rtx);
4766 case LTU:
4767 /* Canonicalize (LTU x 1) as (EQ x 0). */
4768 return simplify_gen_relational (EQ, mode, cmp_mode,
4769 op0, const0_rtx);
4770 default:
4771 break;
4772 }
4773 }
4774 else if (op1 == constm1_rtx)
4775 {
4776 /* Canonicalize (LE x -1) as (LT x 0). */
4777 if (code == LE)
4778 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4779 /* Canonicalize (GT x -1) as (GE x 0). */
4780 if (code == GT)
4781 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4782 }
4783
4784 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4785 if ((code == EQ || code == NE)
4786 && (op0code == PLUS || op0code == MINUS)
4787 && CONSTANT_P (op1)
4788 && CONSTANT_P (XEXP (op0, 1))
4789 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4790 {
4791 rtx x = XEXP (op0, 0);
4792 rtx c = XEXP (op0, 1);
4793 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4794 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4795
4796 /* Detect an infinite recursive condition, where we oscillate at this
4797 simplification case between:
4798 A + B == C <---> C - B == A,
4799 where A, B, and C are all constants with non-simplifiable expressions,
4800 usually SYMBOL_REFs. */
4801 if (GET_CODE (tem) == invcode
4802 && CONSTANT_P (x)
4803 && rtx_equal_p (c, XEXP (tem, 1)))
4804 return NULL_RTX;
4805
4806 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4807 }
4808
4809 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4810 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4811 scalar_int_mode int_mode, int_cmp_mode;
4812 if (code == NE
4813 && op1 == const0_rtx
4814 && is_int_mode (mode, &int_mode)
4815 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4816 /* ??? Work-around BImode bugs in the ia64 backend. */
4817 && int_mode != BImode
4818 && int_cmp_mode != BImode
4819 && nonzero_bits (op0, int_cmp_mode) == 1
4820 && STORE_FLAG_VALUE == 1)
4821 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4822 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
4823 : lowpart_subreg (int_mode, op0, int_cmp_mode);
4824
4825 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4826 if ((code == EQ || code == NE)
4827 && op1 == const0_rtx
4828 && op0code == XOR)
4829 return simplify_gen_relational (code, mode, cmp_mode,
4830 XEXP (op0, 0), XEXP (op0, 1));
4831
4832 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4833 if ((code == EQ || code == NE)
4834 && op0code == XOR
4835 && rtx_equal_p (XEXP (op0, 0), op1)
4836 && !side_effects_p (XEXP (op0, 0)))
4837 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4838 CONST0_RTX (mode));
4839
4840 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4841 if ((code == EQ || code == NE)
4842 && op0code == XOR
4843 && rtx_equal_p (XEXP (op0, 1), op1)
4844 && !side_effects_p (XEXP (op0, 1)))
4845 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4846 CONST0_RTX (mode));
4847
4848 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4849 if ((code == EQ || code == NE)
4850 && op0code == XOR
4851 && CONST_SCALAR_INT_P (op1)
4852 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4853 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4854 simplify_gen_binary (XOR, cmp_mode,
4855 XEXP (op0, 1), op1));
4856
4857 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4858 can be implemented with a BICS instruction on some targets, or
4859 constant-folded if y is a constant. */
4860 if ((code == EQ || code == NE)
4861 && op0code == AND
4862 && rtx_equal_p (XEXP (op0, 0), op1)
4863 && !side_effects_p (op1)
4864 && op1 != CONST0_RTX (cmp_mode))
4865 {
4866 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4867 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4868
4869 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4870 CONST0_RTX (cmp_mode));
4871 }
4872
4873 /* Likewise for (eq/ne (and x y) y). */
4874 if ((code == EQ || code == NE)
4875 && op0code == AND
4876 && rtx_equal_p (XEXP (op0, 1), op1)
4877 && !side_effects_p (op1)
4878 && op1 != CONST0_RTX (cmp_mode))
4879 {
4880 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4881 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4882
4883 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4884 CONST0_RTX (cmp_mode));
4885 }
4886
4887 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4888 if ((code == EQ || code == NE)
4889 && GET_CODE (op0) == BSWAP
4890 && CONST_SCALAR_INT_P (op1))
4891 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4892 simplify_gen_unary (BSWAP, cmp_mode,
4893 op1, cmp_mode));
4894
4895 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4896 if ((code == EQ || code == NE)
4897 && GET_CODE (op0) == BSWAP
4898 && GET_CODE (op1) == BSWAP)
4899 return simplify_gen_relational (code, mode, cmp_mode,
4900 XEXP (op0, 0), XEXP (op1, 0));
4901
4902 if (op0code == POPCOUNT && op1 == const0_rtx)
4903 switch (code)
4904 {
4905 case EQ:
4906 case LE:
4907 case LEU:
4908 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4909 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4910 XEXP (op0, 0), const0_rtx);
4911
4912 case NE:
4913 case GT:
4914 case GTU:
4915 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4916 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4917 XEXP (op0, 0), const0_rtx);
4918
4919 default:
4920 break;
4921 }
4922
4923 return NULL_RTX;
4924 }
4925
4926 enum
4927 {
4928 CMP_EQ = 1,
4929 CMP_LT = 2,
4930 CMP_GT = 4,
4931 CMP_LTU = 8,
4932 CMP_GTU = 16
4933 };
4934
4935
4936 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4937 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4938 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4939 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4940 For floating-point comparisons, assume that the operands were ordered. */
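 /* For instance (a sketch), comparison_result (LE, CMP_GT) is const0_rtx,
 while comparison_result (LE, CMP_LT | CMP_LTU) is const_true_rtx.  */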
4941
4942 static rtx
4943 comparison_result (enum rtx_code code, int known_results)
4944 {
4945 switch (code)
4946 {
4947 case EQ:
4948 case UNEQ:
4949 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4950 case NE:
4951 case LTGT:
4952 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4953
4954 case LT:
4955 case UNLT:
4956 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4957 case GE:
4958 case UNGE:
4959 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4960
4961 case GT:
4962 case UNGT:
4963 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4964 case LE:
4965 case UNLE:
4966 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4967
4968 case LTU:
4969 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4970 case GEU:
4971 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4972
4973 case GTU:
4974 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4975 case LEU:
4976 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4977
4978 case ORDERED:
4979 return const_true_rtx;
4980 case UNORDERED:
4981 return const0_rtx;
4982 default:
4983 gcc_unreachable ();
4984 }
4985 }
4986
4987 /* Check if the given comparison (done in the given MODE) is actually
4988 a tautology or a contradiction. If the mode is VOIDmode, the
4989 comparison is done in "infinite precision". If no simplification
4990 is possible, this function returns zero. Otherwise, it returns
4991 either const_true_rtx or const0_rtx. */
4992
4993 rtx
4994 simplify_const_relational_operation (enum rtx_code code,
4995 machine_mode mode,
4996 rtx op0, rtx op1)
4997 {
4998 rtx tem;
4999 rtx trueop0;
5000 rtx trueop1;
5001
5002 gcc_assert (mode != VOIDmode
5003 || (GET_MODE (op0) == VOIDmode
5004 && GET_MODE (op1) == VOIDmode));
5005
5006 /* If op0 is a compare, extract the comparison arguments from it. */
5007 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5008 {
5009 op1 = XEXP (op0, 1);
5010 op0 = XEXP (op0, 0);
5011
5012 if (GET_MODE (op0) != VOIDmode)
5013 mode = GET_MODE (op0);
5014 else if (GET_MODE (op1) != VOIDmode)
5015 mode = GET_MODE (op1);
5016 else
5017 return 0;
5018 }
5019
5020 /* We can't simplify MODE_CC values since we don't know what the
5021 actual comparison is. */
5022 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5023 return 0;
5024
5025 /* Make sure the constant is second. */
5026 if (swap_commutative_operands_p (op0, op1))
5027 {
5028 std::swap (op0, op1);
5029 code = swap_condition (code);
5030 }
5031
5032 trueop0 = avoid_constant_pool_reference (op0);
5033 trueop1 = avoid_constant_pool_reference (op1);
5034
5035 /* For integer comparisons of A and B we may be able to simplify A - B and
5036 then simplify a comparison of that with zero. If A and B are both either
5037 a register or a CONST_INT, this can't help; testing for these cases will
5038 prevent infinite recursion here and speed things up.
5039
5040 We can only do this for EQ and NE comparisons as otherwise we may
5041 lose or introduce overflow which we cannot disregard as undefined,
5042 since we do not know the signedness of the operation on either the
5043 left or the right hand side of the comparison. */
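 /* For instance (a sketch), an EQ comparison of (plus x (const_int 4))
 with x can be reduced to comparing (const_int 4) against zero, and
 hence to const0_rtx.  */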
5044
5045 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5046 && (code == EQ || code == NE)
5047 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5048 && (REG_P (op1) || CONST_INT_P (trueop1)))
5049 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5050 /* We cannot do this if tem is a nonzero address. */
5051 && ! nonzero_address_p (tem))
5052 return simplify_const_relational_operation (signed_condition (code),
5053 mode, tem, const0_rtx);
5054
5055 if (! HONOR_NANS (mode) && code == ORDERED)
5056 return const_true_rtx;
5057
5058 if (! HONOR_NANS (mode) && code == UNORDERED)
5059 return const0_rtx;
5060
5061 /* For modes without NaNs, if the two operands are equal, we know the
5062 result except if they have side-effects. Even with NaNs we know
5063 the result of unordered comparisons and, if signaling NaNs are
5064 irrelevant, also the result of LT/GT/LTGT. */
5065 if ((! HONOR_NANS (trueop0)
5066 || code == UNEQ || code == UNLE || code == UNGE
5067 || ((code == LT || code == GT || code == LTGT)
5068 && ! HONOR_SNANS (trueop0)))
5069 && rtx_equal_p (trueop0, trueop1)
5070 && ! side_effects_p (trueop0))
5071 return comparison_result (code, CMP_EQ);
5072
5073 /* If the operands are floating-point constants, see if we can fold
5074 the result. */
5075 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5076 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5077 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5078 {
5079 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5080 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5081
5082 /* Comparisons are unordered iff at least one of the values is NaN. */
5083 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5084 switch (code)
5085 {
5086 case UNEQ:
5087 case UNLT:
5088 case UNGT:
5089 case UNLE:
5090 case UNGE:
5091 case NE:
5092 case UNORDERED:
5093 return const_true_rtx;
5094 case EQ:
5095 case LT:
5096 case GT:
5097 case LE:
5098 case GE:
5099 case LTGT:
5100 case ORDERED:
5101 return const0_rtx;
5102 default:
5103 return 0;
5104 }
5105
5106 return comparison_result (code,
5107 (real_equal (d0, d1) ? CMP_EQ :
5108 real_less (d0, d1) ? CMP_LT : CMP_GT));
5109 }
5110
5111 /* Otherwise, see if the operands are both integers. */
5112 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5113 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5114 {
5115 /* It would be nice if we really had a mode here. However, the
5116 largest int representable on the target is as good as
5117 infinite. */
5118 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5119 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5120 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5121
5122 if (wi::eq_p (ptrueop0, ptrueop1))
5123 return comparison_result (code, CMP_EQ);
5124 else
5125 {
5126 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5127 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5128 return comparison_result (code, cr);
5129 }
5130 }
5131
5132 /* Optimize comparisons with upper and lower bounds. */
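 /* For example (a sketch), if nonzero_bits shows that TRUEOP0 fits in
 [0, 255], then (gtu trueop0 (const_int 255)) folds to const0_rtx and
 (geu trueop0 (const_int 0)) folds to const_true_rtx.  */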
5133 scalar_int_mode int_mode;
5134 if (CONST_INT_P (trueop1)
5135 && is_a <scalar_int_mode> (mode, &int_mode)
5136 && HWI_COMPUTABLE_MODE_P (int_mode)
5137 && !side_effects_p (trueop0))
5138 {
5139 int sign;
5140 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5141 HOST_WIDE_INT val = INTVAL (trueop1);
5142 HOST_WIDE_INT mmin, mmax;
5143
5144 if (code == GEU
5145 || code == LEU
5146 || code == GTU
5147 || code == LTU)
5148 sign = 0;
5149 else
5150 sign = 1;
5151
5152 /* Get a reduced range if the sign bit is zero. */
5153 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5154 {
5155 mmin = 0;
5156 mmax = nonzero;
5157 }
5158 else
5159 {
5160 rtx mmin_rtx, mmax_rtx;
5161 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5162
5163 mmin = INTVAL (mmin_rtx);
5164 mmax = INTVAL (mmax_rtx);
5165 if (sign)
5166 {
5167 unsigned int sign_copies
5168 = num_sign_bit_copies (trueop0, int_mode);
5169
5170 mmin >>= (sign_copies - 1);
5171 mmax >>= (sign_copies - 1);
5172 }
5173 }
5174
5175 switch (code)
5176 {
5177 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5178 case GEU:
5179 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5180 return const_true_rtx;
5181 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5182 return const0_rtx;
5183 break;
5184 case GE:
5185 if (val <= mmin)
5186 return const_true_rtx;
5187 if (val > mmax)
5188 return const0_rtx;
5189 break;
5190
5191 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5192 case LEU:
5193 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5194 return const_true_rtx;
5195 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5196 return const0_rtx;
5197 break;
5198 case LE:
5199 if (val >= mmax)
5200 return const_true_rtx;
5201 if (val < mmin)
5202 return const0_rtx;
5203 break;
5204
5205 case EQ:
5206 /* x == y is always false for y out of range. */
5207 if (val < mmin || val > mmax)
5208 return const0_rtx;
5209 break;
5210
5211 /* x > y is always false for y >= mmax, always true for y < mmin. */
5212 case GTU:
5213 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5214 return const0_rtx;
5215 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5216 return const_true_rtx;
5217 break;
5218 case GT:
5219 if (val >= mmax)
5220 return const0_rtx;
5221 if (val < mmin)
5222 return const_true_rtx;
5223 break;
5224
5225 /* x < y is always false for y <= mmin, always true for y > mmax. */
5226 case LTU:
5227 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5228 return const0_rtx;
5229 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5230 return const_true_rtx;
5231 break;
5232 case LT:
5233 if (val <= mmin)
5234 return const0_rtx;
5235 if (val > mmax)
5236 return const_true_rtx;
5237 break;
5238
5239 case NE:
5240 /* x != y is always true for y out of range. */
5241 if (val < mmin || val > mmax)
5242 return const_true_rtx;
5243 break;
5244
5245 default:
5246 break;
5247 }
5248 }
5249
5250 /* Optimize integer comparisons with zero. */
5251 if (is_a <scalar_int_mode> (mode, &int_mode)
5252 && trueop1 == const0_rtx
5253 && !side_effects_p (trueop0))
5254 {
5255 /* Some addresses are known to be nonzero. We don't know
5256 their sign, but equality comparisons are known. */
5257 if (nonzero_address_p (trueop0))
5258 {
5259 if (code == EQ || code == LEU)
5260 return const0_rtx;
5261 if (code == NE || code == GTU)
5262 return const_true_rtx;
5263 }
5264
5265 /* See if the first operand is an IOR with a constant. If so, we
5266 may be able to determine the result of this comparison. */
5267 if (GET_CODE (op0) == IOR)
5268 {
5269 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5270 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5271 {
5272 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5273 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5274 && (UINTVAL (inner_const)
5275 & (HOST_WIDE_INT_1U
5276 << sign_bitnum)));
5277
5278 switch (code)
5279 {
5280 case EQ:
5281 case LEU:
5282 return const0_rtx;
5283 case NE:
5284 case GTU:
5285 return const_true_rtx;
5286 case LT:
5287 case LE:
5288 if (has_sign)
5289 return const_true_rtx;
5290 break;
5291 case GT:
5292 case GE:
5293 if (has_sign)
5294 return const0_rtx;
5295 break;
5296 default:
5297 break;
5298 }
5299 }
5300 }
5301 }
5302
5303 /* Optimize comparison of ABS with zero. */
5304 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5305 && (GET_CODE (trueop0) == ABS
5306 || (GET_CODE (trueop0) == FLOAT_EXTEND
5307 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5308 {
5309 switch (code)
5310 {
5311 case LT:
5312 /* Optimize abs(x) < 0.0. */
5313 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5314 return const0_rtx;
5315 break;
5316
5317 case GE:
5318 /* Optimize abs(x) >= 0.0. */
5319 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5320 return const_true_rtx;
5321 break;
5322
5323 case UNGE:
5324 /* Optimize ! (abs(x) < 0.0). */
5325 return const_true_rtx;
5326
5327 default:
5328 break;
5329 }
5330 }
5331
5332 return 0;
5333 }
5334
5335 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5336 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5337 or CTZ_DEFINED_VALUE_AT_ZERO respectively. Return OP (X) if the
5338 expression can be simplified to that, or NULL_RTX if not.
5339 Assume X is compared against zero with CMP_CODE and the true
5340 arm is TRUE_VAL and the false arm is FALSE_VAL. */
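 /* E.g. (a sketch, assuming a target whose CLZ_DEFINED_VALUE_AT_ZERO
 yields 32 for SImode): (X == 0) ? 32 : (clz:SI X) simplifies to
 (clz:SI X).  */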
5341
5342 static rtx
5343 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5344 {
5345 if (cmp_code != EQ && cmp_code != NE)
5346 return NULL_RTX;
5347
5348 /* Result on X == 0 and X != 0 respectively. */
5349 rtx on_zero, on_nonzero;
5350 if (cmp_code == EQ)
5351 {
5352 on_zero = true_val;
5353 on_nonzero = false_val;
5354 }
5355 else
5356 {
5357 on_zero = false_val;
5358 on_nonzero = true_val;
5359 }
5360
5361 rtx_code op_code = GET_CODE (on_nonzero);
5362 if ((op_code != CLZ && op_code != CTZ)
5363 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5364 || !CONST_INT_P (on_zero))
5365 return NULL_RTX;
5366
5367 HOST_WIDE_INT op_val;
5368 scalar_int_mode mode ATTRIBUTE_UNUSED
5369 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5370 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5371 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5372 && op_val == INTVAL (on_zero))
5373 return on_nonzero;
5374
5375 return NULL_RTX;
5376 }
5377
5378 \f
5379 /* Simplify CODE, an operation with result mode MODE and three operands,
5380 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5381 a constant. Return 0 if no simplification is possible. */
5382
5383 rtx
5384 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5385 machine_mode op0_mode, rtx op0, rtx op1,
5386 rtx op2)
5387 {
5388 bool any_change = false;
5389 rtx tem, trueop2;
5390 scalar_int_mode int_mode, int_op0_mode;
5391
5392 switch (code)
5393 {
5394 case FMA:
5395 /* Simplify negations around the multiplication. */
5396 /* -a * -b + c => a * b + c. */
5397 if (GET_CODE (op0) == NEG)
5398 {
5399 tem = simplify_unary_operation (NEG, mode, op1, mode);
5400 if (tem)
5401 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5402 }
5403 else if (GET_CODE (op1) == NEG)
5404 {
5405 tem = simplify_unary_operation (NEG, mode, op0, mode);
5406 if (tem)
5407 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5408 }
5409
5410 /* Canonicalize the two multiplication operands. */
5411 /* a * -b + c => -b * a + c. */
5412 if (swap_commutative_operands_p (op0, op1))
5413 std::swap (op0, op1), any_change = true;
5414
5415 if (any_change)
5416 return gen_rtx_FMA (mode, op0, op1, op2);
5417 return NULL_RTX;
5418
5419 case SIGN_EXTRACT:
5420 case ZERO_EXTRACT:
5421 if (CONST_INT_P (op0)
5422 && CONST_INT_P (op1)
5423 && CONST_INT_P (op2)
5424 && is_a <scalar_int_mode> (mode, &int_mode)
5425 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5426 && HWI_COMPUTABLE_MODE_P (int_mode))
5427 {
5428 /* Extracting a bit-field from a constant */
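/* E.g. with !BITS_BIG_ENDIAN, (zero_extract:SI (const_int 0xabcd)
   (const_int 4) (const_int 8)) folds to (const_int 11) -- the four
   bits starting at bit 8.  */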
5429 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5430 HOST_WIDE_INT op1val = INTVAL (op1);
5431 HOST_WIDE_INT op2val = INTVAL (op2);
5432 if (!BITS_BIG_ENDIAN)
5433 val >>= op2val;
5434 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5435 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5436 else
5437 /* Not enough information to calculate the bit position. */
5438 break;
5439
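/* Skip the masking when the field occupies a full HOST_WIDE_INT: the
   shift by op1val would be undefined, and no truncation or sign
   extension is needed in that case.  */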
5440 if (HOST_BITS_PER_WIDE_INT != op1val)
5441 {
5442 /* First zero-extend. */
5443 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5444 /* If desired, propagate sign bit. */
5445 if (code == SIGN_EXTRACT
5446 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5447 != 0)
5448 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5449 }
5450
5451 return gen_int_mode (val, int_mode);
5452 }
5453 break;
5454
5455 case IF_THEN_ELSE:
5456 if (CONST_INT_P (op0))
5457 return op0 != const0_rtx ? op1 : op2;
5458
5459 /* Convert c ? a : a into "a". */
5460 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5461 return op1;
5462
5463 /* Convert a != b ? a : b into "a". */
5464 if (GET_CODE (op0) == NE
5465 && ! side_effects_p (op0)
5466 && ! HONOR_NANS (mode)
5467 && ! HONOR_SIGNED_ZEROS (mode)
5468 && ((rtx_equal_p (XEXP (op0, 0), op1)
5469 && rtx_equal_p (XEXP (op0, 1), op2))
5470 || (rtx_equal_p (XEXP (op0, 0), op2)
5471 && rtx_equal_p (XEXP (op0, 1), op1))))
5472 return op1;
5473
5474 /* Convert a == b ? a : b into "b". */
5475 if (GET_CODE (op0) == EQ
5476 && ! side_effects_p (op0)
5477 && ! HONOR_NANS (mode)
5478 && ! HONOR_SIGNED_ZEROS (mode)
5479 && ((rtx_equal_p (XEXP (op0, 0), op1)
5480 && rtx_equal_p (XEXP (op0, 1), op2))
5481 || (rtx_equal_p (XEXP (op0, 0), op2)
5482 && rtx_equal_p (XEXP (op0, 1), op1))))
5483 return op2;
5484
5485 /* Convert (!c) != {0,...,0} ? a : b into
5486 c != {0,...,0} ? b : a for vector modes. */
5487 if (VECTOR_MODE_P (GET_MODE (op1))
5488 && GET_CODE (op0) == NE
5489 && GET_CODE (XEXP (op0, 0)) == NOT
5490 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5491 {
5492 rtx cv = XEXP (op0, 1);
5493 int nunits = CONST_VECTOR_NUNITS (cv);
5494 bool ok = true;
5495 for (int i = 0; i < nunits; ++i)
5496 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5497 {
5498 ok = false;
5499 break;
5500 }
5501 if (ok)
5502 {
5503 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5504 XEXP (XEXP (op0, 0), 0),
5505 XEXP (op0, 1));
5506 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5507 return retval;
5508 }
5509 }
5510
5511 /* Convert x == 0 ? N : clz (x) into clz (x) when
5512 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5513 Similarly for ctz (x). */
5514 if (COMPARISON_P (op0) && !side_effects_p (op0)
5515 && XEXP (op0, 1) == const0_rtx)
5516 {
5517 rtx simplified
5518 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5519 op1, op2);
5520 if (simplified)
5521 return simplified;
5522 }
5523
5524 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5525 {
5526 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5527 ? GET_MODE (XEXP (op0, 1))
5528 : GET_MODE (XEXP (op0, 0)));
5529 rtx temp;
5530
5531 /* Look for the constants STORE_FLAG_VALUE and zero in op1 and op2. */
5532 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5533 {
5534 HOST_WIDE_INT t = INTVAL (op1);
5535 HOST_WIDE_INT f = INTVAL (op2);
5536
5537 if (t == STORE_FLAG_VALUE && f == 0)
5538 code = GET_CODE (op0);
5539 else if (t == 0 && f == STORE_FLAG_VALUE)
5540 {
5541 enum rtx_code tmp;
5542 tmp = reversed_comparison_code (op0, NULL);
5543 if (tmp == UNKNOWN)
5544 break;
5545 code = tmp;
5546 }
5547 else
5548 break;
5549
5550 return simplify_gen_relational (code, mode, cmp_mode,
5551 XEXP (op0, 0), XEXP (op0, 1));
5552 }
5553
5554 if (cmp_mode == VOIDmode)
5555 cmp_mode = op0_mode;
5556 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5557 cmp_mode, XEXP (op0, 0),
5558 XEXP (op0, 1));
5559
5560 /* See if any simplifications were possible. */
5561 if (temp)
5562 {
5563 if (CONST_INT_P (temp))
5564 return temp == const0_rtx ? op2 : op1;
5565 else if (temp)
5566 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5567 }
5568 }
5569 break;
5570
5571 case VEC_MERGE:
5572 gcc_assert (GET_MODE (op0) == mode);
5573 gcc_assert (GET_MODE (op1) == mode);
5574 gcc_assert (VECTOR_MODE_P (mode));
5575 trueop2 = avoid_constant_pool_reference (op2);
5576 if (CONST_INT_P (trueop2))
5577 {
5578 int elt_size = GET_MODE_UNIT_SIZE (mode);
5579 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5580 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5581 unsigned HOST_WIDE_INT mask;
5582 if (n_elts == HOST_BITS_PER_WIDE_INT)
5583 mask = -1;
5584 else
5585 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5586
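/* Bit I of SEL chooses element I from op0 when set and from op1 when
   clear; MASK covers only the bits that name real elements.  */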
5587 if (!(sel & mask) && !side_effects_p (op0))
5588 return op1;
5589 if ((sel & mask) == mask && !side_effects_p (op1))
5590 return op0;
5591
5592 rtx trueop0 = avoid_constant_pool_reference (op0);
5593 rtx trueop1 = avoid_constant_pool_reference (op1);
5594 if (GET_CODE (trueop0) == CONST_VECTOR
5595 && GET_CODE (trueop1) == CONST_VECTOR)
5596 {
5597 rtvec v = rtvec_alloc (n_elts);
5598 unsigned int i;
5599
5600 for (i = 0; i < n_elts; i++)
5601 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5602 ? CONST_VECTOR_ELT (trueop0, i)
5603 : CONST_VECTOR_ELT (trueop1, i));
5604 return gen_rtx_CONST_VECTOR (mode, v);
5605 }
5606
5607 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5608 if no element from a appears in the result. */
5609 if (GET_CODE (op0) == VEC_MERGE)
5610 {
5611 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5612 if (CONST_INT_P (tem))
5613 {
5614 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5615 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5616 return simplify_gen_ternary (code, mode, mode,
5617 XEXP (op0, 1), op1, op2);
5618 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5619 return simplify_gen_ternary (code, mode, mode,
5620 XEXP (op0, 0), op1, op2);
5621 }
5622 }
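/* Likewise for (vec_merge a (vec_merge b c n) m): drop b or c when it
   contributes no element to the result.  */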
5623 if (GET_CODE (op1) == VEC_MERGE)
5624 {
5625 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5626 if (CONST_INT_P (tem))
5627 {
5628 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5629 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5630 return simplify_gen_ternary (code, mode, mode,
5631 op0, XEXP (op1, 1), op2);
5632 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5633 return simplify_gen_ternary (code, mode, mode,
5634 op0, XEXP (op1, 0), op2);
5635 }
5636 }
5637
5638 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5639 with a. */
5640 if (GET_CODE (op0) == VEC_DUPLICATE
5641 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5642 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5643 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5644 {
5645 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5646 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5647 {
5648 if (XEXP (XEXP (op0, 0), 0) == op1
5649 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5650 return op1;
5651 }
5652 }
5653 }
5654
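/* If both merge inputs are identical, the selector is irrelevant.  */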
5655 if (rtx_equal_p (op0, op1)
5656 && !side_effects_p (op2) && !side_effects_p (op1))
5657 return op0;
5658
5659 break;
5660
5661 default:
5662 gcc_unreachable ();
5663 }
5664
5665 return 0;
5666 }
5667
5668 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5669 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5670 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5671
5672 Works by unpacking OP into a collection of 8-bit values
5673 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5674 and then repacking them again for OUTERMODE. */
5675
5676 static rtx
5677 simplify_immed_subreg (machine_mode outermode, rtx op,
5678 machine_mode innermode, unsigned int byte)
5679 {
5680 enum {
5681 value_bit = 8,
5682 value_mask = (1 << value_bit) - 1
5683 };
5684 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5685 int value_start;
5686 int i;
5687 int elem;
5688
5689 int num_elem;
5690 rtx * elems;
5691 int elem_bitsize;
5692 rtx result_s = NULL;
5693 rtvec result_v = NULL;
5694 enum mode_class outer_class;
5695 scalar_mode outer_submode;
5696 int max_bitsize;
5697
5698 /* Some ports misuse CCmode. */
5699 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5700 return op;
5701
5702 /* We have no way to represent a complex constant at the rtl level. */
5703 if (COMPLEX_MODE_P (outermode))
5704 return NULL_RTX;
5705
5706 /* We support any size mode. */
5707 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5708 GET_MODE_BITSIZE (innermode));
5709
5710 /* Unpack the value. */
5711
5712 if (GET_CODE (op) == CONST_VECTOR)
5713 {
5714 num_elem = CONST_VECTOR_NUNITS (op);
5715 elems = &CONST_VECTOR_ELT (op, 0);
5716 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5717 }
5718 else
5719 {
5720 num_elem = 1;
5721 elems = &op;
5722 elem_bitsize = max_bitsize;
5723 }
5724 /* If this asserts, it is too complicated; reducing value_bit may help. */
5725 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5726 /* I don't know how to handle endianness of sub-units. */
5727 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5728
5729 for (elem = 0; elem < num_elem; elem++)
5730 {
5731 unsigned char * vp;
5732 rtx el = elems[elem];
5733
5734 /* Vectors are kept in target memory order. (This is probably
5735 a mistake.) */
5736 {
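/* Map this element's memory-order position to its offset in the
   little-endian VALUE array, undoing any word or subword
   big-endian layout.  */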
5737 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5738 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5739 / BITS_PER_UNIT);
5740 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5741 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5742 unsigned bytele = (subword_byte % UNITS_PER_WORD
5743 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5744 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5745 }
5746
5747 switch (GET_CODE (el))
5748 {
5749 case CONST_INT:
5750 for (i = 0;
5751 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5752 i += value_bit)
5753 *vp++ = INTVAL (el) >> i;
5754 /* CONST_INTs are always logically sign-extended. */
5755 for (; i < elem_bitsize; i += value_bit)
5756 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5757 break;
5758
5759 case CONST_WIDE_INT:
5760 {
5761 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
5762 unsigned char extend = wi::sign_mask (val);
5763 int prec = wi::get_precision (val);
5764
5765 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5766 *vp++ = wi::extract_uhwi (val, i, value_bit);
5767 for (; i < elem_bitsize; i += value_bit)
5768 *vp++ = extend;
5769 }
5770 break;
5771
5772 case CONST_DOUBLE:
5773 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5774 {
5775 unsigned char extend = 0;
5776 /* If this triggers, someone should have generated a
5777 CONST_INT instead. */
5778 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5779
5780 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5781 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5782 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5783 {
5784 *vp++
5785 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5786 i += value_bit;
5787 }
5788
5789 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5790 extend = -1;
5791 for (; i < elem_bitsize; i += value_bit)
5792 *vp++ = extend;
5793 }
5794 else
5795 {
5796 /* This is big enough for anything on the platform. */
5797 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5798 scalar_float_mode el_mode;
5799
5800 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
5801 int bitsize = GET_MODE_BITSIZE (el_mode);
5802
5803 gcc_assert (bitsize <= elem_bitsize);
5804 gcc_assert (bitsize % value_bit == 0);
5805
5806 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5807 GET_MODE (el));
5808
5809 /* real_to_target produces its result in words affected by
5810 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5811 and use WORDS_BIG_ENDIAN instead; see the documentation
5812 of SUBREG in rtl.texi. */
5813 for (i = 0; i < bitsize; i += value_bit)
5814 {
5815 int ibase;
5816 if (WORDS_BIG_ENDIAN)
5817 ibase = bitsize - 1 - i;
5818 else
5819 ibase = i;
5820 *vp++ = tmp[ibase / 32] >> i % 32;
5821 }
5822
5823 /* It shouldn't matter what's done here, so fill it with
5824 zero. */
5825 for (; i < elem_bitsize; i += value_bit)
5826 *vp++ = 0;
5827 }
5828 break;
5829
5830 case CONST_FIXED:
5831 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5832 {
5833 for (i = 0; i < elem_bitsize; i += value_bit)
5834 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5835 }
5836 else
5837 {
5838 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5839 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5840 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5841 i += value_bit)
5842 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5843 >> (i - HOST_BITS_PER_WIDE_INT);
5844 for (; i < elem_bitsize; i += value_bit)
5845 *vp++ = 0;
5846 }
5847 break;
5848
5849 default:
5850 gcc_unreachable ();
5851 }
5852 }
5853
5854 /* Now, pick the right byte to start with. */
5855 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5856 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5857 will already have offset 0. */
5858 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5859 {
5860 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5861 - byte);
5862 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5863 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5864 byte = (subword_byte % UNITS_PER_WORD
5865 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5866 }
5867
5868 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5869 so if it's become negative it will instead be very large.) */
5870 gcc_assert (byte < GET_MODE_SIZE (innermode));
5871
5872 /* Convert from bytes to chunks of size value_bit. */
5873 value_start = byte * (BITS_PER_UNIT / value_bit);
5874
5875 /* Re-pack the value. */
5876 num_elem = GET_MODE_NUNITS (outermode);
5877
5878 if (VECTOR_MODE_P (outermode))
5879 {
5880 result_v = rtvec_alloc (num_elem);
5881 elems = &RTVEC_ELT (result_v, 0);
5882 }
5883 else
5884 elems = &result_s;
5885
5886 outer_submode = GET_MODE_INNER (outermode);
5887 outer_class = GET_MODE_CLASS (outer_submode);
5888 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5889
5890 gcc_assert (elem_bitsize % value_bit == 0);
5891 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5892
5893 for (elem = 0; elem < num_elem; elem++)
5894 {
5895 unsigned char *vp;
5896
5897 /* Vectors are stored in target memory order. (This is probably
5898 a mistake.) */
5899 {
5900 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5901 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5902 / BITS_PER_UNIT);
5903 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5904 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5905 unsigned bytele = (subword_byte % UNITS_PER_WORD
5906 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5907 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5908 }
5909
5910 switch (outer_class)
5911 {
5912 case MODE_INT:
5913 case MODE_PARTIAL_INT:
5914 {
5915 int u;
5916 int base = 0;
5917 int units
5918 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5919 / HOST_BITS_PER_WIDE_INT;
5920 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5921 wide_int r;
5922
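/* Give up if the integer is wider than TMP can hold.  */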
5923 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5924 return NULL_RTX;
5925 for (u = 0; u < units; u++)
5926 {
5927 unsigned HOST_WIDE_INT buf = 0;
5928 for (i = 0;
5929 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5930 i += value_bit)
5931 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5932
5933 tmp[u] = buf;
5934 base += HOST_BITS_PER_WIDE_INT;
5935 }
5936 r = wide_int::from_array (tmp, units,
5937 GET_MODE_PRECISION (outer_submode));
5938 #if TARGET_SUPPORTS_WIDE_INT == 0
5939 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5940 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5941 return NULL_RTX;
5942 #endif
5943 elems[elem] = immed_wide_int_const (r, outer_submode);
5944 }
5945 break;
5946
5947 case MODE_FLOAT:
5948 case MODE_DECIMAL_FLOAT:
5949 {
5950 REAL_VALUE_TYPE r;
5951 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5952
5953 /* real_from_target wants its input in words affected by
5954 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5955 and use WORDS_BIG_ENDIAN instead; see the documentation
5956 of SUBREG in rtl.texi. */
5957 for (i = 0; i < elem_bitsize; i += value_bit)
5958 {
5959 int ibase;
5960 if (WORDS_BIG_ENDIAN)
5961 ibase = elem_bitsize - 1 - i;
5962 else
5963 ibase = i;
5964 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5965 }
5966
5967 real_from_target (&r, tmp, outer_submode);
5968 elems[elem] = const_double_from_real_value (r, outer_submode);
5969 }
5970 break;
5971
5972 case MODE_FRACT:
5973 case MODE_UFRACT:
5974 case MODE_ACCUM:
5975 case MODE_UACCUM:
5976 {
5977 FIXED_VALUE_TYPE f;
5978 f.data.low = 0;
5979 f.data.high = 0;
5980 f.mode = outer_submode;
5981
5982 for (i = 0;
5983 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5984 i += value_bit)
5985 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5986 for (; i < elem_bitsize; i += value_bit)
5987 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5988 << (i - HOST_BITS_PER_WIDE_INT));
5989
5990 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5991 }
5992 break;
5993
5994 default:
5995 gcc_unreachable ();
5996 }
5997 }
5998 if (VECTOR_MODE_P (outermode))
5999 return gen_rtx_CONST_VECTOR (outermode, result_v);
6000 else
6001 return result_s;
6002 }
6003
6004 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6005 Return 0 if no simplifications are possible. */
6006 rtx
6007 simplify_subreg (machine_mode outermode, rtx op,
6008 machine_mode innermode, unsigned int byte)
6009 {
6010 /* Little bit of sanity checking. */
6011 gcc_assert (innermode != VOIDmode);
6012 gcc_assert (outermode != VOIDmode);
6013 gcc_assert (innermode != BLKmode);
6014 gcc_assert (outermode != BLKmode);
6015
6016 gcc_assert (GET_MODE (op) == innermode
6017 || GET_MODE (op) == VOIDmode);
6018
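/* The requested piece must start on an OUTERMODE boundary and lie
   within the inner value.  */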
6019 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6020 return NULL_RTX;
6021
6022 if (byte >= GET_MODE_SIZE (innermode))
6023 return NULL_RTX;
6024
6025 if (outermode == innermode && !byte)
6026 return op;
6027
6028 if (CONST_SCALAR_INT_P (op)
6029 || CONST_DOUBLE_AS_FLOAT_P (op)
6030 || GET_CODE (op) == CONST_FIXED
6031 || GET_CODE (op) == CONST_VECTOR)
6032 return simplify_immed_subreg (outermode, op, innermode, byte);
6033
6034 /* Changing mode twice with SUBREG => just change it once,
6035 or not at all if changing back to op's starting mode. */
6036 if (GET_CODE (op) == SUBREG)
6037 {
6038 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6039 rtx newx;
6040
6041 if (outermode == innermostmode
6042 && byte == 0 && SUBREG_BYTE (op) == 0)
6043 return SUBREG_REG (op);
6044
6045 /* Work out the memory offset of the final OUTERMODE value relative
6046 to the inner value of OP. */
6047 HOST_WIDE_INT mem_offset = subreg_memory_offset (outermode,
6048 innermode, byte);
6049 HOST_WIDE_INT op_mem_offset = subreg_memory_offset (op);
6050 HOST_WIDE_INT final_offset = mem_offset + op_mem_offset;
6051
6052 /* See whether resulting subreg will be paradoxical. */
6053 if (!paradoxical_subreg_p (outermode, innermostmode))
6054 {
6055 /* In nonparadoxical subregs we can't handle negative offsets. */
6056 if (final_offset < 0)
6057 return NULL_RTX;
6058 /* Bail out in case resulting subreg would be incorrect. */
6059 if (final_offset % GET_MODE_SIZE (outermode)
6060 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6061 return NULL_RTX;
6062 }
6063 else
6064 {
6065 HOST_WIDE_INT required_offset
6066 = subreg_memory_offset (outermode, innermostmode, 0);
6067 if (final_offset != required_offset)
6068 return NULL_RTX;
6069 /* Paradoxical subregs always have byte offset 0. */
6070 final_offset = 0;
6071 }
6072
6073 /* Recurse for further possible simplifications. */
6074 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6075 final_offset);
6076 if (newx)
6077 return newx;
6078 if (validate_subreg (outermode, innermostmode,
6079 SUBREG_REG (op), final_offset))
6080 {
6081 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6082 if (SUBREG_PROMOTED_VAR_P (op)
6083 && SUBREG_PROMOTED_SIGN (op) >= 0
6084 && GET_MODE_CLASS (outermode) == MODE_INT
6085 && IN_RANGE (GET_MODE_SIZE (outermode),
6086 GET_MODE_SIZE (innermode),
6087 GET_MODE_SIZE (innermostmode))
6088 && subreg_lowpart_p (newx))
6089 {
6090 SUBREG_PROMOTED_VAR_P (newx) = 1;
6091 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6092 }
6093 return newx;
6094 }
6095 return NULL_RTX;
6096 }
6097
6098 /* SUBREG of a hard register => just change the register number
6099 and/or mode. If the hard register is not valid in that mode,
6100 suppress this simplification. If the hard register is the stack,
6101 frame, or argument pointer, leave this as a SUBREG. */
6102
6103 if (REG_P (op) && HARD_REGISTER_P (op))
6104 {
6105 unsigned int regno, final_regno;
6106
6107 regno = REGNO (op);
6108 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6109 if (HARD_REGISTER_NUM_P (final_regno))
6110 {
6111 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6112 subreg_memory_offset (outermode,
6113 innermode, byte));
6114
6115 /* Propagate original regno. We don't have any way to specify
6116 the offset inside original regno, so do so only for lowpart.
6117 The information is used only by alias analysis, which cannot
6118 grok partial registers anyway. */
6119
6120 if (subreg_lowpart_offset (outermode, innermode) == byte)
6121 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6122 return x;
6123 }
6124 }
6125
6126 /* If we have a SUBREG of a register that we are replacing and we are
6127 replacing it with a MEM, make a new MEM and try replacing the
6128 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6129 or if we would be widening it. */
6130
6131 if (MEM_P (op)
6132 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6133 /* Allow splitting of volatile memory references in case we don't
6134 have an instruction to move the whole thing. */
6135 && (! MEM_VOLATILE_P (op)
6136 || ! have_insn_for (SET, innermode))
6137 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6138 return adjust_address_nv (op, outermode, byte);
6139
6140 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6141 of two parts. */
6142 if (GET_CODE (op) == CONCAT
6143 || GET_CODE (op) == VEC_CONCAT)
6144 {
6145 unsigned int part_size, final_offset;
6146 rtx part, res;
6147
6148 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6149 if (part_mode == VOIDmode)
6150 part_mode = GET_MODE_INNER (GET_MODE (op));
6151 part_size = GET_MODE_SIZE (part_mode);
6152 if (byte < part_size)
6153 {
6154 part = XEXP (op, 0);
6155 final_offset = byte;
6156 }
6157 else
6158 {
6159 part = XEXP (op, 1);
6160 final_offset = byte - part_size;
6161 }
6162
6163 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6164 return NULL_RTX;
6165
6166 part_mode = GET_MODE (part);
6167 if (part_mode == VOIDmode)
6168 part_mode = GET_MODE_INNER (GET_MODE (op));
6169 res = simplify_subreg (outermode, part, part_mode, final_offset);
6170 if (res)
6171 return res;
6172 if (validate_subreg (outermode, part_mode, part, final_offset))
6173 return gen_rtx_SUBREG (outermode, part, final_offset);
6174 return NULL_RTX;
6175 }
6176
6177 /* A SUBREG resulting from a zero extension may fold to zero if
6178 it extracts higher bits than the ZERO_EXTEND's source bits. */
6179 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6180 {
6181 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6182 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6183 return CONST0_RTX (outermode);
6184 }
6185
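/* A lowpart SUBREG that narrows a scalar integer behaves like a
   TRUNCATE, so try the truncation simplifications as well.  */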
6186 scalar_int_mode int_outermode, int_innermode;
6187 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6188 && is_a <scalar_int_mode> (innermode, &int_innermode)
6189 && (GET_MODE_PRECISION (int_outermode)
6190 < GET_MODE_PRECISION (int_innermode))
6191 && byte == subreg_lowpart_offset (int_outermode, int_innermode))
6192 {
6193 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6194 if (tem)
6195 return tem;
6196 }
6197
6198 return NULL_RTX;
6199 }
6200
6201 /* Make a SUBREG operation or equivalent if it folds. */
6202
6203 rtx
6204 simplify_gen_subreg (machine_mode outermode, rtx op,
6205 machine_mode innermode, unsigned int byte)
6206 {
6207 rtx newx;
6208
6209 newx = simplify_subreg (outermode, op, innermode, byte);
6210 if (newx)
6211 return newx;
6212
6213 if (GET_CODE (op) == SUBREG
6214 || GET_CODE (op) == CONCAT
6215 || GET_MODE (op) == VOIDmode)
6216 return NULL_RTX;
6217
6218 if (validate_subreg (outermode, innermode, op, byte))
6219 return gen_rtx_SUBREG (outermode, op, byte);
6220
6221 return NULL_RTX;
6222 }
6223
6224 /* Generate a subreg to get the least significant part of EXPR (in mode
6225 INNER_MODE) expressed in OUTER_MODE. */
6226
6227 rtx
6228 lowpart_subreg (machine_mode outer_mode, rtx expr,
6229 machine_mode inner_mode)
6230 {
6231 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6232 subreg_lowpart_offset (outer_mode, inner_mode));
6233 }
6234
6235 /* Simplify X, an rtx expression.
6236
6237 Return the simplified expression or NULL if no simplifications
6238 were possible.
6239
6240 This is the preferred entry point into the simplification routines;
6241 however, we still allow passes to call the more specific routines.
6242
6243 Right now GCC has three (yes, three) major bodies of RTL simplification
6244 code that need to be unified.
6245
6246 1. fold_rtx in cse.c. This code uses various CSE specific
6247 information to aid in RTL simplification.
6248
6249 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6250 it uses combine specific information to aid in RTL
6251 simplification.
6252
6253 3. The routines in this file.
6254
6255
6256 Long term we want to only have one body of simplification code; to
6257 get to that state I recommend the following steps:
6258
6259 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6260 which are not pass dependent state into these routines.
6261
6262 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6263 use this routine whenever possible.
6264
6265 3. Allow for pass dependent state to be provided to these
6266 routines and add simplifications based on the pass dependent
6267 state. Remove code from cse.c & combine.c that becomes
6268 redundant/dead.
6269
6270 It will take time, but ultimately the compiler will be easier to
6271 maintain and improve. It's totally silly that when we add a
6272 simplification it needs to be added to 4 places (3 for RTL
6273 simplification and 1 for tree simplification). */
6274
6275 rtx
6276 simplify_rtx (const_rtx x)
6277 {
6278 const enum rtx_code code = GET_CODE (x);
6279 const machine_mode mode = GET_MODE (x);
6280
6281 switch (GET_RTX_CLASS (code))
6282 {
6283 case RTX_UNARY:
6284 return simplify_unary_operation (code, mode,
6285 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6286 case RTX_COMM_ARITH:
6287 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6288 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6289
6290 /* Fall through. */
6291
6292 case RTX_BIN_ARITH:
6293 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6294
6295 case RTX_TERNARY:
6296 case RTX_BITFIELD_OPS:
6297 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6298 XEXP (x, 0), XEXP (x, 1),
6299 XEXP (x, 2));
6300
6301 case RTX_COMPARE:
6302 case RTX_COMM_COMPARE:
6303 return simplify_relational_operation (code, mode,
6304 ((GET_MODE (XEXP (x, 0))
6305 != VOIDmode)
6306 ? GET_MODE (XEXP (x, 0))
6307 : GET_MODE (XEXP (x, 1))),
6308 XEXP (x, 0),
6309 XEXP (x, 1));
6310
6311 case RTX_EXTRA:
6312 if (code == SUBREG)
6313 return simplify_subreg (mode, SUBREG_REG (x),
6314 GET_MODE (SUBREG_REG (x)),
6315 SUBREG_BYTE (x));
6316 break;
6317
6318 case RTX_OBJ:
6319 if (code == LO_SUM)
6320 {
6321 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6322 if (GET_CODE (XEXP (x, 0)) == HIGH
6323 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6324 return XEXP (x, 1);
6325 }
6326 break;
6327
6328 default:
6329 break;
6330 }
6331 return NULL;
6332 }