1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38
39 /* Simplification and canonicalization of RTL. */
40
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
47
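/* A minimal usage sketch (not taken from the surrounding code): to
   widen an unsigned low half into a (low, high) pair as if it were a
   signed wide int, one would write something like

     unsigned HOST_WIDE_INT low = ...;
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);

   HIGH ends up as HOST_WIDE_INT_M1 (all ones) when the top bit of LOW
   is set, and HOST_WIDE_INT_0 otherwise.  */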
48 static rtx neg_const_int (machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
58 \f
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
62 {
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
64
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
70 }
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
81
82 if (!is_int_mode (mode, &int_mode))
83 return false;
84
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
88
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
94 {
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
106 }
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
111 {
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
114 }
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
119
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
123 }
124
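/* For instance, assuming SImode has 32 bits of precision,
   mode_signbit_p (SImode, x) holds only when X is the constant whose
   sole set bit is the most significant one:

     rtx x = gen_int_mode (HOST_WIDE_INT_1U << 31, SImode);

   Here X is (const_int -2147483648), i.e. the SImode value 0x80000000,
   and mode_signbit_p (SImode, x) returns true.  */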
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
128
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133 scalar_int_mode int_mode;
134
135 if (!is_int_mode (mode, &int_mode))
136 return false;
137
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
141
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
144 }
145
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
150 {
151 unsigned int width;
152
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
156
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
160
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
163 }
164
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
169 {
170 unsigned int width;
171
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
175
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
179
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
182 }
183 \f
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
186
187 rtx
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
190 {
191 rtx tem;
192
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
197
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
202
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
204 }
205 \f
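/* A small usage sketch (the register number below is purely
   illustrative):

     rtx c3 = GEN_INT (3), c4 = GEN_INT (4);
     rtx sum = simplify_gen_binary (PLUS, SImode, c3, c4);

   SUM is (const_int 7), since both operands fold.  With a
   non-constant operand,

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx add = simplify_gen_binary (PLUS, SImode, c3, reg);

   ADD comes back as something like (plus:SI (reg:SI 100) (const_int 3));
   the constant ends up second because PLUS is commutative.  */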
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
208 rtx
209 avoid_constant_pool_reference (rtx x)
210 {
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 poly_int64 offset = 0;
214
215 switch (GET_CODE (x))
216 {
217 case MEM:
218 break;
219
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
228
229 default:
230 return x;
231 }
232
233 if (GET_MODE (x) == BLKmode)
234 return x;
235
236 addr = XEXP (x, 0);
237
238   /* Call target hook to avoid the effects of -fpic etc.  */
239 addr = targetm.delegitimize_address (addr);
240
241 /* Split the address into a base and integer offset. */
242 addr = strip_offset (addr, &offset);
243
244 if (GET_CODE (addr) == LO_SUM)
245 addr = XEXP (addr, 1);
246
247 /* If this is a constant pool reference, we can turn it into its
248 constant and hope that simplifications happen. */
249 if (GET_CODE (addr) == SYMBOL_REF
250 && CONSTANT_POOL_ADDRESS_P (addr))
251 {
252 c = get_pool_constant (addr);
253 cmode = get_pool_mode (addr);
254
255 /* If we're accessing the constant in a different mode than it was
256 originally stored, attempt to fix that up via subreg simplifications.
257 If that fails we have no choice but to return the original memory. */
258 if (known_eq (offset, 0) && cmode == GET_MODE (x))
259 return c;
260 else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
261 {
262 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
263 if (tem && CONSTANT_P (tem))
264 return tem;
265 }
266 }
267
268 return x;
269 }
270 \f
271 /* Simplify a MEM based on its attributes. This is the default
272 delegitimize_address target hook, and it's recommended that every
273 overrider call it. */
274
275 rtx
276 delegitimize_mem_from_attrs (rtx x)
277 {
278 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
279 use their base addresses as equivalent. */
280 if (MEM_P (x)
281 && MEM_EXPR (x)
282 && MEM_OFFSET_KNOWN_P (x))
283 {
284 tree decl = MEM_EXPR (x);
285 machine_mode mode = GET_MODE (x);
286 poly_int64 offset = 0;
287
288 switch (TREE_CODE (decl))
289 {
290 default:
291 decl = NULL;
292 break;
293
294 case VAR_DECL:
295 break;
296
297 case ARRAY_REF:
298 case ARRAY_RANGE_REF:
299 case COMPONENT_REF:
300 case BIT_FIELD_REF:
301 case REALPART_EXPR:
302 case IMAGPART_EXPR:
303 case VIEW_CONVERT_EXPR:
304 {
305 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
306 tree toffset;
307 int unsignedp, reversep, volatilep = 0;
308
309 decl
310 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
311 &unsignedp, &reversep, &volatilep);
312 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
313 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
314 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
315 decl = NULL;
316 else
317 offset += bytepos + toffset_val;
318 break;
319 }
320 }
321
322 if (decl
323 && mode == GET_MODE (x)
324 && VAR_P (decl)
325 && (TREE_STATIC (decl)
326 || DECL_THREAD_LOCAL_P (decl))
327 && DECL_RTL_SET_P (decl)
328 && MEM_P (DECL_RTL (decl)))
329 {
330 rtx newx;
331
332 offset += MEM_OFFSET (x);
333
334 newx = DECL_RTL (decl);
335
336 if (MEM_P (newx))
337 {
338 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
339 poly_int64 n_offset, o_offset;
340
341 /* Avoid creating a new MEM needlessly if we already had
342              the same address.  We do so if there's no OFFSET and the
343              old address X is identical to NEWX, or if X is of the
344              form (plus NEWX OFFSET), or NEWX is of the form
345 (plus Y (const_int Z)) and X is that with the offset
346 added: (plus Y (const_int Z+OFFSET)). */
347 n = strip_offset (n, &n_offset);
348 o = strip_offset (o, &o_offset);
349 if (!(known_eq (o_offset, n_offset + offset)
350 && rtx_equal_p (o, n)))
351 x = adjust_address_nv (newx, mode, offset);
352 }
353 else if (GET_MODE (x) == GET_MODE (newx)
354 && known_eq (offset, 0))
355 x = newx;
356 }
357 }
358
359 return x;
360 }
361 \f
362 /* Make a unary operation by first seeing if it folds and otherwise making
363 the specified operation. */
364
365 rtx
366 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
367 machine_mode op_mode)
368 {
369 rtx tem;
370
371 /* If this simplifies, use it. */
372 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
373 return tem;
374
375 return gen_rtx_fmt_e (code, mode, op);
376 }
377
378 /* Likewise for ternary operations. */
379
380 rtx
381 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
382 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
383 {
384 rtx tem;
385
386 /* If this simplifies, use it. */
387 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
388 op0, op1, op2)) != 0)
389 return tem;
390
391 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
392 }
393
394 /* Likewise, for relational operations.
395    CMP_MODE specifies the mode in which the comparison is done.  */
396
397 rtx
398 simplify_gen_relational (enum rtx_code code, machine_mode mode,
399 machine_mode cmp_mode, rtx op0, rtx op1)
400 {
401 rtx tem;
402
403 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
404 op0, op1)) != 0)
405 return tem;
406
407 return gen_rtx_fmt_ee (code, mode, op0, op1);
408 }
409 \f
410 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
411    and simplify the result.  If FN is non-NULL, call this callback on each
412    X; if it returns non-NULL, replace X with its return value and simplify the
413 result. */
414
415 rtx
416 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
417 rtx (*fn) (rtx, const_rtx, void *), void *data)
418 {
419 enum rtx_code code = GET_CODE (x);
420 machine_mode mode = GET_MODE (x);
421 machine_mode op_mode;
422 const char *fmt;
423 rtx op0, op1, op2, newx, op;
424 rtvec vec, newvec;
425 int i, j;
426
427 if (__builtin_expect (fn != NULL, 0))
428 {
429 newx = fn (x, old_rtx, data);
430 if (newx)
431 return newx;
432 }
433 else if (rtx_equal_p (x, old_rtx))
434 return copy_rtx ((rtx) data);
435
436 switch (GET_RTX_CLASS (code))
437 {
438 case RTX_UNARY:
439 op0 = XEXP (x, 0);
440 op_mode = GET_MODE (op0);
441 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
442 if (op0 == XEXP (x, 0))
443 return x;
444 return simplify_gen_unary (code, mode, op0, op_mode);
445
446 case RTX_BIN_ARITH:
447 case RTX_COMM_ARITH:
448 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
449 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
450 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
451 return x;
452 return simplify_gen_binary (code, mode, op0, op1);
453
454 case RTX_COMPARE:
455 case RTX_COMM_COMPARE:
456 op0 = XEXP (x, 0);
457 op1 = XEXP (x, 1);
458 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
459 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
460 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
461 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
462 return x;
463 return simplify_gen_relational (code, mode, op_mode, op0, op1);
464
465 case RTX_TERNARY:
466 case RTX_BITFIELD_OPS:
467 op0 = XEXP (x, 0);
468 op_mode = GET_MODE (op0);
469 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
470 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
471 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
472 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
473 return x;
474 if (op_mode == VOIDmode)
475 op_mode = GET_MODE (op0);
476 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
477
478 case RTX_EXTRA:
479 if (code == SUBREG)
480 {
481 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
482 if (op0 == SUBREG_REG (x))
483 return x;
484 op0 = simplify_gen_subreg (GET_MODE (x), op0,
485 GET_MODE (SUBREG_REG (x)),
486 SUBREG_BYTE (x));
487 return op0 ? op0 : x;
488 }
489 break;
490
491 case RTX_OBJ:
492 if (code == MEM)
493 {
494 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
495 if (op0 == XEXP (x, 0))
496 return x;
497 return replace_equiv_address_nv (x, op0);
498 }
499 else if (code == LO_SUM)
500 {
501 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
502 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
503
504 /* (lo_sum (high x) y) -> y where x and y have the same base. */
505 if (GET_CODE (op0) == HIGH)
506 {
507 rtx base0, base1, offset0, offset1;
508 split_const (XEXP (op0, 0), &base0, &offset0);
509 split_const (op1, &base1, &offset1);
510 if (rtx_equal_p (base0, base1))
511 return op1;
512 }
513
514 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
515 return x;
516 return gen_rtx_LO_SUM (mode, op0, op1);
517 }
518 break;
519
520 default:
521 break;
522 }
523
524 newx = x;
525 fmt = GET_RTX_FORMAT (code);
526 for (i = 0; fmt[i]; i++)
527 switch (fmt[i])
528 {
529 case 'E':
530 vec = XVEC (x, i);
531 newvec = XVEC (newx, i);
532 for (j = 0; j < GET_NUM_ELEM (vec); j++)
533 {
534 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
535 old_rtx, fn, data);
536 if (op != RTVEC_ELT (vec, j))
537 {
538 if (newvec == vec)
539 {
540 newvec = shallow_copy_rtvec (vec);
541 if (x == newx)
542 newx = shallow_copy_rtx (x);
543 XVEC (newx, i) = newvec;
544 }
545 RTVEC_ELT (newvec, j) = op;
546 }
547 }
548 break;
549
550 case 'e':
551 if (XEXP (x, i))
552 {
553 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
554 if (op != XEXP (x, i))
555 {
556 if (x == newx)
557 newx = shallow_copy_rtx (x);
558 XEXP (newx, i) = op;
559 }
560 }
561 break;
562 }
563 return newx;
564 }
565
566 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
567 resulting RTX. Return a new RTX which is as simplified as possible. */
568
569 rtx
570 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
571 {
572 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
573 }
574 \f
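/* A minimal sketch of the interface (register number 100 is only
   illustrative):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx x = gen_rtx_PLUS (SImode, reg, GEN_INT (3));
     rtx y = simplify_replace_rtx (x, reg, GEN_INT (2));

   Y is (const_int 5): the occurrence of REG is replaced by 2 and the
   resulting PLUS of two constants is folded on the way back up.  */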
575 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
576 Only handle cases where the truncated value is inherently an rvalue.
577
578 RTL provides two ways of truncating a value:
579
580 1. a lowpart subreg. This form is only a truncation when both
581 the outer and inner modes (here MODE and OP_MODE respectively)
582 are scalar integers, and only then when the subreg is used as
583 an rvalue.
584
585 It is only valid to form such truncating subregs if the
586 truncation requires no action by the target. The onus for
587 proving this is on the creator of the subreg -- e.g. the
588 caller to simplify_subreg or simplify_gen_subreg -- and typically
589 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
590
591 2. a TRUNCATE. This form handles both scalar and compound integers.
592
593 The first form is preferred where valid. However, the TRUNCATE
594 handling in simplify_unary_operation turns the second form into the
595 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
596 so it is generally safe to form rvalue truncations using:
597
598 simplify_gen_unary (TRUNCATE, ...)
599
600 and leave simplify_unary_operation to work out which representation
601 should be used.
602
603 Because of the proof requirements on (1), simplify_truncation must
604 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
605 regardless of whether the outer truncation came from a SUBREG or a
606 TRUNCATE. For example, if the caller has proven that an SImode
607 truncation of:
608
609 (and:DI X Y)
610
611 is a no-op and can be represented as a subreg, it does not follow
612 that SImode truncations of X and Y are also no-ops. On a target
613 like 64-bit MIPS that requires SImode values to be stored in
614 sign-extended form, an SImode truncation of:
615
616 (and:DI (reg:DI X) (const_int 63))
617
618 is trivially a no-op because only the lower 6 bits can be set.
619 However, X is still an arbitrary 64-bit number and so we cannot
620 assume that truncating it too is a no-op. */
621
622 static rtx
623 simplify_truncation (machine_mode mode, rtx op,
624 machine_mode op_mode)
625 {
626 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
627 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
628 scalar_int_mode int_mode, int_op_mode, subreg_mode;
629
630 gcc_assert (precision <= op_precision);
631
632 /* Optimize truncations of zero and sign extended values. */
633 if (GET_CODE (op) == ZERO_EXTEND
634 || GET_CODE (op) == SIGN_EXTEND)
635 {
636       /* There are three possibilities.  If MODE is the same as the
637          origmode, we can omit both the extension and the truncation.
638          If MODE is not larger than the origmode, we can apply the
639          truncation without the extension.  Finally, if MODE is
640          larger than the origmode, we can just extend to the appropriate
641          mode.  */
642 machine_mode origmode = GET_MODE (XEXP (op, 0));
643 if (mode == origmode)
644 return XEXP (op, 0);
645 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
646 return simplify_gen_unary (TRUNCATE, mode,
647 XEXP (op, 0), origmode);
648 else
649 return simplify_gen_unary (GET_CODE (op), mode,
650 XEXP (op, 0), origmode);
651 }
652
653 /* If the machine can perform operations in the truncated mode, distribute
654 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
655 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
656 if (1
657 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
658 && (GET_CODE (op) == PLUS
659 || GET_CODE (op) == MINUS
660 || GET_CODE (op) == MULT))
661 {
662 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
663 if (op0)
664 {
665 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
666 if (op1)
667 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
668 }
669 }
670
671 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
672      (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
673 the outer subreg is effectively a truncation to the original mode. */
674 if ((GET_CODE (op) == LSHIFTRT
675 || GET_CODE (op) == ASHIFTRT)
676 /* Ensure that OP_MODE is at least twice as wide as MODE
677 to avoid the possibility that an outer LSHIFTRT shifts by more
678 than the sign extension's sign_bit_copies and introduces zeros
679 into the high bits of the result. */
680 && 2 * precision <= op_precision
681 && CONST_INT_P (XEXP (op, 1))
682 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
683 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
684 && UINTVAL (XEXP (op, 1)) < precision)
685 return simplify_gen_binary (ASHIFTRT, mode,
686 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
687
688 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
689      (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
690 the outer subreg is effectively a truncation to the original mode. */
691 if ((GET_CODE (op) == LSHIFTRT
692 || GET_CODE (op) == ASHIFTRT)
693 && CONST_INT_P (XEXP (op, 1))
694 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
695 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
696 && UINTVAL (XEXP (op, 1)) < precision)
697 return simplify_gen_binary (LSHIFTRT, mode,
698 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
699
700 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
701      (ashift:QI (x:QI) C), where C is a suitable small constant and
702 the outer subreg is effectively a truncation to the original mode. */
703 if (GET_CODE (op) == ASHIFT
704 && CONST_INT_P (XEXP (op, 1))
705 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
706 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
707 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
708 && UINTVAL (XEXP (op, 1)) < precision)
709 return simplify_gen_binary (ASHIFT, mode,
710 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
711
712 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
713 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
714 and C2. */
715 if (GET_CODE (op) == AND
716 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
717 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
718 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
719 && CONST_INT_P (XEXP (op, 1)))
720 {
721 rtx op0 = (XEXP (XEXP (op, 0), 0));
722 rtx shift_op = XEXP (XEXP (op, 0), 1);
723 rtx mask_op = XEXP (op, 1);
724 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
725 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
726
727 if (shift < precision
728 /* If doing this transform works for an X with all bits set,
729 it works for any X. */
730 && ((GET_MODE_MASK (mode) >> shift) & mask)
731 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
732 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
733 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
734 {
735 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
736 return simplify_gen_binary (AND, mode, op0, mask_op);
737 }
738 }
739
740 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
741 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
742 changing len. */
743 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
744 && REG_P (XEXP (op, 0))
745 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
746 && CONST_INT_P (XEXP (op, 1))
747 && CONST_INT_P (XEXP (op, 2)))
748 {
749 rtx op0 = XEXP (op, 0);
750 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
751 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
752 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
753 {
754 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
755 if (op0)
756 {
757 pos -= op_precision - precision;
758 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
759 XEXP (op, 1), GEN_INT (pos));
760 }
761 }
762 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
763 {
764 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
765 if (op0)
766 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
767 XEXP (op, 1), XEXP (op, 2));
768 }
769 }
770
771 /* Recognize a word extraction from a multi-word subreg. */
772 if ((GET_CODE (op) == LSHIFTRT
773 || GET_CODE (op) == ASHIFTRT)
774 && SCALAR_INT_MODE_P (mode)
775 && SCALAR_INT_MODE_P (op_mode)
776 && precision >= BITS_PER_WORD
777 && 2 * precision <= op_precision
778 && CONST_INT_P (XEXP (op, 1))
779 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
780 && UINTVAL (XEXP (op, 1)) < op_precision)
781 {
782 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
783 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
784 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
785 (WORDS_BIG_ENDIAN
786 ? byte - shifted_bytes
787 : byte + shifted_bytes));
788 }
789
790 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
791 and try replacing the TRUNCATE and shift with it. Don't do this
792 if the MEM has a mode-dependent address. */
793 if ((GET_CODE (op) == LSHIFTRT
794 || GET_CODE (op) == ASHIFTRT)
795 && is_a <scalar_int_mode> (mode, &int_mode)
796 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
797 && MEM_P (XEXP (op, 0))
798 && CONST_INT_P (XEXP (op, 1))
799 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
800 && INTVAL (XEXP (op, 1)) > 0
801 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
802 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
803 MEM_ADDR_SPACE (XEXP (op, 0)))
804 && ! MEM_VOLATILE_P (XEXP (op, 0))
805 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
806 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
807 {
808 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
809 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
810 return adjust_address_nv (XEXP (op, 0), int_mode,
811 (WORDS_BIG_ENDIAN
812 ? byte - shifted_bytes
813 : byte + shifted_bytes));
814 }
815
816 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
817 (OP:SI foo:SI) if OP is NEG or ABS. */
818 if ((GET_CODE (op) == ABS
819 || GET_CODE (op) == NEG)
820 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
821 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
822 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
823 return simplify_gen_unary (GET_CODE (op), mode,
824 XEXP (XEXP (op, 0), 0), mode);
825
826 /* (truncate:A (subreg:B (truncate:C X) 0)) is
827 (truncate:A X). */
828 if (GET_CODE (op) == SUBREG
829 && is_a <scalar_int_mode> (mode, &int_mode)
830 && SCALAR_INT_MODE_P (op_mode)
831 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
832 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
833 && subreg_lowpart_p (op))
834 {
835 rtx inner = XEXP (SUBREG_REG (op), 0);
836 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
837 return simplify_gen_unary (TRUNCATE, int_mode, inner,
838 GET_MODE (inner));
839 else
840 /* If subreg above is paradoxical and C is narrower
841 than A, return (subreg:A (truncate:C X) 0). */
842 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
843 }
844
845 /* (truncate:A (truncate:B X)) is (truncate:A X). */
846 if (GET_CODE (op) == TRUNCATE)
847 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
848 GET_MODE (XEXP (op, 0)));
849
850   /* (truncate:A (ior X C)) is (const_int -1) if C truncates to -1
851      in mode A.  */
852 if (GET_CODE (op) == IOR
853 && SCALAR_INT_MODE_P (mode)
854 && SCALAR_INT_MODE_P (op_mode)
855 && CONST_INT_P (XEXP (op, 1))
856 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
857 return constm1_rtx;
858
859 return NULL_RTX;
860 }
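/* As an illustration of the extension case handled above,
   (truncate:QI (sign_extend:SI (reg:QI R))) simplifies to (reg:QI R),
   while (truncate:HI (sign_extend:DI (reg:QI R))) becomes
   (sign_extend:HI (reg:QI R)), since HImode is still wider than the
   original QImode operand.  */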
861 \f
862 /* Try to simplify a unary operation CODE whose output mode is to be
863 MODE with input operand OP whose mode was originally OP_MODE.
864 Return zero if no simplification can be made. */
865 rtx
866 simplify_unary_operation (enum rtx_code code, machine_mode mode,
867 rtx op, machine_mode op_mode)
868 {
869 rtx trueop, tem;
870
871 trueop = avoid_constant_pool_reference (op);
872
873 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
874 if (tem)
875 return tem;
876
877 return simplify_unary_operation_1 (code, mode, op);
878 }
879
880 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
881 to be exact. */
882
883 static bool
884 exact_int_to_float_conversion_p (const_rtx op)
885 {
886 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
887 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
888 /* Constants shouldn't reach here. */
889 gcc_assert (op0_mode != VOIDmode);
890 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
891 int in_bits = in_prec;
892 if (HWI_COMPUTABLE_MODE_P (op0_mode))
893 {
894 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
895 if (GET_CODE (op) == FLOAT)
896 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
897 else if (GET_CODE (op) == UNSIGNED_FLOAT)
898 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
899 else
900 gcc_unreachable ();
901 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
902 }
903 return in_bits <= out_bits;
904 }
905
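/* For example, SFmode has a 24-bit significand, so a FLOAT of an
   SImode value known to fit in 24 bits (for instance the result of
   (and:SI X (const_int 0xffffff))) is exact, whereas a FLOAT of an
   arbitrary SImode value generally is not.  */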
906 /* Perform some simplifications we can do even if the operands
907 aren't constant. */
908 static rtx
909 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
910 {
911 enum rtx_code reversed;
912 rtx temp, elt, base, step;
913 scalar_int_mode inner, int_mode, op_mode, op0_mode;
914
915 switch (code)
916 {
917 case NOT:
918 /* (not (not X)) == X. */
919 if (GET_CODE (op) == NOT)
920 return XEXP (op, 0);
921
922 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
923 comparison is all ones. */
924 if (COMPARISON_P (op)
925 && (mode == BImode || STORE_FLAG_VALUE == -1)
926 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
927 return simplify_gen_relational (reversed, mode, VOIDmode,
928 XEXP (op, 0), XEXP (op, 1));
929
930 /* (not (plus X -1)) can become (neg X). */
931 if (GET_CODE (op) == PLUS
932 && XEXP (op, 1) == constm1_rtx)
933 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
934
935 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
936 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
937 and MODE_VECTOR_INT. */
938 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
939 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
940 CONSTM1_RTX (mode));
941
942 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
943 if (GET_CODE (op) == XOR
944 && CONST_INT_P (XEXP (op, 1))
945 && (temp = simplify_unary_operation (NOT, mode,
946 XEXP (op, 1), mode)) != 0)
947 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
948
949 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
950 if (GET_CODE (op) == PLUS
951 && CONST_INT_P (XEXP (op, 1))
952 && mode_signbit_p (mode, XEXP (op, 1))
953 && (temp = simplify_unary_operation (NOT, mode,
954 XEXP (op, 1), mode)) != 0)
955 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
956
957
958 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
959 operands other than 1, but that is not valid. We could do a
960 similar simplification for (not (lshiftrt C X)) where C is
961 just the sign bit, but this doesn't seem common enough to
962 bother with. */
963 if (GET_CODE (op) == ASHIFT
964 && XEXP (op, 0) == const1_rtx)
965 {
966 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
967 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
968 }
969
970 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
971 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
972 so we can perform the above simplification. */
973 if (STORE_FLAG_VALUE == -1
974 && is_a <scalar_int_mode> (mode, &int_mode)
975 && GET_CODE (op) == ASHIFTRT
976 && CONST_INT_P (XEXP (op, 1))
977 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
978 return simplify_gen_relational (GE, int_mode, VOIDmode,
979 XEXP (op, 0), const0_rtx);
980
981
982 if (partial_subreg_p (op)
983 && subreg_lowpart_p (op)
984 && GET_CODE (SUBREG_REG (op)) == ASHIFT
985 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
986 {
987 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
988 rtx x;
989
990 x = gen_rtx_ROTATE (inner_mode,
991 simplify_gen_unary (NOT, inner_mode, const1_rtx,
992 inner_mode),
993 XEXP (SUBREG_REG (op), 1));
994 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
995 if (temp)
996 return temp;
997 }
998
999       /* Apply De Morgan's laws to reduce the number of patterns for machines
1000         with negating logical insns (and-not, nand, etc.).  If the result has
1001         only one NOT, put it first, since that is how the patterns are
1002 coded. */
1003 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1004 {
1005 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1006 machine_mode op_mode;
1007
1008 op_mode = GET_MODE (in1);
1009 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1010
1011 op_mode = GET_MODE (in2);
1012 if (op_mode == VOIDmode)
1013 op_mode = mode;
1014 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1015
1016 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1017 std::swap (in1, in2);
1018
1019 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1020 mode, in1, in2);
1021 }
1022
1023 /* (not (bswap x)) -> (bswap (not x)). */
1024 if (GET_CODE (op) == BSWAP)
1025 {
1026 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1027 return simplify_gen_unary (BSWAP, mode, x, mode);
1028 }
1029 break;
1030
1031 case NEG:
1032 /* (neg (neg X)) == X. */
1033 if (GET_CODE (op) == NEG)
1034 return XEXP (op, 0);
1035
1036 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1037         If the comparison is not reversible, use
1038         x ? y : (neg y).  */
1039 if (GET_CODE (op) == IF_THEN_ELSE)
1040 {
1041 rtx cond = XEXP (op, 0);
1042 rtx true_rtx = XEXP (op, 1);
1043 rtx false_rtx = XEXP (op, 2);
1044
1045 if ((GET_CODE (true_rtx) == NEG
1046 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1047 || (GET_CODE (false_rtx) == NEG
1048 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1049 {
1050 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1051 temp = reversed_comparison (cond, mode);
1052 else
1053 {
1054 temp = cond;
1055 std::swap (true_rtx, false_rtx);
1056 }
1057 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1058 mode, temp, true_rtx, false_rtx);
1059 }
1060 }
1061
1062 /* (neg (plus X 1)) can become (not X). */
1063 if (GET_CODE (op) == PLUS
1064 && XEXP (op, 1) == const1_rtx)
1065 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1066
1067 /* Similarly, (neg (not X)) is (plus X 1). */
1068 if (GET_CODE (op) == NOT)
1069 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1070 CONST1_RTX (mode));
1071
1072 /* (neg (minus X Y)) can become (minus Y X). This transformation
1073         isn't safe for modes with signed zeros: if X and Y are both
1074         +0, (minus Y X) is +0 but (neg (minus X Y)) is -0.  Also, if the
1075         rounding mode is towards +infinity (or -infinity) then the two
1076 expressions will be rounded differently. */
1077 if (GET_CODE (op) == MINUS
1078 && !HONOR_SIGNED_ZEROS (mode)
1079 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1080 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1081
1082 if (GET_CODE (op) == PLUS
1083 && !HONOR_SIGNED_ZEROS (mode)
1084 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1085 {
1086 /* (neg (plus A C)) is simplified to (minus -C A). */
1087 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1088 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1089 {
1090 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1091 if (temp)
1092 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1093 }
1094
1095 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1096 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1097 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1098 }
1099
1100 /* (neg (mult A B)) becomes (mult A (neg B)).
1101 This works even for floating-point values. */
1102 if (GET_CODE (op) == MULT
1103 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1104 {
1105 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1106 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1107 }
1108
1109 /* NEG commutes with ASHIFT since it is multiplication. Only do
1110 this if we can then eliminate the NEG (e.g., if the operand
1111 is a constant). */
1112 if (GET_CODE (op) == ASHIFT)
1113 {
1114 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1115 if (temp)
1116 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1117 }
1118
1119 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1120 C is equal to the width of MODE minus 1. */
1121 if (GET_CODE (op) == ASHIFTRT
1122 && CONST_INT_P (XEXP (op, 1))
1123 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1124 return simplify_gen_binary (LSHIFTRT, mode,
1125 XEXP (op, 0), XEXP (op, 1));
1126
1127 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1128 C is equal to the width of MODE minus 1. */
1129 if (GET_CODE (op) == LSHIFTRT
1130 && CONST_INT_P (XEXP (op, 1))
1131 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1132 return simplify_gen_binary (ASHIFTRT, mode,
1133 XEXP (op, 0), XEXP (op, 1));
1134
1135 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1136 if (GET_CODE (op) == XOR
1137 && XEXP (op, 1) == const1_rtx
1138 && nonzero_bits (XEXP (op, 0), mode) == 1)
1139 return plus_constant (mode, XEXP (op, 0), -1);
1140
1141 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1142 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1143 if (GET_CODE (op) == LT
1144 && XEXP (op, 1) == const0_rtx
1145 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1146 {
1147 int_mode = as_a <scalar_int_mode> (mode);
1148 int isize = GET_MODE_PRECISION (inner);
1149 if (STORE_FLAG_VALUE == 1)
1150 {
1151 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1152 gen_int_shift_amount (inner,
1153 isize - 1));
1154 if (int_mode == inner)
1155 return temp;
1156 if (GET_MODE_PRECISION (int_mode) > isize)
1157 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1158 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1159 }
1160 else if (STORE_FLAG_VALUE == -1)
1161 {
1162 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1163 gen_int_shift_amount (inner,
1164 isize - 1));
1165 if (int_mode == inner)
1166 return temp;
1167 if (GET_MODE_PRECISION (int_mode) > isize)
1168 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1169 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1170 }
1171 }
1172
1173 if (vec_series_p (op, &base, &step))
1174 {
1175 /* Only create a new series if we can simplify both parts. In other
1176 cases this isn't really a simplification, and it's not necessarily
1177 a win to replace a vector operation with a scalar operation. */
1178 scalar_mode inner_mode = GET_MODE_INNER (mode);
1179 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1180 if (base)
1181 {
1182 step = simplify_unary_operation (NEG, inner_mode,
1183 step, inner_mode);
1184 if (step)
1185 return gen_vec_series (mode, base, step);
1186 }
1187 }
1188 break;
1189
1190 case TRUNCATE:
1191 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1192 with the umulXi3_highpart patterns. */
1193 if (GET_CODE (op) == LSHIFTRT
1194 && GET_CODE (XEXP (op, 0)) == MULT)
1195 break;
1196
1197 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1198 {
1199 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1200 {
1201 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1202 if (temp)
1203 return temp;
1204 }
1205 /* We can't handle truncation to a partial integer mode here
1206 because we don't know the real bitsize of the partial
1207 integer mode. */
1208 break;
1209 }
1210
1211 if (GET_MODE (op) != VOIDmode)
1212 {
1213 temp = simplify_truncation (mode, op, GET_MODE (op));
1214 if (temp)
1215 return temp;
1216 }
1217
1218 /* If we know that the value is already truncated, we can
1219 replace the TRUNCATE with a SUBREG. */
1220 if (known_eq (GET_MODE_NUNITS (mode), 1)
1221 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1222 || truncated_to_mode (mode, op)))
1223 {
1224 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1225 if (temp)
1226 return temp;
1227 }
1228
1229 /* A truncate of a comparison can be replaced with a subreg if
1230 STORE_FLAG_VALUE permits. This is like the previous test,
1231 but it works even if the comparison is done in a mode larger
1232 than HOST_BITS_PER_WIDE_INT. */
1233 if (HWI_COMPUTABLE_MODE_P (mode)
1234 && COMPARISON_P (op)
1235 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1236 {
1237 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1238 if (temp)
1239 return temp;
1240 }
1241
1242 /* A truncate of a memory is just loading the low part of the memory
1243 if we are not changing the meaning of the address. */
1244 if (GET_CODE (op) == MEM
1245 && !VECTOR_MODE_P (mode)
1246 && !MEM_VOLATILE_P (op)
1247 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1248 {
1249 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1250 if (temp)
1251 return temp;
1252 }
1253
1254 break;
1255
1256 case FLOAT_TRUNCATE:
1257 if (DECIMAL_FLOAT_MODE_P (mode))
1258 break;
1259
1260 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1261 if (GET_CODE (op) == FLOAT_EXTEND
1262 && GET_MODE (XEXP (op, 0)) == mode)
1263 return XEXP (op, 0);
1264
1265 /* (float_truncate:SF (float_truncate:DF foo:XF))
1266 = (float_truncate:SF foo:XF).
1267         This may eliminate double rounding, so it is unsafe (done only
             when unsafe math optimizations are allowed).
1268
1269 (float_truncate:SF (float_extend:XF foo:DF))
1270 = (float_truncate:SF foo:DF).
1271
1272 (float_truncate:DF (float_extend:XF foo:SF))
1273 = (float_extend:DF foo:SF). */
1274 if ((GET_CODE (op) == FLOAT_TRUNCATE
1275 && flag_unsafe_math_optimizations)
1276 || GET_CODE (op) == FLOAT_EXTEND)
1277 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1278 > GET_MODE_UNIT_SIZE (mode)
1279 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1280 mode,
1281 XEXP (op, 0), mode);
1282
1283 /* (float_truncate (float x)) is (float x) */
1284 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1285 && (flag_unsafe_math_optimizations
1286 || exact_int_to_float_conversion_p (op)))
1287 return simplify_gen_unary (GET_CODE (op), mode,
1288 XEXP (op, 0),
1289 GET_MODE (XEXP (op, 0)));
1290
1291 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1292 (OP:SF foo:SF) if OP is NEG or ABS. */
1293 if ((GET_CODE (op) == ABS
1294 || GET_CODE (op) == NEG)
1295 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1296 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1297 return simplify_gen_unary (GET_CODE (op), mode,
1298 XEXP (XEXP (op, 0), 0), mode);
1299
1300 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1301 is (float_truncate:SF x). */
1302 if (GET_CODE (op) == SUBREG
1303 && subreg_lowpart_p (op)
1304 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1305 return SUBREG_REG (op);
1306 break;
1307
1308 case FLOAT_EXTEND:
1309 if (DECIMAL_FLOAT_MODE_P (mode))
1310 break;
1311
1312 /* (float_extend (float_extend x)) is (float_extend x)
1313
1314 (float_extend (float x)) is (float x) assuming that double
1315 rounding can't happen.
1316 */
1317 if (GET_CODE (op) == FLOAT_EXTEND
1318 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1319 && exact_int_to_float_conversion_p (op)))
1320 return simplify_gen_unary (GET_CODE (op), mode,
1321 XEXP (op, 0),
1322 GET_MODE (XEXP (op, 0)));
1323
1324 break;
1325
1326 case ABS:
1327 /* (abs (neg <foo>)) -> (abs <foo>) */
1328 if (GET_CODE (op) == NEG)
1329 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1330 GET_MODE (XEXP (op, 0)));
1331
1332 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1333 do nothing. */
1334 if (GET_MODE (op) == VOIDmode)
1335 break;
1336
1337 /* If operand is something known to be positive, ignore the ABS. */
1338 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1339 || val_signbit_known_clear_p (GET_MODE (op),
1340 nonzero_bits (op, GET_MODE (op))))
1341 return op;
1342
1343 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1344 if (is_a <scalar_int_mode> (mode, &int_mode)
1345 && (num_sign_bit_copies (op, int_mode)
1346 == GET_MODE_PRECISION (int_mode)))
1347 return gen_rtx_NEG (int_mode, op);
1348
1349 break;
1350
1351 case FFS:
1352 /* (ffs (*_extend <X>)) = (ffs <X>) */
1353 if (GET_CODE (op) == SIGN_EXTEND
1354 || GET_CODE (op) == ZERO_EXTEND)
1355 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1356 GET_MODE (XEXP (op, 0)));
1357 break;
1358
1359 case POPCOUNT:
1360 switch (GET_CODE (op))
1361 {
1362 case BSWAP:
1363 case ZERO_EXTEND:
1364 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1365 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1366 GET_MODE (XEXP (op, 0)));
1367
1368 case ROTATE:
1369 case ROTATERT:
1370 /* Rotations don't affect popcount. */
1371 if (!side_effects_p (XEXP (op, 1)))
1372 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1373 GET_MODE (XEXP (op, 0)));
1374 break;
1375
1376 default:
1377 break;
1378 }
1379 break;
1380
1381 case PARITY:
1382 switch (GET_CODE (op))
1383 {
1384 case NOT:
1385 case BSWAP:
1386 case ZERO_EXTEND:
1387 case SIGN_EXTEND:
1388 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1389 GET_MODE (XEXP (op, 0)));
1390
1391 case ROTATE:
1392 case ROTATERT:
1393 /* Rotations don't affect parity. */
1394 if (!side_effects_p (XEXP (op, 1)))
1395 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1396 GET_MODE (XEXP (op, 0)));
1397 break;
1398
1399 default:
1400 break;
1401 }
1402 break;
1403
1404 case BSWAP:
1405 /* (bswap (bswap x)) -> x. */
1406 if (GET_CODE (op) == BSWAP)
1407 return XEXP (op, 0);
1408 break;
1409
1410 case FLOAT:
1411 /* (float (sign_extend <X>)) = (float <X>). */
1412 if (GET_CODE (op) == SIGN_EXTEND)
1413 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1414 GET_MODE (XEXP (op, 0)));
1415 break;
1416
1417 case SIGN_EXTEND:
1418 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1419 becomes just the MINUS if its mode is MODE. This allows
1420 folding switch statements on machines using casesi (such as
1421 the VAX). */
1422 if (GET_CODE (op) == TRUNCATE
1423 && GET_MODE (XEXP (op, 0)) == mode
1424 && GET_CODE (XEXP (op, 0)) == MINUS
1425 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1426 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1427 return XEXP (op, 0);
1428
1429 /* Extending a widening multiplication should be canonicalized to
1430 a wider widening multiplication. */
1431 if (GET_CODE (op) == MULT)
1432 {
1433 rtx lhs = XEXP (op, 0);
1434 rtx rhs = XEXP (op, 1);
1435 enum rtx_code lcode = GET_CODE (lhs);
1436 enum rtx_code rcode = GET_CODE (rhs);
1437
1438 /* Widening multiplies usually extend both operands, but sometimes
1439 they use a shift to extract a portion of a register. */
1440 if ((lcode == SIGN_EXTEND
1441 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1442 && (rcode == SIGN_EXTEND
1443 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1444 {
1445 machine_mode lmode = GET_MODE (lhs);
1446 machine_mode rmode = GET_MODE (rhs);
1447 int bits;
1448
1449 if (lcode == ASHIFTRT)
1450 /* Number of bits not shifted off the end. */
1451 bits = (GET_MODE_UNIT_PRECISION (lmode)
1452 - INTVAL (XEXP (lhs, 1)));
1453 else /* lcode == SIGN_EXTEND */
1454 /* Size of inner mode. */
1455 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1456
1457 if (rcode == ASHIFTRT)
1458 bits += (GET_MODE_UNIT_PRECISION (rmode)
1459 - INTVAL (XEXP (rhs, 1)));
1460 else /* rcode == SIGN_EXTEND */
1461 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1462
1463              /* We can only widen multiplies if the result is mathematically
1464                 equivalent, i.e. if overflow was impossible.  */
1465 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1466 return simplify_gen_binary
1467 (MULT, mode,
1468 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1469 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1470 }
1471 }
1472
1473 /* Check for a sign extension of a subreg of a promoted
1474 variable, where the promotion is sign-extended, and the
1475 target mode is the same as the variable's promotion. */
1476 if (GET_CODE (op) == SUBREG
1477 && SUBREG_PROMOTED_VAR_P (op)
1478 && SUBREG_PROMOTED_SIGNED_P (op)
1479 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1480 {
1481 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1482 if (temp)
1483 return temp;
1484 }
1485
1486 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1487 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1488 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1489 {
1490 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1491 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1492 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1493 GET_MODE (XEXP (op, 0)));
1494 }
1495
1496 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1497         is (sign_extend:M (subreg:O <X>)) if there is a mode with
1498 GET_MODE_BITSIZE (N) - I bits.
1499 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1500 is similarly (zero_extend:M (subreg:O <X>)). */
1501 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1502 && GET_CODE (XEXP (op, 0)) == ASHIFT
1503 && is_a <scalar_int_mode> (mode, &int_mode)
1504 && CONST_INT_P (XEXP (op, 1))
1505 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1506 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1507 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1508 {
1509 scalar_int_mode tmode;
1510 gcc_assert (GET_MODE_BITSIZE (int_mode)
1511 > GET_MODE_BITSIZE (op_mode));
1512 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1513 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1514 {
1515 rtx inner =
1516 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1517 if (inner)
1518 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1519 ? SIGN_EXTEND : ZERO_EXTEND,
1520 int_mode, inner, tmode);
1521 }
1522 }
1523
1524 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1525 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1526 if (GET_CODE (op) == LSHIFTRT
1527 && CONST_INT_P (XEXP (op, 1))
1528 && XEXP (op, 1) != const0_rtx)
1529 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1530
1531 #if defined(POINTERS_EXTEND_UNSIGNED)
1532 /* As we do not know which address space the pointer is referring to,
1533 we can do this only if the target does not support different pointer
1534 or address modes depending on the address space. */
1535 if (target_default_pointer_address_modes_p ()
1536 && ! POINTERS_EXTEND_UNSIGNED
1537 && mode == Pmode && GET_MODE (op) == ptr_mode
1538 && (CONSTANT_P (op)
1539 || (GET_CODE (op) == SUBREG
1540 && REG_P (SUBREG_REG (op))
1541 && REG_POINTER (SUBREG_REG (op))
1542 && GET_MODE (SUBREG_REG (op)) == Pmode))
1543 && !targetm.have_ptr_extend ())
1544 {
1545 temp
1546 = convert_memory_address_addr_space_1 (Pmode, op,
1547 ADDR_SPACE_GENERIC, false,
1548 true);
1549 if (temp)
1550 return temp;
1551 }
1552 #endif
1553 break;
1554
1555 case ZERO_EXTEND:
1556 /* Check for a zero extension of a subreg of a promoted
1557 variable, where the promotion is zero-extended, and the
1558 target mode is the same as the variable's promotion. */
1559 if (GET_CODE (op) == SUBREG
1560 && SUBREG_PROMOTED_VAR_P (op)
1561 && SUBREG_PROMOTED_UNSIGNED_P (op)
1562 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1563 {
1564 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1565 if (temp)
1566 return temp;
1567 }
1568
1569 /* Extending a widening multiplication should be canonicalized to
1570 a wider widening multiplication. */
1571 if (GET_CODE (op) == MULT)
1572 {
1573 rtx lhs = XEXP (op, 0);
1574 rtx rhs = XEXP (op, 1);
1575 enum rtx_code lcode = GET_CODE (lhs);
1576 enum rtx_code rcode = GET_CODE (rhs);
1577
1578 /* Widening multiplies usually extend both operands, but sometimes
1579 they use a shift to extract a portion of a register. */
1580 if ((lcode == ZERO_EXTEND
1581 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1582 && (rcode == ZERO_EXTEND
1583 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1584 {
1585 machine_mode lmode = GET_MODE (lhs);
1586 machine_mode rmode = GET_MODE (rhs);
1587 int bits;
1588
1589 if (lcode == LSHIFTRT)
1590 /* Number of bits not shifted off the end. */
1591 bits = (GET_MODE_UNIT_PRECISION (lmode)
1592 - INTVAL (XEXP (lhs, 1)));
1593 else /* lcode == ZERO_EXTEND */
1594 /* Size of inner mode. */
1595 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1596
1597 if (rcode == LSHIFTRT)
1598 bits += (GET_MODE_UNIT_PRECISION (rmode)
1599 - INTVAL (XEXP (rhs, 1)));
1600 else /* rcode == ZERO_EXTEND */
1601 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1602
1603              /* We can only widen multiplies if the result is mathematically
1604                 equivalent, i.e. if overflow was impossible.  */
1605 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1606 return simplify_gen_binary
1607 (MULT, mode,
1608 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1609 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1610 }
1611 }
1612
1613 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1614 if (GET_CODE (op) == ZERO_EXTEND)
1615 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1616 GET_MODE (XEXP (op, 0)));
1617
1618 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1619         is (zero_extend:M (subreg:O <X>)) if there is a mode with
1620 GET_MODE_PRECISION (N) - I bits. */
1621 if (GET_CODE (op) == LSHIFTRT
1622 && GET_CODE (XEXP (op, 0)) == ASHIFT
1623 && is_a <scalar_int_mode> (mode, &int_mode)
1624 && CONST_INT_P (XEXP (op, 1))
1625 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1626 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1627 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1628 {
1629 scalar_int_mode tmode;
1630 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1631 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1632 {
1633 rtx inner =
1634 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1635 if (inner)
1636 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1637 inner, tmode);
1638 }
1639 }
1640
1641 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1642 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1643 of mode N. E.g.
1644 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1645 (and:SI (reg:SI) (const_int 63)). */
1646 if (partial_subreg_p (op)
1647 && is_a <scalar_int_mode> (mode, &int_mode)
1648 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1649 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1650 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1651 && subreg_lowpart_p (op)
1652 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1653 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1654 {
1655 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1656 return SUBREG_REG (op);
1657 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1658 op0_mode);
1659 }
1660
1661 #if defined(POINTERS_EXTEND_UNSIGNED)
1662 /* As we do not know which address space the pointer is referring to,
1663 we can do this only if the target does not support different pointer
1664 or address modes depending on the address space. */
1665 if (target_default_pointer_address_modes_p ()
1666 && POINTERS_EXTEND_UNSIGNED > 0
1667 && mode == Pmode && GET_MODE (op) == ptr_mode
1668 && (CONSTANT_P (op)
1669 || (GET_CODE (op) == SUBREG
1670 && REG_P (SUBREG_REG (op))
1671 && REG_POINTER (SUBREG_REG (op))
1672 && GET_MODE (SUBREG_REG (op)) == Pmode))
1673 && !targetm.have_ptr_extend ())
1674 {
1675 temp
1676 = convert_memory_address_addr_space_1 (Pmode, op,
1677 ADDR_SPACE_GENERIC, false,
1678 true);
1679 if (temp)
1680 return temp;
1681 }
1682 #endif
1683 break;
1684
1685 default:
1686 break;
1687 }
1688
1689 if (VECTOR_MODE_P (mode)
1690 && vec_duplicate_p (op, &elt)
1691 && code != VEC_DUPLICATE)
1692 {
1693 /* Try applying the operator to ELT and see if that simplifies.
1694 We can duplicate the result if so.
1695
1696 The reason we don't use simplify_gen_unary is that it isn't
1697 necessarily a win to convert things like:
1698
1699 (neg:V (vec_duplicate:V (reg:S R)))
1700
1701 to:
1702
1703 (vec_duplicate:V (neg:S (reg:S R)))
1704
1705 The first might be done entirely in vector registers while the
1706 second might need a move between register files. */
1707 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1708 elt, GET_MODE_INNER (GET_MODE (op)));
1709 if (temp)
1710 return gen_vec_duplicate (mode, temp);
1711 }
1712
1713 return 0;
1714 }
1715
1716 /* Try to compute the value of a unary operation CODE whose output mode is to
1717 be MODE with input operand OP whose mode was originally OP_MODE.
1718 Return zero if the value cannot be computed. */
1719 rtx
1720 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1721 rtx op, machine_mode op_mode)
1722 {
1723 scalar_int_mode result_mode;
1724
1725 if (code == VEC_DUPLICATE)
1726 {
1727 gcc_assert (VECTOR_MODE_P (mode));
1728 if (GET_MODE (op) != VOIDmode)
1729 {
1730 if (!VECTOR_MODE_P (GET_MODE (op)))
1731 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1732 else
1733 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1734 (GET_MODE (op)));
1735 }
1736 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1737 return gen_const_vec_duplicate (mode, op);
1738 unsigned int n_elts;
1739 if (GET_CODE (op) == CONST_VECTOR
1740 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
1741 {
1742 /* This must be constant if we're duplicating it to a constant
1743 number of elements. */
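/* For example, duplicating (const_vector:V2SI [1 2]) into V4SImode
   yields (const_vector:V4SI [1 2 1 2]).  */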
1744 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op).to_constant ();
1745 gcc_assert (in_n_elts < n_elts);
1746 gcc_assert ((n_elts % in_n_elts) == 0);
1747 rtvec v = rtvec_alloc (n_elts);
1748 for (unsigned i = 0; i < n_elts; i++)
1749 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1750 return gen_rtx_CONST_VECTOR (mode, v);
1751 }
1752 }
1753
1754 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1755 {
1756 unsigned int n_elts;
1757 if (!CONST_VECTOR_NUNITS (op).is_constant (&n_elts))
1758 return NULL_RTX;
1759
1760 machine_mode opmode = GET_MODE (op);
1761 gcc_assert (known_eq (GET_MODE_NUNITS (mode), n_elts));
1762 gcc_assert (known_eq (GET_MODE_NUNITS (opmode), n_elts));
1763
1764 rtvec v = rtvec_alloc (n_elts);
1765 unsigned int i;
1766
1767 for (i = 0; i < n_elts; i++)
1768 {
1769 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1770 CONST_VECTOR_ELT (op, i),
1771 GET_MODE_INNER (opmode));
1772 if (!x || !valid_for_const_vector_p (mode, x))
1773 return 0;
1774 RTVEC_ELT (v, i) = x;
1775 }
1776 return gen_rtx_CONST_VECTOR (mode, v);
1777 }
1778
1779 /* The order of these tests is critical so that, for example, we don't
1780 check the wrong mode (input vs. output) for a conversion operation,
1781 such as FIX. At some point, this should be simplified. */
1782
1783 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1784 {
1785 REAL_VALUE_TYPE d;
1786
1787 if (op_mode == VOIDmode)
1788 {
1789 /* CONST_INTs have VOIDmode as the mode. We assume that all
1790 the bits of the constant are significant, though this is
1791 a dangerous assumption, as CONST_INTs are often
1792 created and used with garbage in the bits outside of the
1793 precision of the implied mode of the const_int. */
1794 op_mode = MAX_MODE_INT;
1795 }
1796
1797 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1798
1799 /* Avoid the folding if flag_signaling_nans is on and
1800 operand is a signaling NaN. */
1801 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1802 return 0;
1803
1804 d = real_value_truncate (mode, d);
1805 return const_double_from_real_value (d, mode);
1806 }
1807 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1808 {
1809 REAL_VALUE_TYPE d;
1810
1811 if (op_mode == VOIDmode)
1812 {
1813 /* CONST_INTs have VOIDmode as the mode. We assume that all
1814 the bits of the constant are significant, though this is
1815 a dangerous assumption, as CONST_INTs are often
1816 created and used with garbage in the bits outside of the
1817 precision of the implied mode of the const_int. */
1818 op_mode = MAX_MODE_INT;
1819 }
1820
1821 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1822
1823 /* Avoid the folding if flag_signaling_nans is on and
1824 operand is a signaling NaN. */
1825 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1826 return 0;
1827
1828 d = real_value_truncate (mode, d);
1829 return const_double_from_real_value (d, mode);
1830 }
1831
1832 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1833 {
1834 unsigned int width = GET_MODE_PRECISION (result_mode);
1835 wide_int result;
1836 scalar_int_mode imode = (op_mode == VOIDmode
1837 ? result_mode
1838 : as_a <scalar_int_mode> (op_mode));
1839 rtx_mode_t op0 = rtx_mode_t (op, imode);
1840 int int_value;
1841
1842 #if TARGET_SUPPORTS_WIDE_INT == 0
1843 /* This assert keeps the simplification from producing a result
1844 that cannot be represented in a CONST_DOUBLE, but a lot of
1845 upstream callers expect that this function never fails to
1846 simplify something, so if this check were added to the test
1847 above, the code would die later anyway. If this assert
1848 fires, you just need to make the port support wide int. */
1849 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1850 #endif
1851
1852 switch (code)
1853 {
1854 case NOT:
1855 result = wi::bit_not (op0);
1856 break;
1857
1858 case NEG:
1859 result = wi::neg (op0);
1860 break;
1861
1862 case ABS:
1863 result = wi::abs (op0);
1864 break;
1865
1866 case FFS:
1867 result = wi::shwi (wi::ffs (op0), result_mode);
1868 break;
1869
1870 case CLZ:
1871 if (wi::ne_p (op0, 0))
1872 int_value = wi::clz (op0);
1873 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1874 return NULL_RTX;
1875 result = wi::shwi (int_value, result_mode);
1876 break;
1877
1878 case CLRSB:
1879 result = wi::shwi (wi::clrsb (op0), result_mode);
1880 break;
1881
1882 case CTZ:
1883 if (wi::ne_p (op0, 0))
1884 int_value = wi::ctz (op0);
1885 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1886 return NULL_RTX;
1887 result = wi::shwi (int_value, result_mode);
1888 break;
1889
1890 case POPCOUNT:
1891 result = wi::shwi (wi::popcount (op0), result_mode);
1892 break;
1893
1894 case PARITY:
1895 result = wi::shwi (wi::parity (op0), result_mode);
1896 break;
1897
1898 case BSWAP:
1899 result = wide_int (op0).bswap ();
1900 break;
1901
1902 case TRUNCATE:
1903 case ZERO_EXTEND:
1904 result = wide_int::from (op0, width, UNSIGNED);
1905 break;
1906
1907 case SIGN_EXTEND:
1908 result = wide_int::from (op0, width, SIGNED);
1909 break;
1910
1911 case SQRT:
1912 default:
1913 return 0;
1914 }
1915
1916 return immed_wide_int_const (result, result_mode);
1917 }
1918
1919 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1920 && SCALAR_FLOAT_MODE_P (mode)
1921 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1922 {
1923 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1924 switch (code)
1925 {
1926 case SQRT:
1927 return 0;
1928 case ABS:
1929 d = real_value_abs (&d);
1930 break;
1931 case NEG:
1932 d = real_value_negate (&d);
1933 break;
1934 case FLOAT_TRUNCATE:
1935 /* Don't perform the operation if flag_signaling_nans is on
1936 and the operand is a signaling NaN. */
1937 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1938 return NULL_RTX;
1939 d = real_value_truncate (mode, d);
1940 break;
1941 case FLOAT_EXTEND:
1942 /* Don't perform the operation if flag_signaling_nans is on
1943 and the operand is a signaling NaN. */
1944 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1945 return NULL_RTX;
1946 /* All this does is change the mode, unless changing
1947 mode class. */
1948 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1949 real_convert (&d, mode, &d);
1950 break;
1951 case FIX:
1952 /* Don't perform the operation if flag_signaling_nans is on
1953 and the operand is a signaling NaN. */
1954 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1955 return NULL_RTX;
1956 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1957 break;
1958 case NOT:
1959 {
1960 long tmp[4];
1961 int i;
1962
1963 real_to_target (tmp, &d, GET_MODE (op));
1964 for (i = 0; i < 4; i++)
1965 tmp[i] = ~tmp[i];
1966 real_from_target (&d, tmp, mode);
1967 break;
1968 }
1969 default:
1970 gcc_unreachable ();
1971 }
1972 return const_double_from_real_value (d, mode);
1973 }
1974 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1975 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1976 && is_int_mode (mode, &result_mode))
1977 {
1978 unsigned int width = GET_MODE_PRECISION (result_mode);
1979 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1980 operators are intentionally left unspecified (to ease implementation
1981 by target backends), for consistency, this routine implements the
1982 same semantics for constant folding as used by the middle-end. */
1983
1984 /* This was formerly used only for non-IEEE float.
1985 eggert@twinsun.com says it is safe for IEEE also. */
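/* As an illustration of the saturating behaviour below: in SImode,
   (fix:SI) of 1.0e10 folds to 2147483647, of -1.0e10 to -2147483648,
   and of a NaN to 0.  */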
1986 REAL_VALUE_TYPE t;
1987 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1988 wide_int wmax, wmin;
1989 /* This is part of the ABI of real_to_integer, but we check
1990 things before making this call. */
1991 bool fail;
1992
1993 switch (code)
1994 {
1995 case FIX:
1996 if (REAL_VALUE_ISNAN (*x))
1997 return const0_rtx;
1998
1999 /* Test against the signed upper bound. */
2000 wmax = wi::max_value (width, SIGNED);
2001 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2002 if (real_less (&t, x))
2003 return immed_wide_int_const (wmax, mode);
2004
2005 /* Test against the signed lower bound. */
2006 wmin = wi::min_value (width, SIGNED);
2007 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2008 if (real_less (x, &t))
2009 return immed_wide_int_const (wmin, mode);
2010
2011 return immed_wide_int_const (real_to_integer (x, &fail, width),
2012 mode);
2013
2014 case UNSIGNED_FIX:
2015 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2016 return const0_rtx;
2017
2018 /* Test against the unsigned upper bound. */
2019 wmax = wi::max_value (width, UNSIGNED);
2020 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2021 if (real_less (&t, x))
2022 return immed_wide_int_const (wmax, mode);
2023
2024 return immed_wide_int_const (real_to_integer (x, &fail, width),
2025 mode);
2026
2027 default:
2028 gcc_unreachable ();
2029 }
2030 }
2031
2032 /* Handle polynomial integers. */
2033 else if (CONST_POLY_INT_P (op))
2034 {
2035 poly_wide_int result;
2036 switch (code)
2037 {
2038 case NEG:
2039 result = -const_poly_int_value (op);
2040 break;
2041
2042 case NOT:
2043 result = ~const_poly_int_value (op);
2044 break;
2045
2046 default:
2047 return NULL_RTX;
2048 }
2049 return immed_wide_int_const (result, mode);
2050 }
2051
2052 return NULL_RTX;
2053 }
2054 \f
2055 /* Subroutine of simplify_binary_operation to simplify a binary operation
2056 CODE that can commute with byte swapping, with result mode MODE and
2057 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2058 Return zero if no simplification or canonicalization is possible. */
2059
2060 static rtx
2061 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2062 rtx op0, rtx op1)
2063 {
2064 rtx tem;
2065
2066 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
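/* E.g., in SImode, (and (bswap:SI x) (const_int 0xff)) becomes
   (bswap:SI (and:SI x (const_int 0xff000000))).  */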
2067 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2068 {
2069 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2070 simplify_gen_unary (BSWAP, mode, op1, mode));
2071 return simplify_gen_unary (BSWAP, mode, tem, mode);
2072 }
2073
2074 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2075 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2076 {
2077 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2078 return simplify_gen_unary (BSWAP, mode, tem, mode);
2079 }
2080
2081 return NULL_RTX;
2082 }
2083
2084 /* Subroutine of simplify_binary_operation to simplify a commutative,
2085 associative binary operation CODE with result mode MODE, operating
2086 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2087 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2088 canonicalization is possible. */
2089
2090 static rtx
2091 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2092 rtx op0, rtx op1)
2093 {
2094 rtx tem;
2095
2096 /* Linearize the operator to the left. */
2097 if (GET_CODE (op1) == code)
2098 {
2099 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2100 if (GET_CODE (op0) == code)
2101 {
2102 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2103 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2104 }
2105
2106 /* "a op (b op c)" becomes "(b op c) op a". */
2107 if (! swap_commutative_operands_p (op1, op0))
2108 return simplify_gen_binary (code, mode, op1, op0);
2109
2110 std::swap (op0, op1);
2111 }
2112
2113 if (GET_CODE (op0) == code)
2114 {
2115 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
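/* E.g., (plus (plus (reg:SI r1) (const_int 4)) (reg:SI r2)) becomes
   (plus (plus (reg:SI r1) (reg:SI r2)) (const_int 4)), assuming the
   usual canonical order that places constants last.  */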
2116 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2117 {
2118 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2119 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2120 }
2121
2122 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2123 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2124 if (tem != 0)
2125 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2126
2127 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2128 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2129 if (tem != 0)
2130 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2131 }
2132
2133 return 0;
2134 }
2135
2136
2137 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2138 and OP1. Return 0 if no simplification is possible.
2139
2140 Don't use this for relational operations such as EQ or LT.
2141 Use simplify_relational_operation instead. */
2142 rtx
2143 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2144 rtx op0, rtx op1)
2145 {
2146 rtx trueop0, trueop1;
2147 rtx tem;
2148
2149 /* Relational operations don't work here. We must know the mode
2150 of the operands in order to do the comparison correctly.
2151 Assuming a full word can give incorrect results.
2152 Consider comparing 128 with -128 in QImode. */
2153 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2154 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2155
2156 /* Make sure the constant is second. */
2157 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2158 && swap_commutative_operands_p (op0, op1))
2159 std::swap (op0, op1);
2160
2161 trueop0 = avoid_constant_pool_reference (op0);
2162 trueop1 = avoid_constant_pool_reference (op1);
2163
2164 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2165 if (tem)
2166 return tem;
2167 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2168
2169 if (tem)
2170 return tem;
2171
2172 /* If the above steps did not result in a simplification and op0 or op1
2173 were constant pool references, use the referenced constants directly. */
2174 if (trueop0 != op0 || trueop1 != op1)
2175 return simplify_gen_binary (code, mode, trueop0, trueop1);
2176
2177 return NULL_RTX;
2178 }
2179
2180 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2181 which OP0 and OP1 are both vector series or vector duplicates
2182 (which are really just series with a step of 0). If so, try to
2183 form a new series by applying CODE to the bases and to the steps.
2184 Return null if no simplification is possible.
2185
2186 MODE is the mode of the operation and is known to be a vector
2187 integer mode. */
2188
2189 static rtx
2190 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2191 rtx op0, rtx op1)
2192 {
2193 rtx base0, step0;
2194 if (vec_duplicate_p (op0, &base0))
2195 step0 = const0_rtx;
2196 else if (!vec_series_p (op0, &base0, &step0))
2197 return NULL_RTX;
2198
2199 rtx base1, step1;
2200 if (vec_duplicate_p (op1, &base1))
2201 step1 = const0_rtx;
2202 else if (!vec_series_p (op1, &base1, &step1))
2203 return NULL_RTX;
2204
2205 /* Only create a new series if we can simplify both parts. In other
2206 cases this isn't really a simplification, and it's not necessarily
2207 a win to replace a vector operation with a scalar operation. */
2208 scalar_mode inner_mode = GET_MODE_INNER (mode);
2209 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2210 if (!new_base)
2211 return NULL_RTX;
2212
2213 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2214 if (!new_step)
2215 return NULL_RTX;
2216
2217 return gen_vec_series (mode, new_base, new_step);
2218 }
2219
2220 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2221 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2222 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2223 actual constants. */
2224
2225 static rtx
2226 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2227 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2228 {
2229 rtx tem, reversed, opleft, opright, elt0, elt1;
2230 HOST_WIDE_INT val;
2231 scalar_int_mode int_mode, inner_mode;
2232 poly_int64 offset;
2233
2234 /* Even if we can't compute a constant result,
2235 there are some cases worth simplifying. */
2236
2237 switch (code)
2238 {
2239 case PLUS:
2240 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2241 when x is NaN, infinite, or finite and nonzero. They aren't
2242 when x is -0 and the rounding mode is not towards -infinity,
2243 since (-0) + 0 is then 0. */
2244 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2245 return op0;
2246
2247 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2248 transformations are safe even for IEEE. */
2249 if (GET_CODE (op0) == NEG)
2250 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2251 else if (GET_CODE (op1) == NEG)
2252 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2253
2254 /* (~a) + 1 -> -a */
2255 if (INTEGRAL_MODE_P (mode)
2256 && GET_CODE (op0) == NOT
2257 && trueop1 == const1_rtx)
2258 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2259
2260 /* Handle both-operands-constant cases. We can only add
2261 CONST_INTs to constants since the sum of relocatable symbols
2262 can't be handled by most assemblers. Don't add CONST_INT
2263 to CONST_INT since overflow won't be computed properly if wider
2264 than HOST_BITS_PER_WIDE_INT. */
2265
2266 if ((GET_CODE (op0) == CONST
2267 || GET_CODE (op0) == SYMBOL_REF
2268 || GET_CODE (op0) == LABEL_REF)
2269 && poly_int_rtx_p (op1, &offset))
2270 return plus_constant (mode, op0, offset);
2271 else if ((GET_CODE (op1) == CONST
2272 || GET_CODE (op1) == SYMBOL_REF
2273 || GET_CODE (op1) == LABEL_REF)
2274 && poly_int_rtx_p (op0, &offset))
2275 return plus_constant (mode, op1, offset);
2276
2277 /* See if this is something like X * C - X or vice versa or
2278 if the multiplication is written as a shift. If so, we can
2279 distribute and make a new multiply, shift, or maybe just
2280 have X (if C is 2 in the example above). But don't make
2281 something more expensive than we had before. */
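/* For example, (plus (mult X (const_int 3)) X) can become
   (mult X (const_int 4)), provided the cost check below does not
   consider the new form more expensive.  */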
2282
2283 if (is_a <scalar_int_mode> (mode, &int_mode))
2284 {
2285 rtx lhs = op0, rhs = op1;
2286
2287 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2288 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2289
2290 if (GET_CODE (lhs) == NEG)
2291 {
2292 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2293 lhs = XEXP (lhs, 0);
2294 }
2295 else if (GET_CODE (lhs) == MULT
2296 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2297 {
2298 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2299 lhs = XEXP (lhs, 0);
2300 }
2301 else if (GET_CODE (lhs) == ASHIFT
2302 && CONST_INT_P (XEXP (lhs, 1))
2303 && INTVAL (XEXP (lhs, 1)) >= 0
2304 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2305 {
2306 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2307 GET_MODE_PRECISION (int_mode));
2308 lhs = XEXP (lhs, 0);
2309 }
2310
2311 if (GET_CODE (rhs) == NEG)
2312 {
2313 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2314 rhs = XEXP (rhs, 0);
2315 }
2316 else if (GET_CODE (rhs) == MULT
2317 && CONST_INT_P (XEXP (rhs, 1)))
2318 {
2319 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2320 rhs = XEXP (rhs, 0);
2321 }
2322 else if (GET_CODE (rhs) == ASHIFT
2323 && CONST_INT_P (XEXP (rhs, 1))
2324 && INTVAL (XEXP (rhs, 1)) >= 0
2325 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2326 {
2327 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2328 GET_MODE_PRECISION (int_mode));
2329 rhs = XEXP (rhs, 0);
2330 }
2331
2332 if (rtx_equal_p (lhs, rhs))
2333 {
2334 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2335 rtx coeff;
2336 bool speed = optimize_function_for_speed_p (cfun);
2337
2338 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2339
2340 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2341 return (set_src_cost (tem, int_mode, speed)
2342 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2343 }
2344 }
2345
2346 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
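/* E.g., in QImode, (plus (xor X (const_int 0x40)) (const_int 0x80))
   becomes (xor X (const_int 0xc0)): adding the sign bit modulo 2^8
   is the same as xoring it in.  */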
2347 if (CONST_SCALAR_INT_P (op1)
2348 && GET_CODE (op0) == XOR
2349 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2350 && mode_signbit_p (mode, op1))
2351 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2352 simplify_gen_binary (XOR, mode, op1,
2353 XEXP (op0, 1)));
2354
2355 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2356 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2357 && GET_CODE (op0) == MULT
2358 && GET_CODE (XEXP (op0, 0)) == NEG)
2359 {
2360 rtx in1, in2;
2361
2362 in1 = XEXP (XEXP (op0, 0), 0);
2363 in2 = XEXP (op0, 1);
2364 return simplify_gen_binary (MINUS, mode, op1,
2365 simplify_gen_binary (MULT, mode,
2366 in1, in2));
2367 }
2368
2369 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2370 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2371 is 1. */
2372 if (COMPARISON_P (op0)
2373 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2374 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2375 && (reversed = reversed_comparison (op0, mode)))
2376 return
2377 simplify_gen_unary (NEG, mode, reversed, mode);
2378
2379 /* If one of the operands is a PLUS or a MINUS, see if we can
2380 simplify this by the associative law.
2381 Don't use the associative law for floating point.
2382 The inaccuracy makes it nonassociative,
2383 and subtle programs can break if operations are associated. */
2384
2385 if (INTEGRAL_MODE_P (mode)
2386 && (plus_minus_operand_p (op0)
2387 || plus_minus_operand_p (op1))
2388 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2389 return tem;
2390
2391 /* Reassociate floating point addition only when the user
2392 specifies associative math operations. */
2393 if (FLOAT_MODE_P (mode)
2394 && flag_associative_math)
2395 {
2396 tem = simplify_associative_operation (code, mode, op0, op1);
2397 if (tem)
2398 return tem;
2399 }
2400
2401 /* Handle vector series. */
2402 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2403 {
2404 tem = simplify_binary_operation_series (code, mode, op0, op1);
2405 if (tem)
2406 return tem;
2407 }
2408 break;
2409
2410 case COMPARE:
2411 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2412 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2413 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2414 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2415 {
2416 rtx xop00 = XEXP (op0, 0);
2417 rtx xop10 = XEXP (op1, 0);
2418
2419 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2420 return xop00;
2421
2422 if (REG_P (xop00) && REG_P (xop10)
2423 && REGNO (xop00) == REGNO (xop10)
2424 && GET_MODE (xop00) == mode
2425 && GET_MODE (xop10) == mode
2426 && GET_MODE_CLASS (mode) == MODE_CC)
2427 return xop00;
2428 }
2429 break;
2430
2431 case MINUS:
2432 /* We can't assume x-x is 0 even with non-IEEE floating point,
2433 but since it is zero except in very strange circumstances, we
2434 will treat it as zero with -ffinite-math-only. */
2435 if (rtx_equal_p (trueop0, trueop1)
2436 && ! side_effects_p (op0)
2437 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2438 return CONST0_RTX (mode);
2439
2440 /* Change subtraction from zero into negation. (0 - x) is the
2441 same as -x when x is NaN, infinite, or finite and nonzero.
2442 But if the mode has signed zeros, and does not round towards
2443 -infinity, then 0 - 0 is 0, not -0. */
2444 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2445 return simplify_gen_unary (NEG, mode, op1, mode);
2446
2447 /* (-1 - a) is ~a, unless the expression contains symbolic
2448 constants, in which case not retaining additions and
2449 subtractions could cause invalid assembly to be produced. */
2450 if (trueop0 == constm1_rtx
2451 && !contains_symbolic_reference_p (op1))
2452 return simplify_gen_unary (NOT, mode, op1, mode);
2453
2454 /* Subtracting 0 has no effect unless the mode has signed zeros
2455 and supports rounding towards -infinity. In such a case,
2456 0 - 0 is -0. */
2457 if (!(HONOR_SIGNED_ZEROS (mode)
2458 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2459 && trueop1 == CONST0_RTX (mode))
2460 return op0;
2461
2462 /* See if this is something like X * C - X or vice versa or
2463 if the multiplication is written as a shift. If so, we can
2464 distribute and make a new multiply, shift, or maybe just
2465 have X (if C is 2 in the example above). But don't make
2466 something more expensive than we had before. */
2467
2468 if (is_a <scalar_int_mode> (mode, &int_mode))
2469 {
2470 rtx lhs = op0, rhs = op1;
2471
2472 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2473 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2474
2475 if (GET_CODE (lhs) == NEG)
2476 {
2477 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2478 lhs = XEXP (lhs, 0);
2479 }
2480 else if (GET_CODE (lhs) == MULT
2481 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2482 {
2483 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2484 lhs = XEXP (lhs, 0);
2485 }
2486 else if (GET_CODE (lhs) == ASHIFT
2487 && CONST_INT_P (XEXP (lhs, 1))
2488 && INTVAL (XEXP (lhs, 1)) >= 0
2489 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2490 {
2491 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2492 GET_MODE_PRECISION (int_mode));
2493 lhs = XEXP (lhs, 0);
2494 }
2495
2496 if (GET_CODE (rhs) == NEG)
2497 {
2498 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2499 rhs = XEXP (rhs, 0);
2500 }
2501 else if (GET_CODE (rhs) == MULT
2502 && CONST_INT_P (XEXP (rhs, 1)))
2503 {
2504 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2505 rhs = XEXP (rhs, 0);
2506 }
2507 else if (GET_CODE (rhs) == ASHIFT
2508 && CONST_INT_P (XEXP (rhs, 1))
2509 && INTVAL (XEXP (rhs, 1)) >= 0
2510 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2511 {
2512 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2513 GET_MODE_PRECISION (int_mode));
2514 negcoeff1 = -negcoeff1;
2515 rhs = XEXP (rhs, 0);
2516 }
2517
2518 if (rtx_equal_p (lhs, rhs))
2519 {
2520 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2521 rtx coeff;
2522 bool speed = optimize_function_for_speed_p (cfun);
2523
2524 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2525
2526 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2527 return (set_src_cost (tem, int_mode, speed)
2528 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2529 }
2530 }
2531
2532 /* (a - (-b)) -> (a + b). True even for IEEE. */
2533 if (GET_CODE (op1) == NEG)
2534 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2535
2536 /* (-x - c) may be simplified as (-c - x). */
2537 if (GET_CODE (op0) == NEG
2538 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2539 {
2540 tem = simplify_unary_operation (NEG, mode, op1, mode);
2541 if (tem)
2542 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2543 }
2544
2545 if ((GET_CODE (op0) == CONST
2546 || GET_CODE (op0) == SYMBOL_REF
2547 || GET_CODE (op0) == LABEL_REF)
2548 && poly_int_rtx_p (op1, &offset))
2549 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2550
2551 /* Don't let a relocatable value get a negative coeff. */
2552 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2553 return simplify_gen_binary (PLUS, mode,
2554 op0,
2555 neg_const_int (mode, op1));
2556
2557 /* (x - (x & y)) -> (x & ~y) */
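/* E.g., with x = 0b1101 and y = 0b0110: x & y = 0b0100, so
   x - (x & y) = 0b1001, which is exactly x & ~y.  */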
2558 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2559 {
2560 if (rtx_equal_p (op0, XEXP (op1, 0)))
2561 {
2562 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2563 GET_MODE (XEXP (op1, 1)));
2564 return simplify_gen_binary (AND, mode, op0, tem);
2565 }
2566 if (rtx_equal_p (op0, XEXP (op1, 1)))
2567 {
2568 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2569 GET_MODE (XEXP (op1, 0)));
2570 return simplify_gen_binary (AND, mode, op0, tem);
2571 }
2572 }
2573
2574 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2575 by reversing the comparison code if valid. */
2576 if (STORE_FLAG_VALUE == 1
2577 && trueop0 == const1_rtx
2578 && COMPARISON_P (op1)
2579 && (reversed = reversed_comparison (op1, mode)))
2580 return reversed;
2581
2582 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2583 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2584 && GET_CODE (op1) == MULT
2585 && GET_CODE (XEXP (op1, 0)) == NEG)
2586 {
2587 rtx in1, in2;
2588
2589 in1 = XEXP (XEXP (op1, 0), 0);
2590 in2 = XEXP (op1, 1);
2591 return simplify_gen_binary (PLUS, mode,
2592 simplify_gen_binary (MULT, mode,
2593 in1, in2),
2594 op0);
2595 }
2596
2597 /* Canonicalize (minus (neg A) (mult B C)) to
2598 (minus (mult (neg B) C) A). */
2599 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2600 && GET_CODE (op1) == MULT
2601 && GET_CODE (op0) == NEG)
2602 {
2603 rtx in1, in2;
2604
2605 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2606 in2 = XEXP (op1, 1);
2607 return simplify_gen_binary (MINUS, mode,
2608 simplify_gen_binary (MULT, mode,
2609 in1, in2),
2610 XEXP (op0, 0));
2611 }
2612
2613 /* If one of the operands is a PLUS or a MINUS, see if we can
2614 simplify this by the associative law. This will, for example,
2615 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2616 Don't use the associative law for floating point.
2617 The inaccuracy makes it nonassociative,
2618 and subtle programs can break if operations are associated. */
2619
2620 if (INTEGRAL_MODE_P (mode)
2621 && (plus_minus_operand_p (op0)
2622 || plus_minus_operand_p (op1))
2623 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2624 return tem;
2625
2626 /* Handle vector series. */
2627 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2628 {
2629 tem = simplify_binary_operation_series (code, mode, op0, op1);
2630 if (tem)
2631 return tem;
2632 }
2633 break;
2634
2635 case MULT:
2636 if (trueop1 == constm1_rtx)
2637 return simplify_gen_unary (NEG, mode, op0, mode);
2638
2639 if (GET_CODE (op0) == NEG)
2640 {
2641 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2642 /* If op1 is a MULT as well and simplify_unary_operation
2643 just moved the NEG to the second operand, simplify_gen_binary
2644 below could, through simplify_associative_operation, move
2645 the NEG around again and recurse endlessly. */
2646 if (temp
2647 && GET_CODE (op1) == MULT
2648 && GET_CODE (temp) == MULT
2649 && XEXP (op1, 0) == XEXP (temp, 0)
2650 && GET_CODE (XEXP (temp, 1)) == NEG
2651 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2652 temp = NULL_RTX;
2653 if (temp)
2654 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2655 }
2656 if (GET_CODE (op1) == NEG)
2657 {
2658 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2659 /* If op0 is a MULT as well and simplify_unary_operation
2660 just moved the NEG to the second operand, simplify_gen_binary
2661 below could, through simplify_associative_operation, move
2662 the NEG around again and recurse endlessly. */
2663 if (temp
2664 && GET_CODE (op0) == MULT
2665 && GET_CODE (temp) == MULT
2666 && XEXP (op0, 0) == XEXP (temp, 0)
2667 && GET_CODE (XEXP (temp, 1)) == NEG
2668 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2669 temp = NULL_RTX;
2670 if (temp)
2671 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2672 }
2673
2674 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2675 x is NaN, since x * 0 is then also NaN. Nor is it valid
2676 when the mode has signed zeros, since multiplying a negative
2677 number by 0 will give -0, not 0. */
2678 if (!HONOR_NANS (mode)
2679 && !HONOR_SIGNED_ZEROS (mode)
2680 && trueop1 == CONST0_RTX (mode)
2681 && ! side_effects_p (op0))
2682 return op1;
2683
2684 /* In IEEE floating point, x*1 is not equivalent to x for
2685 signalling NaNs. */
2686 if (!HONOR_SNANS (mode)
2687 && trueop1 == CONST1_RTX (mode))
2688 return op0;
2689
2690 /* Convert multiply by constant power of two into shift. */
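/* E.g., (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */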
2691 if (CONST_SCALAR_INT_P (trueop1))
2692 {
2693 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2694 if (val >= 0)
2695 return simplify_gen_binary (ASHIFT, mode, op0,
2696 gen_int_shift_amount (mode, val));
2697 }
2698
2699 /* x*2 is x+x and x*(-1) is -x */
2700 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2701 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2702 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2703 && GET_MODE (op0) == mode)
2704 {
2705 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2706
2707 if (real_equal (d1, &dconst2))
2708 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2709
2710 if (!HONOR_SNANS (mode)
2711 && real_equal (d1, &dconstm1))
2712 return simplify_gen_unary (NEG, mode, op0, mode);
2713 }
2714
2715 /* Optimize -x * -x as x * x. */
2716 if (FLOAT_MODE_P (mode)
2717 && GET_CODE (op0) == NEG
2718 && GET_CODE (op1) == NEG
2719 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2720 && !side_effects_p (XEXP (op0, 0)))
2721 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2722
2723 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2724 if (SCALAR_FLOAT_MODE_P (mode)
2725 && GET_CODE (op0) == ABS
2726 && GET_CODE (op1) == ABS
2727 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2728 && !side_effects_p (XEXP (op0, 0)))
2729 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2730
2731 /* Reassociate multiplication, but for floating point MULTs
2732 only when the user specifies unsafe math optimizations. */
2733 if (! FLOAT_MODE_P (mode)
2734 || flag_unsafe_math_optimizations)
2735 {
2736 tem = simplify_associative_operation (code, mode, op0, op1);
2737 if (tem)
2738 return tem;
2739 }
2740 break;
2741
2742 case IOR:
2743 if (trueop1 == CONST0_RTX (mode))
2744 return op0;
2745 if (INTEGRAL_MODE_P (mode)
2746 && trueop1 == CONSTM1_RTX (mode)
2747 && !side_effects_p (op0))
2748 return op1;
2749 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2750 return op0;
2751 /* A | (~A) -> -1 */
2752 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2753 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2754 && ! side_effects_p (op0)
2755 && SCALAR_INT_MODE_P (mode))
2756 return constm1_rtx;
2757
2758 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2759 if (CONST_INT_P (op1)
2760 && HWI_COMPUTABLE_MODE_P (mode)
2761 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2762 && !side_effects_p (op0))
2763 return op1;
2764
2765 /* Canonicalize (X & C1) | C2. */
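/* E.g., in QImode, (ior (and X (const_int 0x0f)) (const_int 0xff)) is
   just (const_int 0xff) when X has no side effects, and
   (ior (and X (const_int 0x0f)) (const_int 0xf0)) becomes
   (ior X (const_int 0xf0)) because 0x0f | 0xf0 covers the mode mask.  */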
2766 if (GET_CODE (op0) == AND
2767 && CONST_INT_P (trueop1)
2768 && CONST_INT_P (XEXP (op0, 1)))
2769 {
2770 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2771 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2772 HOST_WIDE_INT c2 = INTVAL (trueop1);
2773
2774 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2775 if ((c1 & c2) == c1
2776 && !side_effects_p (XEXP (op0, 0)))
2777 return trueop1;
2778
2779 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2780 if (((c1|c2) & mask) == mask)
2781 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2782 }
2783
2784 /* Convert (A & B) | A to A. */
2785 if (GET_CODE (op0) == AND
2786 && (rtx_equal_p (XEXP (op0, 0), op1)
2787 || rtx_equal_p (XEXP (op0, 1), op1))
2788 && ! side_effects_p (XEXP (op0, 0))
2789 && ! side_effects_p (XEXP (op0, 1)))
2790 return op1;
2791
2792 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2793 mode size to (rotate A CX). */
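/* E.g., in SImode, (ior (ashift:SI X (const_int 8))
   (lshiftrt:SI X (const_int 24))) becomes (rotate:SI X (const_int 8)),
   since 8 + 24 equals the 32-bit precision.  */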
2794
2795 if (GET_CODE (op1) == ASHIFT
2796 || GET_CODE (op1) == SUBREG)
2797 {
2798 opleft = op1;
2799 opright = op0;
2800 }
2801 else
2802 {
2803 opright = op1;
2804 opleft = op0;
2805 }
2806
2807 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2808 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2809 && CONST_INT_P (XEXP (opleft, 1))
2810 && CONST_INT_P (XEXP (opright, 1))
2811 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2812 == GET_MODE_UNIT_PRECISION (mode)))
2813 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2814
2815 /* Same, but for ashift that has been "simplified" to a wider mode
2816 by simplify_shift_const. */
2817
2818 if (GET_CODE (opleft) == SUBREG
2819 && is_a <scalar_int_mode> (mode, &int_mode)
2820 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2821 &inner_mode)
2822 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2823 && GET_CODE (opright) == LSHIFTRT
2824 && GET_CODE (XEXP (opright, 0)) == SUBREG
2825 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2826 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2827 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2828 SUBREG_REG (XEXP (opright, 0)))
2829 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2830 && CONST_INT_P (XEXP (opright, 1))
2831 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2832 + INTVAL (XEXP (opright, 1))
2833 == GET_MODE_PRECISION (int_mode)))
2834 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2835 XEXP (SUBREG_REG (opleft), 1));
2836
2837 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2838 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2839 the PLUS does not affect any of the bits in OP1: then we can do
2840 the IOR as a PLUS and we can associate. This is valid if OP1
2841 can be safely shifted left C bits. */
2842 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2843 && GET_CODE (XEXP (op0, 0)) == PLUS
2844 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2845 && CONST_INT_P (XEXP (op0, 1))
2846 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2847 {
2848 int count = INTVAL (XEXP (op0, 1));
2849 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2850
2851 if (mask >> count == INTVAL (trueop1)
2852 && trunc_int_for_mode (mask, mode) == mask
2853 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2854 return simplify_gen_binary (ASHIFTRT, mode,
2855 plus_constant (mode, XEXP (op0, 0),
2856 mask),
2857 XEXP (op0, 1));
2858 }
2859
2860 /* The following happens with bitfield merging.
2861 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
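/* E.g., in QImode with C == 0xf0: (ior (and X (const_int 0xf0))
   (and (ior X Y) (const_int 0x0f))) becomes
   (ior X (and Y (const_int 0x0f))).  */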
2862 if (GET_CODE (op0) == AND
2863 && GET_CODE (op1) == AND
2864 && CONST_INT_P (XEXP (op0, 1))
2865 && CONST_INT_P (XEXP (op1, 1))
2866 && (INTVAL (XEXP (op0, 1))
2867 == ~INTVAL (XEXP (op1, 1))))
2868 {
2869 /* The IOR may be on either side. */
2870 rtx top0 = NULL_RTX, top1 = NULL_RTX;
2871 if (GET_CODE (XEXP (op1, 0)) == IOR)
2872 top0 = op0, top1 = op1;
2873 else if (GET_CODE (XEXP (op0, 0)) == IOR)
2874 top0 = op1, top1 = op0;
2875 if (top0 && top1)
2876 {
2877 /* X may be on either side of the inner IOR. */
2878 rtx tem = NULL_RTX;
2879 if (rtx_equal_p (XEXP (top0, 0),
2880 XEXP (XEXP (top1, 0), 0)))
2881 tem = XEXP (XEXP (top1, 0), 1);
2882 else if (rtx_equal_p (XEXP (top0, 0),
2883 XEXP (XEXP (top1, 0), 1)))
2884 tem = XEXP (XEXP (top1, 0), 0);
2885 if (tem)
2886 return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
2887 simplify_gen_binary
2888 (AND, mode, tem, XEXP (top1, 1)));
2889 }
2890 }
2891
2892 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2893 if (tem)
2894 return tem;
2895
2896 tem = simplify_associative_operation (code, mode, op0, op1);
2897 if (tem)
2898 return tem;
2899 break;
2900
2901 case XOR:
2902 if (trueop1 == CONST0_RTX (mode))
2903 return op0;
2904 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2905 return simplify_gen_unary (NOT, mode, op0, mode);
2906 if (rtx_equal_p (trueop0, trueop1)
2907 && ! side_effects_p (op0)
2908 && GET_MODE_CLASS (mode) != MODE_CC)
2909 return CONST0_RTX (mode);
2910
2911 /* Canonicalize XOR of the most significant bit to PLUS. */
2912 if (CONST_SCALAR_INT_P (op1)
2913 && mode_signbit_p (mode, op1))
2914 return simplify_gen_binary (PLUS, mode, op0, op1);
2915 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2916 if (CONST_SCALAR_INT_P (op1)
2917 && GET_CODE (op0) == PLUS
2918 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2919 && mode_signbit_p (mode, XEXP (op0, 1)))
2920 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2921 simplify_gen_binary (XOR, mode, op1,
2922 XEXP (op0, 1)));
2923
2924 /* If we are XORing two things that have no bits in common,
2925 convert them into an IOR. This helps to detect rotation encoded
2926 using those methods and possibly other simplifications. */
2927
2928 if (HWI_COMPUTABLE_MODE_P (mode)
2929 && (nonzero_bits (op0, mode)
2930 & nonzero_bits (op1, mode)) == 0)
2931 return (simplify_gen_binary (IOR, mode, op0, op1));
2932
2933 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2934 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2935 (NOT y). */
2936 {
2937 int num_negated = 0;
2938
2939 if (GET_CODE (op0) == NOT)
2940 num_negated++, op0 = XEXP (op0, 0);
2941 if (GET_CODE (op1) == NOT)
2942 num_negated++, op1 = XEXP (op1, 0);
2943
2944 if (num_negated == 2)
2945 return simplify_gen_binary (XOR, mode, op0, op1);
2946 else if (num_negated == 1)
2947 return simplify_gen_unary (NOT, mode,
2948 simplify_gen_binary (XOR, mode, op0, op1),
2949 mode);
2950 }
2951
2952 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2953 correspond to a machine insn or result in further simplifications
2954 if B is a constant. */
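/* E.g., with A = 0b1100 and B = 0b1010: (A & B) ^ B = 0b1000 ^ 0b1010
   = 0b0010, which equals (~A) & B = 0b0011 & 0b1010.  */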
2955
2956 if (GET_CODE (op0) == AND
2957 && rtx_equal_p (XEXP (op0, 1), op1)
2958 && ! side_effects_p (op1))
2959 return simplify_gen_binary (AND, mode,
2960 simplify_gen_unary (NOT, mode,
2961 XEXP (op0, 0), mode),
2962 op1);
2963
2964 else if (GET_CODE (op0) == AND
2965 && rtx_equal_p (XEXP (op0, 0), op1)
2966 && ! side_effects_p (op1))
2967 return simplify_gen_binary (AND, mode,
2968 simplify_gen_unary (NOT, mode,
2969 XEXP (op0, 1), mode),
2970 op1);
2971
2972 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2973 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2974 out bits inverted twice and not set by C. Similarly, given
2975 (xor (and (xor A B) C) D), simplify without inverting C in
2976 the xor operand: (xor (and A C) (B&C)^D).
2977 */
2978 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2979 && GET_CODE (XEXP (op0, 0)) == XOR
2980 && CONST_INT_P (op1)
2981 && CONST_INT_P (XEXP (op0, 1))
2982 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2983 {
2984 enum rtx_code op = GET_CODE (op0);
2985 rtx a = XEXP (XEXP (op0, 0), 0);
2986 rtx b = XEXP (XEXP (op0, 0), 1);
2987 rtx c = XEXP (op0, 1);
2988 rtx d = op1;
2989 HOST_WIDE_INT bval = INTVAL (b);
2990 HOST_WIDE_INT cval = INTVAL (c);
2991 HOST_WIDE_INT dval = INTVAL (d);
2992 HOST_WIDE_INT xcval;
2993
2994 if (op == IOR)
2995 xcval = ~cval;
2996 else
2997 xcval = cval;
2998
2999 return simplify_gen_binary (XOR, mode,
3000 simplify_gen_binary (op, mode, a, c),
3001 gen_int_mode ((bval & xcval) ^ dval,
3002 mode));
3003 }
3004
3005 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3006 we can transform like this:
3007 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3008 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3009 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3010 Attempt a few simplifications when B and C are both constants. */
3011 if (GET_CODE (op0) == AND
3012 && CONST_INT_P (op1)
3013 && CONST_INT_P (XEXP (op0, 1)))
3014 {
3015 rtx a = XEXP (op0, 0);
3016 rtx b = XEXP (op0, 1);
3017 rtx c = op1;
3018 HOST_WIDE_INT bval = INTVAL (b);
3019 HOST_WIDE_INT cval = INTVAL (c);
3020
3021 /* Instead of computing ~A&C, we compute its negated value,
3022 A|~C. If it yields -1, ~A&C is zero, so we can
3023 optimize for sure. If it does not simplify, we still try
3024 to compute ~A&C below, but since that always allocates
3025 RTL, we don't try that before committing to returning a
3026 simplified expression. */
3027 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3028 GEN_INT (~cval));
3029
3030 if ((~cval & bval) == 0)
3031 {
3032 rtx na_c = NULL_RTX;
3033 if (n_na_c)
3034 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3035 else
3036 {
3037 /* If ~A does not simplify, don't bother: we don't
3038 want to simplify 2 operations into 3, and if na_c
3039 were to simplify with na, n_na_c would have
3040 simplified as well. */
3041 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3042 if (na)
3043 na_c = simplify_gen_binary (AND, mode, na, c);
3044 }
3045
3046 /* Try to simplify ~A&C | ~B&C. */
3047 if (na_c != NULL_RTX)
3048 return simplify_gen_binary (IOR, mode, na_c,
3049 gen_int_mode (~bval & cval, mode));
3050 }
3051 else
3052 {
3053 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3054 if (n_na_c == CONSTM1_RTX (mode))
3055 {
3056 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3057 gen_int_mode (~cval & bval,
3058 mode));
3059 return simplify_gen_binary (IOR, mode, a_nc_b,
3060 gen_int_mode (~bval & cval,
3061 mode));
3062 }
3063 }
3064 }
3065
3066 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3067 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3068 machines, and also has shorter instruction path length. */
3069 if (GET_CODE (op0) == AND
3070 && GET_CODE (XEXP (op0, 0)) == XOR
3071 && CONST_INT_P (XEXP (op0, 1))
3072 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3073 {
3074 rtx a = trueop1;
3075 rtx b = XEXP (XEXP (op0, 0), 1);
3076 rtx c = XEXP (op0, 1);
3077 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3078 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3079 rtx bc = simplify_gen_binary (AND, mode, b, c);
3080 return simplify_gen_binary (IOR, mode, a_nc, bc);
3081 }
3082 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3083 else if (GET_CODE (op0) == AND
3084 && GET_CODE (XEXP (op0, 0)) == XOR
3085 && CONST_INT_P (XEXP (op0, 1))
3086 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3087 {
3088 rtx a = XEXP (XEXP (op0, 0), 0);
3089 rtx b = trueop1;
3090 rtx c = XEXP (op0, 1);
3091 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3092 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3093 rtx ac = simplify_gen_binary (AND, mode, a, c);
3094 return simplify_gen_binary (IOR, mode, ac, b_nc);
3095 }
3096
3097 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3098 comparison if STORE_FLAG_VALUE is 1. */
3099 if (STORE_FLAG_VALUE == 1
3100 && trueop1 == const1_rtx
3101 && COMPARISON_P (op0)
3102 && (reversed = reversed_comparison (op0, mode)))
3103 return reversed;
3104
3105 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3106 is (lt foo (const_int 0)), so we can perform the above
3107 simplification if STORE_FLAG_VALUE is 1. */
3108
3109 if (is_a <scalar_int_mode> (mode, &int_mode)
3110 && STORE_FLAG_VALUE == 1
3111 && trueop1 == const1_rtx
3112 && GET_CODE (op0) == LSHIFTRT
3113 && CONST_INT_P (XEXP (op0, 1))
3114 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3115 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3116
3117 /* (xor (comparison foo bar) (const_int sign-bit))
3118 when STORE_FLAG_VALUE is the sign bit. */
3119 if (is_a <scalar_int_mode> (mode, &int_mode)
3120 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3121 && trueop1 == const_true_rtx
3122 && COMPARISON_P (op0)
3123 && (reversed = reversed_comparison (op0, int_mode)))
3124 return reversed;
3125
3126 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3127 if (tem)
3128 return tem;
3129
3130 tem = simplify_associative_operation (code, mode, op0, op1);
3131 if (tem)
3132 return tem;
3133 break;
3134
3135 case AND:
3136 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3137 return trueop1;
3138 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3139 return op0;
3140 if (HWI_COMPUTABLE_MODE_P (mode))
3141 {
3142 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3143 HOST_WIDE_INT nzop1;
3144 if (CONST_INT_P (trueop1))
3145 {
3146 HOST_WIDE_INT val1 = INTVAL (trueop1);
3147 /* If we are turning off bits already known off in OP0, we need
3148 not do an AND. */
3149 if ((nzop0 & ~val1) == 0)
3150 return op0;
3151 }
3152 nzop1 = nonzero_bits (trueop1, mode);
3153 /* If we are clearing all the nonzero bits, the result is zero. */
3154 if ((nzop1 & nzop0) == 0
3155 && !side_effects_p (op0) && !side_effects_p (op1))
3156 return CONST0_RTX (mode);
3157 }
3158 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3159 && GET_MODE_CLASS (mode) != MODE_CC)
3160 return op0;
3161 /* A & (~A) -> 0 */
3162 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3163 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3164 && ! side_effects_p (op0)
3165 && GET_MODE_CLASS (mode) != MODE_CC)
3166 return CONST0_RTX (mode);
3167
3168 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3169 there are no nonzero bits of C outside of X's mode. */
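/* E.g., (and:SI (zero_extend:SI (reg:QI r)) (const_int 0x3f)) becomes
   (zero_extend:SI (and:QI (reg:QI r) (const_int 0x3f))), since 0x3f has
   no bits outside QImode's mask.  */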
3170 if ((GET_CODE (op0) == SIGN_EXTEND
3171 || GET_CODE (op0) == ZERO_EXTEND)
3172 && CONST_INT_P (trueop1)
3173 && HWI_COMPUTABLE_MODE_P (mode)
3174 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3175 & UINTVAL (trueop1)) == 0)
3176 {
3177 machine_mode imode = GET_MODE (XEXP (op0, 0));
3178 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3179 gen_int_mode (INTVAL (trueop1),
3180 imode));
3181 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3182 }
3183
3184 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3185 we might be able to further simplify the AND with X and potentially
3186 remove the truncation altogether. */
3187 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3188 {
3189 rtx x = XEXP (op0, 0);
3190 machine_mode xmode = GET_MODE (x);
3191 tem = simplify_gen_binary (AND, xmode, x,
3192 gen_int_mode (INTVAL (trueop1), xmode));
3193 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3194 }
3195
3196 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3197 if (GET_CODE (op0) == IOR
3198 && CONST_INT_P (trueop1)
3199 && CONST_INT_P (XEXP (op0, 1)))
3200 {
3201 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3202 return simplify_gen_binary (IOR, mode,
3203 simplify_gen_binary (AND, mode,
3204 XEXP (op0, 0), op1),
3205 gen_int_mode (tmp, mode));
3206 }
3207
3208 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3209 insn (and may simplify more). */
3210 if (GET_CODE (op0) == XOR
3211 && rtx_equal_p (XEXP (op0, 0), op1)
3212 && ! side_effects_p (op1))
3213 return simplify_gen_binary (AND, mode,
3214 simplify_gen_unary (NOT, mode,
3215 XEXP (op0, 1), mode),
3216 op1);
3217
3218 if (GET_CODE (op0) == XOR
3219 && rtx_equal_p (XEXP (op0, 1), op1)
3220 && ! side_effects_p (op1))
3221 return simplify_gen_binary (AND, mode,
3222 simplify_gen_unary (NOT, mode,
3223 XEXP (op0, 0), mode),
3224 op1);
3225
3226 /* Similarly for (~(A ^ B)) & A. */
3227 if (GET_CODE (op0) == NOT
3228 && GET_CODE (XEXP (op0, 0)) == XOR
3229 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3230 && ! side_effects_p (op1))
3231 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3232
3233 if (GET_CODE (op0) == NOT
3234 && GET_CODE (XEXP (op0, 0)) == XOR
3235 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3236 && ! side_effects_p (op1))
3237 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3238
3239 /* Convert (A | B) & A to A. */
3240 if (GET_CODE (op0) == IOR
3241 && (rtx_equal_p (XEXP (op0, 0), op1)
3242 || rtx_equal_p (XEXP (op0, 1), op1))
3243 && ! side_effects_p (XEXP (op0, 0))
3244 && ! side_effects_p (XEXP (op0, 1)))
3245 return op1;
3246
3247 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3248 ((A & N) + B) & M -> (A + B) & M
3249 Similarly if (N & M) == 0,
3250 ((A | N) + B) & M -> (A + B) & M
3251 and for - instead of + and/or ^ instead of |.
3252 Also, if (N & M) == 0, then
3253 (A +- N) & M -> A & M. */
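/* E.g., with M == 0x0f and N == 0xff, ((A & 0xff) + B) & 0x0f becomes
   (A + B) & 0x0f; with N == 0xf0 (so (N & M) == 0),
   ((A | 0xf0) + B) & 0x0f likewise becomes (A + B) & 0x0f, because the
   low four bits of the sum depend only on the low four bits of the
   addends.  */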
3254 if (CONST_INT_P (trueop1)
3255 && HWI_COMPUTABLE_MODE_P (mode)
3256 && ~UINTVAL (trueop1)
3257 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3258 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3259 {
3260 rtx pmop[2];
3261 int which;
3262
3263 pmop[0] = XEXP (op0, 0);
3264 pmop[1] = XEXP (op0, 1);
3265
3266 if (CONST_INT_P (pmop[1])
3267 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3268 return simplify_gen_binary (AND, mode, pmop[0], op1);
3269
3270 for (which = 0; which < 2; which++)
3271 {
3272 tem = pmop[which];
3273 switch (GET_CODE (tem))
3274 {
3275 case AND:
3276 if (CONST_INT_P (XEXP (tem, 1))
3277 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3278 == UINTVAL (trueop1))
3279 pmop[which] = XEXP (tem, 0);
3280 break;
3281 case IOR:
3282 case XOR:
3283 if (CONST_INT_P (XEXP (tem, 1))
3284 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3285 pmop[which] = XEXP (tem, 0);
3286 break;
3287 default:
3288 break;
3289 }
3290 }
3291
3292 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3293 {
3294 tem = simplify_gen_binary (GET_CODE (op0), mode,
3295 pmop[0], pmop[1]);
3296 return simplify_gen_binary (code, mode, tem, op1);
3297 }
3298 }
3299
3300 /* (and X (ior (not X) Y)) -> (and X Y) */
3301 if (GET_CODE (op1) == IOR
3302 && GET_CODE (XEXP (op1, 0)) == NOT
3303 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3304 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3305
3306 /* (and (ior (not X) Y) X) -> (and X Y) */
3307 if (GET_CODE (op0) == IOR
3308 && GET_CODE (XEXP (op0, 0)) == NOT
3309 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3310 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3311
3312 /* (and X (ior Y (not X))) -> (and X Y) */
3313 if (GET_CODE (op1) == IOR
3314 && GET_CODE (XEXP (op1, 1)) == NOT
3315 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3316 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3317
3318 /* (and (ior Y (not X)) X) -> (and X Y) */
3319 if (GET_CODE (op0) == IOR
3320 && GET_CODE (XEXP (op0, 1)) == NOT
3321 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3322 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3323
3324 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3325 if (tem)
3326 return tem;
3327
3328 tem = simplify_associative_operation (code, mode, op0, op1);
3329 if (tem)
3330 return tem;
3331 break;
3332
3333 case UDIV:
3334 /* 0/x is 0 (or x&0 if x has side-effects). */
3335 if (trueop0 == CONST0_RTX (mode)
3336 && !cfun->can_throw_non_call_exceptions)
3337 {
3338 if (side_effects_p (op1))
3339 return simplify_gen_binary (AND, mode, op1, trueop0);
3340 return trueop0;
3341 }
3342 /* x/1 is x. */
3343 if (trueop1 == CONST1_RTX (mode))
3344 {
3345 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3346 if (tem)
3347 return tem;
3348 }
3349 /* Convert divide by power of two into shift. */
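/* E.g., (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)).  */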
3350 if (CONST_INT_P (trueop1)
3351 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3352 return simplify_gen_binary (LSHIFTRT, mode, op0,
3353 gen_int_shift_amount (mode, val));
3354 break;
3355
3356 case DIV:
3357 /* Handle floating point and integers separately. */
3358 if (SCALAR_FLOAT_MODE_P (mode))
3359 {
3360 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3361 safe for modes with NaNs, since 0.0 / 0.0 will then be
3362 NaN rather than 0.0. Nor is it safe for modes with signed
3363 zeros, since dividing 0 by a negative number gives -0.0 */
3364 if (trueop0 == CONST0_RTX (mode)
3365 && !HONOR_NANS (mode)
3366 && !HONOR_SIGNED_ZEROS (mode)
3367 && ! side_effects_p (op1))
3368 return op0;
3369 /* x/1.0 is x. */
3370 if (trueop1 == CONST1_RTX (mode)
3371 && !HONOR_SNANS (mode))
3372 return op0;
3373
3374 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3375 && trueop1 != CONST0_RTX (mode))
3376 {
3377 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3378
3379 /* x/-1.0 is -x. */
3380 if (real_equal (d1, &dconstm1)
3381 && !HONOR_SNANS (mode))
3382 return simplify_gen_unary (NEG, mode, op0, mode);
3383
3384 /* Change FP division by a constant into multiplication.
3385 Only do this with -freciprocal-math. */
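/* E.g., with -freciprocal-math, (div X 4.0) becomes (mult X 0.25),
   0.25 being exactly representable.  */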
3386 if (flag_reciprocal_math
3387 && !real_equal (d1, &dconst0))
3388 {
3389 REAL_VALUE_TYPE d;
3390 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3391 tem = const_double_from_real_value (d, mode);
3392 return simplify_gen_binary (MULT, mode, op0, tem);
3393 }
3394 }
3395 }
3396 else if (SCALAR_INT_MODE_P (mode))
3397 {
3398 /* 0/x is 0 (or x&0 if x has side-effects). */
3399 if (trueop0 == CONST0_RTX (mode)
3400 && !cfun->can_throw_non_call_exceptions)
3401 {
3402 if (side_effects_p (op1))
3403 return simplify_gen_binary (AND, mode, op1, trueop0);
3404 return trueop0;
3405 }
3406 /* x/1 is x. */
3407 if (trueop1 == CONST1_RTX (mode))
3408 {
3409 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3410 if (tem)
3411 return tem;
3412 }
3413 /* x/-1 is -x. */
3414 if (trueop1 == constm1_rtx)
3415 {
3416 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3417 if (x)
3418 return simplify_gen_unary (NEG, mode, x, mode);
3419 }
3420 }
3421 break;
3422
3423 case UMOD:
3424 /* 0%x is 0 (or x&0 if x has side-effects). */
3425 if (trueop0 == CONST0_RTX (mode))
3426 {
3427 if (side_effects_p (op1))
3428 return simplify_gen_binary (AND, mode, op1, trueop0);
3429 return trueop0;
3430 }
3431 /* x%1 is 0 (or x&0 if x has side-effects). */
3432 if (trueop1 == CONST1_RTX (mode))
3433 {
3434 if (side_effects_p (op0))
3435 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3436 return CONST0_RTX (mode);
3437 }
3438 /* Implement modulus by power of two as AND. */
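/* For example, (umod:M X (const_int 8)) becomes
(and:M X (const_int 7)). */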
3439 if (CONST_INT_P (trueop1)
3440 && exact_log2 (UINTVAL (trueop1)) > 0)
3441 return simplify_gen_binary (AND, mode, op0,
3442 gen_int_mode (UINTVAL (trueop1) - 1,
3443 mode));
3444 break;
3445
3446 case MOD:
3447 /* 0%x is 0 (or x&0 if x has side-effects). */
3448 if (trueop0 == CONST0_RTX (mode))
3449 {
3450 if (side_effects_p (op1))
3451 return simplify_gen_binary (AND, mode, op1, trueop0);
3452 return trueop0;
3453 }
3454 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3455 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3456 {
3457 if (side_effects_p (op0))
3458 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3459 return CONST0_RTX (mode);
3460 }
3461 break;
3462
3463 case ROTATERT:
3464 case ROTATE:
3465 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3466 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3467 bitsize - 1, use the other direction of rotate with an amount of
3468 1 .. bitsize / 2 - 1 instead. */
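/* For example, in a 32-bit mode (rotate:M X (const_int 31)) becomes
(rotatert:M X (const_int 1)), while a rotate by exactly 16 is kept
as a left rotate. */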
3469 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3470 if (CONST_INT_P (trueop1)
3471 && IN_RANGE (INTVAL (trueop1),
3472 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3473 GET_MODE_UNIT_PRECISION (mode) - 1))
3474 {
3475 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3476 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3477 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3478 mode, op0, new_amount_rtx);
3479 }
3480 #endif
3481 /* FALLTHRU */
3482 case ASHIFTRT:
3483 if (trueop1 == CONST0_RTX (mode))
3484 return op0;
3485 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3486 return op0;
3487 /* Rotating ~0 always results in ~0. */
3488 if (CONST_INT_P (trueop0)
3489 && HWI_COMPUTABLE_MODE_P (mode)
3490 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3491 && ! side_effects_p (op1))
3492 return op0;
3493
3494 canonicalize_shift:
3495 /* Given:
3496 scalar modes M1, M2
3497 scalar constants c1, c2
3498 size (M2) > size (M1)
3499 c1 == size (M2) - size (M1)
3500 optimize:
3501 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3502 <low_part>)
3503 (const_int <c2>))
3504 to:
3505 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3506 <low_part>). */
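/* For example, with M1 = SImode and M2 = DImode (so c1 = 32),
(lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI) (const_int 32)) <low_part>)
(const_int 5))
becomes
(subreg:SI (lshiftrt:DI (reg:DI) (const_int 37)) <low_part>). */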
3507 if ((code == ASHIFTRT || code == LSHIFTRT)
3508 && is_a <scalar_int_mode> (mode, &int_mode)
3509 && SUBREG_P (op0)
3510 && CONST_INT_P (op1)
3511 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3512 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3513 &inner_mode)
3514 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3515 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3516 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3517 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3518 && subreg_lowpart_p (op0))
3519 {
3520 rtx tmp = gen_int_shift_amount
3521 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3522 tmp = simplify_gen_binary (code, inner_mode,
3523 XEXP (SUBREG_REG (op0), 0),
3524 tmp);
3525 return lowpart_subreg (int_mode, tmp, inner_mode);
3526 }
3527
3528 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3529 {
3530 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3531 if (val != INTVAL (op1))
3532 return simplify_gen_binary (code, mode, op0,
3533 gen_int_shift_amount (mode, val));
3534 }
3535 break;
3536
3537 case ASHIFT:
3538 case SS_ASHIFT:
3539 case US_ASHIFT:
3540 if (trueop1 == CONST0_RTX (mode))
3541 return op0;
3542 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3543 return op0;
3544 goto canonicalize_shift;
3545
3546 case LSHIFTRT:
3547 if (trueop1 == CONST0_RTX (mode))
3548 return op0;
3549 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3550 return op0;
3551 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
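/* For example, if CLZ of zero is defined to be 32 for SImode, the
CLZ result ranges over 0..32 and shifting it right by 5 yields 1
only for a result of 32, i.e. only when X is zero; so
(lshiftrt (clz:SI X) (const_int 5)) folds to (eq X (const_int 0)). */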
3552 if (GET_CODE (op0) == CLZ
3553 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3554 && CONST_INT_P (trueop1)
3555 && STORE_FLAG_VALUE == 1
3556 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3557 {
3558 unsigned HOST_WIDE_INT zero_val = 0;
3559
3560 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3561 && zero_val == GET_MODE_PRECISION (inner_mode)
3562 && INTVAL (trueop1) == exact_log2 (zero_val))
3563 return simplify_gen_relational (EQ, mode, inner_mode,
3564 XEXP (op0, 0), const0_rtx);
3565 }
3566 goto canonicalize_shift;
3567
3568 case SMIN:
3569 if (HWI_COMPUTABLE_MODE_P (mode)
3570 && mode_signbit_p (mode, trueop1)
3571 && ! side_effects_p (op0))
3572 return op1;
3573 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3574 return op0;
3575 tem = simplify_associative_operation (code, mode, op0, op1);
3576 if (tem)
3577 return tem;
3578 break;
3579
3580 case SMAX:
3581 if (HWI_COMPUTABLE_MODE_P (mode)
3582 && CONST_INT_P (trueop1)
3583 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3584 && ! side_effects_p (op0))
3585 return op1;
3586 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3587 return op0;
3588 tem = simplify_associative_operation (code, mode, op0, op1);
3589 if (tem)
3590 return tem;
3591 break;
3592
3593 case UMIN:
3594 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3595 return op1;
3596 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3597 return op0;
3598 tem = simplify_associative_operation (code, mode, op0, op1);
3599 if (tem)
3600 return tem;
3601 break;
3602
3603 case UMAX:
3604 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3605 return op1;
3606 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3607 return op0;
3608 tem = simplify_associative_operation (code, mode, op0, op1);
3609 if (tem)
3610 return tem;
3611 break;
3612
3613 case SS_PLUS:
3614 case US_PLUS:
3615 case SS_MINUS:
3616 case US_MINUS:
3617 case SS_MULT:
3618 case US_MULT:
3619 case SS_DIV:
3620 case US_DIV:
3621 /* ??? There are simplifications that can be done. */
3622 return 0;
3623
3624 case VEC_SERIES:
3625 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3626 return gen_vec_duplicate (mode, op0);
3627 if (valid_for_const_vector_p (mode, op0)
3628 && valid_for_const_vector_p (mode, op1))
3629 return gen_const_vec_series (mode, op0, op1);
3630 return 0;
3631
3632 case VEC_SELECT:
3633 if (!VECTOR_MODE_P (mode))
3634 {
3635 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3636 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3637 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3638 gcc_assert (XVECLEN (trueop1, 0) == 1);
3639
3640 /* We can't reason about selections made at runtime. */
3641 if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3642 return 0;
3643
3644 if (vec_duplicate_p (trueop0, &elt0))
3645 return elt0;
3646
3647 if (GET_CODE (trueop0) == CONST_VECTOR)
3648 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3649 (trueop1, 0, 0)));
3650
3651 /* Extract a scalar element from a nested VEC_SELECT expression
3652 (with an optional nested VEC_CONCAT expression). Some targets
3653 (i386) extract a scalar element from a vector using a chain of
3654 nested VEC_SELECT expressions. When the input operand is a memory
3655 operand, this operation can be simplified to a simple scalar
3656 load from an offset memory address. */
3657 int n_elts;
3658 if (GET_CODE (trueop0) == VEC_SELECT
3659 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3660 .is_constant (&n_elts)))
3661 {
3662 rtx op0 = XEXP (trueop0, 0);
3663 rtx op1 = XEXP (trueop0, 1);
3664
3665 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3666 int elem;
3667
3668 rtvec vec;
3669 rtx tmp_op, tmp;
3670
3671 gcc_assert (GET_CODE (op1) == PARALLEL);
3672 gcc_assert (i < n_elts);
3673
3674 /* Select the element pointed to by the nested selector. */
3675 elem = INTVAL (XVECEXP (op1, 0, i));
3676
3677 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3678 if (GET_CODE (op0) == VEC_CONCAT)
3679 {
3680 rtx op00 = XEXP (op0, 0);
3681 rtx op01 = XEXP (op0, 1);
3682
3683 machine_mode mode00, mode01;
3684 int n_elts00, n_elts01;
3685
3686 mode00 = GET_MODE (op00);
3687 mode01 = GET_MODE (op01);
3688
3689 /* Find out the number of elements of each operand.
3690 Since the concatenated result has a constant number
3691 of elements, the operands must too. */
3692 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
3693 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
3694
3695 gcc_assert (n_elts == n_elts00 + n_elts01);
3696
3697 /* Select correct operand of VEC_CONCAT
3698 and adjust selector. */
3699 if (elem < n_elts01)
3700 tmp_op = op00;
3701 else
3702 {
3703 tmp_op = op01;
3704 elem -= n_elts00;
3705 }
3706 }
3707 else
3708 tmp_op = op0;
3709
3710 vec = rtvec_alloc (1);
3711 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3712
3713 tmp = gen_rtx_fmt_ee (code, mode,
3714 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3715 return tmp;
3716 }
3717 }
3718 else
3719 {
3720 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3721 gcc_assert (GET_MODE_INNER (mode)
3722 == GET_MODE_INNER (GET_MODE (trueop0)));
3723 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3724
3725 if (vec_duplicate_p (trueop0, &elt0))
3726 /* It doesn't matter which elements are selected by trueop1,
3727 because they are all the same. */
3728 return gen_vec_duplicate (mode, elt0);
3729
3730 if (GET_CODE (trueop0) == CONST_VECTOR)
3731 {
3732 unsigned n_elts = XVECLEN (trueop1, 0);
3733 rtvec v = rtvec_alloc (n_elts);
3734 unsigned int i;
3735
3736 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
3737 for (i = 0; i < n_elts; i++)
3738 {
3739 rtx x = XVECEXP (trueop1, 0, i);
3740
3741 if (!CONST_INT_P (x))
3742 return 0;
3743
3744 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3745 INTVAL (x));
3746 }
3747
3748 return gen_rtx_CONST_VECTOR (mode, v);
3749 }
3750
3751 /* Recognize the identity. */
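/* E.g. (vec_select:V4SI X (parallel [0 1 2 3])) is X itself. */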
3752 if (GET_MODE (trueop0) == mode)
3753 {
3754 bool maybe_ident = true;
3755 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3756 {
3757 rtx j = XVECEXP (trueop1, 0, i);
3758 if (!CONST_INT_P (j) || INTVAL (j) != i)
3759 {
3760 maybe_ident = false;
3761 break;
3762 }
3763 }
3764 if (maybe_ident)
3765 return trueop0;
3766 }
3767
3768 /* If we build {a,b} then permute it, build the result directly. */
3769 if (XVECLEN (trueop1, 0) == 2
3770 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3771 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3772 && GET_CODE (trueop0) == VEC_CONCAT
3773 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3774 && GET_MODE (XEXP (trueop0, 0)) == mode
3775 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3776 && GET_MODE (XEXP (trueop0, 1)) == mode)
3777 {
3778 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3779 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3780 rtx subop0, subop1;
3781
3782 gcc_assert (i0 < 4 && i1 < 4);
3783 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3784 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3785
3786 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3787 }
3788
3789 if (XVECLEN (trueop1, 0) == 2
3790 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3791 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3792 && GET_CODE (trueop0) == VEC_CONCAT
3793 && GET_MODE (trueop0) == mode)
3794 {
3795 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3796 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3797 rtx subop0, subop1;
3798
3799 gcc_assert (i0 < 2 && i1 < 2);
3800 subop0 = XEXP (trueop0, i0);
3801 subop1 = XEXP (trueop0, i1);
3802
3803 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3804 }
3805
3806 /* If we select one half of a vec_concat, return that. */
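/* E.g. (vec_select:V2SI (vec_concat:V4SI A B) (parallel [0 1])) is A,
and selecting [2 3] yields B, when A and B are both V2SImode. */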
3807 int l0, l1;
3808 if (GET_CODE (trueop0) == VEC_CONCAT
3809 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3810 .is_constant (&l0))
3811 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
3812 .is_constant (&l1))
3813 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3814 {
3815 rtx subop0 = XEXP (trueop0, 0);
3816 rtx subop1 = XEXP (trueop0, 1);
3817 machine_mode mode0 = GET_MODE (subop0);
3818 machine_mode mode1 = GET_MODE (subop1);
3819 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3820 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3821 {
3822 bool success = true;
3823 for (int i = 1; i < l0; ++i)
3824 {
3825 rtx j = XVECEXP (trueop1, 0, i);
3826 if (!CONST_INT_P (j) || INTVAL (j) != i)
3827 {
3828 success = false;
3829 break;
3830 }
3831 }
3832 if (success)
3833 return subop0;
3834 }
3835 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3836 {
3837 bool success = true;
3838 for (int i = 1; i < l1; ++i)
3839 {
3840 rtx j = XVECEXP (trueop1, 0, i);
3841 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3842 {
3843 success = false;
3844 break;
3845 }
3846 }
3847 if (success)
3848 return subop1;
3849 }
3850 }
3851 }
3852
3853 if (XVECLEN (trueop1, 0) == 1
3854 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3855 && GET_CODE (trueop0) == VEC_CONCAT)
3856 {
3857 rtx vec = trueop0;
3858 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3859
3860 /* Try to find the element in the VEC_CONCAT. */
3861 while (GET_MODE (vec) != mode
3862 && GET_CODE (vec) == VEC_CONCAT)
3863 {
3864 poly_int64 vec_size;
3865
3866 if (CONST_INT_P (XEXP (vec, 0)))
3867 {
3868 /* vec_concat of two const_ints doesn't make sense with
3869 respect to modes. */
3870 if (CONST_INT_P (XEXP (vec, 1)))
3871 return 0;
3872
3873 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3874 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3875 }
3876 else
3877 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3878
3879 if (known_lt (offset, vec_size))
3880 vec = XEXP (vec, 0);
3881 else if (known_ge (offset, vec_size))
3882 {
3883 offset -= vec_size;
3884 vec = XEXP (vec, 1);
3885 }
3886 else
3887 break;
3888 vec = avoid_constant_pool_reference (vec);
3889 }
3890
3891 if (GET_MODE (vec) == mode)
3892 return vec;
3893 }
3894
3895 /* If we select elements in a vec_merge that all come from the same
3896 operand, select from that operand directly. */
3897 if (GET_CODE (op0) == VEC_MERGE)
3898 {
3899 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3900 if (CONST_INT_P (trueop02))
3901 {
3902 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3903 bool all_operand0 = true;
3904 bool all_operand1 = true;
3905 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3906 {
3907 rtx j = XVECEXP (trueop1, 0, i);
3908 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3909 all_operand1 = false;
3910 else
3911 all_operand0 = false;
3912 }
3913 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3914 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3915 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3916 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3917 }
3918 }
3919
3920 /* If we have two nested selects that are inverses of each
3921 other, replace them with the source operand. */
3922 if (GET_CODE (trueop0) == VEC_SELECT
3923 && GET_MODE (XEXP (trueop0, 0)) == mode)
3924 {
3925 rtx op0_subop1 = XEXP (trueop0, 1);
3926 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3927 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
3928
3929 /* Apply the outer ordering vector to the inner one. (The inner
3930 ordering vector is expressly permitted to be of a different
3931 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3932 then the two VEC_SELECTs cancel. */
3933 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3934 {
3935 rtx x = XVECEXP (trueop1, 0, i);
3936 if (!CONST_INT_P (x))
3937 return 0;
3938 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3939 if (!CONST_INT_P (y) || i != INTVAL (y))
3940 return 0;
3941 }
3942 return XEXP (trueop0, 0);
3943 }
3944
3945 return 0;
3946 case VEC_CONCAT:
3947 {
3948 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3949 ? GET_MODE (trueop0)
3950 : GET_MODE_INNER (mode));
3951 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3952 ? GET_MODE (trueop1)
3953 : GET_MODE_INNER (mode));
3954
3955 gcc_assert (VECTOR_MODE_P (mode));
3956 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
3957 + GET_MODE_SIZE (op1_mode),
3958 GET_MODE_SIZE (mode)));
3959
3960 if (VECTOR_MODE_P (op0_mode))
3961 gcc_assert (GET_MODE_INNER (mode)
3962 == GET_MODE_INNER (op0_mode));
3963 else
3964 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3965
3966 if (VECTOR_MODE_P (op1_mode))
3967 gcc_assert (GET_MODE_INNER (mode)
3968 == GET_MODE_INNER (op1_mode));
3969 else
3970 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3971
3972 unsigned int n_elts, in_n_elts;
3973 if ((GET_CODE (trueop0) == CONST_VECTOR
3974 || CONST_SCALAR_INT_P (trueop0)
3975 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3976 && (GET_CODE (trueop1) == CONST_VECTOR
3977 || CONST_SCALAR_INT_P (trueop1)
3978 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
3979 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
3980 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
3981 {
3982 rtvec v = rtvec_alloc (n_elts);
3983 unsigned int i;
3984 for (i = 0; i < n_elts; i++)
3985 {
3986 if (i < in_n_elts)
3987 {
3988 if (!VECTOR_MODE_P (op0_mode))
3989 RTVEC_ELT (v, i) = trueop0;
3990 else
3991 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3992 }
3993 else
3994 {
3995 if (!VECTOR_MODE_P (op1_mode))
3996 RTVEC_ELT (v, i) = trueop1;
3997 else
3998 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3999 i - in_n_elts);
4000 }
4001 }
4002
4003 return gen_rtx_CONST_VECTOR (mode, v);
4004 }
4005
4006 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4007 Restrict the transformation to avoid generating a VEC_SELECT with a
4008 mode unrelated to its operand. */
4009 if (GET_CODE (trueop0) == VEC_SELECT
4010 && GET_CODE (trueop1) == VEC_SELECT
4011 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4012 && GET_MODE (XEXP (trueop0, 0)) == mode)
4013 {
4014 rtx par0 = XEXP (trueop0, 1);
4015 rtx par1 = XEXP (trueop1, 1);
4016 int len0 = XVECLEN (par0, 0);
4017 int len1 = XVECLEN (par1, 0);
4018 rtvec vec = rtvec_alloc (len0 + len1);
4019 for (int i = 0; i < len0; i++)
4020 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4021 for (int i = 0; i < len1; i++)
4022 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4023 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4024 gen_rtx_PARALLEL (VOIDmode, vec));
4025 }
4026 }
4027 return 0;
4028
4029 default:
4030 gcc_unreachable ();
4031 }
4032
4033 if (mode == GET_MODE (op0)
4034 && mode == GET_MODE (op1)
4035 && vec_duplicate_p (op0, &elt0)
4036 && vec_duplicate_p (op1, &elt1))
4037 {
4038 /* Try applying the operator to ELT and see if that simplifies.
4039 We can duplicate the result if so.
4040
4041 The reason we don't use simplify_gen_binary is that it isn't
4042 necessarily a win to convert things like:
4043
4044 (plus:V (vec_duplicate:V (reg:S R1))
4045 (vec_duplicate:V (reg:S R2)))
4046
4047 to:
4048
4049 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4050
4051 The first might be done entirely in vector registers while the
4052 second might need a move between register files. */
4053 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4054 elt0, elt1);
4055 if (tem)
4056 return gen_vec_duplicate (mode, tem);
4057 }
4058
4059 return 0;
4060 }
4061
4062 rtx
4063 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4064 rtx op0, rtx op1)
4065 {
4066 if (VECTOR_MODE_P (mode)
4067 && code != VEC_CONCAT
4068 && GET_CODE (op0) == CONST_VECTOR
4069 && GET_CODE (op1) == CONST_VECTOR)
4070 {
4071 unsigned int n_elts;
4072 if (!CONST_VECTOR_NUNITS (op0).is_constant (&n_elts))
4073 return NULL_RTX;
4074
4075 gcc_assert (known_eq (n_elts, CONST_VECTOR_NUNITS (op1)));
4076 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4077 rtvec v = rtvec_alloc (n_elts);
4078 unsigned int i;
4079
4080 for (i = 0; i < n_elts; i++)
4081 {
4082 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4083 CONST_VECTOR_ELT (op0, i),
4084 CONST_VECTOR_ELT (op1, i));
4085 if (!x || !valid_for_const_vector_p (mode, x))
4086 return 0;
4087 RTVEC_ELT (v, i) = x;
4088 }
4089
4090 return gen_rtx_CONST_VECTOR (mode, v);
4091 }
4092
4093 if (VECTOR_MODE_P (mode)
4094 && code == VEC_CONCAT
4095 && (CONST_SCALAR_INT_P (op0)
4096 || CONST_FIXED_P (op0)
4097 || CONST_DOUBLE_AS_FLOAT_P (op0))
4098 && (CONST_SCALAR_INT_P (op1)
4099 || CONST_DOUBLE_AS_FLOAT_P (op1)
4100 || CONST_FIXED_P (op1)))
4101 {
4102 /* Both inputs have a constant number of elements, so the result
4103 must too. */
4104 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4105 rtvec v = rtvec_alloc (n_elts);
4106
4107 gcc_assert (n_elts >= 2);
4108 if (n_elts == 2)
4109 {
4110 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4111 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4112
4113 RTVEC_ELT (v, 0) = op0;
4114 RTVEC_ELT (v, 1) = op1;
4115 }
4116 else
4117 {
4118 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4119 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4120 unsigned i;
4121
4122 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4123 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4124 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4125
4126 for (i = 0; i < op0_n_elts; ++i)
4127 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4128 for (i = 0; i < op1_n_elts; ++i)
4129 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4130 }
4131
4132 return gen_rtx_CONST_VECTOR (mode, v);
4133 }
4134
4135 if (SCALAR_FLOAT_MODE_P (mode)
4136 && CONST_DOUBLE_AS_FLOAT_P (op0)
4137 && CONST_DOUBLE_AS_FLOAT_P (op1)
4138 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4139 {
4140 if (code == AND
4141 || code == IOR
4142 || code == XOR)
4143 {
4144 long tmp0[4];
4145 long tmp1[4];
4146 REAL_VALUE_TYPE r;
4147 int i;
4148
4149 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4150 GET_MODE (op0));
4151 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4152 GET_MODE (op1));
4153 for (i = 0; i < 4; i++)
4154 {
4155 switch (code)
4156 {
4157 case AND:
4158 tmp0[i] &= tmp1[i];
4159 break;
4160 case IOR:
4161 tmp0[i] |= tmp1[i];
4162 break;
4163 case XOR:
4164 tmp0[i] ^= tmp1[i];
4165 break;
4166 default:
4167 gcc_unreachable ();
4168 }
4169 }
4170 real_from_target (&r, tmp0, mode);
4171 return const_double_from_real_value (r, mode);
4172 }
4173 else
4174 {
4175 REAL_VALUE_TYPE f0, f1, value, result;
4176 const REAL_VALUE_TYPE *opr0, *opr1;
4177 bool inexact;
4178
4179 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4180 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4181
4182 if (HONOR_SNANS (mode)
4183 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4184 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4185 return 0;
4186
4187 real_convert (&f0, mode, opr0);
4188 real_convert (&f1, mode, opr1);
4189
4190 if (code == DIV
4191 && real_equal (&f1, &dconst0)
4192 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4193 return 0;
4194
4195 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4196 && flag_trapping_math
4197 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4198 {
4199 int s0 = REAL_VALUE_NEGATIVE (f0);
4200 int s1 = REAL_VALUE_NEGATIVE (f1);
4201
4202 switch (code)
4203 {
4204 case PLUS:
4205 /* Inf + -Inf = NaN plus exception. */
4206 if (s0 != s1)
4207 return 0;
4208 break;
4209 case MINUS:
4210 /* Inf - Inf = NaN plus exception. */
4211 if (s0 == s1)
4212 return 0;
4213 break;
4214 case DIV:
4215 /* Inf / Inf = NaN plus exception. */
4216 return 0;
4217 default:
4218 break;
4219 }
4220 }
4221
4222 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4223 && flag_trapping_math
4224 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4225 || (REAL_VALUE_ISINF (f1)
4226 && real_equal (&f0, &dconst0))))
4227 /* Inf * 0 = NaN plus exception. */
4228 return 0;
4229
4230 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4231 &f0, &f1);
4232 real_convert (&result, mode, &value);
4233
4234 /* Don't constant fold this floating point operation if
4235 the result has overflowed and flag_trapping_math. */
4236
4237 if (flag_trapping_math
4238 && MODE_HAS_INFINITIES (mode)
4239 && REAL_VALUE_ISINF (result)
4240 && !REAL_VALUE_ISINF (f0)
4241 && !REAL_VALUE_ISINF (f1))
4242 /* Overflow plus exception. */
4243 return 0;
4244
4245 /* Don't constant fold this floating point operation if the
4246 result may depend upon the run-time rounding mode and
4247 flag_rounding_math is set, or if GCC's software emulation
4248 is unable to accurately represent the result. */
4249
4250 if ((flag_rounding_math
4251 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4252 && (inexact || !real_identical (&result, &value)))
4253 return NULL_RTX;
4254
4255 return const_double_from_real_value (result, mode);
4256 }
4257 }
4258
4259 /* We can fold some multi-word operations. */
4260 scalar_int_mode int_mode;
4261 if (is_a <scalar_int_mode> (mode, &int_mode)
4262 && CONST_SCALAR_INT_P (op0)
4263 && CONST_SCALAR_INT_P (op1))
4264 {
4265 wide_int result;
4266 wi::overflow_type overflow;
4267 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4268 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4269
4270 #if TARGET_SUPPORTS_WIDE_INT == 0
4271 /* This assert keeps the simplification from producing a result
4272 that cannot be represented in a CONST_DOUBLE, but a lot of
4273 upstream callers expect that this function never fails to
4274 simplify something, so if you added this to the test
4275 above, the code would die later anyway. If this assert
4276 happens, you just need to make the port support wide int. */
4277 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4278 #endif
4279 switch (code)
4280 {
4281 case MINUS:
4282 result = wi::sub (pop0, pop1);
4283 break;
4284
4285 case PLUS:
4286 result = wi::add (pop0, pop1);
4287 break;
4288
4289 case MULT:
4290 result = wi::mul (pop0, pop1);
4291 break;
4292
4293 case DIV:
4294 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4295 if (overflow)
4296 return NULL_RTX;
4297 break;
4298
4299 case MOD:
4300 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4301 if (overflow)
4302 return NULL_RTX;
4303 break;
4304
4305 case UDIV:
4306 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4307 if (overflow)
4308 return NULL_RTX;
4309 break;
4310
4311 case UMOD:
4312 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4313 if (overflow)
4314 return NULL_RTX;
4315 break;
4316
4317 case AND:
4318 result = wi::bit_and (pop0, pop1);
4319 break;
4320
4321 case IOR:
4322 result = wi::bit_or (pop0, pop1);
4323 break;
4324
4325 case XOR:
4326 result = wi::bit_xor (pop0, pop1);
4327 break;
4328
4329 case SMIN:
4330 result = wi::smin (pop0, pop1);
4331 break;
4332
4333 case SMAX:
4334 result = wi::smax (pop0, pop1);
4335 break;
4336
4337 case UMIN:
4338 result = wi::umin (pop0, pop1);
4339 break;
4340
4341 case UMAX:
4342 result = wi::umax (pop0, pop1);
4343 break;
4344
4345 case LSHIFTRT:
4346 case ASHIFTRT:
4347 case ASHIFT:
4348 {
4349 wide_int wop1 = pop1;
4350 if (SHIFT_COUNT_TRUNCATED)
4351 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4352 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4353 return NULL_RTX;
4354
4355 switch (code)
4356 {
4357 case LSHIFTRT:
4358 result = wi::lrshift (pop0, wop1);
4359 break;
4360
4361 case ASHIFTRT:
4362 result = wi::arshift (pop0, wop1);
4363 break;
4364
4365 case ASHIFT:
4366 result = wi::lshift (pop0, wop1);
4367 break;
4368
4369 default:
4370 gcc_unreachable ();
4371 }
4372 break;
4373 }
4374 case ROTATE:
4375 case ROTATERT:
4376 {
4377 if (wi::neg_p (pop1))
4378 return NULL_RTX;
4379
4380 switch (code)
4381 {
4382 case ROTATE:
4383 result = wi::lrotate (pop0, pop1);
4384 break;
4385
4386 case ROTATERT:
4387 result = wi::rrotate (pop0, pop1);
4388 break;
4389
4390 default:
4391 gcc_unreachable ();
4392 }
4393 break;
4394 }
4395 default:
4396 return NULL_RTX;
4397 }
4398 return immed_wide_int_const (result, int_mode);
4399 }
4400
4401 /* Handle polynomial integers. */
4402 if (NUM_POLY_INT_COEFFS > 1
4403 && is_a <scalar_int_mode> (mode, &int_mode)
4404 && poly_int_rtx_p (op0)
4405 && poly_int_rtx_p (op1))
4406 {
4407 poly_wide_int result;
4408 switch (code)
4409 {
4410 case PLUS:
4411 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4412 break;
4413
4414 case MINUS:
4415 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4416 break;
4417
4418 case MULT:
4419 if (CONST_SCALAR_INT_P (op1))
4420 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4421 else
4422 return NULL_RTX;
4423 break;
4424
4425 case ASHIFT:
4426 if (CONST_SCALAR_INT_P (op1))
4427 {
4428 wide_int shift = rtx_mode_t (op1, mode);
4429 if (SHIFT_COUNT_TRUNCATED)
4430 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4431 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4432 return NULL_RTX;
4433 result = wi::to_poly_wide (op0, mode) << shift;
4434 }
4435 else
4436 return NULL_RTX;
4437 break;
4438
4439 case IOR:
4440 if (!CONST_SCALAR_INT_P (op1)
4441 || !can_ior_p (wi::to_poly_wide (op0, mode),
4442 rtx_mode_t (op1, mode), &result))
4443 return NULL_RTX;
4444 break;
4445
4446 default:
4447 return NULL_RTX;
4448 }
4449 return immed_wide_int_const (result, int_mode);
4450 }
4451
4452 return NULL_RTX;
4453 }
4454
4455
4456 \f
4457 /* Return a positive integer if X should sort after Y. The value
4458 returned is 1 if and only if X and Y are both regs. */
4459
4460 static int
4461 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4462 {
4463 int result;
4464
4465 result = (commutative_operand_precedence (y)
4466 - commutative_operand_precedence (x));
4467 if (result)
4468 return result + result;
4469
4470 /* Group together equal REGs to do more simplification. */
4471 if (REG_P (x) && REG_P (y))
4472 return REGNO (x) > REGNO (y);
4473
4474 return 0;
4475 }
4476
4477 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4478 operands may be another PLUS or MINUS.
4479
4480 Rather than test for specific cases, we do this by a brute-force method
4481 and do all possible simplifications until no more changes occur. Then
4482 we rebuild the operation.
4483
4484 May return NULL_RTX when no changes were made. */
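/* For example, (plus (minus A (const_int 2)) (const_int 7)) is
flattened into the operands {A, -2, +7}; the two constants combine
to 5 and the result is rebuilt as (plus A (const_int 5)). */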
4485
4486 static rtx
4487 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4488 rtx op1)
4489 {
4490 struct simplify_plus_minus_op_data
4491 {
4492 rtx op;
4493 short neg;
4494 } ops[16];
4495 rtx result, tem;
4496 int n_ops = 2;
4497 int changed, n_constants, canonicalized = 0;
4498 int i, j;
4499
4500 memset (ops, 0, sizeof ops);
4501
4502 /* Set up the two operands and then expand them until nothing has been
4503 changed. If we run out of room in our array, give up; this should
4504 almost never happen. */
4505
4506 ops[0].op = op0;
4507 ops[0].neg = 0;
4508 ops[1].op = op1;
4509 ops[1].neg = (code == MINUS);
4510
4511 do
4512 {
4513 changed = 0;
4514 n_constants = 0;
4515
4516 for (i = 0; i < n_ops; i++)
4517 {
4518 rtx this_op = ops[i].op;
4519 int this_neg = ops[i].neg;
4520 enum rtx_code this_code = GET_CODE (this_op);
4521
4522 switch (this_code)
4523 {
4524 case PLUS:
4525 case MINUS:
4526 if (n_ops == ARRAY_SIZE (ops))
4527 return NULL_RTX;
4528
4529 ops[n_ops].op = XEXP (this_op, 1);
4530 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4531 n_ops++;
4532
4533 ops[i].op = XEXP (this_op, 0);
4534 changed = 1;
4535 /* If this operand was negated then we will potentially
4536 canonicalize the expression. Similarly, if we don't
4537 place the operands adjacent, we're re-ordering the
4538 expression and thus might be performing a
4539 canonicalization. Ignore register re-ordering.
4540 ??? It might be better to shuffle the ops array here,
4541 but then (plus (plus A B) (plus C D)) wouldn't
4542 be seen as non-canonical. */
4543 if (this_neg
4544 || (i != n_ops - 2
4545 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4546 canonicalized = 1;
4547 break;
4548
4549 case NEG:
4550 ops[i].op = XEXP (this_op, 0);
4551 ops[i].neg = ! this_neg;
4552 changed = 1;
4553 canonicalized = 1;
4554 break;
4555
4556 case CONST:
4557 if (n_ops != ARRAY_SIZE (ops)
4558 && GET_CODE (XEXP (this_op, 0)) == PLUS
4559 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4560 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4561 {
4562 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4563 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4564 ops[n_ops].neg = this_neg;
4565 n_ops++;
4566 changed = 1;
4567 canonicalized = 1;
4568 }
4569 break;
4570
4571 case NOT:
4572 /* ~a -> (-a - 1) */
4573 if (n_ops != ARRAY_SIZE (ops))
4574 {
4575 ops[n_ops].op = CONSTM1_RTX (mode);
4576 ops[n_ops++].neg = this_neg;
4577 ops[i].op = XEXP (this_op, 0);
4578 ops[i].neg = !this_neg;
4579 changed = 1;
4580 canonicalized = 1;
4581 }
4582 break;
4583
4584 case CONST_INT:
4585 n_constants++;
4586 if (this_neg)
4587 {
4588 ops[i].op = neg_const_int (mode, this_op);
4589 ops[i].neg = 0;
4590 changed = 1;
4591 canonicalized = 1;
4592 }
4593 break;
4594
4595 default:
4596 break;
4597 }
4598 }
4599 }
4600 while (changed);
4601
4602 if (n_constants > 1)
4603 canonicalized = 1;
4604
4605 gcc_assert (n_ops >= 2);
4606
4607 /* If we only have two operands, we can avoid the loops. */
4608 if (n_ops == 2)
4609 {
4610 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4611 rtx lhs, rhs;
4612
4613 /* Get the two operands. Be careful with the order, especially for
4614 the cases where code == MINUS. */
4615 if (ops[0].neg && ops[1].neg)
4616 {
4617 lhs = gen_rtx_NEG (mode, ops[0].op);
4618 rhs = ops[1].op;
4619 }
4620 else if (ops[0].neg)
4621 {
4622 lhs = ops[1].op;
4623 rhs = ops[0].op;
4624 }
4625 else
4626 {
4627 lhs = ops[0].op;
4628 rhs = ops[1].op;
4629 }
4630
4631 return simplify_const_binary_operation (code, mode, lhs, rhs);
4632 }
4633
4634 /* Now simplify each pair of operands until nothing changes. */
4635 while (1)
4636 {
4637 /* Insertion sort is good enough for a small array. */
4638 for (i = 1; i < n_ops; i++)
4639 {
4640 struct simplify_plus_minus_op_data save;
4641 int cmp;
4642
4643 j = i - 1;
4644 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4645 if (cmp <= 0)
4646 continue;
4647 /* Just swapping registers doesn't count as canonicalization. */
4648 if (cmp != 1)
4649 canonicalized = 1;
4650
4651 save = ops[i];
4652 do
4653 ops[j + 1] = ops[j];
4654 while (j--
4655 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4656 ops[j + 1] = save;
4657 }
4658
4659 changed = 0;
4660 for (i = n_ops - 1; i > 0; i--)
4661 for (j = i - 1; j >= 0; j--)
4662 {
4663 rtx lhs = ops[j].op, rhs = ops[i].op;
4664 int lneg = ops[j].neg, rneg = ops[i].neg;
4665
4666 if (lhs != 0 && rhs != 0)
4667 {
4668 enum rtx_code ncode = PLUS;
4669
4670 if (lneg != rneg)
4671 {
4672 ncode = MINUS;
4673 if (lneg)
4674 std::swap (lhs, rhs);
4675 }
4676 else if (swap_commutative_operands_p (lhs, rhs))
4677 std::swap (lhs, rhs);
4678
4679 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4680 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4681 {
4682 rtx tem_lhs, tem_rhs;
4683
4684 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4685 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4686 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4687 tem_rhs);
4688
4689 if (tem && !CONSTANT_P (tem))
4690 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4691 }
4692 else
4693 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4694
4695 if (tem)
4696 {
4697 /* Reject "simplifications" that just wrap the two
4698 arguments in a CONST. Failure to do so can result
4699 in infinite recursion with simplify_binary_operation
4700 when it calls us to simplify CONST operations.
4701 Also, if we find such a simplification, don't try
4702 any more combinations with this rhs: we must have
4703 something like symbol+offset, i.e. one of the
4704 trivial CONST expressions we handle later. */
4705 if (GET_CODE (tem) == CONST
4706 && GET_CODE (XEXP (tem, 0)) == ncode
4707 && XEXP (XEXP (tem, 0), 0) == lhs
4708 && XEXP (XEXP (tem, 0), 1) == rhs)
4709 break;
4710 lneg &= rneg;
4711 if (GET_CODE (tem) == NEG)
4712 tem = XEXP (tem, 0), lneg = !lneg;
4713 if (CONST_INT_P (tem) && lneg)
4714 tem = neg_const_int (mode, tem), lneg = 0;
4715
4716 ops[i].op = tem;
4717 ops[i].neg = lneg;
4718 ops[j].op = NULL_RTX;
4719 changed = 1;
4720 canonicalized = 1;
4721 }
4722 }
4723 }
4724
4725 if (!changed)
4726 break;
4727
4728 /* Pack all the operands to the lower-numbered entries. */
4729 for (i = 0, j = 0; j < n_ops; j++)
4730 if (ops[j].op)
4731 {
4732 ops[i] = ops[j];
4733 i++;
4734 }
4735 n_ops = i;
4736 }
4737
4738 /* If nothing changed, check that rematerialization of rtl instructions
4739 is still required. */
4740 if (!canonicalized)
4741 {
4742 /* Perform rematerialization only if all operands are registers and
4743 all operations are PLUS. */
4744 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4745 around rs6000 and how it uses the CA register. See PR67145. */
4746 for (i = 0; i < n_ops; i++)
4747 if (ops[i].neg
4748 || !REG_P (ops[i].op)
4749 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4750 && fixed_regs[REGNO (ops[i].op)]
4751 && !global_regs[REGNO (ops[i].op)]
4752 && ops[i].op != frame_pointer_rtx
4753 && ops[i].op != arg_pointer_rtx
4754 && ops[i].op != stack_pointer_rtx))
4755 return NULL_RTX;
4756 goto gen_result;
4757 }
4758
4759 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4760 if (n_ops == 2
4761 && CONST_INT_P (ops[1].op)
4762 && CONSTANT_P (ops[0].op)
4763 && ops[0].neg)
4764 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4765
4766 /* We suppressed creation of trivial CONST expressions in the
4767 combination loop to avoid recursion. Create one manually now.
4768 The combination loop should have ensured that there is exactly
4769 one CONST_INT, and the sort will have ensured that it is last
4770 in the array and that any other constant will be next-to-last. */
4771
4772 if (n_ops > 1
4773 && CONST_INT_P (ops[n_ops - 1].op)
4774 && CONSTANT_P (ops[n_ops - 2].op))
4775 {
4776 rtx value = ops[n_ops - 1].op;
4777 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4778 value = neg_const_int (mode, value);
4779 if (CONST_INT_P (value))
4780 {
4781 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4782 INTVAL (value));
4783 n_ops--;
4784 }
4785 }
4786
4787 /* Put a non-negated operand first, if possible. */
4788
4789 for (i = 0; i < n_ops && ops[i].neg; i++)
4790 continue;
4791 if (i == n_ops)
4792 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4793 else if (i != 0)
4794 {
4795 tem = ops[0].op;
4796 ops[0] = ops[i];
4797 ops[i].op = tem;
4798 ops[i].neg = 1;
4799 }
4800
4801 /* Now make the result by performing the requested operations. */
4802 gen_result:
4803 result = ops[0].op;
4804 for (i = 1; i < n_ops; i++)
4805 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4806 mode, result, ops[i].op);
4807
4808 return result;
4809 }
4810
4811 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4812 static bool
4813 plus_minus_operand_p (const_rtx x)
4814 {
4815 return GET_CODE (x) == PLUS
4816 || GET_CODE (x) == MINUS
4817 || (GET_CODE (x) == CONST
4818 && GET_CODE (XEXP (x, 0)) == PLUS
4819 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4820 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4821 }
4822
4823 /* Like simplify_binary_operation except used for relational operators.
4824 MODE is the mode of the result. If MODE is VOIDmode, the two operands
4825 must not both be VOIDmode as well.
4826
4827 CMP_MODE specifies the mode in which the comparison is done, so it is
4828 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4829 the operands or, if both are VOIDmode, the operands are compared in
4830 "infinite precision". */
4831 rtx
4832 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4833 machine_mode cmp_mode, rtx op0, rtx op1)
4834 {
4835 rtx tem, trueop0, trueop1;
4836
4837 if (cmp_mode == VOIDmode)
4838 cmp_mode = GET_MODE (op0);
4839 if (cmp_mode == VOIDmode)
4840 cmp_mode = GET_MODE (op1);
4841
4842 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4843 if (tem)
4844 {
4845 if (SCALAR_FLOAT_MODE_P (mode))
4846 {
4847 if (tem == const0_rtx)
4848 return CONST0_RTX (mode);
4849 #ifdef FLOAT_STORE_FLAG_VALUE
4850 {
4851 REAL_VALUE_TYPE val;
4852 val = FLOAT_STORE_FLAG_VALUE (mode);
4853 return const_double_from_real_value (val, mode);
4854 }
4855 #else
4856 return NULL_RTX;
4857 #endif
4858 }
4859 if (VECTOR_MODE_P (mode))
4860 {
4861 if (tem == const0_rtx)
4862 return CONST0_RTX (mode);
4863 #ifdef VECTOR_STORE_FLAG_VALUE
4864 {
4865 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4866 if (val == NULL_RTX)
4867 return NULL_RTX;
4868 if (val == const1_rtx)
4869 return CONST1_RTX (mode);
4870
4871 return gen_const_vec_duplicate (mode, val);
4872 }
4873 #else
4874 return NULL_RTX;
4875 #endif
4876 }
4877
4878 return tem;
4879 }
4880
4881 /* For the following tests, ensure const0_rtx is op1. */
4882 if (swap_commutative_operands_p (op0, op1)
4883 || (op0 == const0_rtx && op1 != const0_rtx))
4884 std::swap (op0, op1), code = swap_condition (code);
4885
4886 /* If op0 is a compare, extract the comparison arguments from it. */
4887 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4888 return simplify_gen_relational (code, mode, VOIDmode,
4889 XEXP (op0, 0), XEXP (op0, 1));
4890
4891 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4892 || CC0_P (op0))
4893 return NULL_RTX;
4894
4895 trueop0 = avoid_constant_pool_reference (op0);
4896 trueop1 = avoid_constant_pool_reference (op1);
4897 return simplify_relational_operation_1 (code, mode, cmp_mode,
4898 trueop0, trueop1);
4899 }
4900
4901 /* This part of simplify_relational_operation is only used when CMP_MODE
4902 is not in class MODE_CC (i.e. it is a real comparison).
4903
4904 MODE is the mode of the result, while CMP_MODE specifies the mode
4905 in which the comparison is done, so it is the mode of the operands. */
4906
4907 static rtx
4908 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4909 machine_mode cmp_mode, rtx op0, rtx op1)
4910 {
4911 enum rtx_code op0code = GET_CODE (op0);
4912
4913 if (op1 == const0_rtx && COMPARISON_P (op0))
4914 {
4915 /* If op0 is a comparison, extract the comparison arguments
4916 from it. */
4917 if (code == NE)
4918 {
4919 if (GET_MODE (op0) == mode)
4920 return simplify_rtx (op0);
4921 else
4922 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4923 XEXP (op0, 0), XEXP (op0, 1));
4924 }
4925 else if (code == EQ)
4926 {
4927 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4928 if (new_code != UNKNOWN)
4929 return simplify_gen_relational (new_code, mode, VOIDmode,
4930 XEXP (op0, 0), XEXP (op0, 1));
4931 }
4932 }
4933
4934 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4935 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
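/* For example, (ltu (plus a (const_int 4)) (const_int 4)) becomes
(geu a (const_int -4)): the addition wraps around exactly when a,
viewed as unsigned, is at least -4. */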
4936 if ((code == LTU || code == GEU)
4937 && GET_CODE (op0) == PLUS
4938 && CONST_INT_P (XEXP (op0, 1))
4939 && (rtx_equal_p (op1, XEXP (op0, 0))
4940 || rtx_equal_p (op1, XEXP (op0, 1)))
4941 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4942 && XEXP (op0, 1) != const0_rtx)
4943 {
4944 rtx new_cmp
4945 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4946 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4947 cmp_mode, XEXP (op0, 0), new_cmp);
4948 }
4949
4950 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4951 transformed into (LTU a -C). */
4952 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4953 && CONST_INT_P (XEXP (op0, 1))
4954 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4955 && XEXP (op0, 1) != const0_rtx)
4956 {
4957 rtx new_cmp
4958 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4959 return simplify_gen_relational (LTU, mode, cmp_mode,
4960 XEXP (op0, 0), new_cmp);
4961 }
4962
4963 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4964 if ((code == LTU || code == GEU)
4965 && GET_CODE (op0) == PLUS
4966 && rtx_equal_p (op1, XEXP (op0, 1))
4967 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4968 && !rtx_equal_p (op1, XEXP (op0, 0)))
4969 return simplify_gen_relational (code, mode, cmp_mode, op0,
4970 copy_rtx (XEXP (op0, 0)));
4971
4972 if (op1 == const0_rtx)
4973 {
4974 /* Canonicalize (GTU x 0) as (NE x 0). */
4975 if (code == GTU)
4976 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4977 /* Canonicalize (LEU x 0) as (EQ x 0). */
4978 if (code == LEU)
4979 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4980 }
4981 else if (op1 == const1_rtx)
4982 {
4983 switch (code)
4984 {
4985 case GE:
4986 /* Canonicalize (GE x 1) as (GT x 0). */
4987 return simplify_gen_relational (GT, mode, cmp_mode,
4988 op0, const0_rtx);
4989 case GEU:
4990 /* Canonicalize (GEU x 1) as (NE x 0). */
4991 return simplify_gen_relational (NE, mode, cmp_mode,
4992 op0, const0_rtx);
4993 case LT:
4994 /* Canonicalize (LT x 1) as (LE x 0). */
4995 return simplify_gen_relational (LE, mode, cmp_mode,
4996 op0, const0_rtx);
4997 case LTU:
4998 /* Canonicalize (LTU x 1) as (EQ x 0). */
4999 return simplify_gen_relational (EQ, mode, cmp_mode,
5000 op0, const0_rtx);
5001 default:
5002 break;
5003 }
5004 }
5005 else if (op1 == constm1_rtx)
5006 {
5007 /* Canonicalize (LE x -1) as (LT x 0). */
5008 if (code == LE)
5009 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5010 /* Canonicalize (GT x -1) as (GE x 0). */
5011 if (code == GT)
5012 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5013 }
5014
5015 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
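/* For example, (eq (plus x (const_int 3)) (const_int 10)) becomes
(eq x (const_int 7)). */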
5016 if ((code == EQ || code == NE)
5017 && (op0code == PLUS || op0code == MINUS)
5018 && CONSTANT_P (op1)
5019 && CONSTANT_P (XEXP (op0, 1))
5020 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5021 {
5022 rtx x = XEXP (op0, 0);
5023 rtx c = XEXP (op0, 1);
5024 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5025 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5026
5027 /* Detect an infinite recursive condition, where we oscillate at this
5028 simplification case between:
5029 A + B == C <---> C - B == A,
5030 where A, B, and C are all constants with non-simplifiable expressions,
5031 usually SYMBOL_REFs. */
5032 if (GET_CODE (tem) == invcode
5033 && CONSTANT_P (x)
5034 && rtx_equal_p (c, XEXP (tem, 1)))
5035 return NULL_RTX;
5036
5037 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5038 }
5039
5040 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5041 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5042 scalar_int_mode int_mode, int_cmp_mode;
5043 if (code == NE
5044 && op1 == const0_rtx
5045 && is_int_mode (mode, &int_mode)
5046 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5047 /* ??? Work-around BImode bugs in the ia64 backend. */
5048 && int_mode != BImode
5049 && int_cmp_mode != BImode
5050 && nonzero_bits (op0, int_cmp_mode) == 1
5051 && STORE_FLAG_VALUE == 1)
5052 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5053 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5054 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5055
5056 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5057 if ((code == EQ || code == NE)
5058 && op1 == const0_rtx
5059 && op0code == XOR)
5060 return simplify_gen_relational (code, mode, cmp_mode,
5061 XEXP (op0, 0), XEXP (op0, 1));
5062
5063 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5064 if ((code == EQ || code == NE)
5065 && op0code == XOR
5066 && rtx_equal_p (XEXP (op0, 0), op1)
5067 && !side_effects_p (XEXP (op0, 0)))
5068 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5069 CONST0_RTX (mode));
5070
5071 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5072 if ((code == EQ || code == NE)
5073 && op0code == XOR
5074 && rtx_equal_p (XEXP (op0, 1), op1)
5075 && !side_effects_p (XEXP (op0, 1)))
5076 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5077 CONST0_RTX (mode));
5078
5079 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5080 if ((code == EQ || code == NE)
5081 && op0code == XOR
5082 && CONST_SCALAR_INT_P (op1)
5083 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5084 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5085 simplify_gen_binary (XOR, cmp_mode,
5086 XEXP (op0, 1), op1));
5087
5088 /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5089 constant folding if x/y is a constant. */
5090 if ((code == EQ || code == NE)
5091 && (op0code == AND || op0code == IOR)
5092 && !side_effects_p (op1)
5093 && op1 != CONST0_RTX (cmp_mode))
5094 {
5095 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5096 (eq/ne (and (not y) x) 0). */
5097 if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5098 || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5099 {
5100 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5101 cmp_mode);
5102 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5103
5104 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5105 CONST0_RTX (cmp_mode));
5106 }
5107
5108 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5109 (eq/ne (and (not x) y) 0). */
5110 if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5111 || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5112 {
5113 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5114 cmp_mode);
5115 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5116
5117 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5118 CONST0_RTX (cmp_mode));
5119 }
5120 }
5121
5122 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5123 if ((code == EQ || code == NE)
5124 && GET_CODE (op0) == BSWAP
5125 && CONST_SCALAR_INT_P (op1))
5126 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5127 simplify_gen_unary (BSWAP, cmp_mode,
5128 op1, cmp_mode));
5129
5130 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5131 if ((code == EQ || code == NE)
5132 && GET_CODE (op0) == BSWAP
5133 && GET_CODE (op1) == BSWAP)
5134 return simplify_gen_relational (code, mode, cmp_mode,
5135 XEXP (op0, 0), XEXP (op1, 0));
5136
5137 if (op0code == POPCOUNT && op1 == const0_rtx)
5138 switch (code)
5139 {
5140 case EQ:
5141 case LE:
5142 case LEU:
5143 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5144 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5145 XEXP (op0, 0), const0_rtx);
5146
5147 case NE:
5148 case GT:
5149 case GTU:
5150 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5151 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5152 XEXP (op0, 0), const0_rtx);
5153
5154 default:
5155 break;
5156 }
5157
5158 return NULL_RTX;
5159 }
5160
5161 enum
5162 {
5163 CMP_EQ = 1,
5164 CMP_LT = 2,
5165 CMP_GT = 4,
5166 CMP_LTU = 8,
5167 CMP_GTU = 16
5168 };
5169
5170
5171 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5172 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5173 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5174 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5175 For floating-point comparisons, assume that the operands were ordered. */
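/* For example, if op0 is known to be less than op1 as a signed value
but greater than op1 as an unsigned value, KNOWN_RESULTS is
CMP_LT | CMP_GTU; LT then folds to const_true_rtx while LTU folds
to const0_rtx. */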
5176
5177 static rtx
5178 comparison_result (enum rtx_code code, int known_results)
5179 {
5180 switch (code)
5181 {
5182 case EQ:
5183 case UNEQ:
5184 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5185 case NE:
5186 case LTGT:
5187 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5188
5189 case LT:
5190 case UNLT:
5191 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5192 case GE:
5193 case UNGE:
5194 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5195
5196 case GT:
5197 case UNGT:
5198 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5199 case LE:
5200 case UNLE:
5201 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5202
5203 case LTU:
5204 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5205 case GEU:
5206 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5207
5208 case GTU:
5209 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5210 case LEU:
5211 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5212
5213 case ORDERED:
5214 return const_true_rtx;
5215 case UNORDERED:
5216 return const0_rtx;
5217 default:
5218 gcc_unreachable ();
5219 }
5220 }
5221
5222 /* Check if the given comparison (done in the given MODE) is actually
5223 a tautology or a contradiction. If the mode is VOIDmode, the
5224 comparison is done in "infinite precision". If no simplification
5225 is possible, this function returns zero. Otherwise, it returns
5226 either const_true_rtx or const0_rtx. */
5227
5228 rtx
5229 simplify_const_relational_operation (enum rtx_code code,
5230 machine_mode mode,
5231 rtx op0, rtx op1)
5232 {
5233 rtx tem;
5234 rtx trueop0;
5235 rtx trueop1;
5236
5237 gcc_assert (mode != VOIDmode
5238 || (GET_MODE (op0) == VOIDmode
5239 && GET_MODE (op1) == VOIDmode));
5240
5241 /* If op0 is a compare, extract the comparison arguments from it. */
5242 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5243 {
5244 op1 = XEXP (op0, 1);
5245 op0 = XEXP (op0, 0);
5246
5247 if (GET_MODE (op0) != VOIDmode)
5248 mode = GET_MODE (op0);
5249 else if (GET_MODE (op1) != VOIDmode)
5250 mode = GET_MODE (op1);
5251 else
5252 return 0;
5253 }
5254
5255 /* We can't simplify MODE_CC values since we don't know what the
5256 actual comparison is. */
5257 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5258 return 0;
5259
5260 /* Make sure the constant is second. */
5261 if (swap_commutative_operands_p (op0, op1))
5262 {
5263 std::swap (op0, op1);
5264 code = swap_condition (code);
5265 }
5266
5267 trueop0 = avoid_constant_pool_reference (op0);
5268 trueop1 = avoid_constant_pool_reference (op1);
5269
5270 /* For integer comparisons of A and B maybe we can simplify A - B and can
5271 then simplify a comparison of that with zero. If A and B are both either
5272 a register or a CONST_INT, this can't help; testing for these cases will
5273 prevent infinite recursion here and speed things up.
5274
5275 We can only do this for EQ and NE comparisons as otherwise we may
5276 lose or introduce overflow which we cannot disregard as undefined as
5277 we do not know the signedness of the operation on either the left or
5278 the right hand side of the comparison. */
5279
5280 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5281 && (code == EQ || code == NE)
5282 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5283 && (REG_P (op1) || CONST_INT_P (trueop1)))
5284 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5285 /* We cannot do this if tem is a nonzero address. */
5286 && ! nonzero_address_p (tem))
5287 return simplify_const_relational_operation (signed_condition (code),
5288 mode, tem, const0_rtx);
5289
5290 if (! HONOR_NANS (mode) && code == ORDERED)
5291 return const_true_rtx;
5292
5293 if (! HONOR_NANS (mode) && code == UNORDERED)
5294 return const0_rtx;
5295
5296 /* For modes without NaNs, if the two operands are equal, we know the
5297 result except if they have side-effects. Even with NaNs we know
5298 the result of unordered comparisons and, if signaling NaNs are
5299 irrelevant, also the result of LT/GT/LTGT. */
5300 if ((! HONOR_NANS (trueop0)
5301 || code == UNEQ || code == UNLE || code == UNGE
5302 || ((code == LT || code == GT || code == LTGT)
5303 && ! HONOR_SNANS (trueop0)))
5304 && rtx_equal_p (trueop0, trueop1)
5305 && ! side_effects_p (trueop0))
5306 return comparison_result (code, CMP_EQ);
5307
5308 /* If the operands are floating-point constants, see if we can fold
5309 the result. */
5310 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5311 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5312 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5313 {
5314 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5315 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5316
5317 /* Comparisons are unordered iff at least one of the values is NaN. */
5318 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5319 switch (code)
5320 {
5321 case UNEQ:
5322 case UNLT:
5323 case UNGT:
5324 case UNLE:
5325 case UNGE:
5326 case NE:
5327 case UNORDERED:
5328 return const_true_rtx;
5329 case EQ:
5330 case LT:
5331 case GT:
5332 case LE:
5333 case GE:
5334 case LTGT:
5335 case ORDERED:
5336 return const0_rtx;
5337 default:
5338 return 0;
5339 }
5340
5341 return comparison_result (code,
5342 (real_equal (d0, d1) ? CMP_EQ :
5343 real_less (d0, d1) ? CMP_LT : CMP_GT));
5344 }
5345
5346 /* Otherwise, see if the operands are both integers. */
5347 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5348 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5349 {
5350 /* It would be nice if we really had a mode here. However, the
5351 largest int representable on the target is as good as
5352 infinite. */
5353 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5354 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5355 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5356
5357 if (wi::eq_p (ptrueop0, ptrueop1))
5358 return comparison_result (code, CMP_EQ);
5359 else
5360 {
5361 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5362 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5363 return comparison_result (code, cr);
5364 }
5365 }
5366
5367 /* Optimize comparisons with upper and lower bounds. */
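  /* E.g. for a word-sized integer mode, (geu x (const_int 0)) folds to
     const_true_rtx and (ltu x (const_int 0)) to const0_rtx below, provided
     x has no side effects, since 0 is the unsigned minimum.  (Illustrative.)  */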
5368 scalar_int_mode int_mode;
5369 if (CONST_INT_P (trueop1)
5370 && is_a <scalar_int_mode> (mode, &int_mode)
5371 && HWI_COMPUTABLE_MODE_P (int_mode)
5372 && !side_effects_p (trueop0))
5373 {
5374 int sign;
5375 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5376 HOST_WIDE_INT val = INTVAL (trueop1);
5377 HOST_WIDE_INT mmin, mmax;
5378
5379 if (code == GEU
5380 || code == LEU
5381 || code == GTU
5382 || code == LTU)
5383 sign = 0;
5384 else
5385 sign = 1;
5386
5387 /* Get a reduced range if the sign bit is zero. */
5388 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5389 {
5390 mmin = 0;
5391 mmax = nonzero;
5392 }
5393 else
5394 {
5395 rtx mmin_rtx, mmax_rtx;
5396 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5397
5398 mmin = INTVAL (mmin_rtx);
5399 mmax = INTVAL (mmax_rtx);
5400 if (sign)
5401 {
5402 unsigned int sign_copies
5403 = num_sign_bit_copies (trueop0, int_mode);
5404
5405 mmin >>= (sign_copies - 1);
5406 mmax >>= (sign_copies - 1);
5407 }
5408 }
5409
5410 switch (code)
5411 {
5412 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5413 case GEU:
5414 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5415 return const_true_rtx;
5416 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5417 return const0_rtx;
5418 break;
5419 case GE:
5420 if (val <= mmin)
5421 return const_true_rtx;
5422 if (val > mmax)
5423 return const0_rtx;
5424 break;
5425
5426 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5427 case LEU:
5428 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5429 return const_true_rtx;
5430 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5431 return const0_rtx;
5432 break;
5433 case LE:
5434 if (val >= mmax)
5435 return const_true_rtx;
5436 if (val < mmin)
5437 return const0_rtx;
5438 break;
5439
5440 case EQ:
5441 /* x == y is always false for y out of range. */
5442 if (val < mmin || val > mmax)
5443 return const0_rtx;
5444 break;
5445
5446 /* x > y is always false for y >= mmax, always true for y < mmin. */
5447 case GTU:
5448 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5449 return const0_rtx;
5450 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5451 return const_true_rtx;
5452 break;
5453 case GT:
5454 if (val >= mmax)
5455 return const0_rtx;
5456 if (val < mmin)
5457 return const_true_rtx;
5458 break;
5459
5460 /* x < y is always false for y <= mmin, always true for y > mmax. */
5461 case LTU:
5462 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5463 return const0_rtx;
5464 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5465 return const_true_rtx;
5466 break;
5467 case LT:
5468 if (val <= mmin)
5469 return const0_rtx;
5470 if (val > mmax)
5471 return const_true_rtx;
5472 break;
5473
5474 case NE:
5475 /* x != y is always true for y out of range. */
5476 if (val < mmin || val > mmax)
5477 return const_true_rtx;
5478 break;
5479
5480 default:
5481 break;
5482 }
5483 }
5484
5485 /* Optimize integer comparisons with zero. */
5486 if (is_a <scalar_int_mode> (mode, &int_mode)
5487 && trueop1 == const0_rtx
5488 && !side_effects_p (trueop0))
5489 {
5490 /* Some addresses are known to be nonzero. We don't know
5491 their sign, but equality comparisons are known. */
5492 if (nonzero_address_p (trueop0))
5493 {
5494 if (code == EQ || code == LEU)
5495 return const0_rtx;
5496 if (code == NE || code == GTU)
5497 return const_true_rtx;
5498 }
5499
5500 /* See if the first operand is an IOR with a constant. If so, we
5501 may be able to determine the result of this comparison. */
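      /* E.g. (eq (ior x (const_int 5)) (const_int 0)) folds to const0_rtx
	 below, because the IOR is known to be nonzero.  (Illustrative.)  */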
5502 if (GET_CODE (op0) == IOR)
5503 {
5504 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5505 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5506 {
5507 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5508 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5509 && (UINTVAL (inner_const)
5510 & (HOST_WIDE_INT_1U
5511 << sign_bitnum)));
5512
5513 switch (code)
5514 {
5515 case EQ:
5516 case LEU:
5517 return const0_rtx;
5518 case NE:
5519 case GTU:
5520 return const_true_rtx;
5521 case LT:
5522 case LE:
5523 if (has_sign)
5524 return const_true_rtx;
5525 break;
5526 case GT:
5527 case GE:
5528 if (has_sign)
5529 return const0_rtx;
5530 break;
5531 default:
5532 break;
5533 }
5534 }
5535 }
5536 }
5537
5538 /* Optimize comparison of ABS with zero. */
5539 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5540 && (GET_CODE (trueop0) == ABS
5541 || (GET_CODE (trueop0) == FLOAT_EXTEND
5542 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5543 {
5544 switch (code)
5545 {
5546 case LT:
5547 /* Optimize abs(x) < 0.0. */
5548 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5549 return const0_rtx;
5550 break;
5551
5552 case GE:
5553 /* Optimize abs(x) >= 0.0. */
5554 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5555 return const_true_rtx;
5556 break;
5557
5558 case UNGE:
5559 /* Optimize ! (abs(x) < 0.0). */
5560 return const_true_rtx;
5561
5562 default:
5563 break;
5564 }
5565 }
5566
5567 return 0;
5568 }
5569
5570 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5571 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5572 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the
5573 expression can be simplified to that, or NULL_RTX if not.
5574 Assume X is compared against zero with CMP_CODE and the true
5575 arm is TRUE_VAL and the false arm is FALSE_VAL. */
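/* For example, on a hypothetical target where CLZ_DEFINED_VALUE_AT_ZERO
   (SImode, value) is nonzero and sets the value to 32, an X compared
   against zero with EQ, TRUE_VAL (const_int 32) and FALSE_VAL (clz:SI X)
   simplifies to (clz:SI X).  (Illustrative.)  */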
5576
5577 static rtx
5578 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5579 {
5580 if (cmp_code != EQ && cmp_code != NE)
5581 return NULL_RTX;
5582
5583 /* Result on X == 0 and X != 0 respectively. */
5584 rtx on_zero, on_nonzero;
5585 if (cmp_code == EQ)
5586 {
5587 on_zero = true_val;
5588 on_nonzero = false_val;
5589 }
5590 else
5591 {
5592 on_zero = false_val;
5593 on_nonzero = true_val;
5594 }
5595
5596 rtx_code op_code = GET_CODE (on_nonzero);
5597 if ((op_code != CLZ && op_code != CTZ)
5598 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5599 || !CONST_INT_P (on_zero))
5600 return NULL_RTX;
5601
5602 HOST_WIDE_INT op_val;
5603 scalar_int_mode mode ATTRIBUTE_UNUSED
5604 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5605 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5606 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5607 && op_val == INTVAL (on_zero))
5608 return on_nonzero;
5609
5610 return NULL_RTX;
5611 }
5612
5613 /* Try to simplify X given that it appears within operand OP of a
5614 VEC_MERGE operation whose mask is MASK. X need not use the same
5615 vector mode as the VEC_MERGE, but it must have the same number of
5616 elements.
5617
5618 Return the simplified X on success, otherwise return NULL_RTX. */
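/* For example, if X is (vec_merge a b m) and MASK is that same m, operand
   OP of X can be used directly; the VEC_MERGE case below relies on this to
   rewrite (vec_merge (vec_merge a b m) (vec_merge c d m) m) as
   (vec_merge a d m).  (Illustrative.)  */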
5619
5620 rtx
5621 simplify_merge_mask (rtx x, rtx mask, int op)
5622 {
5623 gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
5624 poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
5625 if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
5626 {
5627 if (side_effects_p (XEXP (x, 1 - op)))
5628 return NULL_RTX;
5629
5630 return XEXP (x, op);
5631 }
5632 if (UNARY_P (x)
5633 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5634 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
5635 {
5636 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5637 if (top0)
5638 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
5639 GET_MODE (XEXP (x, 0)));
5640 }
5641 if (BINARY_P (x)
5642 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5643 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5644 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5645 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
5646 {
5647 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5648 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5649 if (top0 || top1)
5650 {
5651 if (COMPARISON_P (x))
5652 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
5653 GET_MODE (XEXP (x, 0)) != VOIDmode
5654 ? GET_MODE (XEXP (x, 0))
5655 : GET_MODE (XEXP (x, 1)),
5656 top0 ? top0 : XEXP (x, 0),
5657 top1 ? top1 : XEXP (x, 1));
5658 else
5659 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
5660 top0 ? top0 : XEXP (x, 0),
5661 top1 ? top1 : XEXP (x, 1));
5662 }
5663 }
5664 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
5665 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5666 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5667 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5668 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
5669 && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
5670 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
5671 {
5672 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5673 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5674 rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
5675 if (top0 || top1 || top2)
5676 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
5677 GET_MODE (XEXP (x, 0)),
5678 top0 ? top0 : XEXP (x, 0),
5679 top1 ? top1 : XEXP (x, 1),
5680 top2 ? top2 : XEXP (x, 2));
5681 }
5682 return NULL_RTX;
5683 }
5684
5685 \f
5686 /* Simplify CODE, an operation with result mode MODE and three operands,
5687 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5688 a constant. Return 0 if no simplification is possible. */
5689
5690 rtx
5691 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5692 machine_mode op0_mode, rtx op0, rtx op1,
5693 rtx op2)
5694 {
5695 bool any_change = false;
5696 rtx tem, trueop2;
5697 scalar_int_mode int_mode, int_op0_mode;
5698 unsigned int n_elts;
5699
5700 switch (code)
5701 {
5702 case FMA:
5703 /* Simplify negations around the multiplication. */
5704 /* -a * -b + c => a * b + c. */
5705 if (GET_CODE (op0) == NEG)
5706 {
5707 tem = simplify_unary_operation (NEG, mode, op1, mode);
5708 if (tem)
5709 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5710 }
5711 else if (GET_CODE (op1) == NEG)
5712 {
5713 tem = simplify_unary_operation (NEG, mode, op0, mode);
5714 if (tem)
5715 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5716 }
5717
5718 /* Canonicalize the two multiplication operands. */
5719 /* a * -b + c => -b * a + c. */
5720 if (swap_commutative_operands_p (op0, op1))
5721 std::swap (op0, op1), any_change = true;
5722
5723 if (any_change)
5724 return gen_rtx_FMA (mode, op0, op1, op2);
5725 return NULL_RTX;
5726
5727 case SIGN_EXTRACT:
5728 case ZERO_EXTRACT:
5729 if (CONST_INT_P (op0)
5730 && CONST_INT_P (op1)
5731 && CONST_INT_P (op2)
5732 && is_a <scalar_int_mode> (mode, &int_mode)
5733 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5734 && HWI_COMPUTABLE_MODE_P (int_mode))
5735 {
5736 /* Extracting a bit-field from a constant. */
5737 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5738 HOST_WIDE_INT op1val = INTVAL (op1);
5739 HOST_WIDE_INT op2val = INTVAL (op2);
5740 if (!BITS_BIG_ENDIAN)
5741 val >>= op2val;
5742 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5743 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5744 else
5745 /* Not enough information to calculate the bit position. */
5746 break;
5747
5748 if (HOST_BITS_PER_WIDE_INT != op1val)
5749 {
5750 /* First zero-extend. */
5751 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5752 /* If desired, propagate sign bit. */
5753 if (code == SIGN_EXTRACT
5754 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5755 != 0)
5756 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5757 }
5758
5759 return gen_int_mode (val, int_mode);
5760 }
5761 break;
5762
5763 case IF_THEN_ELSE:
5764 if (CONST_INT_P (op0))
5765 return op0 != const0_rtx ? op1 : op2;
5766
5767 /* Convert c ? a : a into "a". */
5768 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5769 return op1;
5770
5771 /* Convert a != b ? a : b into "a". */
5772 if (GET_CODE (op0) == NE
5773 && ! side_effects_p (op0)
5774 && ! HONOR_NANS (mode)
5775 && ! HONOR_SIGNED_ZEROS (mode)
5776 && ((rtx_equal_p (XEXP (op0, 0), op1)
5777 && rtx_equal_p (XEXP (op0, 1), op2))
5778 || (rtx_equal_p (XEXP (op0, 0), op2)
5779 && rtx_equal_p (XEXP (op0, 1), op1))))
5780 return op1;
5781
5782 /* Convert a == b ? a : b into "b". */
5783 if (GET_CODE (op0) == EQ
5784 && ! side_effects_p (op0)
5785 && ! HONOR_NANS (mode)
5786 && ! HONOR_SIGNED_ZEROS (mode)
5787 && ((rtx_equal_p (XEXP (op0, 0), op1)
5788 && rtx_equal_p (XEXP (op0, 1), op2))
5789 || (rtx_equal_p (XEXP (op0, 0), op2)
5790 && rtx_equal_p (XEXP (op0, 1), op1))))
5791 return op2;
5792
5793 /* Convert (!c) != {0,...,0} ? a : b into
5794 c != {0,...,0} ? b : a for vector modes. */
5795 if (VECTOR_MODE_P (GET_MODE (op1))
5796 && GET_CODE (op0) == NE
5797 && GET_CODE (XEXP (op0, 0)) == NOT
5798 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5799 {
5800 rtx cv = XEXP (op0, 1);
5801 int nunits;
5802 bool ok = true;
5803 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
5804 ok = false;
5805 else
5806 for (int i = 0; i < nunits; ++i)
5807 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5808 {
5809 ok = false;
5810 break;
5811 }
5812 if (ok)
5813 {
5814 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5815 XEXP (XEXP (op0, 0), 0),
5816 XEXP (op0, 1));
5817 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5818 return retval;
5819 }
5820 }
5821
5822 /* Convert x == 0 ? N : clz (x) into clz (x) when
5823 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5824 Similarly for ctz (x). */
5825 if (COMPARISON_P (op0) && !side_effects_p (op0)
5826 && XEXP (op0, 1) == const0_rtx)
5827 {
5828 rtx simplified
5829 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5830 op1, op2);
5831 if (simplified)
5832 return simplified;
5833 }
5834
5835 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5836 {
5837 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5838 ? GET_MODE (XEXP (op0, 1))
5839 : GET_MODE (XEXP (op0, 0)));
5840 rtx temp;
5841
5842 /* Look for happy constants in op1 and op2. */
5843 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5844 {
5845 HOST_WIDE_INT t = INTVAL (op1);
5846 HOST_WIDE_INT f = INTVAL (op2);
5847
5848 if (t == STORE_FLAG_VALUE && f == 0)
5849 code = GET_CODE (op0);
5850 else if (t == 0 && f == STORE_FLAG_VALUE)
5851 {
5852 enum rtx_code tmp;
5853 tmp = reversed_comparison_code (op0, NULL);
5854 if (tmp == UNKNOWN)
5855 break;
5856 code = tmp;
5857 }
5858 else
5859 break;
5860
5861 return simplify_gen_relational (code, mode, cmp_mode,
5862 XEXP (op0, 0), XEXP (op0, 1));
5863 }
5864
5865 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5866 cmp_mode, XEXP (op0, 0),
5867 XEXP (op0, 1));
5868
5869 /* See if any simplifications were possible. */
5870 if (temp)
5871 {
5872 if (CONST_INT_P (temp))
5873 return temp == const0_rtx ? op2 : op1;
5874 else if (temp)
5875 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5876 }
5877 }
5878 break;
5879
5880 case VEC_MERGE:
5881 gcc_assert (GET_MODE (op0) == mode);
5882 gcc_assert (GET_MODE (op1) == mode);
5883 gcc_assert (VECTOR_MODE_P (mode));
5884 trueop2 = avoid_constant_pool_reference (op2);
5885 if (CONST_INT_P (trueop2)
5886 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
5887 {
5888 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5889 unsigned HOST_WIDE_INT mask;
5890 if (n_elts == HOST_BITS_PER_WIDE_INT)
5891 mask = -1;
5892 else
5893 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5894
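        /* Each set bit in SEL selects the corresponding element from OP0
           and each clear bit selects it from OP1, so an all-zero selector
           reduces to OP1 and an all-ones selector to OP0.  */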
5895 if (!(sel & mask) && !side_effects_p (op0))
5896 return op1;
5897 if ((sel & mask) == mask && !side_effects_p (op1))
5898 return op0;
5899
5900 rtx trueop0 = avoid_constant_pool_reference (op0);
5901 rtx trueop1 = avoid_constant_pool_reference (op1);
5902 if (GET_CODE (trueop0) == CONST_VECTOR
5903 && GET_CODE (trueop1) == CONST_VECTOR)
5904 {
5905 rtvec v = rtvec_alloc (n_elts);
5906 unsigned int i;
5907
5908 for (i = 0; i < n_elts; i++)
5909 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5910 ? CONST_VECTOR_ELT (trueop0, i)
5911 : CONST_VECTOR_ELT (trueop1, i));
5912 return gen_rtx_CONST_VECTOR (mode, v);
5913 }
5914
5915 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5916 if no element from a appears in the result. */
5917 if (GET_CODE (op0) == VEC_MERGE)
5918 {
5919 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5920 if (CONST_INT_P (tem))
5921 {
5922 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5923 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5924 return simplify_gen_ternary (code, mode, mode,
5925 XEXP (op0, 1), op1, op2);
5926 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5927 return simplify_gen_ternary (code, mode, mode,
5928 XEXP (op0, 0), op1, op2);
5929 }
5930 }
5931 if (GET_CODE (op1) == VEC_MERGE)
5932 {
5933 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5934 if (CONST_INT_P (tem))
5935 {
5936 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5937 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5938 return simplify_gen_ternary (code, mode, mode,
5939 op0, XEXP (op1, 1), op2);
5940 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5941 return simplify_gen_ternary (code, mode, mode,
5942 op0, XEXP (op1, 0), op2);
5943 }
5944 }
5945
5946 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5947 with a. */
5948 if (GET_CODE (op0) == VEC_DUPLICATE
5949 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5950 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5951 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
5952 {
5953 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5954 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5955 {
5956 if (XEXP (XEXP (op0, 0), 0) == op1
5957 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5958 return op1;
5959 }
5960 }
5961 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5962 (const_int N))
5963 with (vec_concat (X) (B)) if N == 1 or
5964 (vec_concat (A) (X)) if N == 2. */
5965 if (GET_CODE (op0) == VEC_DUPLICATE
5966 && GET_CODE (op1) == CONST_VECTOR
5967 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
5968 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5969 && IN_RANGE (sel, 1, 2))
5970 {
5971 rtx newop0 = XEXP (op0, 0);
5972 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
5973 if (sel == 2)
5974 std::swap (newop0, newop1);
5975 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5976 }
5977 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5978 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5979 Only applies for vectors of two elements. */
5980 if (GET_CODE (op0) == VEC_DUPLICATE
5981 && GET_CODE (op1) == VEC_CONCAT
5982 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5983 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5984 && IN_RANGE (sel, 1, 2))
5985 {
5986 rtx newop0 = XEXP (op0, 0);
5987 rtx newop1 = XEXP (op1, 2 - sel);
5988 rtx otherop = XEXP (op1, sel - 1);
5989 if (sel == 2)
5990 std::swap (newop0, newop1);
5991 /* Don't want to throw away the other part of the vec_concat if
5992 it has side-effects. */
5993 if (!side_effects_p (otherop))
5994 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5995 }
5996
5997 /* Replace:
5998
5999 (vec_merge:outer (vec_duplicate:outer x:inner)
6000 (subreg:outer y:inner 0)
6001 (const_int N))
6002
6003 with (vec_concat:outer x:inner y:inner) if N == 1,
6004 or (vec_concat:outer y:inner x:inner) if N == 2.
6005
6006 Implicitly, this means we have a paradoxical subreg, but such
6007 a check is cheap, so make it anyway.
6008
6009 Only applies for vectors of two elements. */
6010 if (GET_CODE (op0) == VEC_DUPLICATE
6011 && GET_CODE (op1) == SUBREG
6012 && GET_MODE (op1) == GET_MODE (op0)
6013 && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6014 && paradoxical_subreg_p (op1)
6015 && subreg_lowpart_p (op1)
6016 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6017 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6018 && IN_RANGE (sel, 1, 2))
6019 {
6020 rtx newop0 = XEXP (op0, 0);
6021 rtx newop1 = SUBREG_REG (op1);
6022 if (sel == 2)
6023 std::swap (newop0, newop1);
6024 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6025 }
6026
6027 /* Same as above but with switched operands:
6028 Replace (vec_merge:outer (subreg:outer x:inner 0)
6029 (vec_duplicate:outer y:inner)
6030 (const_int N))
6031
6032 with (vec_concat:outer x:inner y:inner) if N == 1,
6033 or (vec_concat:outer y:inner x:inner) if N == 2. */
6034 if (GET_CODE (op1) == VEC_DUPLICATE
6035 && GET_CODE (op0) == SUBREG
6036 && GET_MODE (op0) == GET_MODE (op1)
6037 && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6038 && paradoxical_subreg_p (op0)
6039 && subreg_lowpart_p (op0)
6040 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6041 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6042 && IN_RANGE (sel, 1, 2))
6043 {
6044 rtx newop0 = SUBREG_REG (op0);
6045 rtx newop1 = XEXP (op1, 0);
6046 if (sel == 2)
6047 std::swap (newop0, newop1);
6048 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6049 }
6050
6051 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6052 (const_int n))
6053 with (vec_concat x y) or (vec_concat y x) depending on the value
6054 of N. */
6055 if (GET_CODE (op0) == VEC_DUPLICATE
6056 && GET_CODE (op1) == VEC_DUPLICATE
6057 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6058 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6059 && IN_RANGE (sel, 1, 2))
6060 {
6061 rtx newop0 = XEXP (op0, 0);
6062 rtx newop1 = XEXP (op1, 0);
6063 if (sel == 2)
6064 std::swap (newop0, newop1);
6065
6066 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6067 }
6068 }
6069
6070 if (rtx_equal_p (op0, op1)
6071 && !side_effects_p (op2) && !side_effects_p (op1))
6072 return op0;
6073
6074 if (!side_effects_p (op2))
6075 {
6076 rtx top0
6077 = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6078 rtx top1
6079 = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6080 if (top0 || top1)
6081 return simplify_gen_ternary (code, mode, mode,
6082 top0 ? top0 : op0,
6083 top1 ? top1 : op1, op2);
6084 }
6085
6086 break;
6087
6088 default:
6089 gcc_unreachable ();
6090 }
6091
6092 return 0;
6093 }
6094
6095 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
6096 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
6097 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
6098
6099 Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
6100 represented as a little-endian array of 'unsigned char', selecting by BYTE,
6101 and then repacking them again for OUTERMODE. If OP is a CONST_VECTOR,
6102 FIRST_ELEM is the number of the first element to extract, otherwise
6103 FIRST_ELEM is ignored. */
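/* For instance (illustrative, little-endian target): unpacking
   (const_int 0x1234) with HImode as INNERMODE gives the bytes 0x34 0x12,
   and repacking byte 0 as QImode yields (const_int 0x34).  */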
6104
6105 static rtx
6106 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
6107 machine_mode innermode, unsigned int byte,
6108 unsigned int first_elem, unsigned int inner_bytes)
6109 {
6110 enum {
6111 value_bit = 8,
6112 value_mask = (1 << value_bit) - 1
6113 };
6114 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
6115 int value_start;
6116 int i;
6117 int elem;
6118
6119 int num_elem;
6120 rtx * elems;
6121 int elem_bitsize;
6122 rtx result_s = NULL;
6123 rtvec result_v = NULL;
6124 enum mode_class outer_class;
6125 scalar_mode outer_submode;
6126 int max_bitsize;
6127
6128 /* Some ports misuse CCmode. */
6129 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
6130 return op;
6131
6132 /* We have no way to represent a complex constant at the rtl level. */
6133 if (COMPLEX_MODE_P (outermode))
6134 return NULL_RTX;
6135
6136 /* We support modes of any size. */
6137 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
6138 inner_bytes * BITS_PER_UNIT);
6139
6140 /* Unpack the value. */
6141
6142 if (GET_CODE (op) == CONST_VECTOR)
6143 {
6144 num_elem = CEIL (inner_bytes, GET_MODE_UNIT_SIZE (innermode));
6145 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
6146 }
6147 else
6148 {
6149 num_elem = 1;
6150 elem_bitsize = max_bitsize;
6151 }
6152 /* If this asserts, it is too complicated; reducing value_bit may help. */
6153 gcc_assert (BITS_PER_UNIT % value_bit == 0);
6154 /* I don't know how to handle endianness of sub-units. */
6155 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
6156
6157 for (elem = 0; elem < num_elem; elem++)
6158 {
6159 unsigned char * vp;
6160 rtx el = (GET_CODE (op) == CONST_VECTOR
6161 ? CONST_VECTOR_ELT (op, first_elem + elem)
6162 : op);
6163
6164 /* Vectors are kept in target memory order. (This is probably
6165 a mistake.) */
6166 {
6167 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6168 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6169 / BITS_PER_UNIT);
6170 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6171 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6172 unsigned bytele = (subword_byte % UNITS_PER_WORD
6173 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6174 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
6175 }
6176
6177 switch (GET_CODE (el))
6178 {
6179 case CONST_INT:
6180 for (i = 0;
6181 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6182 i += value_bit)
6183 *vp++ = INTVAL (el) >> i;
6184 /* CONST_INTs are always logically sign-extended. */
6185 for (; i < elem_bitsize; i += value_bit)
6186 *vp++ = INTVAL (el) < 0 ? -1 : 0;
6187 break;
6188
6189 case CONST_WIDE_INT:
6190 {
6191 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
6192 unsigned char extend = wi::sign_mask (val);
6193 int prec = wi::get_precision (val);
6194
6195 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
6196 *vp++ = wi::extract_uhwi (val, i, value_bit);
6197 for (; i < elem_bitsize; i += value_bit)
6198 *vp++ = extend;
6199 }
6200 break;
6201
6202 case CONST_DOUBLE:
6203 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6204 {
6205 unsigned char extend = 0;
6206 /* If this triggers, someone should have generated a
6207 CONST_INT instead. */
6208 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6209
6210 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6211 *vp++ = CONST_DOUBLE_LOW (el) >> i;
6212 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6213 {
6214 *vp++
6215 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6216 i += value_bit;
6217 }
6218
6219 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6220 extend = -1;
6221 for (; i < elem_bitsize; i += value_bit)
6222 *vp++ = extend;
6223 }
6224 else
6225 {
6226 /* This is big enough for anything on the platform. */
6227 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6228 scalar_float_mode el_mode;
6229
6230 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6231 int bitsize = GET_MODE_BITSIZE (el_mode);
6232
6233 gcc_assert (bitsize <= elem_bitsize);
6234 gcc_assert (bitsize % value_bit == 0);
6235
6236 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6237 GET_MODE (el));
6238
6239 /* real_to_target produces its result in words affected by
6240 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6241 and use WORDS_BIG_ENDIAN instead; see the documentation
6242 of SUBREG in rtl.texi. */
6243 for (i = 0; i < bitsize; i += value_bit)
6244 {
6245 int ibase;
6246 if (WORDS_BIG_ENDIAN)
6247 ibase = bitsize - 1 - i;
6248 else
6249 ibase = i;
6250 *vp++ = tmp[ibase / 32] >> i % 32;
6251 }
6252
6253 /* It shouldn't matter what's done here, so fill it with
6254 zero. */
6255 for (; i < elem_bitsize; i += value_bit)
6256 *vp++ = 0;
6257 }
6258 break;
6259
6260 case CONST_FIXED:
6261 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6262 {
6263 for (i = 0; i < elem_bitsize; i += value_bit)
6264 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6265 }
6266 else
6267 {
6268 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6269 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6270 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6271 i += value_bit)
6272 *vp++ = CONST_FIXED_VALUE_HIGH (el)
6273 >> (i - HOST_BITS_PER_WIDE_INT);
6274 for (; i < elem_bitsize; i += value_bit)
6275 *vp++ = 0;
6276 }
6277 break;
6278
6279 default:
6280 gcc_unreachable ();
6281 }
6282 }
6283
6284 /* Now, pick the right byte to start with. */
6285 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6286 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6287 will already have offset 0. */
6288 if (inner_bytes >= GET_MODE_SIZE (outermode))
6289 {
6290 unsigned ibyte = inner_bytes - GET_MODE_SIZE (outermode) - byte;
6291 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6292 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6293 byte = (subword_byte % UNITS_PER_WORD
6294 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6295 }
6296
6297 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6298 so if it's become negative it will instead be very large.) */
6299 gcc_assert (byte < inner_bytes);
6300
6301 /* Convert from bytes to chunks of size value_bit. */
6302 value_start = byte * (BITS_PER_UNIT / value_bit);
6303
6304 /* Re-pack the value. */
6305 num_elem = GET_MODE_NUNITS (outermode);
6306
6307 if (VECTOR_MODE_P (outermode))
6308 {
6309 result_v = rtvec_alloc (num_elem);
6310 elems = &RTVEC_ELT (result_v, 0);
6311 }
6312 else
6313 elems = &result_s;
6314
6315 outer_submode = GET_MODE_INNER (outermode);
6316 outer_class = GET_MODE_CLASS (outer_submode);
6317 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6318
6319 gcc_assert (elem_bitsize % value_bit == 0);
6320 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6321
6322 for (elem = 0; elem < num_elem; elem++)
6323 {
6324 unsigned char *vp;
6325
6326 /* Vectors are stored in target memory order. (This is probably
6327 a mistake.) */
6328 {
6329 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6330 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6331 / BITS_PER_UNIT);
6332 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6333 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6334 unsigned bytele = (subword_byte % UNITS_PER_WORD
6335 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6336 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6337 }
6338
6339 switch (outer_class)
6340 {
6341 case MODE_INT:
6342 case MODE_PARTIAL_INT:
6343 {
6344 int u;
6345 int base = 0;
6346 int units
6347 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6348 / HOST_BITS_PER_WIDE_INT;
6349 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6350 wide_int r;
6351
6352 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6353 return NULL_RTX;
6354 for (u = 0; u < units; u++)
6355 {
6356 unsigned HOST_WIDE_INT buf = 0;
6357 for (i = 0;
6358 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6359 i += value_bit)
6360 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6361
6362 tmp[u] = buf;
6363 base += HOST_BITS_PER_WIDE_INT;
6364 }
6365 r = wide_int::from_array (tmp, units,
6366 GET_MODE_PRECISION (outer_submode));
6367 #if TARGET_SUPPORTS_WIDE_INT == 0
6368 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6369 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6370 return NULL_RTX;
6371 #endif
6372 elems[elem] = immed_wide_int_const (r, outer_submode);
6373 }
6374 break;
6375
6376 case MODE_FLOAT:
6377 case MODE_DECIMAL_FLOAT:
6378 {
6379 REAL_VALUE_TYPE r;
6380 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6381
6382 /* real_from_target wants its input in words affected by
6383 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6384 and use WORDS_BIG_ENDIAN instead; see the documentation
6385 of SUBREG in rtl.texi. */
6386 for (i = 0; i < elem_bitsize; i += value_bit)
6387 {
6388 int ibase;
6389 if (WORDS_BIG_ENDIAN)
6390 ibase = elem_bitsize - 1 - i;
6391 else
6392 ibase = i;
6393 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6394 }
6395
6396 real_from_target (&r, tmp, outer_submode);
6397 elems[elem] = const_double_from_real_value (r, outer_submode);
6398 }
6399 break;
6400
6401 case MODE_FRACT:
6402 case MODE_UFRACT:
6403 case MODE_ACCUM:
6404 case MODE_UACCUM:
6405 {
6406 FIXED_VALUE_TYPE f;
6407 f.data.low = 0;
6408 f.data.high = 0;
6409 f.mode = outer_submode;
6410
6411 for (i = 0;
6412 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6413 i += value_bit)
6414 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6415 for (; i < elem_bitsize; i += value_bit)
6416 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6417 << (i - HOST_BITS_PER_WIDE_INT));
6418
6419 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6420 }
6421 break;
6422
6423 default:
6424 gcc_unreachable ();
6425 }
6426 }
6427 if (VECTOR_MODE_P (outermode))
6428 return gen_rtx_CONST_VECTOR (outermode, result_v);
6429 else
6430 return result_s;
6431 }
6432
6433 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6434 Return 0 if no simplifications are possible. */
6435 rtx
6436 simplify_subreg (machine_mode outermode, rtx op,
6437 machine_mode innermode, poly_uint64 byte)
6438 {
6439 /* Little bit of sanity checking. */
6440 gcc_assert (innermode != VOIDmode);
6441 gcc_assert (outermode != VOIDmode);
6442 gcc_assert (innermode != BLKmode);
6443 gcc_assert (outermode != BLKmode);
6444
6445 gcc_assert (GET_MODE (op) == innermode
6446 || GET_MODE (op) == VOIDmode);
6447
6448 poly_uint64 outersize = GET_MODE_SIZE (outermode);
6449 if (!multiple_p (byte, outersize))
6450 return NULL_RTX;
6451
6452 poly_uint64 innersize = GET_MODE_SIZE (innermode);
6453 if (maybe_ge (byte, innersize))
6454 return NULL_RTX;
6455
6456 if (outermode == innermode && known_eq (byte, 0U))
6457 return op;
6458
6459 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6460 {
6461 rtx elt;
6462
6463 if (VECTOR_MODE_P (outermode)
6464 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6465 && vec_duplicate_p (op, &elt))
6466 return gen_vec_duplicate (outermode, elt);
6467
6468 if (outermode == GET_MODE_INNER (innermode)
6469 && vec_duplicate_p (op, &elt))
6470 return elt;
6471 }
6472
6473 if (CONST_SCALAR_INT_P (op)
6474 || CONST_DOUBLE_AS_FLOAT_P (op)
6475 || CONST_FIXED_P (op)
6476 || GET_CODE (op) == CONST_VECTOR)
6477 {
6478 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6479 the result from bytes, so it only works if the sizes of the modes
6480 and the value of the offset are known at compile time. Cases that
6481 apply to general modes and offsets should be handled here
6482 before calling simplify_immed_subreg. */
6483 fixed_size_mode fs_outermode, fs_innermode;
6484 unsigned HOST_WIDE_INT cbyte;
6485 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6486 && is_a <fixed_size_mode> (innermode, &fs_innermode)
6487 && byte.is_constant (&cbyte))
6488 return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
6489 0, GET_MODE_SIZE (fs_innermode));
6490
6491 /* Handle constant-sized outer modes and variable-sized inner modes. */
6492 unsigned HOST_WIDE_INT first_elem;
6493 if (GET_CODE (op) == CONST_VECTOR
6494 && is_a <fixed_size_mode> (outermode, &fs_outermode)
6495 && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
6496 &first_elem))
6497 return simplify_immed_subreg (fs_outermode, op, innermode, 0,
6498 first_elem,
6499 GET_MODE_SIZE (fs_outermode));
6500
6501 return NULL_RTX;
6502 }
6503
6504 /* Changing mode twice with SUBREG => just change it once,
6505 or not at all if changing back to op's starting mode. */
6506 if (GET_CODE (op) == SUBREG)
6507 {
6508 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6509 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
6510 rtx newx;
6511
6512 if (outermode == innermostmode
6513 && known_eq (byte, 0U)
6514 && known_eq (SUBREG_BYTE (op), 0))
6515 return SUBREG_REG (op);
6516
6517 /* Work out the memory offset of the final OUTERMODE value relative
6518 to the inner value of OP. */
6519 poly_int64 mem_offset = subreg_memory_offset (outermode,
6520 innermode, byte);
6521 poly_int64 op_mem_offset = subreg_memory_offset (op);
6522 poly_int64 final_offset = mem_offset + op_mem_offset;
6523
6524 /* See whether resulting subreg will be paradoxical. */
6525 if (!paradoxical_subreg_p (outermode, innermostmode))
6526 {
6527 /* Bail out in case resulting subreg would be incorrect. */
6528 if (maybe_lt (final_offset, 0)
6529 || maybe_ge (poly_uint64 (final_offset), innermostsize)
6530 || !multiple_p (final_offset, outersize))
6531 return NULL_RTX;
6532 }
6533 else
6534 {
6535 poly_int64 required_offset = subreg_memory_offset (outermode,
6536 innermostmode, 0);
6537 if (maybe_ne (final_offset, required_offset))
6538 return NULL_RTX;
6539 /* Paradoxical subregs always have byte offset 0. */
6540 final_offset = 0;
6541 }
6542
6543 /* Recurse for further possible simplifications. */
6544 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6545 final_offset);
6546 if (newx)
6547 return newx;
6548 if (validate_subreg (outermode, innermostmode,
6549 SUBREG_REG (op), final_offset))
6550 {
6551 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6552 if (SUBREG_PROMOTED_VAR_P (op)
6553 && SUBREG_PROMOTED_SIGN (op) >= 0
6554 && GET_MODE_CLASS (outermode) == MODE_INT
6555 && known_ge (outersize, innersize)
6556 && known_le (outersize, innermostsize)
6557 && subreg_lowpart_p (newx))
6558 {
6559 SUBREG_PROMOTED_VAR_P (newx) = 1;
6560 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6561 }
6562 return newx;
6563 }
6564 return NULL_RTX;
6565 }
6566
6567 /* SUBREG of a hard register => just change the register number
6568 and/or mode. If the hard register is not valid in that mode,
6569 suppress this simplification. If the hard register is the stack,
6570 frame, or argument pointer, leave this as a SUBREG. */
6571
6572 if (REG_P (op) && HARD_REGISTER_P (op))
6573 {
6574 unsigned int regno, final_regno;
6575
6576 regno = REGNO (op);
6577 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6578 if (HARD_REGISTER_NUM_P (final_regno))
6579 {
6580 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6581 subreg_memory_offset (outermode,
6582 innermode, byte));
6583
6584 /* Propagate original regno. We don't have any way to specify
6585 the offset inside original regno, so do so only for lowpart.
6586 The information is used only by alias analysis, which cannot
6587 grok a partial register anyway. */
6588
6589 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6590 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6591 return x;
6592 }
6593 }
6594
6595 /* If we have a SUBREG of a register that we are replacing and we are
6596 replacing it with a MEM, make a new MEM and try replacing the
6597 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6598 or if we would be widening it. */
6599
6600 if (MEM_P (op)
6601 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6602 /* Allow splitting of volatile memory references in case we don't
6603 have an instruction to move the whole thing. */
6604 && (! MEM_VOLATILE_P (op)
6605 || ! have_insn_for (SET, innermode))
6606 && known_le (outersize, innersize))
6607 return adjust_address_nv (op, outermode, byte);
6608
6609 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6610 of two parts. */
6611 if (GET_CODE (op) == CONCAT
6612 || GET_CODE (op) == VEC_CONCAT)
6613 {
6614 poly_uint64 final_offset;
6615 rtx part, res;
6616
6617 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6618 if (part_mode == VOIDmode)
6619 part_mode = GET_MODE_INNER (GET_MODE (op));
6620 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
6621 if (known_lt (byte, part_size))
6622 {
6623 part = XEXP (op, 0);
6624 final_offset = byte;
6625 }
6626 else if (known_ge (byte, part_size))
6627 {
6628 part = XEXP (op, 1);
6629 final_offset = byte - part_size;
6630 }
6631 else
6632 return NULL_RTX;
6633
6634 if (maybe_gt (final_offset + outersize, part_size))
6635 return NULL_RTX;
6636
6637 part_mode = GET_MODE (part);
6638 if (part_mode == VOIDmode)
6639 part_mode = GET_MODE_INNER (GET_MODE (op));
6640 res = simplify_subreg (outermode, part, part_mode, final_offset);
6641 if (res)
6642 return res;
6643 if (validate_subreg (outermode, part_mode, part, final_offset))
6644 return gen_rtx_SUBREG (outermode, part, final_offset);
6645 return NULL_RTX;
6646 }
6647
6648 /* Simplify
6649 (subreg (vec_merge (X)
6650 (vector)
6651 (const_int ((1 << N) | M)))
6652 (N * sizeof (outermode)))
6653 to
6654 (subreg (X) (N * sizeof (outermode)))
6655 */
6656 unsigned int idx;
6657 if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
6658 && idx < HOST_BITS_PER_WIDE_INT
6659 && GET_CODE (op) == VEC_MERGE
6660 && GET_MODE_INNER (innermode) == outermode
6661 && CONST_INT_P (XEXP (op, 2))
6662 && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
6663 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
6664
6665 /* A SUBREG resulting from a zero extension may fold to zero if
6666 it extracts higher bits than the ZERO_EXTEND's source bits. */
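  /* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (reg:SI r)) 4)
     reads only bits that the extension filled with zeros, so it folds to
     (const_int 0).  (Illustrative.)  */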
6667 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6668 {
6669 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
6670 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
6671 return CONST0_RTX (outermode);
6672 }
6673
6674 scalar_int_mode int_outermode, int_innermode;
6675 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6676 && is_a <scalar_int_mode> (innermode, &int_innermode)
6677 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
6678 {
6679 /* Handle polynomial integers. The upper bits of a paradoxical
6680 subreg are undefined, so this is safe regardless of whether
6681 we're truncating or extending. */
6682 if (CONST_POLY_INT_P (op))
6683 {
6684 poly_wide_int val
6685 = poly_wide_int::from (const_poly_int_value (op),
6686 GET_MODE_PRECISION (int_outermode),
6687 SIGNED);
6688 return immed_wide_int_const (val, int_outermode);
6689 }
6690
6691 if (GET_MODE_PRECISION (int_outermode)
6692 < GET_MODE_PRECISION (int_innermode))
6693 {
6694 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6695 if (tem)
6696 return tem;
6697 }
6698 }
6699
6700 return NULL_RTX;
6701 }
6702
6703 /* Make a SUBREG operation or equivalent if it folds. */
6704
6705 rtx
6706 simplify_gen_subreg (machine_mode outermode, rtx op,
6707 machine_mode innermode, poly_uint64 byte)
6708 {
6709 rtx newx;
6710
6711 newx = simplify_subreg (outermode, op, innermode, byte);
6712 if (newx)
6713 return newx;
6714
6715 if (GET_CODE (op) == SUBREG
6716 || GET_CODE (op) == CONCAT
6717 || GET_MODE (op) == VOIDmode)
6718 return NULL_RTX;
6719
6720 if (validate_subreg (outermode, innermode, op, byte))
6721 return gen_rtx_SUBREG (outermode, op, byte);
6722
6723 return NULL_RTX;
6724 }
6725
6726 /* Generate a subreg that gives the least significant part of EXPR (which
6727 has mode INNER_MODE) in OUTER_MODE. */
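/* E.g. lowpart_subreg (QImode, r, SImode) for a pseudo register r yields
   (subreg:QI r 0) on little-endian targets and (subreg:QI r 3) on
   big-endian ones, assuming the subreg cannot be folded further.  */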
6728
6729 rtx
6730 lowpart_subreg (machine_mode outer_mode, rtx expr,
6731 machine_mode inner_mode)
6732 {
6733 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6734 subreg_lowpart_offset (outer_mode, inner_mode));
6735 }
6736
6737 /* Simplify X, an rtx expression.
6738
6739 Return the simplified expression or NULL if no simplifications
6740 were possible.
6741
6742 This is the preferred entry point into the simplification routines;
6743 however, we still allow passes to call the more specific routines.
6744
6745 Right now GCC has three (yes, three) major bodies of RTL simplification
6746 code that need to be unified.
6747
6748 1. fold_rtx in cse.c. This code uses various CSE specific
6749 information to aid in RTL simplification.
6750
6751 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6752 it uses combine specific information to aid in RTL
6753 simplification.
6754
6755 3. The routines in this file.
6756
6757
6758 Long term we want to only have one body of simplification code; to
6759 get to that state I recommend the following steps:
6760
6761 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6762 which do not depend on pass-specific state into these routines.
6763
6764 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6765 use this routine whenever possible.
6766
6767 3. Allow for pass dependent state to be provided to these
6768 routines and add simplifications based on the pass dependent
6769 state. Remove code from cse.c & combine.c that becomes
6770 redundant/dead.
6771
6772 It will take time, but ultimately the compiler will be easier to
6773 maintain and improve. It's totally silly that when we add a
6774 simplification it needs to be added to 4 places (3 for RTL
6775 simplification and 1 for tree simplification). */
6776
6777 rtx
6778 simplify_rtx (const_rtx x)
6779 {
6780 const enum rtx_code code = GET_CODE (x);
6781 const machine_mode mode = GET_MODE (x);
6782
6783 switch (GET_RTX_CLASS (code))
6784 {
6785 case RTX_UNARY:
6786 return simplify_unary_operation (code, mode,
6787 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6788 case RTX_COMM_ARITH:
6789 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6790 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6791
6792 /* Fall through. */
6793
6794 case RTX_BIN_ARITH:
6795 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6796
6797 case RTX_TERNARY:
6798 case RTX_BITFIELD_OPS:
6799 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6800 XEXP (x, 0), XEXP (x, 1),
6801 XEXP (x, 2));
6802
6803 case RTX_COMPARE:
6804 case RTX_COMM_COMPARE:
6805 return simplify_relational_operation (code, mode,
6806 ((GET_MODE (XEXP (x, 0))
6807 != VOIDmode)
6808 ? GET_MODE (XEXP (x, 0))
6809 : GET_MODE (XEXP (x, 1))),
6810 XEXP (x, 0),
6811 XEXP (x, 1));
6812
6813 case RTX_EXTRA:
6814 if (code == SUBREG)
6815 return simplify_subreg (mode, SUBREG_REG (x),
6816 GET_MODE (SUBREG_REG (x)),
6817 SUBREG_BYTE (x));
6818 break;
6819
6820 case RTX_OBJ:
6821 if (code == LO_SUM)
6822 {
6823 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6824 if (GET_CODE (XEXP (x, 0)) == HIGH
6825 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6826 return XEXP (x, 1);
6827 }
6828 break;
6829
6830 default:
6831 break;
6832 }
6833 return NULL;
6834 }
6835
6836 #if CHECKING_P
6837
6838 namespace selftest {
6839
6840 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6841
6842 static rtx
6843 make_test_reg (machine_mode mode)
6844 {
6845 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6846
6847 return gen_rtx_REG (mode, test_reg_num++);
6848 }
6849
6850 /* Test vector simplifications involving VEC_DUPLICATE in which the
6851 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6852 register that holds one element of MODE. */
6853
6854 static void
6855 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6856 {
6857 scalar_mode inner_mode = GET_MODE_INNER (mode);
6858 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6859 poly_uint64 nunits = GET_MODE_NUNITS (mode);
6860 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6861 {
6862 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6863 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6864 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6865 ASSERT_RTX_EQ (duplicate,
6866 simplify_unary_operation (NOT, mode,
6867 duplicate_not, mode));
6868
6869 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6870 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6871 ASSERT_RTX_EQ (duplicate,
6872 simplify_unary_operation (NEG, mode,
6873 duplicate_neg, mode));
6874
6875 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6876 ASSERT_RTX_EQ (duplicate,
6877 simplify_binary_operation (PLUS, mode, duplicate,
6878 CONST0_RTX (mode)));
6879
6880 ASSERT_RTX_EQ (duplicate,
6881 simplify_binary_operation (MINUS, mode, duplicate,
6882 CONST0_RTX (mode)));
6883
6884 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6885 simplify_binary_operation (MINUS, mode, duplicate,
6886 duplicate));
6887 }
6888
6889 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6890 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6891 ASSERT_RTX_PTR_EQ (scalar_reg,
6892 simplify_binary_operation (VEC_SELECT, inner_mode,
6893 duplicate, zero_par));
6894
6895 unsigned HOST_WIDE_INT const_nunits;
6896 if (nunits.is_constant (&const_nunits))
6897 {
6898 /* And again with the final element. */
6899 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
6900 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6901 ASSERT_RTX_PTR_EQ (scalar_reg,
6902 simplify_binary_operation (VEC_SELECT, inner_mode,
6903 duplicate, last_par));
6904
6905 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
6906 rtx vector_reg = make_test_reg (mode);
6907 for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
6908 {
6909 if (i >= HOST_BITS_PER_WIDE_INT)
6910 break;
6911 rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
6912 rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
6913 poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
6914 ASSERT_RTX_EQ (scalar_reg,
6915 simplify_gen_subreg (inner_mode, vm,
6916 mode, offset));
6917 }
6918 }
6919
6920 /* Test a scalar subreg of a VEC_DUPLICATE. */
6921 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
6922 ASSERT_RTX_EQ (scalar_reg,
6923 simplify_gen_subreg (inner_mode, duplicate,
6924 mode, offset));
6925
6926 machine_mode narrower_mode;
6927 if (maybe_ne (nunits, 2U)
6928 && multiple_p (nunits, 2)
6929 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6930 && VECTOR_MODE_P (narrower_mode))
6931 {
6932 /* Test VEC_SELECT of a vector. */
6933 rtx vec_par
6934 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6935 rtx narrower_duplicate
6936 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6937 ASSERT_RTX_EQ (narrower_duplicate,
6938 simplify_binary_operation (VEC_SELECT, narrower_mode,
6939 duplicate, vec_par));
6940
6941 /* Test a vector subreg of a VEC_DUPLICATE. */
6942 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
6943 ASSERT_RTX_EQ (narrower_duplicate,
6944 simplify_gen_subreg (narrower_mode, duplicate,
6945 mode, offset));
6946 }
6947 }
6948
6949 /* Test vector simplifications involving VEC_SERIES in which the
6950 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6951 register that holds one element of MODE. */
6952
6953 static void
6954 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6955 {
6956 /* Test unary cases with VEC_SERIES arguments. */
6957 scalar_mode inner_mode = GET_MODE_INNER (mode);
6958 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6959 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6960 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6961 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6962 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6963 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6964 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6965 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6966 neg_scalar_reg);
6967 ASSERT_RTX_EQ (series_0_r,
6968 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6969 ASSERT_RTX_EQ (series_r_m1,
6970 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6971 ASSERT_RTX_EQ (series_r_r,
6972 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6973
6974 /* Test that a VEC_SERIES with a zero step is simplified away. */
6975 ASSERT_RTX_EQ (duplicate,
6976 simplify_binary_operation (VEC_SERIES, mode,
6977 scalar_reg, const0_rtx));
6978
6979 /* Test PLUS and MINUS with VEC_SERIES. */
6980 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6981 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6982 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6983 ASSERT_RTX_EQ (series_r_r,
6984 simplify_binary_operation (PLUS, mode, series_0_r,
6985 duplicate));
6986 ASSERT_RTX_EQ (series_r_1,
6987 simplify_binary_operation (PLUS, mode, duplicate,
6988 series_0_1));
6989 ASSERT_RTX_EQ (series_r_m1,
6990 simplify_binary_operation (PLUS, mode, duplicate,
6991 series_0_m1));
6992 ASSERT_RTX_EQ (series_0_r,
6993 simplify_binary_operation (MINUS, mode, series_r_r,
6994 duplicate));
6995 ASSERT_RTX_EQ (series_r_m1,
6996 simplify_binary_operation (MINUS, mode, duplicate,
6997 series_0_1));
6998 ASSERT_RTX_EQ (series_r_1,
6999 simplify_binary_operation (MINUS, mode, duplicate,
7000 series_0_m1));
7001 ASSERT_RTX_EQ (series_0_m1,
7002 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
7003 constm1_rtx));
7004 }
7005
7006 /* Verify simplify_merge_mask works correctly. */
7007
7008 static void
7009 test_vec_merge (machine_mode mode)
7010 {
7011 rtx op0 = make_test_reg (mode);
7012 rtx op1 = make_test_reg (mode);
7013 rtx op2 = make_test_reg (mode);
7014 rtx op3 = make_test_reg (mode);
7015 rtx op4 = make_test_reg (mode);
7016 rtx op5 = make_test_reg (mode);
7017 rtx mask1 = make_test_reg (SImode);
7018 rtx mask2 = make_test_reg (SImode);
7019 rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
7020 rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
7021 rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
7022
7023 /* Simple vec_merge. */
7024 ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
7025 ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
7026 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
7027 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
7028
7029 /* Nested vec_merge.
7030 It's tempting to make this simplify right down to opN, but we don't
7031 because all the simplify_* functions assume that the operands have
7032 already been simplified. */
7033 rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
7034 ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
7035 ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
7036
7037 /* Intermediate unary op. */
7038 rtx unop = gen_rtx_NOT (mode, vm1);
7039 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
7040 simplify_merge_mask (unop, mask1, 0));
7041 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
7042 simplify_merge_mask (unop, mask1, 1));
7043
7044 /* Intermediate binary op. */
7045 rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
7046 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
7047 simplify_merge_mask (binop, mask1, 0));
7048 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
7049 simplify_merge_mask (binop, mask1, 1));
7050
7051 /* Intermediate ternary op. */
7052 rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
7053 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
7054 simplify_merge_mask (tenop, mask1, 0));
7055 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
7056 simplify_merge_mask (tenop, mask1, 1));
7057
7058 /* Side effects. */
7059 rtx badop0 = gen_rtx_PRE_INC (mode, op0);
7060 rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
7061 ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
7062 ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
7063
7064 /* Called indirectly. */
7065 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
7066 simplify_rtx (nvm));
7067 }
7068
7069 /* Verify some simplifications involving vectors. */
7070
7071 static void
7072 test_vector_ops ()
7073 {
7074 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7075 {
7076 machine_mode mode = (machine_mode) i;
7077 if (VECTOR_MODE_P (mode))
7078 {
7079 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
7080 test_vector_ops_duplicate (mode, scalar_reg);
7081 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
7082 && maybe_gt (GET_MODE_NUNITS (mode), 2))
7083 test_vector_ops_series (mode, scalar_reg);
7084 test_vec_merge (mode);
7085 }
7086 }
7087 }
7088
7089 template<unsigned int N>
7090 struct simplify_const_poly_int_tests
7091 {
7092 static void run ();
7093 };
7094
7095 template<>
7096 struct simplify_const_poly_int_tests<1>
7097 {
7098 static void run () {}
7099 };
7100
7101 /* Test various CONST_POLY_INT properties. */
7102
7103 template<unsigned int N>
7104 void
7105 simplify_const_poly_int_tests<N>::run ()
7106 {
7107 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
7108 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
7109 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
7110 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
7111 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
7112 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
7113 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
7114 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
7115 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
7116 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
7117 rtx two = GEN_INT (2);
7118 rtx six = GEN_INT (6);
7119 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
7120
7121 /* These tests only try limited operation combinations. Fuller arithmetic
7122 testing is done directly on poly_ints. */
7123 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
7124 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
7125 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
7126 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
7127 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
7128 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
7129 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
7130 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
7131 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
7132 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
7133 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
7134 }
7135
7136 /* Run all of the selftests within this file. */
7137
7138 void
7139 simplify_rtx_c_tests ()
7140 {
7141 test_vector_ops ();
7142 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
7143 }
7144
7145 } // namespace selftest
7146
7147 #endif /* CHECKING_P */