git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38
39 /* Simplification and canonicalization of RTL. */
40
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
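/* As an illustration of the macro above: HWI_SIGN_EXTEND (HOST_WIDE_INT_M1U)
   evaluates to HOST_WIDE_INT_M1, because the low word is negative when
   viewed as a signed HOST_WIDE_INT, whereas HWI_SIGN_EXTEND (HOST_WIDE_INT_1U)
   evaluates to HOST_WIDE_INT_0, i.e. zero.  */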
47
48 static rtx neg_const_int (machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
58 \f
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
62 {
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
64
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
70 }
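/* As an illustration: neg_const_int (SImode, GEN_INT (5)) yields
   (const_int -5) via gen_int_mode, while negating a constant whose
   negation does not fit in a HOST_WIDE_INT (in a mode wider than
   HOST_WIDE_INT) is deferred to simplify_const_unary_operation so that
   the wide-int code can represent the result.  */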
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
81
82 if (!is_int_mode (mode, &int_mode))
83 return false;
84
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
88
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
94 {
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
106 }
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
111 {
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
114 }
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
119
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
123 }
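/* As an illustration, for 32-bit SImode the only constant accepted by
   mode_signbit_p is the sign-extended (const_int -2147483648), i.e. the
   value with bit 31 set and all lower bits within the mode clear.  */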
124
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
128
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133 scalar_int_mode int_mode;
134
135 if (!is_int_mode (mode, &int_mode))
136 return false;
137
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
141
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
144 }
145
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
150 {
151 unsigned int width;
152
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
156
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
160
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
163 }
164
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
169 {
170 unsigned int width;
171
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
175
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
179
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
182 }
183 \f
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
186
187 rtx
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
190 {
191 rtx tem;
192
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
197
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
202
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
204 }
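/* Usage sketch (X here stands for any non-constant operand, e.g. a REG):
   simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3)) folds to
   (const_int 5), while simplify_gen_binary (PLUS, SImode, const1_rtx, X)
   canonicalizes the commutative operands so that the constant comes last,
   producing (plus:SI X (const_int 1)).  */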
205 \f
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
208 rtx
209 avoid_constant_pool_reference (rtx x)
210 {
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
214
215 switch (GET_CODE (x))
216 {
217 case MEM:
218 break;
219
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
228
229 default:
230 return x;
231 }
232
233 if (GET_MODE (x) == BLKmode)
234 return x;
235
236 addr = XEXP (x, 0);
237
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
240
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
245 {
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
248 }
249
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
252
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
257 {
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
260
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
267 {
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
271 }
272 }
273
274 return x;
275 }
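/* For example, if X is (mem:DF (symbol_ref ...)) where the SYMBOL_REF is a
   constant pool entry holding 1.0, the CONST_DOUBLE for 1.0 is returned
   directly; an access at a nonzero offset or in a different mode goes
   through simplify_subreg, and the original MEM is returned if even that
   fails.  */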
276 \f
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
280
281 rtx
282 delegitimize_mem_from_attrs (rtx x)
283 {
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
289 {
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 poly_int64 offset = 0;
293
294 switch (TREE_CODE (decl))
295 {
296 default:
297 decl = NULL;
298 break;
299
300 case VAR_DECL:
301 break;
302
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
310 {
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
314
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
323 {
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
327 }
328 break;
329 }
330 }
331
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
339 {
340 rtx newx;
341
342 offset += MEM_OFFSET (x);
343
344 newx = DECL_RTL (decl);
345
346 if (MEM_P (newx))
347 {
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
349 poly_int64 n_offset, o_offset;
350
351 /* Avoid creating a new MEM needlessly if we already had
352 the same address. We do if there's no OFFSET and the
353 old address X is identical to NEWX, or if X is of the
354 form (plus NEWX OFFSET), or the NEWX is of the form
355 (plus Y (const_int Z)) and X is that with the offset
356 added: (plus Y (const_int Z+OFFSET)). */
357 n = strip_offset (n, &n_offset);
358 o = strip_offset (o, &o_offset);
359 if (!(known_eq (o_offset, n_offset + offset)
360 && rtx_equal_p (o, n)))
361 x = adjust_address_nv (newx, mode, offset);
362 }
363 else if (GET_MODE (x) == GET_MODE (newx)
364 && known_eq (offset, 0))
365 x = newx;
366 }
367 }
368
369 return x;
370 }
371 \f
372 /* Make a unary operation by first seeing if it folds and otherwise making
373 the specified operation. */
374
375 rtx
376 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
377 machine_mode op_mode)
378 {
379 rtx tem;
380
381 /* If this simplifies, use it. */
382 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
383 return tem;
384
385 return gen_rtx_fmt_e (code, mode, op);
386 }
387
388 /* Likewise for ternary operations. */
389
390 rtx
391 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
392 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
393 {
394 rtx tem;
395
396 /* If this simplifies, use it. */
397 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
398 op0, op1, op2)) != 0)
399 return tem;
400
401 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
402 }
403
404 /* Likewise, for relational operations.
405 CMP_MODE specifies mode comparison is done in. */
406
407 rtx
408 simplify_gen_relational (enum rtx_code code, machine_mode mode,
409 machine_mode cmp_mode, rtx op0, rtx op1)
410 {
411 rtx tem;
412
413 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
414 op0, op1)) != 0)
415 return tem;
416
417 return gen_rtx_fmt_ee (code, mode, op0, op1);
418 }
419 \f
420 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
 421    and simplify the result.  If FN is non-NULL, call this callback on each
 422    X; if it returns non-NULL, replace X with its return value and simplify the
423 result. */
424
425 rtx
426 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
427 rtx (*fn) (rtx, const_rtx, void *), void *data)
428 {
429 enum rtx_code code = GET_CODE (x);
430 machine_mode mode = GET_MODE (x);
431 machine_mode op_mode;
432 const char *fmt;
433 rtx op0, op1, op2, newx, op;
434 rtvec vec, newvec;
435 int i, j;
436
437 if (__builtin_expect (fn != NULL, 0))
438 {
439 newx = fn (x, old_rtx, data);
440 if (newx)
441 return newx;
442 }
443 else if (rtx_equal_p (x, old_rtx))
444 return copy_rtx ((rtx) data);
445
446 switch (GET_RTX_CLASS (code))
447 {
448 case RTX_UNARY:
449 op0 = XEXP (x, 0);
450 op_mode = GET_MODE (op0);
451 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
452 if (op0 == XEXP (x, 0))
453 return x;
454 return simplify_gen_unary (code, mode, op0, op_mode);
455
456 case RTX_BIN_ARITH:
457 case RTX_COMM_ARITH:
458 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
459 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
460 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
461 return x;
462 return simplify_gen_binary (code, mode, op0, op1);
463
464 case RTX_COMPARE:
465 case RTX_COMM_COMPARE:
466 op0 = XEXP (x, 0);
467 op1 = XEXP (x, 1);
468 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
469 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
470 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
471 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
472 return x;
473 return simplify_gen_relational (code, mode, op_mode, op0, op1);
474
475 case RTX_TERNARY:
476 case RTX_BITFIELD_OPS:
477 op0 = XEXP (x, 0);
478 op_mode = GET_MODE (op0);
479 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
480 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
481 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
482 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
483 return x;
484 if (op_mode == VOIDmode)
485 op_mode = GET_MODE (op0);
486 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
487
488 case RTX_EXTRA:
489 if (code == SUBREG)
490 {
491 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
492 if (op0 == SUBREG_REG (x))
493 return x;
494 op0 = simplify_gen_subreg (GET_MODE (x), op0,
495 GET_MODE (SUBREG_REG (x)),
496 SUBREG_BYTE (x));
497 return op0 ? op0 : x;
498 }
499 break;
500
501 case RTX_OBJ:
502 if (code == MEM)
503 {
504 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
505 if (op0 == XEXP (x, 0))
506 return x;
507 return replace_equiv_address_nv (x, op0);
508 }
509 else if (code == LO_SUM)
510 {
511 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
512 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
513
514 /* (lo_sum (high x) y) -> y where x and y have the same base. */
515 if (GET_CODE (op0) == HIGH)
516 {
517 rtx base0, base1, offset0, offset1;
518 split_const (XEXP (op0, 0), &base0, &offset0);
519 split_const (op1, &base1, &offset1);
520 if (rtx_equal_p (base0, base1))
521 return op1;
522 }
523
524 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
525 return x;
526 return gen_rtx_LO_SUM (mode, op0, op1);
527 }
528 break;
529
530 default:
531 break;
532 }
533
534 newx = x;
535 fmt = GET_RTX_FORMAT (code);
536 for (i = 0; fmt[i]; i++)
537 switch (fmt[i])
538 {
539 case 'E':
540 vec = XVEC (x, i);
541 newvec = XVEC (newx, i);
542 for (j = 0; j < GET_NUM_ELEM (vec); j++)
543 {
544 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
545 old_rtx, fn, data);
546 if (op != RTVEC_ELT (vec, j))
547 {
548 if (newvec == vec)
549 {
550 newvec = shallow_copy_rtvec (vec);
551 if (x == newx)
552 newx = shallow_copy_rtx (x);
553 XVEC (newx, i) = newvec;
554 }
555 RTVEC_ELT (newvec, j) = op;
556 }
557 }
558 break;
559
560 case 'e':
561 if (XEXP (x, i))
562 {
563 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
564 if (op != XEXP (x, i))
565 {
566 if (x == newx)
567 newx = shallow_copy_rtx (x);
568 XEXP (newx, i) = op;
569 }
570 }
571 break;
572 }
573 return newx;
574 }
575
576 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
577 resulting RTX. Return a new RTX which is as simplified as possible. */
578
579 rtx
580 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
581 {
582 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
583 }
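/* Usage sketch, with R standing for some pseudo register: replacing
   (reg:SI R) by (const_int 4) in (plus:SI (reg:SI R) (const_int 3)) via
   simplify_replace_rtx yields (const_int 7), because the rebuilt
   expression is passed back through simplify_gen_binary.  */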
584 \f
585 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
586 Only handle cases where the truncated value is inherently an rvalue.
587
588 RTL provides two ways of truncating a value:
589
590 1. a lowpart subreg. This form is only a truncation when both
591 the outer and inner modes (here MODE and OP_MODE respectively)
592 are scalar integers, and only then when the subreg is used as
593 an rvalue.
594
595 It is only valid to form such truncating subregs if the
596 truncation requires no action by the target. The onus for
597 proving this is on the creator of the subreg -- e.g. the
598 caller to simplify_subreg or simplify_gen_subreg -- and typically
599 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
600
601 2. a TRUNCATE. This form handles both scalar and compound integers.
602
603 The first form is preferred where valid. However, the TRUNCATE
604 handling in simplify_unary_operation turns the second form into the
605 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
606 so it is generally safe to form rvalue truncations using:
607
608 simplify_gen_unary (TRUNCATE, ...)
609
610 and leave simplify_unary_operation to work out which representation
611 should be used.
612
613 Because of the proof requirements on (1), simplify_truncation must
614 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
615 regardless of whether the outer truncation came from a SUBREG or a
616 TRUNCATE. For example, if the caller has proven that an SImode
617 truncation of:
618
619 (and:DI X Y)
620
621 is a no-op and can be represented as a subreg, it does not follow
622 that SImode truncations of X and Y are also no-ops. On a target
623 like 64-bit MIPS that requires SImode values to be stored in
624 sign-extended form, an SImode truncation of:
625
626 (and:DI (reg:DI X) (const_int 63))
627
628 is trivially a no-op because only the lower 6 bits can be set.
629 However, X is still an arbitrary 64-bit number and so we cannot
630 assume that truncating it too is a no-op. */
631
632 static rtx
633 simplify_truncation (machine_mode mode, rtx op,
634 machine_mode op_mode)
635 {
636 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
637 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
638 scalar_int_mode int_mode, int_op_mode, subreg_mode;
639
640 gcc_assert (precision <= op_precision);
641
642 /* Optimize truncations of zero and sign extended values. */
643 if (GET_CODE (op) == ZERO_EXTEND
644 || GET_CODE (op) == SIGN_EXTEND)
645 {
646 /* There are three possibilities. If MODE is the same as the
647 origmode, we can omit both the extension and the subreg.
648 If MODE is not larger than the origmode, we can apply the
649 truncation without the extension. Finally, if the outermode
650 is larger than the origmode, we can just extend to the appropriate
651 mode. */
652 machine_mode origmode = GET_MODE (XEXP (op, 0));
653 if (mode == origmode)
654 return XEXP (op, 0);
655 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
656 return simplify_gen_unary (TRUNCATE, mode,
657 XEXP (op, 0), origmode);
658 else
659 return simplify_gen_unary (GET_CODE (op), mode,
660 XEXP (op, 0), origmode);
661 }
662
663 /* If the machine can perform operations in the truncated mode, distribute
664 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
665 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
666 if (1
667 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
668 && (GET_CODE (op) == PLUS
669 || GET_CODE (op) == MINUS
670 || GET_CODE (op) == MULT))
671 {
672 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
673 if (op0)
674 {
675 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
676 if (op1)
677 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
678 }
679 }
680
681 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 682            (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
683 the outer subreg is effectively a truncation to the original mode. */
684 if ((GET_CODE (op) == LSHIFTRT
685 || GET_CODE (op) == ASHIFTRT)
686 /* Ensure that OP_MODE is at least twice as wide as MODE
687 to avoid the possibility that an outer LSHIFTRT shifts by more
688 than the sign extension's sign_bit_copies and introduces zeros
689 into the high bits of the result. */
690 && 2 * precision <= op_precision
691 && CONST_INT_P (XEXP (op, 1))
692 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFTRT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
697
698 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 699            (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
700 the outer subreg is effectively a truncation to the original mode. */
701 if ((GET_CODE (op) == LSHIFTRT
702 || GET_CODE (op) == ASHIFTRT)
703 && CONST_INT_P (XEXP (op, 1))
704 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
705 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
706 && UINTVAL (XEXP (op, 1)) < precision)
707 return simplify_gen_binary (LSHIFTRT, mode,
708 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
709
710 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 711            (ashift:QI (x:QI) C), where C is a suitable small constant and
712 the outer subreg is effectively a truncation to the original mode. */
713 if (GET_CODE (op) == ASHIFT
714 && CONST_INT_P (XEXP (op, 1))
715 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
716 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
718 && UINTVAL (XEXP (op, 1)) < precision)
719 return simplify_gen_binary (ASHIFT, mode,
720 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
721
722 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
723 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
724 and C2. */
725 if (GET_CODE (op) == AND
726 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
727 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
728 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
729 && CONST_INT_P (XEXP (op, 1)))
730 {
731 rtx op0 = (XEXP (XEXP (op, 0), 0));
732 rtx shift_op = XEXP (XEXP (op, 0), 1);
733 rtx mask_op = XEXP (op, 1);
734 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
735 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
736
737 if (shift < precision
738 /* If doing this transform works for an X with all bits set,
739 it works for any X. */
740 && ((GET_MODE_MASK (mode) >> shift) & mask)
741 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
742 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
743 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
744 {
745 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
746 return simplify_gen_binary (AND, mode, op0, mask_op);
747 }
748 }
749
750 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
751 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
752 changing len. */
753 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
754 && REG_P (XEXP (op, 0))
755 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
756 && CONST_INT_P (XEXP (op, 1))
757 && CONST_INT_P (XEXP (op, 2)))
758 {
759 rtx op0 = XEXP (op, 0);
760 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
761 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
762 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
763 {
764 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
765 if (op0)
766 {
767 pos -= op_precision - precision;
768 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
769 XEXP (op, 1), GEN_INT (pos));
770 }
771 }
772 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
773 {
774 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
775 if (op0)
776 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
777 XEXP (op, 1), XEXP (op, 2));
778 }
779 }
780
781 /* Recognize a word extraction from a multi-word subreg. */
782 if ((GET_CODE (op) == LSHIFTRT
783 || GET_CODE (op) == ASHIFTRT)
784 && SCALAR_INT_MODE_P (mode)
785 && SCALAR_INT_MODE_P (op_mode)
786 && precision >= BITS_PER_WORD
787 && 2 * precision <= op_precision
788 && CONST_INT_P (XEXP (op, 1))
789 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
790 && UINTVAL (XEXP (op, 1)) < op_precision)
791 {
792 int byte = subreg_lowpart_offset (mode, op_mode);
793 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
794 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
795 (WORDS_BIG_ENDIAN
796 ? byte - shifted_bytes
797 : byte + shifted_bytes));
798 }
799
800 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
801 and try replacing the TRUNCATE and shift with it. Don't do this
802 if the MEM has a mode-dependent address. */
803 if ((GET_CODE (op) == LSHIFTRT
804 || GET_CODE (op) == ASHIFTRT)
805 && is_a <scalar_int_mode> (mode, &int_mode)
806 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
807 && MEM_P (XEXP (op, 0))
808 && CONST_INT_P (XEXP (op, 1))
809 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
810 && INTVAL (XEXP (op, 1)) > 0
811 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
812 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
813 MEM_ADDR_SPACE (XEXP (op, 0)))
814 && ! MEM_VOLATILE_P (XEXP (op, 0))
815 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
816 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
817 {
818 int byte = subreg_lowpart_offset (int_mode, int_op_mode);
819 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
820 return adjust_address_nv (XEXP (op, 0), int_mode,
821 (WORDS_BIG_ENDIAN
822 ? byte - shifted_bytes
823 : byte + shifted_bytes));
824 }
825
826 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
827 (OP:SI foo:SI) if OP is NEG or ABS. */
828 if ((GET_CODE (op) == ABS
829 || GET_CODE (op) == NEG)
830 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
831 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
832 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
833 return simplify_gen_unary (GET_CODE (op), mode,
834 XEXP (XEXP (op, 0), 0), mode);
835
836 /* (truncate:A (subreg:B (truncate:C X) 0)) is
837 (truncate:A X). */
838 if (GET_CODE (op) == SUBREG
839 && is_a <scalar_int_mode> (mode, &int_mode)
840 && SCALAR_INT_MODE_P (op_mode)
841 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
842 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
843 && subreg_lowpart_p (op))
844 {
845 rtx inner = XEXP (SUBREG_REG (op), 0);
846 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
847 return simplify_gen_unary (TRUNCATE, int_mode, inner,
848 GET_MODE (inner));
849 else
850 /* If subreg above is paradoxical and C is narrower
851 than A, return (subreg:A (truncate:C X) 0). */
852 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
853 }
854
855 /* (truncate:A (truncate:B X)) is (truncate:A X). */
856 if (GET_CODE (op) == TRUNCATE)
857 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
858 GET_MODE (XEXP (op, 0)));
859
860 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
861 in mode A. */
862 if (GET_CODE (op) == IOR
863 && SCALAR_INT_MODE_P (mode)
864 && SCALAR_INT_MODE_P (op_mode)
865 && CONST_INT_P (XEXP (op, 1))
866 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
867 return constm1_rtx;
868
869 return NULL_RTX;
870 }
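/* Worked examples of the cases above, with X standing for a QImode
   register: (truncate:QI (zero_extend:SI X)) simplifies to X itself, and
   (truncate:QI (lshiftrt:SI (zero_extend:SI X) (const_int 2))) simplifies
   to (lshiftrt:QI X (const_int 2)).  */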
871 \f
872 /* Try to simplify a unary operation CODE whose output mode is to be
873 MODE with input operand OP whose mode was originally OP_MODE.
874 Return zero if no simplification can be made. */
875 rtx
876 simplify_unary_operation (enum rtx_code code, machine_mode mode,
877 rtx op, machine_mode op_mode)
878 {
879 rtx trueop, tem;
880
881 trueop = avoid_constant_pool_reference (op);
882
883 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
884 if (tem)
885 return tem;
886
887 return simplify_unary_operation_1 (code, mode, op);
888 }
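/* For example, simplify_unary_operation (NEG, SImode, const1_rtx, SImode)
   returns (const_int -1), and applying NOT to an operand that is itself a
   NOT returns the inner operand via simplify_unary_operation_1.  */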
889
890 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
891 to be exact. */
892
893 static bool
894 exact_int_to_float_conversion_p (const_rtx op)
895 {
896 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
897 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
898 /* Constants shouldn't reach here. */
899 gcc_assert (op0_mode != VOIDmode);
900 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
901 int in_bits = in_prec;
902 if (HWI_COMPUTABLE_MODE_P (op0_mode))
903 {
904 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
905 if (GET_CODE (op) == FLOAT)
906 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
907 else if (GET_CODE (op) == UNSIGNED_FLOAT)
908 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
909 else
910 gcc_unreachable ();
911 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
912 }
913 return in_bits <= out_bits;
914 }
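/* As an illustration: (float:SF (reg:HI X)) is always exact, since any
   16-bit integer fits in SFmode's 24-bit significand, whereas
   (float:SF (reg:SI X)) is only known to be exact when nonzero_bits and
   num_sign_bit_copies show that the value needs at most 24 significand
   bits.  */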
915
916 /* Perform some simplifications we can do even if the operands
917 aren't constant. */
918 static rtx
919 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
920 {
921 enum rtx_code reversed;
922 rtx temp, elt, base, step;
923 scalar_int_mode inner, int_mode, op_mode, op0_mode;
924
925 switch (code)
926 {
927 case NOT:
928 /* (not (not X)) == X. */
929 if (GET_CODE (op) == NOT)
930 return XEXP (op, 0);
931
932 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
933 comparison is all ones. */
934 if (COMPARISON_P (op)
935 && (mode == BImode || STORE_FLAG_VALUE == -1)
936 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
937 return simplify_gen_relational (reversed, mode, VOIDmode,
938 XEXP (op, 0), XEXP (op, 1));
939
940 /* (not (plus X -1)) can become (neg X). */
941 if (GET_CODE (op) == PLUS
942 && XEXP (op, 1) == constm1_rtx)
943 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
944
945 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
946 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
947 and MODE_VECTOR_INT. */
948 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
949 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
950 CONSTM1_RTX (mode));
951
952 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
953 if (GET_CODE (op) == XOR
954 && CONST_INT_P (XEXP (op, 1))
955 && (temp = simplify_unary_operation (NOT, mode,
956 XEXP (op, 1), mode)) != 0)
957 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
958
959 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
960 if (GET_CODE (op) == PLUS
961 && CONST_INT_P (XEXP (op, 1))
962 && mode_signbit_p (mode, XEXP (op, 1))
963 && (temp = simplify_unary_operation (NOT, mode,
964 XEXP (op, 1), mode)) != 0)
965 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
966
967
968 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
969 operands other than 1, but that is not valid. We could do a
970 similar simplification for (not (lshiftrt C X)) where C is
971 just the sign bit, but this doesn't seem common enough to
972 bother with. */
973 if (GET_CODE (op) == ASHIFT
974 && XEXP (op, 0) == const1_rtx)
975 {
976 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
977 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
978 }
979
980 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
981 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
982 so we can perform the above simplification. */
983 if (STORE_FLAG_VALUE == -1
984 && is_a <scalar_int_mode> (mode, &int_mode)
985 && GET_CODE (op) == ASHIFTRT
986 && CONST_INT_P (XEXP (op, 1))
987 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
988 return simplify_gen_relational (GE, int_mode, VOIDmode,
989 XEXP (op, 0), const0_rtx);
990
991
992 if (partial_subreg_p (op)
993 && subreg_lowpart_p (op)
994 && GET_CODE (SUBREG_REG (op)) == ASHIFT
995 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
996 {
997 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
998 rtx x;
999
1000 x = gen_rtx_ROTATE (inner_mode,
1001 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1002 inner_mode),
1003 XEXP (SUBREG_REG (op), 1));
1004 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1005 if (temp)
1006 return temp;
1007 }
1008
1009 /* Apply De Morgan's laws to reduce number of patterns for machines
1010 with negating logical insns (and-not, nand, etc.). If result has
1011 only one NOT, put it first, since that is how the patterns are
1012 coded. */
1013 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1014 {
1015 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1016 machine_mode op_mode;
1017
1018 op_mode = GET_MODE (in1);
1019 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1020
1021 op_mode = GET_MODE (in2);
1022 if (op_mode == VOIDmode)
1023 op_mode = mode;
1024 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1025
1026 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1027 std::swap (in1, in2);
1028
1029 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1030 mode, in1, in2);
1031 }
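      /* For example, (not (ior A B)) becomes (and (not A) (not B)) here,
	 and (not (and A B)) becomes (ior (not A) (not B)).  */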
1032
1033 /* (not (bswap x)) -> (bswap (not x)). */
1034 if (GET_CODE (op) == BSWAP)
1035 {
1036 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1037 return simplify_gen_unary (BSWAP, mode, x, mode);
1038 }
1039 break;
1040
1041 case NEG:
1042 /* (neg (neg X)) == X. */
1043 if (GET_CODE (op) == NEG)
1044 return XEXP (op, 0);
1045
1046 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1047 If comparison is not reversible use
1048 x ? y : (neg y). */
1049 if (GET_CODE (op) == IF_THEN_ELSE)
1050 {
1051 rtx cond = XEXP (op, 0);
1052 rtx true_rtx = XEXP (op, 1);
1053 rtx false_rtx = XEXP (op, 2);
1054
1055 if ((GET_CODE (true_rtx) == NEG
1056 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1057 || (GET_CODE (false_rtx) == NEG
1058 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1059 {
1060 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1061 temp = reversed_comparison (cond, mode);
1062 else
1063 {
1064 temp = cond;
1065 std::swap (true_rtx, false_rtx);
1066 }
1067 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1068 mode, temp, true_rtx, false_rtx);
1069 }
1070 }
1071
1072 /* (neg (plus X 1)) can become (not X). */
1073 if (GET_CODE (op) == PLUS
1074 && XEXP (op, 1) == const1_rtx)
1075 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1076
1077 /* Similarly, (neg (not X)) is (plus X 1). */
1078 if (GET_CODE (op) == NOT)
1079 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1080 CONST1_RTX (mode));
1081
1082 /* (neg (minus X Y)) can become (minus Y X). This transformation
1083 isn't safe for modes with signed zeros, since if X and Y are
1084 both +0, (minus Y X) is the same as (minus X Y). If the
1085 rounding mode is towards +infinity (or -infinity) then the two
1086 expressions will be rounded differently. */
1087 if (GET_CODE (op) == MINUS
1088 && !HONOR_SIGNED_ZEROS (mode)
1089 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1090 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1091
1092 if (GET_CODE (op) == PLUS
1093 && !HONOR_SIGNED_ZEROS (mode)
1094 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1095 {
1096 /* (neg (plus A C)) is simplified to (minus -C A). */
1097 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1098 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1099 {
1100 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1101 if (temp)
1102 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1103 }
1104
1105 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1106 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1107 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1108 }
1109
1110 /* (neg (mult A B)) becomes (mult A (neg B)).
1111 This works even for floating-point values. */
1112 if (GET_CODE (op) == MULT
1113 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1114 {
1115 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1116 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1117 }
1118
1119 /* NEG commutes with ASHIFT since it is multiplication. Only do
1120 this if we can then eliminate the NEG (e.g., if the operand
1121 is a constant). */
1122 if (GET_CODE (op) == ASHIFT)
1123 {
1124 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1125 if (temp)
1126 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1127 }
1128
1129 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1130 C is equal to the width of MODE minus 1. */
1131 if (GET_CODE (op) == ASHIFTRT
1132 && CONST_INT_P (XEXP (op, 1))
1133 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1134 return simplify_gen_binary (LSHIFTRT, mode,
1135 XEXP (op, 0), XEXP (op, 1));
1136
1137 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1138 C is equal to the width of MODE minus 1. */
1139 if (GET_CODE (op) == LSHIFTRT
1140 && CONST_INT_P (XEXP (op, 1))
1141 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1142 return simplify_gen_binary (ASHIFTRT, mode,
1143 XEXP (op, 0), XEXP (op, 1));
1144
1145 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1146 if (GET_CODE (op) == XOR
1147 && XEXP (op, 1) == const1_rtx
1148 && nonzero_bits (XEXP (op, 0), mode) == 1)
1149 return plus_constant (mode, XEXP (op, 0), -1);
1150
1151 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1152 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1153 if (GET_CODE (op) == LT
1154 && XEXP (op, 1) == const0_rtx
1155 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1156 {
1157 int_mode = as_a <scalar_int_mode> (mode);
1158 int isize = GET_MODE_PRECISION (inner);
1159 if (STORE_FLAG_VALUE == 1)
1160 {
1161 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1162 gen_int_shift_amount (inner,
1163 isize - 1));
1164 if (int_mode == inner)
1165 return temp;
1166 if (GET_MODE_PRECISION (int_mode) > isize)
1167 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1168 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1169 }
1170 else if (STORE_FLAG_VALUE == -1)
1171 {
1172 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1173 gen_int_shift_amount (inner,
1174 isize - 1));
1175 if (int_mode == inner)
1176 return temp;
1177 if (GET_MODE_PRECISION (int_mode) > isize)
1178 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1179 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1180 }
1181 }
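      /* As an illustration, with STORE_FLAG_VALUE == 1 and 32-bit SImode,
	 (neg:SI (lt:SI (reg:SI X) (const_int 0))) becomes
	 (ashiftrt:SI (reg:SI X) (const_int 31)) by the case above.  */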
1182
1183 if (vec_series_p (op, &base, &step))
1184 {
1185 /* Only create a new series if we can simplify both parts. In other
1186 cases this isn't really a simplification, and it's not necessarily
1187 a win to replace a vector operation with a scalar operation. */
1188 scalar_mode inner_mode = GET_MODE_INNER (mode);
1189 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1190 if (base)
1191 {
1192 step = simplify_unary_operation (NEG, inner_mode,
1193 step, inner_mode);
1194 if (step)
1195 return gen_vec_series (mode, base, step);
1196 }
1197 }
1198 break;
1199
1200 case TRUNCATE:
1201 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1202 with the umulXi3_highpart patterns. */
1203 if (GET_CODE (op) == LSHIFTRT
1204 && GET_CODE (XEXP (op, 0)) == MULT)
1205 break;
1206
1207 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1208 {
1209 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1210 {
1211 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1212 if (temp)
1213 return temp;
1214 }
1215 /* We can't handle truncation to a partial integer mode here
1216 because we don't know the real bitsize of the partial
1217 integer mode. */
1218 break;
1219 }
1220
1221 if (GET_MODE (op) != VOIDmode)
1222 {
1223 temp = simplify_truncation (mode, op, GET_MODE (op));
1224 if (temp)
1225 return temp;
1226 }
1227
1228 /* If we know that the value is already truncated, we can
1229 replace the TRUNCATE with a SUBREG. */
1230 if (GET_MODE_NUNITS (mode) == 1
1231 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1232 || truncated_to_mode (mode, op)))
1233 {
1234 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1235 if (temp)
1236 return temp;
1237 }
1238
1239 /* A truncate of a comparison can be replaced with a subreg if
1240 STORE_FLAG_VALUE permits. This is like the previous test,
1241 but it works even if the comparison is done in a mode larger
1242 than HOST_BITS_PER_WIDE_INT. */
1243 if (HWI_COMPUTABLE_MODE_P (mode)
1244 && COMPARISON_P (op)
1245 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1246 {
1247 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1248 if (temp)
1249 return temp;
1250 }
1251
1252 /* A truncate of a memory is just loading the low part of the memory
1253 if we are not changing the meaning of the address. */
1254 if (GET_CODE (op) == MEM
1255 && !VECTOR_MODE_P (mode)
1256 && !MEM_VOLATILE_P (op)
1257 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1258 {
1259 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1260 if (temp)
1261 return temp;
1262 }
1263
1264 break;
1265
1266 case FLOAT_TRUNCATE:
1267 if (DECIMAL_FLOAT_MODE_P (mode))
1268 break;
1269
1270 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1271 if (GET_CODE (op) == FLOAT_EXTEND
1272 && GET_MODE (XEXP (op, 0)) == mode)
1273 return XEXP (op, 0);
1274
1275 /* (float_truncate:SF (float_truncate:DF foo:XF))
1276 = (float_truncate:SF foo:XF).
1277          This may eliminate a double rounding and change the result, so it is
              only done with -funsafe-math-optimizations.
1278
1279 (float_truncate:SF (float_extend:XF foo:DF))
1280 = (float_truncate:SF foo:DF).
1281
1282 (float_truncate:DF (float_extend:XF foo:SF))
1283 = (float_extend:DF foo:SF). */
1284 if ((GET_CODE (op) == FLOAT_TRUNCATE
1285 && flag_unsafe_math_optimizations)
1286 || GET_CODE (op) == FLOAT_EXTEND)
1287 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1288 > GET_MODE_UNIT_SIZE (mode)
1289 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1290 mode,
1291 XEXP (op, 0), mode);
1292
1293 /* (float_truncate (float x)) is (float x) */
1294 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1295 && (flag_unsafe_math_optimizations
1296 || exact_int_to_float_conversion_p (op)))
1297 return simplify_gen_unary (GET_CODE (op), mode,
1298 XEXP (op, 0),
1299 GET_MODE (XEXP (op, 0)));
1300
1301 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1302 (OP:SF foo:SF) if OP is NEG or ABS. */
1303 if ((GET_CODE (op) == ABS
1304 || GET_CODE (op) == NEG)
1305 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1306 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1307 return simplify_gen_unary (GET_CODE (op), mode,
1308 XEXP (XEXP (op, 0), 0), mode);
1309
1310 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1311 is (float_truncate:SF x). */
1312 if (GET_CODE (op) == SUBREG
1313 && subreg_lowpart_p (op)
1314 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1315 return SUBREG_REG (op);
1316 break;
1317
1318 case FLOAT_EXTEND:
1319 if (DECIMAL_FLOAT_MODE_P (mode))
1320 break;
1321
1322 /* (float_extend (float_extend x)) is (float_extend x)
1323
1324 (float_extend (float x)) is (float x) assuming that double
1325 rounding can't happen.
1326 */
1327 if (GET_CODE (op) == FLOAT_EXTEND
1328 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1329 && exact_int_to_float_conversion_p (op)))
1330 return simplify_gen_unary (GET_CODE (op), mode,
1331 XEXP (op, 0),
1332 GET_MODE (XEXP (op, 0)));
1333
1334 break;
1335
1336 case ABS:
1337 /* (abs (neg <foo>)) -> (abs <foo>) */
1338 if (GET_CODE (op) == NEG)
1339 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1340 GET_MODE (XEXP (op, 0)));
1341
1342 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1343 do nothing. */
1344 if (GET_MODE (op) == VOIDmode)
1345 break;
1346
1347 /* If operand is something known to be positive, ignore the ABS. */
1348 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1349 || val_signbit_known_clear_p (GET_MODE (op),
1350 nonzero_bits (op, GET_MODE (op))))
1351 return op;
1352
1353 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1354 if (is_a <scalar_int_mode> (mode, &int_mode)
1355 && (num_sign_bit_copies (op, int_mode)
1356 == GET_MODE_PRECISION (int_mode)))
1357 return gen_rtx_NEG (int_mode, op);
1358
1359 break;
1360
1361 case FFS:
1362 /* (ffs (*_extend <X>)) = (ffs <X>) */
1363 if (GET_CODE (op) == SIGN_EXTEND
1364 || GET_CODE (op) == ZERO_EXTEND)
1365 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1366 GET_MODE (XEXP (op, 0)));
1367 break;
1368
1369 case POPCOUNT:
1370 switch (GET_CODE (op))
1371 {
1372 case BSWAP:
1373 case ZERO_EXTEND:
1374 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1375 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1376 GET_MODE (XEXP (op, 0)));
1377
1378 case ROTATE:
1379 case ROTATERT:
1380 /* Rotations don't affect popcount. */
1381 if (!side_effects_p (XEXP (op, 1)))
1382 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1383 GET_MODE (XEXP (op, 0)));
1384 break;
1385
1386 default:
1387 break;
1388 }
1389 break;
1390
1391 case PARITY:
1392 switch (GET_CODE (op))
1393 {
1394 case NOT:
1395 case BSWAP:
1396 case ZERO_EXTEND:
1397 case SIGN_EXTEND:
1398 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1399 GET_MODE (XEXP (op, 0)));
1400
1401 case ROTATE:
1402 case ROTATERT:
1403 /* Rotations don't affect parity. */
1404 if (!side_effects_p (XEXP (op, 1)))
1405 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1406 GET_MODE (XEXP (op, 0)));
1407 break;
1408
1409 default:
1410 break;
1411 }
1412 break;
1413
1414 case BSWAP:
1415 /* (bswap (bswap x)) -> x. */
1416 if (GET_CODE (op) == BSWAP)
1417 return XEXP (op, 0);
1418 break;
1419
1420 case FLOAT:
1421 /* (float (sign_extend <X>)) = (float <X>). */
1422 if (GET_CODE (op) == SIGN_EXTEND)
1423 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1424 GET_MODE (XEXP (op, 0)));
1425 break;
1426
1427 case SIGN_EXTEND:
1428 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1429 becomes just the MINUS if its mode is MODE. This allows
1430 folding switch statements on machines using casesi (such as
1431 the VAX). */
1432 if (GET_CODE (op) == TRUNCATE
1433 && GET_MODE (XEXP (op, 0)) == mode
1434 && GET_CODE (XEXP (op, 0)) == MINUS
1435 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1436 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1437 return XEXP (op, 0);
1438
1439 /* Extending a widening multiplication should be canonicalized to
1440 a wider widening multiplication. */
1441 if (GET_CODE (op) == MULT)
1442 {
1443 rtx lhs = XEXP (op, 0);
1444 rtx rhs = XEXP (op, 1);
1445 enum rtx_code lcode = GET_CODE (lhs);
1446 enum rtx_code rcode = GET_CODE (rhs);
1447
1448 /* Widening multiplies usually extend both operands, but sometimes
1449 they use a shift to extract a portion of a register. */
1450 if ((lcode == SIGN_EXTEND
1451 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1452 && (rcode == SIGN_EXTEND
1453 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1454 {
1455 machine_mode lmode = GET_MODE (lhs);
1456 machine_mode rmode = GET_MODE (rhs);
1457 int bits;
1458
1459 if (lcode == ASHIFTRT)
1460 /* Number of bits not shifted off the end. */
1461 bits = (GET_MODE_UNIT_PRECISION (lmode)
1462 - INTVAL (XEXP (lhs, 1)));
1463 else /* lcode == SIGN_EXTEND */
1464 /* Size of inner mode. */
1465 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1466
1467 if (rcode == ASHIFTRT)
1468 bits += (GET_MODE_UNIT_PRECISION (rmode)
1469 - INTVAL (XEXP (rhs, 1)));
1470 else /* rcode == SIGN_EXTEND */
1471 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1472
1473              /* We can only widen multiplies if the result is mathematically
1474                 equivalent, i.e. if overflow was impossible.  */
1475 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1476 return simplify_gen_binary
1477 (MULT, mode,
1478 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1479 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1480 }
1481 }
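      /* A worked example of the widening-multiply case above, assuming
	 HImode operands:
	 (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
				  (sign_extend:SI (reg:HI B))))
	 needs only 16 + 16 = 32 bits, so it is rewritten as
	 (mult:DI (sign_extend:DI (reg:HI A)) (sign_extend:DI (reg:HI B))).  */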
1482
1483 /* Check for a sign extension of a subreg of a promoted
1484 variable, where the promotion is sign-extended, and the
1485 target mode is the same as the variable's promotion. */
1486 if (GET_CODE (op) == SUBREG
1487 && SUBREG_PROMOTED_VAR_P (op)
1488 && SUBREG_PROMOTED_SIGNED_P (op)
1489 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1490 {
1491 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1492 if (temp)
1493 return temp;
1494 }
1495
1496 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1497 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1498 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1499 {
1500 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1501 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1502 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1503 GET_MODE (XEXP (op, 0)));
1504 }
1505
1506 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1507 is (sign_extend:M (subreg:O <X>)) if there is mode with
1508 GET_MODE_BITSIZE (N) - I bits.
1509 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1510 is similarly (zero_extend:M (subreg:O <X>)). */
1511 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1512 && GET_CODE (XEXP (op, 0)) == ASHIFT
1513 && is_a <scalar_int_mode> (mode, &int_mode)
1514 && CONST_INT_P (XEXP (op, 1))
1515 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1516 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1517 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1518 {
1519 scalar_int_mode tmode;
1520 gcc_assert (GET_MODE_BITSIZE (int_mode)
1521 > GET_MODE_BITSIZE (op_mode));
1522 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1523 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1524 {
1525 rtx inner =
1526 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1527 if (inner)
1528 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1529 ? SIGN_EXTEND : ZERO_EXTEND,
1530 int_mode, inner, tmode);
1531 }
1532 }
1533
1534 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1535 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1536 if (GET_CODE (op) == LSHIFTRT
1537 && CONST_INT_P (XEXP (op, 1))
1538 && XEXP (op, 1) != const0_rtx)
1539 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1540
1541 #if defined(POINTERS_EXTEND_UNSIGNED)
1542 /* As we do not know which address space the pointer is referring to,
1543 we can do this only if the target does not support different pointer
1544 or address modes depending on the address space. */
1545 if (target_default_pointer_address_modes_p ()
1546 && ! POINTERS_EXTEND_UNSIGNED
1547 && mode == Pmode && GET_MODE (op) == ptr_mode
1548 && (CONSTANT_P (op)
1549 || (GET_CODE (op) == SUBREG
1550 && REG_P (SUBREG_REG (op))
1551 && REG_POINTER (SUBREG_REG (op))
1552 && GET_MODE (SUBREG_REG (op)) == Pmode))
1553 && !targetm.have_ptr_extend ())
1554 {
1555 temp
1556 = convert_memory_address_addr_space_1 (Pmode, op,
1557 ADDR_SPACE_GENERIC, false,
1558 true);
1559 if (temp)
1560 return temp;
1561 }
1562 #endif
1563 break;
1564
1565 case ZERO_EXTEND:
1566 /* Check for a zero extension of a subreg of a promoted
1567 variable, where the promotion is zero-extended, and the
1568 target mode is the same as the variable's promotion. */
1569 if (GET_CODE (op) == SUBREG
1570 && SUBREG_PROMOTED_VAR_P (op)
1571 && SUBREG_PROMOTED_UNSIGNED_P (op)
1572 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1573 {
1574 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1575 if (temp)
1576 return temp;
1577 }
1578
1579 /* Extending a widening multiplication should be canonicalized to
1580 a wider widening multiplication. */
1581 if (GET_CODE (op) == MULT)
1582 {
1583 rtx lhs = XEXP (op, 0);
1584 rtx rhs = XEXP (op, 1);
1585 enum rtx_code lcode = GET_CODE (lhs);
1586 enum rtx_code rcode = GET_CODE (rhs);
1587
1588 /* Widening multiplies usually extend both operands, but sometimes
1589 they use a shift to extract a portion of a register. */
1590 if ((lcode == ZERO_EXTEND
1591 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1592 && (rcode == ZERO_EXTEND
1593 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1594 {
1595 machine_mode lmode = GET_MODE (lhs);
1596 machine_mode rmode = GET_MODE (rhs);
1597 int bits;
1598
1599 if (lcode == LSHIFTRT)
1600 /* Number of bits not shifted off the end. */
1601 bits = (GET_MODE_UNIT_PRECISION (lmode)
1602 - INTVAL (XEXP (lhs, 1)));
1603 else /* lcode == ZERO_EXTEND */
1604 /* Size of inner mode. */
1605 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1606
1607 if (rcode == LSHIFTRT)
1608 bits += (GET_MODE_UNIT_PRECISION (rmode)
1609 - INTVAL (XEXP (rhs, 1)));
1610 else /* rcode == ZERO_EXTEND */
1611 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1612
1613              /* We can only widen multiplies if the result is mathematically
1614                 equivalent, i.e. if overflow was impossible.  */
1615 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1616 return simplify_gen_binary
1617 (MULT, mode,
1618 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1619 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1620 }
1621 }
1622
1623 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1624 if (GET_CODE (op) == ZERO_EXTEND)
1625 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1626 GET_MODE (XEXP (op, 0)));
1627
1628 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1629 is (zero_extend:M (subreg:O <X>)) if there is mode with
1630 GET_MODE_PRECISION (N) - I bits. */
1631 if (GET_CODE (op) == LSHIFTRT
1632 && GET_CODE (XEXP (op, 0)) == ASHIFT
1633 && is_a <scalar_int_mode> (mode, &int_mode)
1634 && CONST_INT_P (XEXP (op, 1))
1635 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1636 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1637 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1638 {
1639 scalar_int_mode tmode;
1640 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1641 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1642 {
1643 rtx inner =
1644 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1645 if (inner)
1646 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1647 inner, tmode);
1648 }
1649 }
1650
1651 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1652 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1653 of mode N. E.g.
1654 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1655 (and:SI (reg:SI) (const_int 63)). */
1656 if (partial_subreg_p (op)
1657 && is_a <scalar_int_mode> (mode, &int_mode)
1658 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1659 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1660 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1661 && subreg_lowpart_p (op)
1662 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1663 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1664 {
1665 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1666 return SUBREG_REG (op);
1667 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1668 op0_mode);
1669 }
1670
1671 #if defined(POINTERS_EXTEND_UNSIGNED)
1672 /* As we do not know which address space the pointer is referring to,
1673 we can do this only if the target does not support different pointer
1674 or address modes depending on the address space. */
1675 if (target_default_pointer_address_modes_p ()
1676 && POINTERS_EXTEND_UNSIGNED > 0
1677 && mode == Pmode && GET_MODE (op) == ptr_mode
1678 && (CONSTANT_P (op)
1679 || (GET_CODE (op) == SUBREG
1680 && REG_P (SUBREG_REG (op))
1681 && REG_POINTER (SUBREG_REG (op))
1682 && GET_MODE (SUBREG_REG (op)) == Pmode))
1683 && !targetm.have_ptr_extend ())
1684 {
1685 temp
1686 = convert_memory_address_addr_space_1 (Pmode, op,
1687 ADDR_SPACE_GENERIC, false,
1688 true);
1689 if (temp)
1690 return temp;
1691 }
1692 #endif
1693 break;
1694
1695 default:
1696 break;
1697 }
1698
1699 if (VECTOR_MODE_P (mode) && vec_duplicate_p (op, &elt))
1700 {
1701 /* Try applying the operator to ELT and see if that simplifies.
1702 We can duplicate the result if so.
1703
1704 The reason we don't use simplify_gen_unary is that it isn't
1705 necessarily a win to convert things like:
1706
1707 (neg:V (vec_duplicate:V (reg:S R)))
1708
1709 to:
1710
1711 (vec_duplicate:V (neg:S (reg:S R)))
1712
1713 The first might be done entirely in vector registers while the
1714 second might need a move between register files. */
1715 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1716 elt, GET_MODE_INNER (GET_MODE (op)));
1717 if (temp)
1718 return gen_vec_duplicate (mode, temp);
1719 }
1720
1721 return 0;
1722 }
1723
1724 /* Try to compute the value of a unary operation CODE whose output mode is to
1725 be MODE with input operand OP whose mode was originally OP_MODE.
1726 Return zero if the value cannot be computed. */
1727 rtx
1728 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1729 rtx op, machine_mode op_mode)
1730 {
1731 scalar_int_mode result_mode;
1732
1733 if (code == VEC_DUPLICATE)
1734 {
1735 gcc_assert (VECTOR_MODE_P (mode));
1736 if (GET_MODE (op) != VOIDmode)
1737 {
1738 if (!VECTOR_MODE_P (GET_MODE (op)))
1739 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1740 else
1741 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1742 (GET_MODE (op)));
1743 }
1744 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1745 return gen_const_vec_duplicate (mode, op);
1746 if (GET_CODE (op) == CONST_VECTOR)
1747 {
1748 unsigned int n_elts = GET_MODE_NUNITS (mode);
1749 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op);
1750 gcc_assert (in_n_elts < n_elts);
1751 gcc_assert ((n_elts % in_n_elts) == 0);
1752 rtvec v = rtvec_alloc (n_elts);
1753 for (unsigned i = 0; i < n_elts; i++)
1754 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1755 return gen_rtx_CONST_VECTOR (mode, v);
1756 }
1757 }
1758
1759 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1760 {
1761 int elt_size = GET_MODE_UNIT_SIZE (mode);
1762 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1763 machine_mode opmode = GET_MODE (op);
1764 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1765 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1766 rtvec v = rtvec_alloc (n_elts);
1767 unsigned int i;
1768
1769 gcc_assert (op_n_elts == n_elts);
1770 for (i = 0; i < n_elts; i++)
1771 {
1772 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1773 CONST_VECTOR_ELT (op, i),
1774 GET_MODE_INNER (opmode));
1775 if (!x)
1776 return 0;
1777 RTVEC_ELT (v, i) = x;
1778 }
1779 return gen_rtx_CONST_VECTOR (mode, v);
1780 }
1781
1782 /* The order of these tests is critical so that, for example, we don't
1783 check the wrong mode (input vs. output) for a conversion operation,
1784 such as FIX. At some point, this should be simplified. */
1785
1786 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1787 {
1788 REAL_VALUE_TYPE d;
1789
1790 if (op_mode == VOIDmode)
1791 {
1792 /* CONST_INTs have VOIDmode as their mode. We assume that all
1793 the bits of the constant are significant, though this is a
1794 dangerous assumption: CONST_INTs are often created and used
1795 with garbage in the bits outside of the precision of the
1796 implied mode of the const_int. */
1797 op_mode = MAX_MODE_INT;
1798 }
1799
1800 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1801
1802 /* Avoid the folding if flag_signaling_nans is on and
1803 operand is a signaling NaN. */
1804 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1805 return 0;
1806
1807 d = real_value_truncate (mode, d);
1808 return const_double_from_real_value (d, mode);
1809 }
1810 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1811 {
1812 REAL_VALUE_TYPE d;
1813
1814 if (op_mode == VOIDmode)
1815 {
1816 /* CONST_INTs have VOIDmode as their mode. We assume that all
1817 the bits of the constant are significant, though this is a
1818 dangerous assumption: CONST_INTs are often created and used
1819 with garbage in the bits outside of the precision of the
1820 implied mode of the const_int. */
1821 op_mode = MAX_MODE_INT;
1822 }
1823
1824 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1825
1826 /* Avoid the folding if flag_signaling_nans is on and
1827 operand is a signaling NaN. */
1828 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1829 return 0;
1830
1831 d = real_value_truncate (mode, d);
1832 return const_double_from_real_value (d, mode);
1833 }
1834
1835 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1836 {
1837 unsigned int width = GET_MODE_PRECISION (result_mode);
1838 wide_int result;
1839 scalar_int_mode imode = (op_mode == VOIDmode
1840 ? result_mode
1841 : as_a <scalar_int_mode> (op_mode));
1842 rtx_mode_t op0 = rtx_mode_t (op, imode);
1843 int int_value;
1844
1845 #if TARGET_SUPPORTS_WIDE_INT == 0
1846 /* This assert keeps the simplification from producing a result
1847 that cannot be represented in a CONST_DOUBLE, but a lot of
1848 upstream callers expect that this function never fails to
1849 simplify something, so if you added this to the test above
1850 the code would die later anyway. If this assert fires,
1851 you just need to make the port support wide int. */
1852 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1853 #endif
1854
1855 switch (code)
1856 {
1857 case NOT:
1858 result = wi::bit_not (op0);
1859 break;
1860
1861 case NEG:
1862 result = wi::neg (op0);
1863 break;
1864
1865 case ABS:
1866 result = wi::abs (op0);
1867 break;
1868
1869 case FFS:
1870 result = wi::shwi (wi::ffs (op0), result_mode);
1871 break;
1872
1873 case CLZ:
1874 if (wi::ne_p (op0, 0))
1875 int_value = wi::clz (op0);
1876 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1877 int_value = GET_MODE_PRECISION (imode);
1878 result = wi::shwi (int_value, result_mode);
1879 break;
1880
1881 case CLRSB:
1882 result = wi::shwi (wi::clrsb (op0), result_mode);
1883 break;
1884
1885 case CTZ:
1886 if (wi::ne_p (op0, 0))
1887 int_value = wi::ctz (op0);
1888 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1889 int_value = GET_MODE_PRECISION (imode);
1890 result = wi::shwi (int_value, result_mode);
1891 break;
1892
1893 case POPCOUNT:
1894 result = wi::shwi (wi::popcount (op0), result_mode);
1895 break;
1896
1897 case PARITY:
1898 result = wi::shwi (wi::parity (op0), result_mode);
1899 break;
1900
1901 case BSWAP:
1902 result = wide_int (op0).bswap ();
1903 break;
1904
1905 case TRUNCATE:
1906 case ZERO_EXTEND:
1907 result = wide_int::from (op0, width, UNSIGNED);
1908 break;
1909
1910 case SIGN_EXTEND:
1911 result = wide_int::from (op0, width, SIGNED);
1912 break;
1913
1914 case SQRT:
1915 default:
1916 return 0;
1917 }
1918
1919 return immed_wide_int_const (result, result_mode);
1920 }
1921
1922 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1923 && SCALAR_FLOAT_MODE_P (mode)
1924 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1925 {
1926 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1927 switch (code)
1928 {
1929 case SQRT:
1930 return 0;
1931 case ABS:
1932 d = real_value_abs (&d);
1933 break;
1934 case NEG:
1935 d = real_value_negate (&d);
1936 break;
1937 case FLOAT_TRUNCATE:
1938 /* Don't perform the operation if flag_signaling_nans is on
1939 and the operand is a signaling NaN. */
1940 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1941 return NULL_RTX;
1942 d = real_value_truncate (mode, d);
1943 break;
1944 case FLOAT_EXTEND:
1945 /* Don't perform the operation if flag_signaling_nans is on
1946 and the operand is a signaling NaN. */
1947 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1948 return NULL_RTX;
1949 /* All this does is change the mode, unless changing
1950 mode class. */
1951 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1952 real_convert (&d, mode, &d);
1953 break;
1954 case FIX:
1955 /* Don't perform the operation if flag_signaling_nans is on
1956 and the operand is a signaling NaN. */
1957 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1958 return NULL_RTX;
1959 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1960 break;
1961 case NOT:
1962 {
1963 long tmp[4];
1964 int i;
1965
1966 real_to_target (tmp, &d, GET_MODE (op));
1967 for (i = 0; i < 4; i++)
1968 tmp[i] = ~tmp[i];
1969 real_from_target (&d, tmp, mode);
1970 break;
1971 }
1972 default:
1973 gcc_unreachable ();
1974 }
1975 return const_double_from_real_value (d, mode);
1976 }
1977 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1978 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1979 && is_int_mode (mode, &result_mode))
1980 {
1981 unsigned int width = GET_MODE_PRECISION (result_mode);
1982 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1983 operators are intentionally left unspecified (to ease implementation
1984 by target backends), for consistency, this routine implements the
1985 same semantics for constant folding as used by the middle-end. */
1986
1987 /* This was formerly used only for non-IEEE float.
1988 eggert@twinsun.com says it is safe for IEEE also. */
1989 REAL_VALUE_TYPE t;
1990 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1991 wide_int wmax, wmin;
1992 /* This is part of the ABI of real_to_integer, but we check
1993 things before making this call. */
1994 bool fail;
1995
1996 switch (code)
1997 {
1998 case FIX:
1999 if (REAL_VALUE_ISNAN (*x))
2000 return const0_rtx;
2001
2002 /* Test against the signed upper bound. */
2003 wmax = wi::max_value (width, SIGNED);
2004 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2005 if (real_less (&t, x))
2006 return immed_wide_int_const (wmax, mode);
2007
2008 /* Test against the signed lower bound. */
2009 wmin = wi::min_value (width, SIGNED);
2010 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2011 if (real_less (x, &t))
2012 return immed_wide_int_const (wmin, mode);
2013
2014 return immed_wide_int_const (real_to_integer (x, &fail, width),
2015 mode);
2016
2017 case UNSIGNED_FIX:
2018 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2019 return const0_rtx;
2020
2021 /* Test against the unsigned upper bound. */
2022 wmax = wi::max_value (width, UNSIGNED);
2023 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2024 if (real_less (&t, x))
2025 return immed_wide_int_const (wmax, mode);
2026
2027 return immed_wide_int_const (real_to_integer (x, &fail, width),
2028 mode);
2029
2030 default:
2031 gcc_unreachable ();
2032 }
2033 }
2034
2035 /* Handle polynomial integers. */
2036 else if (CONST_POLY_INT_P (op))
2037 {
2038 poly_wide_int result;
2039 switch (code)
2040 {
2041 case NEG:
2042 result = -const_poly_int_value (op);
2043 break;
2044
2045 case NOT:
2046 result = ~const_poly_int_value (op);
2047 break;
2048
2049 default:
2050 return NULL_RTX;
2051 }
2052 return immed_wide_int_const (result, mode);
2053 }
2054
2055 return NULL_RTX;
2056 }
2057 \f
2058 /* Subroutine of simplify_binary_operation to simplify a binary operation
2059 CODE that can commute with byte swapping, with result mode MODE and
2060 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2061 Return zero if no simplification or canonicalization is possible. */
2062
2063 static rtx
2064 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2065 rtx op0, rtx op1)
2066 {
2067 rtx tem;
2068
2069 /* (op (bswap x) C1) -> (bswap (op x C2)) where C2 is C1 byte-swapped. */
2070 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2071 {
2072 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2073 simplify_gen_unary (BSWAP, mode, op1, mode));
2074 return simplify_gen_unary (BSWAP, mode, tem, mode);
2075 }
2076
2077 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2078 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2079 {
2080 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2081 return simplify_gen_unary (BSWAP, mode, tem, mode);
2082 }
2083
2084 return NULL_RTX;
2085 }
2086
2087 /* Subroutine of simplify_binary_operation to simplify a commutative,
2088 associative binary operation CODE with result mode MODE, operating
2089 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2090 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2091 canonicalization is possible. */
2092
2093 static rtx
2094 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2095 rtx op0, rtx op1)
2096 {
2097 rtx tem;
2098
2099 /* Linearize the operator to the left. */
2100 if (GET_CODE (op1) == code)
2101 {
2102 /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)". */
2103 if (GET_CODE (op0) == code)
2104 {
2105 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2106 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2107 }
2108
2109 /* "a op (b op c)" becomes "(b op c) op a". */
2110 if (! swap_commutative_operands_p (op1, op0))
2111 return simplify_gen_binary (code, mode, op1, op0);
2112
2113 std::swap (op0, op1);
2114 }
2115
2116 if (GET_CODE (op0) == code)
2117 {
2118 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2119 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2120 {
2121 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2122 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2123 }
2124
2125 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2126 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2127 if (tem != 0)
2128 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2129
2130 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2131 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2132 if (tem != 0)
2133 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2134 }
2135
2136 return 0;
2137 }
2138
2139
2140 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2141 and OP1. Return 0 if no simplification is possible.
2142
2143 Don't use this for relational operations such as EQ or LT.
2144 Use simplify_relational_operation instead. */
2145 rtx
2146 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2147 rtx op0, rtx op1)
2148 {
2149 rtx trueop0, trueop1;
2150 rtx tem;
2151
2152 /* Relational operations don't work here. We must know the mode
2153 of the operands in order to do the comparison correctly.
2154 Assuming a full word can give incorrect results.
2155 Consider comparing 128 with -128 in QImode. */
2156 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2157 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2158
2159 /* Make sure the constant is second. */
2160 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2161 && swap_commutative_operands_p (op0, op1))
2162 std::swap (op0, op1);
2163
2164 trueop0 = avoid_constant_pool_reference (op0);
2165 trueop1 = avoid_constant_pool_reference (op1);
2166
2167 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2168 if (tem)
2169 return tem;
2170 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2171
2172 if (tem)
2173 return tem;
2174
2175 /* If the above steps did not result in a simplification and op0 or op1
2176 were constant pool references, use the referenced constants directly. */
2177 if (trueop0 != op0 || trueop1 != op1)
2178 return simplify_gen_binary (code, mode, trueop0, trueop1);
2179
2180 return NULL_RTX;
2181 }
2182
2183 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2184 which OP0 and OP1 are both vector series or vector duplicates
2185 (which are really just series with a step of 0). If so, try to
2186 form a new series by applying CODE to the bases and to the steps.
2187 Return null if no simplification is possible.
2188
2189 MODE is the mode of the operation and is known to be a vector
2190 integer mode. */
2191
2192 static rtx
2193 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2194 rtx op0, rtx op1)
2195 {
2196 rtx base0, step0;
2197 if (vec_duplicate_p (op0, &base0))
2198 step0 = const0_rtx;
2199 else if (!vec_series_p (op0, &base0, &step0))
2200 return NULL_RTX;
2201
2202 rtx base1, step1;
2203 if (vec_duplicate_p (op1, &base1))
2204 step1 = const0_rtx;
2205 else if (!vec_series_p (op1, &base1, &step1))
2206 return NULL_RTX;
2207
2208 /* Only create a new series if we can simplify both parts. In other
2209 cases this isn't really a simplification, and it's not necessarily
2210 a win to replace a vector operation with a scalar operation. */
2211 scalar_mode inner_mode = GET_MODE_INNER (mode);
2212 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2213 if (!new_base)
2214 return NULL_RTX;
2215
2216 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2217 if (!new_step)
2218 return NULL_RTX;
2219
2220 return gen_vec_series (mode, new_base, new_step);
2221 }
2222
2223 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2224 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2225 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2226 actual constants. */
2227
2228 static rtx
2229 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2230 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2231 {
2232 rtx tem, reversed, opleft, opright, elt0, elt1;
2233 HOST_WIDE_INT val;
2234 scalar_int_mode int_mode, inner_mode;
2235 poly_int64 offset;
2236
2237 /* Even if we can't compute a constant result,
2238 there are some cases worth simplifying. */
2239
2240 switch (code)
2241 {
2242 case PLUS:
2243 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2244 when x is NaN, infinite, or finite and nonzero. They aren't
2245 when x is -0 and the rounding mode is not towards -infinity,
2246 since (-0) + 0 is then 0. */
2247 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2248 return op0;
2249
2250 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2251 transformations are safe even for IEEE. */
2252 if (GET_CODE (op0) == NEG)
2253 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2254 else if (GET_CODE (op1) == NEG)
2255 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2256
2257 /* (~a) + 1 -> -a */
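/* This is the two's complement identity: since a + ~a == -1, ~a + 1 == -a. */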
2258 if (INTEGRAL_MODE_P (mode)
2259 && GET_CODE (op0) == NOT
2260 && trueop1 == const1_rtx)
2261 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2262
2263 /* Handle both-operands-constant cases. We can only add
2264 CONST_INTs to constants since the sum of relocatable symbols
2265 can't be handled by most assemblers. Don't add CONST_INT
2266 to CONST_INT since overflow won't be computed properly if wider
2267 than HOST_BITS_PER_WIDE_INT. */
2268
2269 if ((GET_CODE (op0) == CONST
2270 || GET_CODE (op0) == SYMBOL_REF
2271 || GET_CODE (op0) == LABEL_REF)
2272 && CONST_INT_P (op1))
2273 return plus_constant (mode, op0, INTVAL (op1));
2274 else if ((GET_CODE (op1) == CONST
2275 || GET_CODE (op1) == SYMBOL_REF
2276 || GET_CODE (op1) == LABEL_REF)
2277 && CONST_INT_P (op0))
2278 return plus_constant (mode, op1, INTVAL (op0));
2279
2280 /* See if this is something like X * C - X or vice versa or
2281 if the multiplication is written as a shift. If so, we can
2282 distribute and make a new multiply, shift, or maybe just
2283 have X (if C is 2 in the example above). But don't make
2284 something more expensive than we had before. */
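/* For instance, (plus (mult x (const_int 3)) x) can become
(mult x (const_int 4)), and (plus (ashift x (const_int 2)) x) can
become (mult x (const_int 5)), provided the new form is no costlier. */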
2285
2286 if (is_a <scalar_int_mode> (mode, &int_mode))
2287 {
2288 rtx lhs = op0, rhs = op1;
2289
2290 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2291 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2292
2293 if (GET_CODE (lhs) == NEG)
2294 {
2295 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2296 lhs = XEXP (lhs, 0);
2297 }
2298 else if (GET_CODE (lhs) == MULT
2299 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2300 {
2301 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2302 lhs = XEXP (lhs, 0);
2303 }
2304 else if (GET_CODE (lhs) == ASHIFT
2305 && CONST_INT_P (XEXP (lhs, 1))
2306 && INTVAL (XEXP (lhs, 1)) >= 0
2307 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2308 {
2309 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2310 GET_MODE_PRECISION (int_mode));
2311 lhs = XEXP (lhs, 0);
2312 }
2313
2314 if (GET_CODE (rhs) == NEG)
2315 {
2316 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2317 rhs = XEXP (rhs, 0);
2318 }
2319 else if (GET_CODE (rhs) == MULT
2320 && CONST_INT_P (XEXP (rhs, 1)))
2321 {
2322 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2323 rhs = XEXP (rhs, 0);
2324 }
2325 else if (GET_CODE (rhs) == ASHIFT
2326 && CONST_INT_P (XEXP (rhs, 1))
2327 && INTVAL (XEXP (rhs, 1)) >= 0
2328 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2329 {
2330 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2331 GET_MODE_PRECISION (int_mode));
2332 rhs = XEXP (rhs, 0);
2333 }
2334
2335 if (rtx_equal_p (lhs, rhs))
2336 {
2337 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2338 rtx coeff;
2339 bool speed = optimize_function_for_speed_p (cfun);
2340
2341 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2342
2343 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2344 return (set_src_cost (tem, int_mode, speed)
2345 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2346 }
2347 }
2348
2349 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
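/* E.g. in QImode, adding 0x80 is the same as XORing with 0x80 because
the carry out of the sign bit is discarded, so
(plus (xor x 0x0f) 0x80) becomes (xor x 0x8f). */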
2350 if (CONST_SCALAR_INT_P (op1)
2351 && GET_CODE (op0) == XOR
2352 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2353 && mode_signbit_p (mode, op1))
2354 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2355 simplify_gen_binary (XOR, mode, op1,
2356 XEXP (op0, 1)));
2357
2358 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2359 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2360 && GET_CODE (op0) == MULT
2361 && GET_CODE (XEXP (op0, 0)) == NEG)
2362 {
2363 rtx in1, in2;
2364
2365 in1 = XEXP (XEXP (op0, 0), 0);
2366 in2 = XEXP (op0, 1);
2367 return simplify_gen_binary (MINUS, mode, op1,
2368 simplify_gen_binary (MULT, mode,
2369 in1, in2));
2370 }
2371
2372 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2373 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2374 is 1. */
2375 if (COMPARISON_P (op0)
2376 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2377 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2378 && (reversed = reversed_comparison (op0, mode)))
2379 return
2380 simplify_gen_unary (NEG, mode, reversed, mode);
2381
2382 /* If one of the operands is a PLUS or a MINUS, see if we can
2383 simplify this by the associative law.
2384 Don't use the associative law for floating point.
2385 The inaccuracy makes it nonassociative,
2386 and subtle programs can break if operations are associated. */
2387
2388 if (INTEGRAL_MODE_P (mode)
2389 && (plus_minus_operand_p (op0)
2390 || plus_minus_operand_p (op1))
2391 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2392 return tem;
2393
2394 /* Reassociate floating point addition only when the user
2395 specifies associative math operations. */
2396 if (FLOAT_MODE_P (mode)
2397 && flag_associative_math)
2398 {
2399 tem = simplify_associative_operation (code, mode, op0, op1);
2400 if (tem)
2401 return tem;
2402 }
2403
2404 /* Handle vector series. */
2405 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2406 {
2407 tem = simplify_binary_operation_series (code, mode, op0, op1);
2408 if (tem)
2409 return tem;
2410 }
2411 break;
2412
2413 case COMPARE:
2414 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2415 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2416 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2417 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2418 {
2419 rtx xop00 = XEXP (op0, 0);
2420 rtx xop10 = XEXP (op1, 0);
2421
2422 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2423 return xop00;
2424
2425 if (REG_P (xop00) && REG_P (xop10)
2426 && REGNO (xop00) == REGNO (xop10)
2427 && GET_MODE (xop00) == mode
2428 && GET_MODE (xop10) == mode
2429 && GET_MODE_CLASS (mode) == MODE_CC)
2430 return xop00;
2431 }
2432 break;
2433
2434 case MINUS:
2435 /* We can't assume x-x is 0 even with non-IEEE floating point,
2436 but since it is zero except in very strange circumstances, we
2437 will treat it as zero with -ffinite-math-only. */
2438 if (rtx_equal_p (trueop0, trueop1)
2439 && ! side_effects_p (op0)
2440 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2441 return CONST0_RTX (mode);
2442
2443 /* Change subtraction from zero into negation. (0 - x) is the
2444 same as -x when x is NaN, infinite, or finite and nonzero.
2445 But if the mode has signed zeros, and does not round towards
2446 -infinity, then 0 - 0 is 0, not -0. */
2447 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2448 return simplify_gen_unary (NEG, mode, op1, mode);
2449
2450 /* (-1 - a) is ~a, unless the expression contains symbolic
2451 constants, in which case not retaining additions and
2452 subtractions could cause invalid assembly to be produced. */
2453 if (trueop0 == constm1_rtx
2454 && !contains_symbolic_reference_p (op1))
2455 return simplify_gen_unary (NOT, mode, op1, mode);
2456
2457 /* Subtracting 0 has no effect unless the mode has signed zeros
2458 and supports rounding towards -infinity. In such a case,
2459 0 - 0 is -0. */
2460 if (!(HONOR_SIGNED_ZEROS (mode)
2461 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2462 && trueop1 == CONST0_RTX (mode))
2463 return op0;
2464
2465 /* See if this is something like X * C - X or vice versa or
2466 if the multiplication is written as a shift. If so, we can
2467 distribute and make a new multiply, shift, or maybe just
2468 have X (if C is 2 in the example above). But don't make
2469 something more expensive than we had before. */
2470
2471 if (is_a <scalar_int_mode> (mode, &int_mode))
2472 {
2473 rtx lhs = op0, rhs = op1;
2474
2475 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2476 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2477
2478 if (GET_CODE (lhs) == NEG)
2479 {
2480 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2481 lhs = XEXP (lhs, 0);
2482 }
2483 else if (GET_CODE (lhs) == MULT
2484 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2485 {
2486 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2487 lhs = XEXP (lhs, 0);
2488 }
2489 else if (GET_CODE (lhs) == ASHIFT
2490 && CONST_INT_P (XEXP (lhs, 1))
2491 && INTVAL (XEXP (lhs, 1)) >= 0
2492 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2493 {
2494 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2495 GET_MODE_PRECISION (int_mode));
2496 lhs = XEXP (lhs, 0);
2497 }
2498
2499 if (GET_CODE (rhs) == NEG)
2500 {
2501 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2502 rhs = XEXP (rhs, 0);
2503 }
2504 else if (GET_CODE (rhs) == MULT
2505 && CONST_INT_P (XEXP (rhs, 1)))
2506 {
2507 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2508 rhs = XEXP (rhs, 0);
2509 }
2510 else if (GET_CODE (rhs) == ASHIFT
2511 && CONST_INT_P (XEXP (rhs, 1))
2512 && INTVAL (XEXP (rhs, 1)) >= 0
2513 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2514 {
2515 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2516 GET_MODE_PRECISION (int_mode));
2517 negcoeff1 = -negcoeff1;
2518 rhs = XEXP (rhs, 0);
2519 }
2520
2521 if (rtx_equal_p (lhs, rhs))
2522 {
2523 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2524 rtx coeff;
2525 bool speed = optimize_function_for_speed_p (cfun);
2526
2527 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2528
2529 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2530 return (set_src_cost (tem, int_mode, speed)
2531 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2532 }
2533 }
2534
2535 /* (a - (-b)) -> (a + b). True even for IEEE. */
2536 if (GET_CODE (op1) == NEG)
2537 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2538
2539 /* (-x - c) may be simplified as (-c - x). */
2540 if (GET_CODE (op0) == NEG
2541 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2542 {
2543 tem = simplify_unary_operation (NEG, mode, op1, mode);
2544 if (tem)
2545 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2546 }
2547
2548 if ((GET_CODE (op0) == CONST
2549 || GET_CODE (op0) == SYMBOL_REF
2550 || GET_CODE (op0) == LABEL_REF)
2551 && poly_int_rtx_p (op1, &offset))
2552 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2553
2554 /* Don't let a relocatable value get a negative coeff. */
2555 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2556 return simplify_gen_binary (PLUS, mode,
2557 op0,
2558 neg_const_int (mode, op1));
2559
2560 /* (x - (x & y)) -> (x & ~y) */
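/* This holds because (x & y) and (x & ~y) have no bits in common
and add up to x. */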
2561 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2562 {
2563 if (rtx_equal_p (op0, XEXP (op1, 0)))
2564 {
2565 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2566 GET_MODE (XEXP (op1, 1)));
2567 return simplify_gen_binary (AND, mode, op0, tem);
2568 }
2569 if (rtx_equal_p (op0, XEXP (op1, 1)))
2570 {
2571 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2572 GET_MODE (XEXP (op1, 0)));
2573 return simplify_gen_binary (AND, mode, op0, tem);
2574 }
2575 }
2576
2577 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2578 by reversing the comparison code if valid. */
2579 if (STORE_FLAG_VALUE == 1
2580 && trueop0 == const1_rtx
2581 && COMPARISON_P (op1)
2582 && (reversed = reversed_comparison (op1, mode)))
2583 return reversed;
2584
2585 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2586 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2587 && GET_CODE (op1) == MULT
2588 && GET_CODE (XEXP (op1, 0)) == NEG)
2589 {
2590 rtx in1, in2;
2591
2592 in1 = XEXP (XEXP (op1, 0), 0);
2593 in2 = XEXP (op1, 1);
2594 return simplify_gen_binary (PLUS, mode,
2595 simplify_gen_binary (MULT, mode,
2596 in1, in2),
2597 op0);
2598 }
2599
2600 /* Canonicalize (minus (neg A) (mult B C)) to
2601 (minus (mult (neg B) C) A). */
2602 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2603 && GET_CODE (op1) == MULT
2604 && GET_CODE (op0) == NEG)
2605 {
2606 rtx in1, in2;
2607
2608 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2609 in2 = XEXP (op1, 1);
2610 return simplify_gen_binary (MINUS, mode,
2611 simplify_gen_binary (MULT, mode,
2612 in1, in2),
2613 XEXP (op0, 0));
2614 }
2615
2616 /* If one of the operands is a PLUS or a MINUS, see if we can
2617 simplify this by the associative law. This will, for example,
2618 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2619 Don't use the associative law for floating point.
2620 The inaccuracy makes it nonassociative,
2621 and subtle programs can break if operations are associated. */
2622
2623 if (INTEGRAL_MODE_P (mode)
2624 && (plus_minus_operand_p (op0)
2625 || plus_minus_operand_p (op1))
2626 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2627 return tem;
2628
2629 /* Handle vector series. */
2630 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2631 {
2632 tem = simplify_binary_operation_series (code, mode, op0, op1);
2633 if (tem)
2634 return tem;
2635 }
2636 break;
2637
2638 case MULT:
2639 if (trueop1 == constm1_rtx)
2640 return simplify_gen_unary (NEG, mode, op0, mode);
2641
2642 if (GET_CODE (op0) == NEG)
2643 {
2644 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2645 /* If op1 is a MULT as well and simplify_unary_operation
2646 just moved the NEG to the second operand, simplify_gen_binary
2647 below could, through simplify_associative_operation, move
2648 the NEG around again and recurse endlessly. */
2649 if (temp
2650 && GET_CODE (op1) == MULT
2651 && GET_CODE (temp) == MULT
2652 && XEXP (op1, 0) == XEXP (temp, 0)
2653 && GET_CODE (XEXP (temp, 1)) == NEG
2654 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2655 temp = NULL_RTX;
2656 if (temp)
2657 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2658 }
2659 if (GET_CODE (op1) == NEG)
2660 {
2661 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2662 /* If op0 is a MULT as well and simplify_unary_operation
2663 just moved the NEG to the second operand, simplify_gen_binary
2664 below could, through simplify_associative_operation, move
2665 the NEG around again and recurse endlessly. */
2666 if (temp
2667 && GET_CODE (op0) == MULT
2668 && GET_CODE (temp) == MULT
2669 && XEXP (op0, 0) == XEXP (temp, 0)
2670 && GET_CODE (XEXP (temp, 1)) == NEG
2671 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2672 temp = NULL_RTX;
2673 if (temp)
2674 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2675 }
2676
2677 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2678 x is NaN, since x * 0 is then also NaN. Nor is it valid
2679 when the mode has signed zeros, since multiplying a negative
2680 number by 0 will give -0, not 0. */
2681 if (!HONOR_NANS (mode)
2682 && !HONOR_SIGNED_ZEROS (mode)
2683 && trueop1 == CONST0_RTX (mode)
2684 && ! side_effects_p (op0))
2685 return op1;
2686
2687 /* In IEEE floating point, x*1 is not equivalent to x for
2688 signaling NaNs. */
2689 if (!HONOR_SNANS (mode)
2690 && trueop1 == CONST1_RTX (mode))
2691 return op0;
2692
2693 /* Convert multiply by constant power of two into shift. */
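/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)). */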
2694 if (CONST_SCALAR_INT_P (trueop1))
2695 {
2696 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2697 if (val >= 0)
2698 return simplify_gen_binary (ASHIFT, mode, op0,
2699 gen_int_shift_amount (mode, val));
2700 }
2701
2702 /* x*2 is x+x and x*(-1) is -x */
2703 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2704 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2705 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2706 && GET_MODE (op0) == mode)
2707 {
2708 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2709
2710 if (real_equal (d1, &dconst2))
2711 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2712
2713 if (!HONOR_SNANS (mode)
2714 && real_equal (d1, &dconstm1))
2715 return simplify_gen_unary (NEG, mode, op0, mode);
2716 }
2717
2718 /* Optimize -x * -x as x * x. */
2719 if (FLOAT_MODE_P (mode)
2720 && GET_CODE (op0) == NEG
2721 && GET_CODE (op1) == NEG
2722 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2723 && !side_effects_p (XEXP (op0, 0)))
2724 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2725
2726 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2727 if (SCALAR_FLOAT_MODE_P (mode)
2728 && GET_CODE (op0) == ABS
2729 && GET_CODE (op1) == ABS
2730 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2731 && !side_effects_p (XEXP (op0, 0)))
2732 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2733
2734 /* Reassociate multiplication, but for floating point MULTs
2735 only when the user specifies unsafe math optimizations. */
2736 if (! FLOAT_MODE_P (mode)
2737 || flag_unsafe_math_optimizations)
2738 {
2739 tem = simplify_associative_operation (code, mode, op0, op1);
2740 if (tem)
2741 return tem;
2742 }
2743 break;
2744
2745 case IOR:
2746 if (trueop1 == CONST0_RTX (mode))
2747 return op0;
2748 if (INTEGRAL_MODE_P (mode)
2749 && trueop1 == CONSTM1_RTX (mode)
2750 && !side_effects_p (op0))
2751 return op1;
2752 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2753 return op0;
2754 /* A | (~A) -> -1 */
2755 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2756 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2757 && ! side_effects_p (op0)
2758 && SCALAR_INT_MODE_P (mode))
2759 return constm1_rtx;
2760
2761 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2762 if (CONST_INT_P (op1)
2763 && HWI_COMPUTABLE_MODE_P (mode)
2764 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2765 && !side_effects_p (op0))
2766 return op1;
2767
2768 /* Canonicalize (X & C1) | C2. */
2769 if (GET_CODE (op0) == AND
2770 && CONST_INT_P (trueop1)
2771 && CONST_INT_P (XEXP (op0, 1)))
2772 {
2773 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2774 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2775 HOST_WIDE_INT c2 = INTVAL (trueop1);
2776
2777 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2778 if ((c1 & c2) == c1
2779 && !side_effects_p (XEXP (op0, 0)))
2780 return trueop1;
2781
2782 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2783 if (((c1|c2) & mask) == mask)
2784 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2785 }
2786
2787 /* Convert (A & B) | A to A. */
2788 if (GET_CODE (op0) == AND
2789 && (rtx_equal_p (XEXP (op0, 0), op1)
2790 || rtx_equal_p (XEXP (op0, 1), op1))
2791 && ! side_effects_p (XEXP (op0, 0))
2792 && ! side_effects_p (XEXP (op0, 1)))
2793 return op1;
2794
2795 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2796 mode size to (rotate A CX). */
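/* E.g. in SImode, (ior (ashift x (const_int 3)) (lshiftrt x (const_int 29)))
becomes (rotate x (const_int 3)), since 3 + 29 == 32. */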
2797
2798 if (GET_CODE (op1) == ASHIFT
2799 || GET_CODE (op1) == SUBREG)
2800 {
2801 opleft = op1;
2802 opright = op0;
2803 }
2804 else
2805 {
2806 opright = op1;
2807 opleft = op0;
2808 }
2809
2810 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2811 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2812 && CONST_INT_P (XEXP (opleft, 1))
2813 && CONST_INT_P (XEXP (opright, 1))
2814 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2815 == GET_MODE_UNIT_PRECISION (mode)))
2816 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2817
2818 /* Same, but for ashift that has been "simplified" to a wider mode
2819 by simplify_shift_const. */
2820
2821 if (GET_CODE (opleft) == SUBREG
2822 && is_a <scalar_int_mode> (mode, &int_mode)
2823 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2824 &inner_mode)
2825 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2826 && GET_CODE (opright) == LSHIFTRT
2827 && GET_CODE (XEXP (opright, 0)) == SUBREG
2828 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2829 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2830 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2831 SUBREG_REG (XEXP (opright, 0)))
2832 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2833 && CONST_INT_P (XEXP (opright, 1))
2834 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2835 + INTVAL (XEXP (opright, 1))
2836 == GET_MODE_PRECISION (int_mode)))
2837 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2838 XEXP (SUBREG_REG (opleft), 1));
2839
2840 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2841 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2842 the PLUS does not affect any of the bits in OP1: then we can do
2843 the IOR as a PLUS and we can associate. This is valid if OP1
2844 can be safely shifted left C bits. */
2845 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2846 && GET_CODE (XEXP (op0, 0)) == PLUS
2847 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2848 && CONST_INT_P (XEXP (op0, 1))
2849 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2850 {
2851 int count = INTVAL (XEXP (op0, 1));
2852 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2853
2854 if (mask >> count == INTVAL (trueop1)
2855 && trunc_int_for_mode (mask, mode) == mask
2856 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2857 return simplify_gen_binary (ASHIFTRT, mode,
2858 plus_constant (mode, XEXP (op0, 0),
2859 mask),
2860 XEXP (op0, 1));
2861 }
2862
2863 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2864 if (tem)
2865 return tem;
2866
2867 tem = simplify_associative_operation (code, mode, op0, op1);
2868 if (tem)
2869 return tem;
2870 break;
2871
2872 case XOR:
2873 if (trueop1 == CONST0_RTX (mode))
2874 return op0;
2875 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2876 return simplify_gen_unary (NOT, mode, op0, mode);
2877 if (rtx_equal_p (trueop0, trueop1)
2878 && ! side_effects_p (op0)
2879 && GET_MODE_CLASS (mode) != MODE_CC)
2880 return CONST0_RTX (mode);
2881
2882 /* Canonicalize XOR of the most significant bit to PLUS. */
2883 if (CONST_SCALAR_INT_P (op1)
2884 && mode_signbit_p (mode, op1))
2885 return simplify_gen_binary (PLUS, mode, op0, op1);
2886 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2887 if (CONST_SCALAR_INT_P (op1)
2888 && GET_CODE (op0) == PLUS
2889 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2890 && mode_signbit_p (mode, XEXP (op0, 1)))
2891 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2892 simplify_gen_binary (XOR, mode, op1,
2893 XEXP (op0, 1)));
2894
2895 /* If we are XORing two things that have no bits in common,
2896 convert them into an IOR. This helps to detect rotation encoded
2897 using those methods and possibly other simplifications. */
2898
2899 if (HWI_COMPUTABLE_MODE_P (mode)
2900 && (nonzero_bits (op0, mode)
2901 & nonzero_bits (op1, mode)) == 0)
2902 return (simplify_gen_binary (IOR, mode, op0, op1));
2903
2904 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2905 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2906 (NOT y). */
2907 {
2908 int num_negated = 0;
2909
2910 if (GET_CODE (op0) == NOT)
2911 num_negated++, op0 = XEXP (op0, 0);
2912 if (GET_CODE (op1) == NOT)
2913 num_negated++, op1 = XEXP (op1, 0);
2914
2915 if (num_negated == 2)
2916 return simplify_gen_binary (XOR, mode, op0, op1);
2917 else if (num_negated == 1)
2918 return simplify_gen_unary (NOT, mode,
2919 simplify_gen_binary (XOR, mode, op0, op1),
2920 mode);
2921 }
2922
2923 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2924 correspond to a machine insn or result in further simplifications
2925 if B is a constant. */
2926
2927 if (GET_CODE (op0) == AND
2928 && rtx_equal_p (XEXP (op0, 1), op1)
2929 && ! side_effects_p (op1))
2930 return simplify_gen_binary (AND, mode,
2931 simplify_gen_unary (NOT, mode,
2932 XEXP (op0, 0), mode),
2933 op1);
2934
2935 else if (GET_CODE (op0) == AND
2936 && rtx_equal_p (XEXP (op0, 0), op1)
2937 && ! side_effects_p (op1))
2938 return simplify_gen_binary (AND, mode,
2939 simplify_gen_unary (NOT, mode,
2940 XEXP (op0, 1), mode),
2941 op1);
2942
2943 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2944 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2945 out bits inverted twice and not set by C. Similarly, given
2946 (xor (and (xor A B) C) D), simplify without inverting C in
2947 the xor operand: (xor (and A C) (B&C)^D).
2948 */
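/* To see why the first form is correct, check each bit position: where
C is 1 both sides give 1 ^ D, and where C is 0 both give A ^ B ^ D. */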
2949 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2950 && GET_CODE (XEXP (op0, 0)) == XOR
2951 && CONST_INT_P (op1)
2952 && CONST_INT_P (XEXP (op0, 1))
2953 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2954 {
2955 enum rtx_code op = GET_CODE (op0);
2956 rtx a = XEXP (XEXP (op0, 0), 0);
2957 rtx b = XEXP (XEXP (op0, 0), 1);
2958 rtx c = XEXP (op0, 1);
2959 rtx d = op1;
2960 HOST_WIDE_INT bval = INTVAL (b);
2961 HOST_WIDE_INT cval = INTVAL (c);
2962 HOST_WIDE_INT dval = INTVAL (d);
2963 HOST_WIDE_INT xcval;
2964
2965 if (op == IOR)
2966 xcval = ~cval;
2967 else
2968 xcval = cval;
2969
2970 return simplify_gen_binary (XOR, mode,
2971 simplify_gen_binary (op, mode, a, c),
2972 gen_int_mode ((bval & xcval) ^ dval,
2973 mode));
2974 }
2975
2976 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2977 we can transform like this:
2978 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2979 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2980 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2981 Attempt a few simplifications when B and C are both constants. */
2982 if (GET_CODE (op0) == AND
2983 && CONST_INT_P (op1)
2984 && CONST_INT_P (XEXP (op0, 1)))
2985 {
2986 rtx a = XEXP (op0, 0);
2987 rtx b = XEXP (op0, 1);
2988 rtx c = op1;
2989 HOST_WIDE_INT bval = INTVAL (b);
2990 HOST_WIDE_INT cval = INTVAL (c);
2991
2992 /* Instead of computing ~A&C, we compute its negated value,
2993 (A|~C). If it yields -1, ~A&C is zero, so we can
2994 optimize for sure. If it does not simplify, we still try
2995 to compute ~A&C below, but since that always allocates
2996 RTL, we don't try that before committing to returning a
2997 simplified expression. */
2998 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2999 GEN_INT (~cval));
3000
3001 if ((~cval & bval) == 0)
3002 {
3003 rtx na_c = NULL_RTX;
3004 if (n_na_c)
3005 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3006 else
3007 {
3008 /* If ~A does not simplify, don't bother: we don't
3009 want to simplify 2 operations into 3, and if na_c
3010 were to simplify with na, n_na_c would have
3011 simplified as well. */
3012 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3013 if (na)
3014 na_c = simplify_gen_binary (AND, mode, na, c);
3015 }
3016
3017 /* Try to simplify ~A&C | ~B&C. */
3018 if (na_c != NULL_RTX)
3019 return simplify_gen_binary (IOR, mode, na_c,
3020 gen_int_mode (~bval & cval, mode));
3021 }
3022 else
3023 {
3024 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3025 if (n_na_c == CONSTM1_RTX (mode))
3026 {
3027 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3028 gen_int_mode (~cval & bval,
3029 mode));
3030 return simplify_gen_binary (IOR, mode, a_nc_b,
3031 gen_int_mode (~bval & cval,
3032 mode));
3033 }
3034 }
3035 }
3036
3037 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3038 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3039 machines, and also has shorter instruction path length. */
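/* Bitwise, where C is 1 the result is the bit of B (since (A ^ B) ^ A == B),
and where C is 0 it is the bit of A. */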
3040 if (GET_CODE (op0) == AND
3041 && GET_CODE (XEXP (op0, 0)) == XOR
3042 && CONST_INT_P (XEXP (op0, 1))
3043 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3044 {
3045 rtx a = trueop1;
3046 rtx b = XEXP (XEXP (op0, 0), 1);
3047 rtx c = XEXP (op0, 1);
3048 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3049 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3050 rtx bc = simplify_gen_binary (AND, mode, b, c);
3051 return simplify_gen_binary (IOR, mode, a_nc, bc);
3052 }
3053 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3054 else if (GET_CODE (op0) == AND
3055 && GET_CODE (XEXP (op0, 0)) == XOR
3056 && CONST_INT_P (XEXP (op0, 1))
3057 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3058 {
3059 rtx a = XEXP (XEXP (op0, 0), 0);
3060 rtx b = trueop1;
3061 rtx c = XEXP (op0, 1);
3062 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3063 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3064 rtx ac = simplify_gen_binary (AND, mode, a, c);
3065 return simplify_gen_binary (IOR, mode, ac, b_nc);
3066 }
3067
3068 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3069 comparison if STORE_FLAG_VALUE is 1. */
3070 if (STORE_FLAG_VALUE == 1
3071 && trueop1 == const1_rtx
3072 && COMPARISON_P (op0)
3073 && (reversed = reversed_comparison (op0, mode)))
3074 return reversed;
3075
3076 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3077 is (lt foo (const_int 0)), so we can perform the above
3078 simplification if STORE_FLAG_VALUE is 1. */
3079
3080 if (is_a <scalar_int_mode> (mode, &int_mode)
3081 && STORE_FLAG_VALUE == 1
3082 && trueop1 == const1_rtx
3083 && GET_CODE (op0) == LSHIFTRT
3084 && CONST_INT_P (XEXP (op0, 1))
3085 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3086 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3087
3088 /* (xor (comparison foo bar) (const_int sign-bit))
3089 when STORE_FLAG_VALUE is the sign bit. */
3090 if (is_a <scalar_int_mode> (mode, &int_mode)
3091 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3092 && trueop1 == const_true_rtx
3093 && COMPARISON_P (op0)
3094 && (reversed = reversed_comparison (op0, int_mode)))
3095 return reversed;
3096
3097 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3098 if (tem)
3099 return tem;
3100
3101 tem = simplify_associative_operation (code, mode, op0, op1);
3102 if (tem)
3103 return tem;
3104 break;
3105
3106 case AND:
3107 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3108 return trueop1;
3109 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3110 return op0;
3111 if (HWI_COMPUTABLE_MODE_P (mode))
3112 {
3113 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3114 HOST_WIDE_INT nzop1;
3115 if (CONST_INT_P (trueop1))
3116 {
3117 HOST_WIDE_INT val1 = INTVAL (trueop1);
3118 /* If we are turning off bits already known off in OP0, we need
3119 not do an AND. */
3120 if ((nzop0 & ~val1) == 0)
3121 return op0;
3122 }
3123 nzop1 = nonzero_bits (trueop1, mode);
3124 /* If we are clearing all the nonzero bits, the result is zero. */
3125 if ((nzop1 & nzop0) == 0
3126 && !side_effects_p (op0) && !side_effects_p (op1))
3127 return CONST0_RTX (mode);
3128 }
3129 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3130 && GET_MODE_CLASS (mode) != MODE_CC)
3131 return op0;
3132 /* A & (~A) -> 0 */
3133 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3134 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3135 && ! side_effects_p (op0)
3136 && GET_MODE_CLASS (mode) != MODE_CC)
3137 return CONST0_RTX (mode);
3138
3139 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3140 there are no nonzero bits of C outside of X's mode. */
3141 if ((GET_CODE (op0) == SIGN_EXTEND
3142 || GET_CODE (op0) == ZERO_EXTEND)
3143 && CONST_INT_P (trueop1)
3144 && HWI_COMPUTABLE_MODE_P (mode)
3145 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3146 & UINTVAL (trueop1)) == 0)
3147 {
3148 machine_mode imode = GET_MODE (XEXP (op0, 0));
3149 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3150 gen_int_mode (INTVAL (trueop1),
3151 imode));
3152 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3153 }
3154
3155 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3156 we might be able to further simplify the AND with X and potentially
3157 remove the truncation altogether. */
3158 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3159 {
3160 rtx x = XEXP (op0, 0);
3161 machine_mode xmode = GET_MODE (x);
3162 tem = simplify_gen_binary (AND, xmode, x,
3163 gen_int_mode (INTVAL (trueop1), xmode));
3164 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3165 }
3166
3167 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3168 if (GET_CODE (op0) == IOR
3169 && CONST_INT_P (trueop1)
3170 && CONST_INT_P (XEXP (op0, 1)))
3171 {
3172 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3173 return simplify_gen_binary (IOR, mode,
3174 simplify_gen_binary (AND, mode,
3175 XEXP (op0, 0), op1),
3176 gen_int_mode (tmp, mode));
3177 }
3178
3179 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3180 insn (and may simplify more). */
3181 if (GET_CODE (op0) == XOR
3182 && rtx_equal_p (XEXP (op0, 0), op1)
3183 && ! side_effects_p (op1))
3184 return simplify_gen_binary (AND, mode,
3185 simplify_gen_unary (NOT, mode,
3186 XEXP (op0, 1), mode),
3187 op1);
3188
3189 if (GET_CODE (op0) == XOR
3190 && rtx_equal_p (XEXP (op0, 1), op1)
3191 && ! side_effects_p (op1))
3192 return simplify_gen_binary (AND, mode,
3193 simplify_gen_unary (NOT, mode,
3194 XEXP (op0, 0), mode),
3195 op1);
3196
3197 /* Similarly for (~(A ^ B)) & A. */
3198 if (GET_CODE (op0) == NOT
3199 && GET_CODE (XEXP (op0, 0)) == XOR
3200 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3201 && ! side_effects_p (op1))
3202 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3203
3204 if (GET_CODE (op0) == NOT
3205 && GET_CODE (XEXP (op0, 0)) == XOR
3206 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3207 && ! side_effects_p (op1))
3208 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3209
3210 /* Convert (A | B) & A to A. */
3211 if (GET_CODE (op0) == IOR
3212 && (rtx_equal_p (XEXP (op0, 0), op1)
3213 || rtx_equal_p (XEXP (op0, 1), op1))
3214 && ! side_effects_p (XEXP (op0, 0))
3215 && ! side_effects_p (XEXP (op0, 1)))
3216 return op1;
3217
3218 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3219 ((A & N) + B) & M -> (A + B) & M
3220 Similarly if (N & M) == 0,
3221 ((A | N) + B) & M -> (A + B) & M
3222 and for - instead of + and/or ^ instead of |.
3223 Also, if (N & M) == 0, then
3224 (A +- N) & M -> A & M. */
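/* E.g. with M == 0xff and N == 0xffff, ((A & 0xffff) + B) & 0xff is
(A + B) & 0xff, since only the low 8 bits of A can affect the masked sum. */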
3225 if (CONST_INT_P (trueop1)
3226 && HWI_COMPUTABLE_MODE_P (mode)
3227 && ~UINTVAL (trueop1)
3228 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3229 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3230 {
3231 rtx pmop[2];
3232 int which;
3233
3234 pmop[0] = XEXP (op0, 0);
3235 pmop[1] = XEXP (op0, 1);
3236
3237 if (CONST_INT_P (pmop[1])
3238 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3239 return simplify_gen_binary (AND, mode, pmop[0], op1);
3240
3241 for (which = 0; which < 2; which++)
3242 {
3243 tem = pmop[which];
3244 switch (GET_CODE (tem))
3245 {
3246 case AND:
3247 if (CONST_INT_P (XEXP (tem, 1))
3248 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3249 == UINTVAL (trueop1))
3250 pmop[which] = XEXP (tem, 0);
3251 break;
3252 case IOR:
3253 case XOR:
3254 if (CONST_INT_P (XEXP (tem, 1))
3255 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3256 pmop[which] = XEXP (tem, 0);
3257 break;
3258 default:
3259 break;
3260 }
3261 }
3262
3263 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3264 {
3265 tem = simplify_gen_binary (GET_CODE (op0), mode,
3266 pmop[0], pmop[1]);
3267 return simplify_gen_binary (code, mode, tem, op1);
3268 }
3269 }
3270
3271 /* (and X (ior (not X) Y)) -> (and X Y) */
3272 if (GET_CODE (op1) == IOR
3273 && GET_CODE (XEXP (op1, 0)) == NOT
3274 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3275 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3276
3277 /* (and (ior (not X) Y) X) -> (and X Y) */
3278 if (GET_CODE (op0) == IOR
3279 && GET_CODE (XEXP (op0, 0)) == NOT
3280 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3281 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3282
3283 /* (and X (ior Y (not X))) -> (and X Y) */
3284 if (GET_CODE (op1) == IOR
3285 && GET_CODE (XEXP (op1, 1)) == NOT
3286 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3287 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3288
3289 /* (and (ior Y (not X)) X) -> (and X Y) */
3290 if (GET_CODE (op0) == IOR
3291 && GET_CODE (XEXP (op0, 1)) == NOT
3292 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3293 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3294
3295 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3296 if (tem)
3297 return tem;
3298
3299 tem = simplify_associative_operation (code, mode, op0, op1);
3300 if (tem)
3301 return tem;
3302 break;
3303
3304 case UDIV:
3305 /* 0/x is 0 (or x&0 if x has side-effects). */
3306 if (trueop0 == CONST0_RTX (mode)
3307 && !cfun->can_throw_non_call_exceptions)
3308 {
3309 if (side_effects_p (op1))
3310 return simplify_gen_binary (AND, mode, op1, trueop0);
3311 return trueop0;
3312 }
3313 /* x/1 is x. */
3314 if (trueop1 == CONST1_RTX (mode))
3315 {
3316 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3317 if (tem)
3318 return tem;
3319 }
3320 /* Convert divide by power of two into shift. */
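/* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)). */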
3321 if (CONST_INT_P (trueop1)
3322 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3323 return simplify_gen_binary (LSHIFTRT, mode, op0,
3324 gen_int_shift_amount (mode, val));
3325 break;
3326
3327 case DIV:
3328 /* Handle floating point and integers separately. */
3329 if (SCALAR_FLOAT_MODE_P (mode))
3330 {
3331 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3332 safe for modes with NaNs, since 0.0 / 0.0 will then be
3333 NaN rather than 0.0. Nor is it safe for modes with signed
3334 zeros, since dividing 0 by a negative number gives -0.0 */
3335 if (trueop0 == CONST0_RTX (mode)
3336 && !HONOR_NANS (mode)
3337 && !HONOR_SIGNED_ZEROS (mode)
3338 && ! side_effects_p (op1))
3339 return op0;
3340 /* x/1.0 is x. */
3341 if (trueop1 == CONST1_RTX (mode)
3342 && !HONOR_SNANS (mode))
3343 return op0;
3344
3345 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3346 && trueop1 != CONST0_RTX (mode))
3347 {
3348 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3349
3350 /* x/-1.0 is -x. */
3351 if (real_equal (d1, &dconstm1)
3352 && !HONOR_SNANS (mode))
3353 return simplify_gen_unary (NEG, mode, op0, mode);
3354
3355 /* Change FP division by a constant into multiplication.
3356 Only do this with -freciprocal-math. */
3357 if (flag_reciprocal_math
3358 && !real_equal (d1, &dconst0))
3359 {
3360 REAL_VALUE_TYPE d;
3361 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3362 tem = const_double_from_real_value (d, mode);
3363 return simplify_gen_binary (MULT, mode, op0, tem);
3364 }
3365 }
3366 }
3367 else if (SCALAR_INT_MODE_P (mode))
3368 {
3369 /* 0/x is 0 (or x&0 if x has side-effects). */
3370 if (trueop0 == CONST0_RTX (mode)
3371 && !cfun->can_throw_non_call_exceptions)
3372 {
3373 if (side_effects_p (op1))
3374 return simplify_gen_binary (AND, mode, op1, trueop0);
3375 return trueop0;
3376 }
3377 /* x/1 is x. */
3378 if (trueop1 == CONST1_RTX (mode))
3379 {
3380 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3381 if (tem)
3382 return tem;
3383 }
3384 /* x/-1 is -x. */
3385 if (trueop1 == constm1_rtx)
3386 {
3387 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3388 if (x)
3389 return simplify_gen_unary (NEG, mode, x, mode);
3390 }
3391 }
3392 break;
3393
3394 case UMOD:
3395 /* 0%x is 0 (or x&0 if x has side-effects). */
3396 if (trueop0 == CONST0_RTX (mode))
3397 {
3398 if (side_effects_p (op1))
3399 return simplify_gen_binary (AND, mode, op1, trueop0);
3400 return trueop0;
3401 }
3402 /* x%1 is 0 (or x&0 if x has side-effects). */
3403 if (trueop1 == CONST1_RTX (mode))
3404 {
3405 if (side_effects_p (op0))
3406 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3407 return CONST0_RTX (mode);
3408 }
3409 /* Implement modulus by power of two as AND. */
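/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)). */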
3410 if (CONST_INT_P (trueop1)
3411 && exact_log2 (UINTVAL (trueop1)) > 0)
3412 return simplify_gen_binary (AND, mode, op0,
3413 gen_int_mode (INTVAL (op1) - 1, mode));
3414 break;
3415
3416 case MOD:
3417 /* 0%x is 0 (or x&0 if x has side-effects). */
3418 if (trueop0 == CONST0_RTX (mode))
3419 {
3420 if (side_effects_p (op1))
3421 return simplify_gen_binary (AND, mode, op1, trueop0);
3422 return trueop0;
3423 }
3424 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3425 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3426 {
3427 if (side_effects_p (op0))
3428 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3429 return CONST0_RTX (mode);
3430 }
3431 break;
3432
3433 case ROTATERT:
3434 case ROTATE:
3435 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3436 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3437 bitsize - 1, use the other direction of rotate with an amount of
3438 1 .. bitsize / 2 - 1 instead. */
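/* For example, when the target supports both rotate directions,
   (rotate:SI x (const_int 31)) is rewritten as
   (rotatert:SI x (const_int 1)).  */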
3439 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3440 if (CONST_INT_P (trueop1)
3441 && IN_RANGE (INTVAL (trueop1),
3442 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3443 GET_MODE_UNIT_PRECISION (mode) - 1))
3444 {
3445 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3446 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3447 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3448 mode, op0, new_amount_rtx);
3449 }
3450 #endif
3451 /* FALLTHRU */
3452 case ASHIFTRT:
3453 if (trueop1 == CONST0_RTX (mode))
3454 return op0;
3455 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3456 return op0;
3457 /* Rotating ~0 always results in ~0. */
3458 if (CONST_INT_P (trueop0)
3459 && HWI_COMPUTABLE_MODE_P (mode)
3460 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3461 && ! side_effects_p (op1))
3462 return op0;
3463
3464 canonicalize_shift:
3465 /* Given:
3466 scalar modes M1, M2
3467 scalar constants c1, c2
3468 size (M2) > size (M1)
3469 c1 == size (M2) - size (M1)
3470 optimize:
3471 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3472 <low_part>)
3473 (const_int <c2>))
3474 to:
3475 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3476 <low_part>). */
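/* For example, with M1 = SImode and M2 = DImode,
   (lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI) (const_int 32)) <low_part>)
		(const_int 3))
   becomes (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) <low_part>).  */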
3477 if ((code == ASHIFTRT || code == LSHIFTRT)
3478 && is_a <scalar_int_mode> (mode, &int_mode)
3479 && SUBREG_P (op0)
3480 && CONST_INT_P (op1)
3481 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3482 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3483 &inner_mode)
3484 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3485 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3486 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3487 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3488 && subreg_lowpart_p (op0))
3489 {
3490 rtx tmp = gen_int_shift_amount
3491 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3492 tmp = simplify_gen_binary (code, inner_mode,
3493 XEXP (SUBREG_REG (op0), 0),
3494 tmp);
3495 return lowpart_subreg (int_mode, tmp, inner_mode);
3496 }
3497
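/* If the target truncates shift counts, reduce an out-of-range constant
   count to its truncated value now; e.g. for SImode a count of 33
   becomes 1.  */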
3498 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3499 {
3500 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3501 if (val != INTVAL (op1))
3502 return simplify_gen_binary (code, mode, op0,
3503 gen_int_shift_amount (mode, val));
3504 }
3505 break;
3506
3507 case ASHIFT:
3508 case SS_ASHIFT:
3509 case US_ASHIFT:
3510 if (trueop1 == CONST0_RTX (mode))
3511 return op0;
3512 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3513 return op0;
3514 goto canonicalize_shift;
3515
3516 case LSHIFTRT:
3517 if (trueop1 == CONST0_RTX (mode))
3518 return op0;
3519 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3520 return op0;
3521 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
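/* This holds when CLZ of zero is defined to be the mode precision: for
   a 32-bit inner mode, C must be 5, so the shift yields 1 only when
   (clz X) is 32, i.e. only when X is zero.  */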
3522 if (GET_CODE (op0) == CLZ
3523 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3524 && CONST_INT_P (trueop1)
3525 && STORE_FLAG_VALUE == 1
3526 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3527 {
3528 unsigned HOST_WIDE_INT zero_val = 0;
3529
3530 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3531 && zero_val == GET_MODE_PRECISION (inner_mode)
3532 && INTVAL (trueop1) == exact_log2 (zero_val))
3533 return simplify_gen_relational (EQ, mode, inner_mode,
3534 XEXP (op0, 0), const0_rtx);
3535 }
3536 goto canonicalize_shift;
3537
3538 case SMIN:
3539 if (HWI_COMPUTABLE_MODE_P (mode)
3540 && mode_signbit_p (mode, trueop1)
3541 && ! side_effects_p (op0))
3542 return op1;
3543 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3544 return op0;
3545 tem = simplify_associative_operation (code, mode, op0, op1);
3546 if (tem)
3547 return tem;
3548 break;
3549
3550 case SMAX:
3551 if (HWI_COMPUTABLE_MODE_P (mode)
3552 && CONST_INT_P (trueop1)
3553 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3554 && ! side_effects_p (op0))
3555 return op1;
3556 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3557 return op0;
3558 tem = simplify_associative_operation (code, mode, op0, op1);
3559 if (tem)
3560 return tem;
3561 break;
3562
3563 case UMIN:
3564 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3565 return op1;
3566 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3567 return op0;
3568 tem = simplify_associative_operation (code, mode, op0, op1);
3569 if (tem)
3570 return tem;
3571 break;
3572
3573 case UMAX:
3574 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3575 return op1;
3576 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3577 return op0;
3578 tem = simplify_associative_operation (code, mode, op0, op1);
3579 if (tem)
3580 return tem;
3581 break;
3582
3583 case SS_PLUS:
3584 case US_PLUS:
3585 case SS_MINUS:
3586 case US_MINUS:
3587 case SS_MULT:
3588 case US_MULT:
3589 case SS_DIV:
3590 case US_DIV:
3591 /* ??? There are simplifications that can be done. */
3592 return 0;
3593
3594 case VEC_SERIES:
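/* A VEC_SERIES with a zero step is equivalent to duplicating the base
   value across all elements.  */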
3595 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3596 return gen_vec_duplicate (mode, op0);
3597 if (CONSTANT_P (op0) && CONSTANT_P (op1))
3598 return gen_const_vec_series (mode, op0, op1);
3599 return 0;
3600
3601 case VEC_SELECT:
3602 if (!VECTOR_MODE_P (mode))
3603 {
3604 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3605 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3606 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3607 gcc_assert (XVECLEN (trueop1, 0) == 1);
3608 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3609
3610 if (vec_duplicate_p (trueop0, &elt0))
3611 return elt0;
3612
3613 if (GET_CODE (trueop0) == CONST_VECTOR)
3614 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3615 (trueop1, 0, 0)));
3616
3617 /* Extract a scalar element from a nested VEC_SELECT expression
3618 (with an optional nested VEC_CONCAT expression). Some targets
3619 (i386) extract a scalar element from a vector using a chain of
3620 nested VEC_SELECT expressions. When the input operand is a memory
3621 operand, this operation can be simplified to a simple scalar
3622 load from an appropriately offset memory address. */
3623 if (GET_CODE (trueop0) == VEC_SELECT)
3624 {
3625 rtx op0 = XEXP (trueop0, 0);
3626 rtx op1 = XEXP (trueop0, 1);
3627
3628 int n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3629
3630 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3631 int elem;
3632
3633 rtvec vec;
3634 rtx tmp_op, tmp;
3635
3636 gcc_assert (GET_CODE (op1) == PARALLEL);
3637 gcc_assert (i < n_elts);
3638
3639 /* Select the element pointed to by the nested selector. */
3640 elem = INTVAL (XVECEXP (op1, 0, i));
3641
3642 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3643 if (GET_CODE (op0) == VEC_CONCAT)
3644 {
3645 rtx op00 = XEXP (op0, 0);
3646 rtx op01 = XEXP (op0, 1);
3647
3648 machine_mode mode00, mode01;
3649 int n_elts00, n_elts01;
3650
3651 mode00 = GET_MODE (op00);
3652 mode01 = GET_MODE (op01);
3653
3654 /* Find out number of elements of each operand. */
3655 n_elts00 = GET_MODE_NUNITS (mode00);
3656 n_elts01 = GET_MODE_NUNITS (mode01);
3657
3658 gcc_assert (n_elts == n_elts00 + n_elts01);
3659
3660 /* Select correct operand of VEC_CONCAT
3661 and adjust selector. */
3662 if (elem < n_elts01)
3663 tmp_op = op00;
3664 else
3665 {
3666 tmp_op = op01;
3667 elem -= n_elts00;
3668 }
3669 }
3670 else
3671 tmp_op = op0;
3672
3673 vec = rtvec_alloc (1);
3674 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3675
3676 tmp = gen_rtx_fmt_ee (code, mode,
3677 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3678 return tmp;
3679 }
3680 }
3681 else
3682 {
3683 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3684 gcc_assert (GET_MODE_INNER (mode)
3685 == GET_MODE_INNER (GET_MODE (trueop0)));
3686 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3687
3688 if (vec_duplicate_p (trueop0, &elt0))
3689 /* It doesn't matter which elements are selected by trueop1,
3690 because they are all the same. */
3691 return gen_vec_duplicate (mode, elt0);
3692
3693 if (GET_CODE (trueop0) == CONST_VECTOR)
3694 {
3695 int elt_size = GET_MODE_UNIT_SIZE (mode);
3696 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3697 rtvec v = rtvec_alloc (n_elts);
3698 unsigned int i;
3699
3700 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3701 for (i = 0; i < n_elts; i++)
3702 {
3703 rtx x = XVECEXP (trueop1, 0, i);
3704
3705 gcc_assert (CONST_INT_P (x));
3706 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3707 INTVAL (x));
3708 }
3709
3710 return gen_rtx_CONST_VECTOR (mode, v);
3711 }
3712
3713 /* Recognize the identity. */
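/* That is, a VEC_SELECT whose selector is { 0, 1, ..., n-1 } and whose
   mode matches its operand simply returns the operand.  */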
3714 if (GET_MODE (trueop0) == mode)
3715 {
3716 bool maybe_ident = true;
3717 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3718 {
3719 rtx j = XVECEXP (trueop1, 0, i);
3720 if (!CONST_INT_P (j) || INTVAL (j) != i)
3721 {
3722 maybe_ident = false;
3723 break;
3724 }
3725 }
3726 if (maybe_ident)
3727 return trueop0;
3728 }
3729
3730 /* If we build {a,b} then permute it, build the result directly. */
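/* For example, (vec_select:V2DF (vec_concat:V4DF (vec_concat:V2DF a b)
   (vec_concat:V2DF c d)) (parallel [(const_int 3) (const_int 0)]))
   becomes (vec_concat:V2DF d a).  */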
3731 if (XVECLEN (trueop1, 0) == 2
3732 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3733 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3734 && GET_CODE (trueop0) == VEC_CONCAT
3735 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3736 && GET_MODE (XEXP (trueop0, 0)) == mode
3737 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3738 && GET_MODE (XEXP (trueop0, 1)) == mode)
3739 {
3740 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3741 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3742 rtx subop0, subop1;
3743
3744 gcc_assert (i0 < 4 && i1 < 4);
3745 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3746 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3747
3748 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3749 }
3750
3751 if (XVECLEN (trueop1, 0) == 2
3752 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3753 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3754 && GET_CODE (trueop0) == VEC_CONCAT
3755 && GET_MODE (trueop0) == mode)
3756 {
3757 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3758 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3759 rtx subop0, subop1;
3760
3761 gcc_assert (i0 < 2 && i1 < 2);
3762 subop0 = XEXP (trueop0, i0);
3763 subop1 = XEXP (trueop0, i1);
3764
3765 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3766 }
3767
3768 /* If we select one half of a vec_concat, return that. */
3769 if (GET_CODE (trueop0) == VEC_CONCAT
3770 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3771 {
3772 rtx subop0 = XEXP (trueop0, 0);
3773 rtx subop1 = XEXP (trueop0, 1);
3774 machine_mode mode0 = GET_MODE (subop0);
3775 machine_mode mode1 = GET_MODE (subop1);
3776 int l0 = GET_MODE_NUNITS (mode0);
3777 int l1 = GET_MODE_NUNITS (mode1);
3778 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3779 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3780 {
3781 bool success = true;
3782 for (int i = 1; i < l0; ++i)
3783 {
3784 rtx j = XVECEXP (trueop1, 0, i);
3785 if (!CONST_INT_P (j) || INTVAL (j) != i)
3786 {
3787 success = false;
3788 break;
3789 }
3790 }
3791 if (success)
3792 return subop0;
3793 }
3794 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3795 {
3796 bool success = true;
3797 for (int i = 1; i < l1; ++i)
3798 {
3799 rtx j = XVECEXP (trueop1, 0, i);
3800 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3801 {
3802 success = false;
3803 break;
3804 }
3805 }
3806 if (success)
3807 return subop1;
3808 }
3809 }
3810 }
3811
3812 if (XVECLEN (trueop1, 0) == 1
3813 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3814 && GET_CODE (trueop0) == VEC_CONCAT)
3815 {
3816 rtx vec = trueop0;
3817 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3818
3819 /* Try to find the element in the VEC_CONCAT. */
3820 while (GET_MODE (vec) != mode
3821 && GET_CODE (vec) == VEC_CONCAT)
3822 {
3823 HOST_WIDE_INT vec_size;
3824
3825 if (CONST_INT_P (XEXP (vec, 0)))
3826 {
3827 /* vec_concat of two const_ints doesn't make sense with
3828 respect to modes. */
3829 if (CONST_INT_P (XEXP (vec, 1)))
3830 return 0;
3831
3832 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3833 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3834 }
3835 else
3836 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3837
3838 if (offset < vec_size)
3839 vec = XEXP (vec, 0);
3840 else
3841 {
3842 offset -= vec_size;
3843 vec = XEXP (vec, 1);
3844 }
3845 vec = avoid_constant_pool_reference (vec);
3846 }
3847
3848 if (GET_MODE (vec) == mode)
3849 return vec;
3850 }
3851
3852 /* If we select elements in a vec_merge that all come from the same
3853 operand, select from that operand directly. */
3854 if (GET_CODE (op0) == VEC_MERGE)
3855 {
3856 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3857 if (CONST_INT_P (trueop02))
3858 {
3859 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3860 bool all_operand0 = true;
3861 bool all_operand1 = true;
3862 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3863 {
3864 rtx j = XVECEXP (trueop1, 0, i);
3865 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3866 all_operand1 = false;
3867 else
3868 all_operand0 = false;
3869 }
3870 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3871 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3872 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3873 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3874 }
3875 }
3876
3877 /* If we have two nested selects that are inverses of each
3878 other, replace them with the source operand. */
3879 if (GET_CODE (trueop0) == VEC_SELECT
3880 && GET_MODE (XEXP (trueop0, 0)) == mode)
3881 {
3882 rtx op0_subop1 = XEXP (trueop0, 1);
3883 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3884 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3885
3886 /* Apply the outer ordering vector to the inner one. (The inner
3887 ordering vector is expressly permitted to be of a different
3888 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3889 then the two VEC_SELECTs cancel. */
3890 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3891 {
3892 rtx x = XVECEXP (trueop1, 0, i);
3893 if (!CONST_INT_P (x))
3894 return 0;
3895 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3896 if (!CONST_INT_P (y) || i != INTVAL (y))
3897 return 0;
3898 }
3899 return XEXP (trueop0, 0);
3900 }
3901
3902 return 0;
3903 case VEC_CONCAT:
3904 {
3905 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3906 ? GET_MODE (trueop0)
3907 : GET_MODE_INNER (mode));
3908 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3909 ? GET_MODE (trueop1)
3910 : GET_MODE_INNER (mode));
3911
3912 gcc_assert (VECTOR_MODE_P (mode));
3913 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3914 == GET_MODE_SIZE (mode));
3915
3916 if (VECTOR_MODE_P (op0_mode))
3917 gcc_assert (GET_MODE_INNER (mode)
3918 == GET_MODE_INNER (op0_mode));
3919 else
3920 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3921
3922 if (VECTOR_MODE_P (op1_mode))
3923 gcc_assert (GET_MODE_INNER (mode)
3924 == GET_MODE_INNER (op1_mode));
3925 else
3926 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3927
3928 if ((GET_CODE (trueop0) == CONST_VECTOR
3929 || CONST_SCALAR_INT_P (trueop0)
3930 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3931 && (GET_CODE (trueop1) == CONST_VECTOR
3932 || CONST_SCALAR_INT_P (trueop1)
3933 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3934 {
3935 unsigned n_elts = GET_MODE_NUNITS (mode);
3936 unsigned in_n_elts = GET_MODE_NUNITS (op0_mode);
3937 rtvec v = rtvec_alloc (n_elts);
3938 unsigned int i;
3939 for (i = 0; i < n_elts; i++)
3940 {
3941 if (i < in_n_elts)
3942 {
3943 if (!VECTOR_MODE_P (op0_mode))
3944 RTVEC_ELT (v, i) = trueop0;
3945 else
3946 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3947 }
3948 else
3949 {
3950 if (!VECTOR_MODE_P (op1_mode))
3951 RTVEC_ELT (v, i) = trueop1;
3952 else
3953 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3954 i - in_n_elts);
3955 }
3956 }
3957
3958 return gen_rtx_CONST_VECTOR (mode, v);
3959 }
3960
3961 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3962 Restrict the transformation to avoid generating a VEC_SELECT with a
3963 mode unrelated to its operand. */
3964 if (GET_CODE (trueop0) == VEC_SELECT
3965 && GET_CODE (trueop1) == VEC_SELECT
3966 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3967 && GET_MODE (XEXP (trueop0, 0)) == mode)
3968 {
3969 rtx par0 = XEXP (trueop0, 1);
3970 rtx par1 = XEXP (trueop1, 1);
3971 int len0 = XVECLEN (par0, 0);
3972 int len1 = XVECLEN (par1, 0);
3973 rtvec vec = rtvec_alloc (len0 + len1);
3974 for (int i = 0; i < len0; i++)
3975 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3976 for (int i = 0; i < len1; i++)
3977 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3978 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3979 gen_rtx_PARALLEL (VOIDmode, vec));
3980 }
3981 }
3982 return 0;
3983
3984 default:
3985 gcc_unreachable ();
3986 }
3987
3988 if (mode == GET_MODE (op0)
3989 && mode == GET_MODE (op1)
3990 && vec_duplicate_p (op0, &elt0)
3991 && vec_duplicate_p (op1, &elt1))
3992 {
3993 /* Try applying the operator to ELT and see if that simplifies.
3994 We can duplicate the result if so.
3995
3996 The reason we don't use simplify_gen_binary is that it isn't
3997 necessarily a win to convert things like:
3998
3999 (plus:V (vec_duplicate:V (reg:S R1))
4000 (vec_duplicate:V (reg:S R2)))
4001
4002 to:
4003
4004 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4005
4006 The first might be done entirely in vector registers while the
4007 second might need a move between register files. */
4008 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4009 elt0, elt1);
4010 if (tem)
4011 return gen_vec_duplicate (mode, tem);
4012 }
4013
4014 return 0;
4015 }
4016
4017 rtx
4018 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4019 rtx op0, rtx op1)
4020 {
4021 if (VECTOR_MODE_P (mode)
4022 && code != VEC_CONCAT
4023 && GET_CODE (op0) == CONST_VECTOR
4024 && GET_CODE (op1) == CONST_VECTOR)
4025 {
4026 unsigned int n_elts = CONST_VECTOR_NUNITS (op0);
4027 gcc_assert (n_elts == (unsigned int) CONST_VECTOR_NUNITS (op1));
4028 gcc_assert (n_elts == GET_MODE_NUNITS (mode));
4029 rtvec v = rtvec_alloc (n_elts);
4030 unsigned int i;
4031
4032 for (i = 0; i < n_elts; i++)
4033 {
4034 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4035 CONST_VECTOR_ELT (op0, i),
4036 CONST_VECTOR_ELT (op1, i));
4037 if (!x)
4038 return 0;
4039 RTVEC_ELT (v, i) = x;
4040 }
4041
4042 return gen_rtx_CONST_VECTOR (mode, v);
4043 }
4044
4045 if (VECTOR_MODE_P (mode)
4046 && code == VEC_CONCAT
4047 && (CONST_SCALAR_INT_P (op0)
4048 || GET_CODE (op0) == CONST_FIXED
4049 || CONST_DOUBLE_AS_FLOAT_P (op0))
4050 && (CONST_SCALAR_INT_P (op1)
4051 || CONST_DOUBLE_AS_FLOAT_P (op1)
4052 || GET_CODE (op1) == CONST_FIXED))
4053 {
4054 unsigned n_elts = GET_MODE_NUNITS (mode);
4055 rtvec v = rtvec_alloc (n_elts);
4056
4057 gcc_assert (n_elts >= 2);
4058 if (n_elts == 2)
4059 {
4060 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4061 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4062
4063 RTVEC_ELT (v, 0) = op0;
4064 RTVEC_ELT (v, 1) = op1;
4065 }
4066 else
4067 {
4068 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
4069 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
4070 unsigned i;
4071
4072 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4073 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4074 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4075
4076 for (i = 0; i < op0_n_elts; ++i)
4077 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
4078 for (i = 0; i < op1_n_elts; ++i)
4079 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
4080 }
4081
4082 return gen_rtx_CONST_VECTOR (mode, v);
4083 }
4084
4085 if (SCALAR_FLOAT_MODE_P (mode)
4086 && CONST_DOUBLE_AS_FLOAT_P (op0)
4087 && CONST_DOUBLE_AS_FLOAT_P (op1)
4088 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4089 {
4090 if (code == AND
4091 || code == IOR
4092 || code == XOR)
4093 {
4094 long tmp0[4];
4095 long tmp1[4];
4096 REAL_VALUE_TYPE r;
4097 int i;
4098
4099 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4100 GET_MODE (op0));
4101 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4102 GET_MODE (op1));
4103 for (i = 0; i < 4; i++)
4104 {
4105 switch (code)
4106 {
4107 case AND:
4108 tmp0[i] &= tmp1[i];
4109 break;
4110 case IOR:
4111 tmp0[i] |= tmp1[i];
4112 break;
4113 case XOR:
4114 tmp0[i] ^= tmp1[i];
4115 break;
4116 default:
4117 gcc_unreachable ();
4118 }
4119 }
4120 real_from_target (&r, tmp0, mode);
4121 return const_double_from_real_value (r, mode);
4122 }
4123 else
4124 {
4125 REAL_VALUE_TYPE f0, f1, value, result;
4126 const REAL_VALUE_TYPE *opr0, *opr1;
4127 bool inexact;
4128
4129 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4130 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4131
4132 if (HONOR_SNANS (mode)
4133 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4134 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4135 return 0;
4136
4137 real_convert (&f0, mode, opr0);
4138 real_convert (&f1, mode, opr1);
4139
4140 if (code == DIV
4141 && real_equal (&f1, &dconst0)
4142 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4143 return 0;
4144
4145 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4146 && flag_trapping_math
4147 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4148 {
4149 int s0 = REAL_VALUE_NEGATIVE (f0);
4150 int s1 = REAL_VALUE_NEGATIVE (f1);
4151
4152 switch (code)
4153 {
4154 case PLUS:
4155 /* Inf + -Inf = NaN plus exception. */
4156 if (s0 != s1)
4157 return 0;
4158 break;
4159 case MINUS:
4160 /* Inf - Inf = NaN plus exception. */
4161 if (s0 == s1)
4162 return 0;
4163 break;
4164 case DIV:
4165 /* Inf / Inf = NaN plus exception. */
4166 return 0;
4167 default:
4168 break;
4169 }
4170 }
4171
4172 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4173 && flag_trapping_math
4174 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4175 || (REAL_VALUE_ISINF (f1)
4176 && real_equal (&f0, &dconst0))))
4177 /* Inf * 0 = NaN plus exception. */
4178 return 0;
4179
4180 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4181 &f0, &f1);
4182 real_convert (&result, mode, &value);
4183
4184 /* Don't constant fold this floating point operation if
4185 the result has overflowed and flag_trapping_math is set. */
4186
4187 if (flag_trapping_math
4188 && MODE_HAS_INFINITIES (mode)
4189 && REAL_VALUE_ISINF (result)
4190 && !REAL_VALUE_ISINF (f0)
4191 && !REAL_VALUE_ISINF (f1))
4192 /* Overflow plus exception. */
4193 return 0;
4194
4195 /* Don't constant fold this floating point operation if the
4196 result may depend upon the run-time rounding mode and
4197 flag_rounding_math is set, or if GCC's software emulation
4198 is unable to accurately represent the result. */
4199
4200 if ((flag_rounding_math
4201 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4202 && (inexact || !real_identical (&result, &value)))
4203 return NULL_RTX;
4204
4205 return const_double_from_real_value (result, mode);
4206 }
4207 }
4208
4209 /* We can fold some multi-word operations. */
4210 scalar_int_mode int_mode;
4211 if (is_a <scalar_int_mode> (mode, &int_mode)
4212 && CONST_SCALAR_INT_P (op0)
4213 && CONST_SCALAR_INT_P (op1))
4214 {
4215 wide_int result;
4216 bool overflow;
4217 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4218 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4219
4220 #if TARGET_SUPPORTS_WIDE_INT == 0
4221 /* This assert keeps the simplification from producing a result
4222 that cannot be represented in a CONST_DOUBLE but a lot of
4223 upstream callers expect that this function never fails to
4224 simplify something, so if you added this to the test
4225 above, the code would die later anyway. If this assert
4226 happens, you just need to make the port support wide int. */
4227 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4228 #endif
4229 switch (code)
4230 {
4231 case MINUS:
4232 result = wi::sub (pop0, pop1);
4233 break;
4234
4235 case PLUS:
4236 result = wi::add (pop0, pop1);
4237 break;
4238
4239 case MULT:
4240 result = wi::mul (pop0, pop1);
4241 break;
4242
4243 case DIV:
4244 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4245 if (overflow)
4246 return NULL_RTX;
4247 break;
4248
4249 case MOD:
4250 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4251 if (overflow)
4252 return NULL_RTX;
4253 break;
4254
4255 case UDIV:
4256 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4257 if (overflow)
4258 return NULL_RTX;
4259 break;
4260
4261 case UMOD:
4262 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4263 if (overflow)
4264 return NULL_RTX;
4265 break;
4266
4267 case AND:
4268 result = wi::bit_and (pop0, pop1);
4269 break;
4270
4271 case IOR:
4272 result = wi::bit_or (pop0, pop1);
4273 break;
4274
4275 case XOR:
4276 result = wi::bit_xor (pop0, pop1);
4277 break;
4278
4279 case SMIN:
4280 result = wi::smin (pop0, pop1);
4281 break;
4282
4283 case SMAX:
4284 result = wi::smax (pop0, pop1);
4285 break;
4286
4287 case UMIN:
4288 result = wi::umin (pop0, pop1);
4289 break;
4290
4291 case UMAX:
4292 result = wi::umax (pop0, pop1);
4293 break;
4294
4295 case LSHIFTRT:
4296 case ASHIFTRT:
4297 case ASHIFT:
4298 {
4299 wide_int wop1 = pop1;
4300 if (SHIFT_COUNT_TRUNCATED)
4301 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4302 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4303 return NULL_RTX;
4304
4305 switch (code)
4306 {
4307 case LSHIFTRT:
4308 result = wi::lrshift (pop0, wop1);
4309 break;
4310
4311 case ASHIFTRT:
4312 result = wi::arshift (pop0, wop1);
4313 break;
4314
4315 case ASHIFT:
4316 result = wi::lshift (pop0, wop1);
4317 break;
4318
4319 default:
4320 gcc_unreachable ();
4321 }
4322 break;
4323 }
4324 case ROTATE:
4325 case ROTATERT:
4326 {
4327 if (wi::neg_p (pop1))
4328 return NULL_RTX;
4329
4330 switch (code)
4331 {
4332 case ROTATE:
4333 result = wi::lrotate (pop0, pop1);
4334 break;
4335
4336 case ROTATERT:
4337 result = wi::rrotate (pop0, pop1);
4338 break;
4339
4340 default:
4341 gcc_unreachable ();
4342 }
4343 break;
4344 }
4345 default:
4346 return NULL_RTX;
4347 }
4348 return immed_wide_int_const (result, int_mode);
4349 }
4350
4351 /* Handle polynomial integers. */
4352 if (NUM_POLY_INT_COEFFS > 1
4353 && is_a <scalar_int_mode> (mode, &int_mode)
4354 && poly_int_rtx_p (op0)
4355 && poly_int_rtx_p (op1))
4356 {
4357 poly_wide_int result;
4358 switch (code)
4359 {
4360 case PLUS:
4361 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4362 break;
4363
4364 case MINUS:
4365 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4366 break;
4367
4368 case MULT:
4369 if (CONST_SCALAR_INT_P (op1))
4370 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4371 else
4372 return NULL_RTX;
4373 break;
4374
4375 case ASHIFT:
4376 if (CONST_SCALAR_INT_P (op1))
4377 {
4378 wide_int shift = rtx_mode_t (op1, mode);
4379 if (SHIFT_COUNT_TRUNCATED)
4380 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4381 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4382 return NULL_RTX;
4383 result = wi::to_poly_wide (op0, mode) << shift;
4384 }
4385 else
4386 return NULL_RTX;
4387 break;
4388
4389 case IOR:
4390 if (!CONST_SCALAR_INT_P (op1)
4391 || !can_ior_p (wi::to_poly_wide (op0, mode),
4392 rtx_mode_t (op1, mode), &result))
4393 return NULL_RTX;
4394 break;
4395
4396 default:
4397 return NULL_RTX;
4398 }
4399 return immed_wide_int_const (result, int_mode);
4400 }
4401
4402 return NULL_RTX;
4403 }
4404
4405
4406 \f
4407 /* Return a positive integer if X should sort after Y. The value
4408 returned is 1 if and only if X and Y are both regs. */
4409
4410 static int
4411 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4412 {
4413 int result;
4414
4415 result = (commutative_operand_precedence (y)
4416 - commutative_operand_precedence (x));
4417 if (result)
4418 return result + result;
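/* Doubling keeps the value even, so a result of 1 can only come from
   the REG test below; the caller uses that to tell pure register
   re-ordering apart from a real canonicalization.  */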
4419
4420 /* Group together equal REGs to do more simplification. */
4421 if (REG_P (x) && REG_P (y))
4422 return REGNO (x) > REGNO (y);
4423
4424 return 0;
4425 }
4426
4427 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4428 operands may be another PLUS or MINUS.
4429
4430 Rather than test for specific cases, we do this by a brute-force method
4431 and do all possible simplifications until no more changes occur. Then
4432 we rebuild the operation.
4433
4434 May return NULL_RTX when no changes were made. */
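/* For example, simplifying the PLUS of (plus (reg R1) (const_int 4))
   and (minus (reg R2) (reg R1)) flattens the input into the operand
   list R1, R2, +4, -R1; the matching R1 terms cancel and the result
   is rebuilt as (plus (reg R2) (const_int 4)).  */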
4435
4436 static rtx
4437 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4438 rtx op1)
4439 {
4440 struct simplify_plus_minus_op_data
4441 {
4442 rtx op;
4443 short neg;
4444 } ops[16];
4445 rtx result, tem;
4446 int n_ops = 2;
4447 int changed, n_constants, canonicalized = 0;
4448 int i, j;
4449
4450 memset (ops, 0, sizeof ops);
4451
4452 /* Set up the two operands and then expand them until nothing has been
4453 changed. If we run out of room in our array, give up; this should
4454 almost never happen. */
4455
4456 ops[0].op = op0;
4457 ops[0].neg = 0;
4458 ops[1].op = op1;
4459 ops[1].neg = (code == MINUS);
4460
4461 do
4462 {
4463 changed = 0;
4464 n_constants = 0;
4465
4466 for (i = 0; i < n_ops; i++)
4467 {
4468 rtx this_op = ops[i].op;
4469 int this_neg = ops[i].neg;
4470 enum rtx_code this_code = GET_CODE (this_op);
4471
4472 switch (this_code)
4473 {
4474 case PLUS:
4475 case MINUS:
4476 if (n_ops == ARRAY_SIZE (ops))
4477 return NULL_RTX;
4478
4479 ops[n_ops].op = XEXP (this_op, 1);
4480 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4481 n_ops++;
4482
4483 ops[i].op = XEXP (this_op, 0);
4484 changed = 1;
4485 /* If this operand was negated then we will potentially
4486 canonicalize the expression. Similarly if we don't
4487 place the operands adjacent we're re-ordering the
4488 expression and thus might be performing a
4489 canonicalization. Ignore register re-ordering.
4490 ??? It might be better to shuffle the ops array here,
4491 but then (plus (plus (A, B), plus (C, D))) wouldn't
4492 be seen as non-canonical. */
4493 if (this_neg
4494 || (i != n_ops - 2
4495 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4496 canonicalized = 1;
4497 break;
4498
4499 case NEG:
4500 ops[i].op = XEXP (this_op, 0);
4501 ops[i].neg = ! this_neg;
4502 changed = 1;
4503 canonicalized = 1;
4504 break;
4505
4506 case CONST:
4507 if (n_ops != ARRAY_SIZE (ops)
4508 && GET_CODE (XEXP (this_op, 0)) == PLUS
4509 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4510 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4511 {
4512 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4513 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4514 ops[n_ops].neg = this_neg;
4515 n_ops++;
4516 changed = 1;
4517 canonicalized = 1;
4518 }
4519 break;
4520
4521 case NOT:
4522 /* ~a -> (-a - 1) */
4523 if (n_ops != ARRAY_SIZE (ops))
4524 {
4525 ops[n_ops].op = CONSTM1_RTX (mode);
4526 ops[n_ops++].neg = this_neg;
4527 ops[i].op = XEXP (this_op, 0);
4528 ops[i].neg = !this_neg;
4529 changed = 1;
4530 canonicalized = 1;
4531 }
4532 break;
4533
4534 case CONST_INT:
4535 n_constants++;
4536 if (this_neg)
4537 {
4538 ops[i].op = neg_const_int (mode, this_op);
4539 ops[i].neg = 0;
4540 changed = 1;
4541 canonicalized = 1;
4542 }
4543 break;
4544
4545 default:
4546 break;
4547 }
4548 }
4549 }
4550 while (changed);
4551
4552 if (n_constants > 1)
4553 canonicalized = 1;
4554
4555 gcc_assert (n_ops >= 2);
4556
4557 /* If we only have two operands, we can avoid the loops. */
4558 if (n_ops == 2)
4559 {
4560 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4561 rtx lhs, rhs;
4562
4563 /* Get the two operands. Be careful with the order, especially for
4564 the cases where code == MINUS. */
4565 if (ops[0].neg && ops[1].neg)
4566 {
4567 lhs = gen_rtx_NEG (mode, ops[0].op);
4568 rhs = ops[1].op;
4569 }
4570 else if (ops[0].neg)
4571 {
4572 lhs = ops[1].op;
4573 rhs = ops[0].op;
4574 }
4575 else
4576 {
4577 lhs = ops[0].op;
4578 rhs = ops[1].op;
4579 }
4580
4581 return simplify_const_binary_operation (code, mode, lhs, rhs);
4582 }
4583
4584 /* Now simplify each pair of operands until nothing changes. */
4585 while (1)
4586 {
4587 /* Insertion sort is good enough for a small array. */
4588 for (i = 1; i < n_ops; i++)
4589 {
4590 struct simplify_plus_minus_op_data save;
4591 int cmp;
4592
4593 j = i - 1;
4594 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4595 if (cmp <= 0)
4596 continue;
4597 /* Just swapping registers doesn't count as canonicalization. */
4598 if (cmp != 1)
4599 canonicalized = 1;
4600
4601 save = ops[i];
4602 do
4603 ops[j + 1] = ops[j];
4604 while (j--
4605 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4606 ops[j + 1] = save;
4607 }
4608
4609 changed = 0;
4610 for (i = n_ops - 1; i > 0; i--)
4611 for (j = i - 1; j >= 0; j--)
4612 {
4613 rtx lhs = ops[j].op, rhs = ops[i].op;
4614 int lneg = ops[j].neg, rneg = ops[i].neg;
4615
4616 if (lhs != 0 && rhs != 0)
4617 {
4618 enum rtx_code ncode = PLUS;
4619
4620 if (lneg != rneg)
4621 {
4622 ncode = MINUS;
4623 if (lneg)
4624 std::swap (lhs, rhs);
4625 }
4626 else if (swap_commutative_operands_p (lhs, rhs))
4627 std::swap (lhs, rhs);
4628
4629 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4630 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4631 {
4632 rtx tem_lhs, tem_rhs;
4633
4634 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4635 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4636 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4637 tem_rhs);
4638
4639 if (tem && !CONSTANT_P (tem))
4640 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4641 }
4642 else
4643 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4644
4645 if (tem)
4646 {
4647 /* Reject "simplifications" that just wrap the two
4648 arguments in a CONST. Failure to do so can result
4649 in infinite recursion with simplify_binary_operation
4650 when it calls us to simplify CONST operations.
4651 Also, if we find such a simplification, don't try
4652 any more combinations with this rhs: We must have
4652 something like symbol+offset, i.e. one of the
4654 trivial CONST expressions we handle later. */
4655 if (GET_CODE (tem) == CONST
4656 && GET_CODE (XEXP (tem, 0)) == ncode
4657 && XEXP (XEXP (tem, 0), 0) == lhs
4658 && XEXP (XEXP (tem, 0), 1) == rhs)
4659 break;
4660 lneg &= rneg;
4661 if (GET_CODE (tem) == NEG)
4662 tem = XEXP (tem, 0), lneg = !lneg;
4663 if (CONST_INT_P (tem) && lneg)
4664 tem = neg_const_int (mode, tem), lneg = 0;
4665
4666 ops[i].op = tem;
4667 ops[i].neg = lneg;
4668 ops[j].op = NULL_RTX;
4669 changed = 1;
4670 canonicalized = 1;
4671 }
4672 }
4673 }
4674
4675 if (!changed)
4676 break;
4677
4678 /* Pack all the operands to the lower-numbered entries. */
4679 for (i = 0, j = 0; j < n_ops; j++)
4680 if (ops[j].op)
4681 {
4682 ops[i] = ops[j];
4683 i++;
4684 }
4685 n_ops = i;
4686 }
4687
4688 /* If nothing changed, check that rematerialization of rtl instructions
4689 is still required. */
4690 if (!canonicalized)
4691 {
4692 /* Perform rematerialization only if all operands are registers and
4693 all operations are PLUS. */
4694 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4695 around rs6000 and how it uses the CA register. See PR67145. */
4696 for (i = 0; i < n_ops; i++)
4697 if (ops[i].neg
4698 || !REG_P (ops[i].op)
4699 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4700 && fixed_regs[REGNO (ops[i].op)]
4701 && !global_regs[REGNO (ops[i].op)]
4702 && ops[i].op != frame_pointer_rtx
4703 && ops[i].op != arg_pointer_rtx
4704 && ops[i].op != stack_pointer_rtx))
4705 return NULL_RTX;
4706 goto gen_result;
4707 }
4708
4709 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4710 if (n_ops == 2
4711 && CONST_INT_P (ops[1].op)
4712 && CONSTANT_P (ops[0].op)
4713 && ops[0].neg)
4714 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4715
4716 /* We suppressed creation of trivial CONST expressions in the
4717 combination loop to avoid recursion. Create one manually now.
4718 The combination loop should have ensured that there is exactly
4719 one CONST_INT, and the sort will have ensured that it is last
4720 in the array and that any other constant will be next-to-last. */
4721
4722 if (n_ops > 1
4723 && CONST_INT_P (ops[n_ops - 1].op)
4724 && CONSTANT_P (ops[n_ops - 2].op))
4725 {
4726 rtx value = ops[n_ops - 1].op;
4727 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4728 value = neg_const_int (mode, value);
4729 if (CONST_INT_P (value))
4730 {
4731 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4732 INTVAL (value));
4733 n_ops--;
4734 }
4735 }
4736
4737 /* Put a non-negated operand first, if possible. */
4738
4739 for (i = 0; i < n_ops && ops[i].neg; i++)
4740 continue;
4741 if (i == n_ops)
4742 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4743 else if (i != 0)
4744 {
4745 tem = ops[0].op;
4746 ops[0] = ops[i];
4747 ops[i].op = tem;
4748 ops[i].neg = 1;
4749 }
4750
4751 /* Now make the result by performing the requested operations. */
4752 gen_result:
4753 result = ops[0].op;
4754 for (i = 1; i < n_ops; i++)
4755 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4756 mode, result, ops[i].op);
4757
4758 return result;
4759 }
4760
4761 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4762 static bool
4763 plus_minus_operand_p (const_rtx x)
4764 {
4765 return GET_CODE (x) == PLUS
4766 || GET_CODE (x) == MINUS
4767 || (GET_CODE (x) == CONST
4768 && GET_CODE (XEXP (x, 0)) == PLUS
4769 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4770 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4771 }
4772
4773 /* Like simplify_binary_operation except used for relational operators.
4774 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4775 not also be VOIDmode.
4776
4777 CMP_MODE specifies the mode in which the comparison is done, so it is
4778 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4779 the operands or, if both are VOIDmode, the operands are compared in
4780 "infinite precision". */
4781 rtx
4782 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4783 machine_mode cmp_mode, rtx op0, rtx op1)
4784 {
4785 rtx tem, trueop0, trueop1;
4786
4787 if (cmp_mode == VOIDmode)
4788 cmp_mode = GET_MODE (op0);
4789 if (cmp_mode == VOIDmode)
4790 cmp_mode = GET_MODE (op1);
4791
4792 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4793 if (tem)
4794 {
4795 if (SCALAR_FLOAT_MODE_P (mode))
4796 {
4797 if (tem == const0_rtx)
4798 return CONST0_RTX (mode);
4799 #ifdef FLOAT_STORE_FLAG_VALUE
4800 {
4801 REAL_VALUE_TYPE val;
4802 val = FLOAT_STORE_FLAG_VALUE (mode);
4803 return const_double_from_real_value (val, mode);
4804 }
4805 #else
4806 return NULL_RTX;
4807 #endif
4808 }
4809 if (VECTOR_MODE_P (mode))
4810 {
4811 if (tem == const0_rtx)
4812 return CONST0_RTX (mode);
4813 #ifdef VECTOR_STORE_FLAG_VALUE
4814 {
4815 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4816 if (val == NULL_RTX)
4817 return NULL_RTX;
4818 if (val == const1_rtx)
4819 return CONST1_RTX (mode);
4820
4821 return gen_const_vec_duplicate (mode, val);
4822 }
4823 #else
4824 return NULL_RTX;
4825 #endif
4826 }
4827
4828 return tem;
4829 }
4830
4831 /* For the following tests, ensure const0_rtx is op1. */
4832 if (swap_commutative_operands_p (op0, op1)
4833 || (op0 == const0_rtx && op1 != const0_rtx))
4834 std::swap (op0, op1), code = swap_condition (code);
4835
4836 /* If op0 is a compare, extract the comparison arguments from it. */
4837 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4838 return simplify_gen_relational (code, mode, VOIDmode,
4839 XEXP (op0, 0), XEXP (op0, 1));
4840
4841 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4842 || CC0_P (op0))
4843 return NULL_RTX;
4844
4845 trueop0 = avoid_constant_pool_reference (op0);
4846 trueop1 = avoid_constant_pool_reference (op1);
4847 return simplify_relational_operation_1 (code, mode, cmp_mode,
4848 trueop0, trueop1);
4849 }
4850
4851 /* This part of simplify_relational_operation is only used when CMP_MODE
4852 is not in class MODE_CC (i.e. it is a real comparison).
4853
4854 MODE is the mode of the result, while CMP_MODE specifies the mode
4855 in which the comparison is done, so it is the mode of the operands. */
4856
4857 static rtx
4858 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4859 machine_mode cmp_mode, rtx op0, rtx op1)
4860 {
4861 enum rtx_code op0code = GET_CODE (op0);
4862
4863 if (op1 == const0_rtx && COMPARISON_P (op0))
4864 {
4865 /* If op0 is a comparison, extract the comparison arguments
4866 from it. */
4867 if (code == NE)
4868 {
4869 if (GET_MODE (op0) == mode)
4870 return simplify_rtx (op0);
4871 else
4872 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4873 XEXP (op0, 0), XEXP (op0, 1));
4874 }
4875 else if (code == EQ)
4876 {
4877 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4878 if (new_code != UNKNOWN)
4879 return simplify_gen_relational (new_code, mode, VOIDmode,
4880 XEXP (op0, 0), XEXP (op0, 1));
4881 }
4882 }
4883
4884 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4885 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4886 if ((code == LTU || code == GEU)
4887 && GET_CODE (op0) == PLUS
4888 && CONST_INT_P (XEXP (op0, 1))
4889 && (rtx_equal_p (op1, XEXP (op0, 0))
4890 || rtx_equal_p (op1, XEXP (op0, 1)))
4891 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4892 && XEXP (op0, 1) != const0_rtx)
4893 {
4894 rtx new_cmp
4895 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4896 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4897 cmp_mode, XEXP (op0, 0), new_cmp);
4898 }
4899
4900 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4901 transformed into (LTU a -C). */
4902 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4903 && CONST_INT_P (XEXP (op0, 1))
4904 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4905 && XEXP (op0, 1) != const0_rtx)
4906 {
4907 rtx new_cmp
4908 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4909 return simplify_gen_relational (LTU, mode, cmp_mode,
4910 XEXP (op0, 0), new_cmp);
4911 }
4912
4913 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4914 if ((code == LTU || code == GEU)
4915 && GET_CODE (op0) == PLUS
4916 && rtx_equal_p (op1, XEXP (op0, 1))
4917 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4918 && !rtx_equal_p (op1, XEXP (op0, 0)))
4919 return simplify_gen_relational (code, mode, cmp_mode, op0,
4920 copy_rtx (XEXP (op0, 0)));
4921
4922 if (op1 == const0_rtx)
4923 {
4924 /* Canonicalize (GTU x 0) as (NE x 0). */
4925 if (code == GTU)
4926 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4927 /* Canonicalize (LEU x 0) as (EQ x 0). */
4928 if (code == LEU)
4929 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4930 }
4931 else if (op1 == const1_rtx)
4932 {
4933 switch (code)
4934 {
4935 case GE:
4936 /* Canonicalize (GE x 1) as (GT x 0). */
4937 return simplify_gen_relational (GT, mode, cmp_mode,
4938 op0, const0_rtx);
4939 case GEU:
4940 /* Canonicalize (GEU x 1) as (NE x 0). */
4941 return simplify_gen_relational (NE, mode, cmp_mode,
4942 op0, const0_rtx);
4943 case LT:
4944 /* Canonicalize (LT x 1) as (LE x 0). */
4945 return simplify_gen_relational (LE, mode, cmp_mode,
4946 op0, const0_rtx);
4947 case LTU:
4948 /* Canonicalize (LTU x 1) as (EQ x 0). */
4949 return simplify_gen_relational (EQ, mode, cmp_mode,
4950 op0, const0_rtx);
4951 default:
4952 break;
4953 }
4954 }
4955 else if (op1 == constm1_rtx)
4956 {
4957 /* Canonicalize (LE x -1) as (LT x 0). */
4958 if (code == LE)
4959 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4960 /* Canonicalize (GT x -1) as (GE x 0). */
4961 if (code == GT)
4962 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4963 }
4964
4965 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4966 if ((code == EQ || code == NE)
4967 && (op0code == PLUS || op0code == MINUS)
4968 && CONSTANT_P (op1)
4969 && CONSTANT_P (XEXP (op0, 1))
4970 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4971 {
4972 rtx x = XEXP (op0, 0);
4973 rtx c = XEXP (op0, 1);
4974 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4975 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4976
4977 /* Detect an infinite recursive condition, where we oscillate at this
4978 simplification case between:
4979 A + B == C <---> C - B == A,
4980 where A, B, and C are all constants with non-simplifiable expressions,
4981 usually SYMBOL_REFs. */
4982 if (GET_CODE (tem) == invcode
4983 && CONSTANT_P (x)
4984 && rtx_equal_p (c, XEXP (tem, 1)))
4985 return NULL_RTX;
4986
4987 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4988 }
4989
4990 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4991 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4992 scalar_int_mode int_mode, int_cmp_mode;
4993 if (code == NE
4994 && op1 == const0_rtx
4995 && is_int_mode (mode, &int_mode)
4996 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4997 /* ??? Work-around BImode bugs in the ia64 backend. */
4998 && int_mode != BImode
4999 && int_cmp_mode != BImode
5000 && nonzero_bits (op0, int_cmp_mode) == 1
5001 && STORE_FLAG_VALUE == 1)
5002 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5003 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5004 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5005
5006 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5007 if ((code == EQ || code == NE)
5008 && op1 == const0_rtx
5009 && op0code == XOR)
5010 return simplify_gen_relational (code, mode, cmp_mode,
5011 XEXP (op0, 0), XEXP (op0, 1));
5012
5013 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5014 if ((code == EQ || code == NE)
5015 && op0code == XOR
5016 && rtx_equal_p (XEXP (op0, 0), op1)
5017 && !side_effects_p (XEXP (op0, 0)))
5018 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5019 CONST0_RTX (mode));
5020
5021 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5022 if ((code == EQ || code == NE)
5023 && op0code == XOR
5024 && rtx_equal_p (XEXP (op0, 1), op1)
5025 && !side_effects_p (XEXP (op0, 1)))
5026 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5027 CONST0_RTX (mode));
5028
5029 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5030 if ((code == EQ || code == NE)
5031 && op0code == XOR
5032 && CONST_SCALAR_INT_P (op1)
5033 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5034 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5035 simplify_gen_binary (XOR, cmp_mode,
5036 XEXP (op0, 1), op1));
5037
5038 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
5039 can be implemented with a BICS instruction on some targets, or
5040 constant-folded if y is a constant. */
5041 if ((code == EQ || code == NE)
5042 && op0code == AND
5043 && rtx_equal_p (XEXP (op0, 0), op1)
5044 && !side_effects_p (op1)
5045 && op1 != CONST0_RTX (cmp_mode))
5046 {
5047 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
5048 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5049
5050 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5051 CONST0_RTX (cmp_mode));
5052 }
5053
5054 /* Likewise for (eq/ne (and x y) y). */
5055 if ((code == EQ || code == NE)
5056 && op0code == AND
5057 && rtx_equal_p (XEXP (op0, 1), op1)
5058 && !side_effects_p (op1)
5059 && op1 != CONST0_RTX (cmp_mode))
5060 {
5061 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
5062 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5063
5064 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5065 CONST0_RTX (cmp_mode));
5066 }
5067
5068 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5069 if ((code == EQ || code == NE)
5070 && GET_CODE (op0) == BSWAP
5071 && CONST_SCALAR_INT_P (op1))
5072 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5073 simplify_gen_unary (BSWAP, cmp_mode,
5074 op1, cmp_mode));
5075
5076 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5077 if ((code == EQ || code == NE)
5078 && GET_CODE (op0) == BSWAP
5079 && GET_CODE (op1) == BSWAP)
5080 return simplify_gen_relational (code, mode, cmp_mode,
5081 XEXP (op0, 0), XEXP (op1, 0));
5082
5083 if (op0code == POPCOUNT && op1 == const0_rtx)
5084 switch (code)
5085 {
5086 case EQ:
5087 case LE:
5088 case LEU:
5089 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5090 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5091 XEXP (op0, 0), const0_rtx);
5092
5093 case NE:
5094 case GT:
5095 case GTU:
5096 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5097 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5098 XEXP (op0, 0), const0_rtx);
5099
5100 default:
5101 break;
5102 }
5103
5104 return NULL_RTX;
5105 }
5106
5107 enum
5108 {
5109 CMP_EQ = 1,
5110 CMP_LT = 2,
5111 CMP_GT = 4,
5112 CMP_LTU = 8,
5113 CMP_GTU = 16
5114 };
5115
5116
5117 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5118 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5119 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5120 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5121 For floating-point comparisons, assume that the operands were ordered. */
5122
5123 static rtx
5124 comparison_result (enum rtx_code code, int known_results)
5125 {
5126 switch (code)
5127 {
5128 case EQ:
5129 case UNEQ:
5130 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5131 case NE:
5132 case LTGT:
5133 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5134
5135 case LT:
5136 case UNLT:
5137 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5138 case GE:
5139 case UNGE:
5140 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5141
5142 case GT:
5143 case UNGT:
5144 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5145 case LE:
5146 case UNLE:
5147 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5148
5149 case LTU:
5150 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5151 case GEU:
5152 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5153
5154 case GTU:
5155 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5156 case LEU:
5157 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5158
5159 case ORDERED:
5160 return const_true_rtx;
5161 case UNORDERED:
5162 return const0_rtx;
5163 default:
5164 gcc_unreachable ();
5165 }
5166 }
5167
5168 /* Check if the given comparison (done in the given MODE) is actually
5169 a tautology or a contradiction. If the mode is VOIDmode, the
5170 comparison is done in "infinite precision". If no simplification
5171 is possible, this function returns zero. Otherwise, it returns
5172 either const_true_rtx or const0_rtx. */
5173
5174 rtx
5175 simplify_const_relational_operation (enum rtx_code code,
5176 machine_mode mode,
5177 rtx op0, rtx op1)
5178 {
5179 rtx tem;
5180 rtx trueop0;
5181 rtx trueop1;
5182
5183 gcc_assert (mode != VOIDmode
5184 || (GET_MODE (op0) == VOIDmode
5185 && GET_MODE (op1) == VOIDmode));
5186
5187 /* If op0 is a compare, extract the comparison arguments from it. */
5188 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5189 {
5190 op1 = XEXP (op0, 1);
5191 op0 = XEXP (op0, 0);
5192
5193 if (GET_MODE (op0) != VOIDmode)
5194 mode = GET_MODE (op0);
5195 else if (GET_MODE (op1) != VOIDmode)
5196 mode = GET_MODE (op1);
5197 else
5198 return 0;
5199 }
5200
5201 /* We can't simplify MODE_CC values since we don't know what the
5202 actual comparison is. */
5203 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5204 return 0;
5205
5206 /* Make sure the constant is second. */
5207 if (swap_commutative_operands_p (op0, op1))
5208 {
5209 std::swap (op0, op1);
5210 code = swap_condition (code);
5211 }
5212
5213 trueop0 = avoid_constant_pool_reference (op0);
5214 trueop1 = avoid_constant_pool_reference (op1);
5215
5216 /* For integer comparisons of A and B maybe we can simplify A - B and can
5217 then simplify a comparison of that with zero. If A and B are both either
5218 a register or a CONST_INT, this can't help; testing for these cases will
5219 prevent infinite recursion here and speed things up.
5220
5221 We can only do this for EQ and NE comparisons as otherwise we may
5222 lose or introduce overflow which we cannot disregard as undefined as
5223 we do not know the signedness of the operation on either the left or
5224 the right hand side of the comparison. */
5225
5226 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5227 && (code == EQ || code == NE)
5228 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5229 && (REG_P (op1) || CONST_INT_P (trueop1)))
5230 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5231 /* We cannot do this if tem is a nonzero address. */
5232 && ! nonzero_address_p (tem))
5233 return simplify_const_relational_operation (signed_condition (code),
5234 mode, tem, const0_rtx);
5235
5236 if (! HONOR_NANS (mode) && code == ORDERED)
5237 return const_true_rtx;
5238
5239 if (! HONOR_NANS (mode) && code == UNORDERED)
5240 return const0_rtx;
5241
5242 /* For modes without NaNs, if the two operands are equal, we know the
5243 result except if they have side-effects. Even with NaNs we know
5244 the result of unordered comparisons and, if signaling NaNs are
5245 irrelevant, also the result of LT/GT/LTGT. */
5246 if ((! HONOR_NANS (trueop0)
5247 || code == UNEQ || code == UNLE || code == UNGE
5248 || ((code == LT || code == GT || code == LTGT)
5249 && ! HONOR_SNANS (trueop0)))
5250 && rtx_equal_p (trueop0, trueop1)
5251 && ! side_effects_p (trueop0))
5252 return comparison_result (code, CMP_EQ);
5253
5254 /* If the operands are floating-point constants, see if we can fold
5255 the result. */
5256 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5257 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5258 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5259 {
5260 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5261 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5262
5263 /* Comparisons are unordered iff at least one of the values is NaN. */
5264 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5265 switch (code)
5266 {
5267 case UNEQ:
5268 case UNLT:
5269 case UNGT:
5270 case UNLE:
5271 case UNGE:
5272 case NE:
5273 case UNORDERED:
5274 return const_true_rtx;
5275 case EQ:
5276 case LT:
5277 case GT:
5278 case LE:
5279 case GE:
5280 case LTGT:
5281 case ORDERED:
5282 return const0_rtx;
5283 default:
5284 return 0;
5285 }
5286
5287 return comparison_result (code,
5288 (real_equal (d0, d1) ? CMP_EQ :
5289 real_less (d0, d1) ? CMP_LT : CMP_GT));
5290 }
5291
5292 /* Otherwise, see if the operands are both integers. */
5293 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5294 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5295 {
5296 /* It would be nice if we really had a mode here. However, the
5297 largest int representable on the target is as good as
5298 infinite. */
5299 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5300 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5301 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5302
5303 if (wi::eq_p (ptrueop0, ptrueop1))
5304 return comparison_result (code, CMP_EQ);
5305 else
5306 {
5307 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5308 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5309 return comparison_result (code, cr);
5310 }
5311 }
5312
5313 /* Optimize comparisons with upper and lower bounds. */
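/* For example, if nonzero_bits shows that only the low 8 bits of
   trueop0 can be set, then (gtu x (const_int 255)) folds to const0_rtx
   and (leu x (const_int 255)) folds to const_true_rtx.  */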
5314 scalar_int_mode int_mode;
5315 if (CONST_INT_P (trueop1)
5316 && is_a <scalar_int_mode> (mode, &int_mode)
5317 && HWI_COMPUTABLE_MODE_P (int_mode)
5318 && !side_effects_p (trueop0))
5319 {
5320 int sign;
5321 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5322 HOST_WIDE_INT val = INTVAL (trueop1);
5323 HOST_WIDE_INT mmin, mmax;
5324
5325 if (code == GEU
5326 || code == LEU
5327 || code == GTU
5328 || code == LTU)
5329 sign = 0;
5330 else
5331 sign = 1;
5332
5333 /* Get a reduced range if the sign bit is zero. */
5334 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5335 {
5336 mmin = 0;
5337 mmax = nonzero;
5338 }
5339 else
5340 {
5341 rtx mmin_rtx, mmax_rtx;
5342 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5343
5344 mmin = INTVAL (mmin_rtx);
5345 mmax = INTVAL (mmax_rtx);
5346 if (sign)
5347 {
5348 unsigned int sign_copies
5349 = num_sign_bit_copies (trueop0, int_mode);
5350
5351 mmin >>= (sign_copies - 1);
5352 mmax >>= (sign_copies - 1);
5353 }
5354 }
5355
5356 switch (code)
5357 {
5358 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5359 case GEU:
5360 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5361 return const_true_rtx;
5362 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5363 return const0_rtx;
5364 break;
5365 case GE:
5366 if (val <= mmin)
5367 return const_true_rtx;
5368 if (val > mmax)
5369 return const0_rtx;
5370 break;
5371
5372 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5373 case LEU:
5374 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5375 return const_true_rtx;
5376 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5377 return const0_rtx;
5378 break;
5379 case LE:
5380 if (val >= mmax)
5381 return const_true_rtx;
5382 if (val < mmin)
5383 return const0_rtx;
5384 break;
5385
5386 case EQ:
5387 /* x == y is always false for y out of range. */
5388 if (val < mmin || val > mmax)
5389 return const0_rtx;
5390 break;
5391
5392 /* x > y is always false for y >= mmax, always true for y < mmin. */
5393 case GTU:
5394 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5395 return const0_rtx;
5396 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5397 return const_true_rtx;
5398 break;
5399 case GT:
5400 if (val >= mmax)
5401 return const0_rtx;
5402 if (val < mmin)
5403 return const_true_rtx;
5404 break;
5405
5406 /* x < y is always false for y <= mmin, always true for y > mmax. */
5407 case LTU:
5408 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5409 return const0_rtx;
5410 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5411 return const_true_rtx;
5412 break;
5413 case LT:
5414 if (val <= mmin)
5415 return const0_rtx;
5416 if (val > mmax)
5417 return const_true_rtx;
5418 break;
5419
5420 case NE:
5421 /* x != y is always true for y out of range. */
5422 if (val < mmin || val > mmax)
5423 return const_true_rtx;
5424 break;
5425
5426 default:
5427 break;
5428 }
5429 }
5430
5431 /* Optimize integer comparisons with zero. */
5432 if (is_a <scalar_int_mode> (mode, &int_mode)
5433 && trueop1 == const0_rtx
5434 && !side_effects_p (trueop0))
5435 {
5436 /* Some addresses are known to be nonzero. We don't know their sign,
5437 but equality and unsigned order comparisons against zero are known. */
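/* Illustrative example: nonzero_address_p is normally true for a plain
   (symbol_ref "foo"), so (eq (symbol_ref "foo") (const_int 0)) folds to
   const0_rtx and the corresponding NE folds to const_true_rtx.  */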
5438 if (nonzero_address_p (trueop0))
5439 {
5440 if (code == EQ || code == LEU)
5441 return const0_rtx;
5442 if (code == NE || code == GTU)
5443 return const_true_rtx;
5444 }
5445
5446 /* See if the first operand is an IOR with a constant. If so, we
5447 may be able to determine the result of this comparison. */
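/* Illustrative example: (ior:SI x (const_int 4)) can never be zero, so
   comparing it against zero folds EQ to const0_rtx and NE to const_true_rtx
   below.  */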
5448 if (GET_CODE (op0) == IOR)
5449 {
5450 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5451 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5452 {
5453 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5454 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5455 && (UINTVAL (inner_const)
5456 & (HOST_WIDE_INT_1U
5457 << sign_bitnum)));
5458
5459 switch (code)
5460 {
5461 case EQ:
5462 case LEU:
5463 return const0_rtx;
5464 case NE:
5465 case GTU:
5466 return const_true_rtx;
5467 case LT:
5468 case LE:
5469 if (has_sign)
5470 return const_true_rtx;
5471 break;
5472 case GT:
5473 case GE:
5474 if (has_sign)
5475 return const0_rtx;
5476 break;
5477 default:
5478 break;
5479 }
5480 }
5481 }
5482 }
5483
5484 /* Optimize comparison of ABS with zero. */
5485 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5486 && (GET_CODE (trueop0) == ABS
5487 || (GET_CODE (trueop0) == FLOAT_EXTEND
5488 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5489 {
5490 switch (code)
5491 {
5492 case LT:
5493 /* Optimize abs(x) < 0.0. */
5494 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5495 return const0_rtx;
5496 break;
5497
5498 case GE:
5499 /* Optimize abs(x) >= 0.0. */
5500 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5501 return const_true_rtx;
5502 break;
5503
5504 case UNGE:
5505 /* Optimize ! (abs(x) < 0.0). */
5506 return const_true_rtx;
5507
5508 default:
5509 break;
5510 }
5511 }
5512
5513 return 0;
5514 }
5515
5516 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5517 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5518 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the expression
5519 can be simplified to that, or NULL_RTX if not.
5520 Assume X is compared against zero with CMP_CODE and the true
5521 arm is TRUE_VAL and the false arm is FALSE_VAL. */
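/* Illustrative example: on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields
   32 for SImode, (eq x (const_int 0)) ? (const_int 32) : (clz:SI x)
   simplifies to (clz:SI x).  */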
5522
5523 static rtx
5524 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5525 {
5526 if (cmp_code != EQ && cmp_code != NE)
5527 return NULL_RTX;
5528
5529 /* Result on X == 0 and X != 0 respectively. */
5530 rtx on_zero, on_nonzero;
5531 if (cmp_code == EQ)
5532 {
5533 on_zero = true_val;
5534 on_nonzero = false_val;
5535 }
5536 else
5537 {
5538 on_zero = false_val;
5539 on_nonzero = true_val;
5540 }
5541
5542 rtx_code op_code = GET_CODE (on_nonzero);
5543 if ((op_code != CLZ && op_code != CTZ)
5544 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5545 || !CONST_INT_P (on_zero))
5546 return NULL_RTX;
5547
5548 HOST_WIDE_INT op_val;
5549 scalar_int_mode mode ATTRIBUTE_UNUSED
5550 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5551 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5552 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5553 && op_val == INTVAL (on_zero))
5554 return on_nonzero;
5555
5556 return NULL_RTX;
5557 }
5558
5559 \f
5560 /* Simplify CODE, an operation with result mode MODE and three operands,
5561 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5562 a constant. Return 0 if no simplifications is possible. */
5563
5564 rtx
5565 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5566 machine_mode op0_mode, rtx op0, rtx op1,
5567 rtx op2)
5568 {
5569 bool any_change = false;
5570 rtx tem, trueop2;
5571 scalar_int_mode int_mode, int_op0_mode;
5572
5573 switch (code)
5574 {
5575 case FMA:
5576 /* Simplify negations around the multiplication. */
5577 /* -a * -b + c => a * b + c. */
5578 if (GET_CODE (op0) == NEG)
5579 {
5580 tem = simplify_unary_operation (NEG, mode, op1, mode);
5581 if (tem)
5582 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5583 }
5584 else if (GET_CODE (op1) == NEG)
5585 {
5586 tem = simplify_unary_operation (NEG, mode, op0, mode);
5587 if (tem)
5588 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5589 }
5590
5591 /* Canonicalize the two multiplication operands. */
5592 /* a * -b + c => -b * a + c. */
5593 if (swap_commutative_operands_p (op0, op1))
5594 std::swap (op0, op1), any_change = true;
5595
5596 if (any_change)
5597 return gen_rtx_FMA (mode, op0, op1, op2);
5598 return NULL_RTX;
5599
5600 case SIGN_EXTRACT:
5601 case ZERO_EXTRACT:
5602 if (CONST_INT_P (op0)
5603 && CONST_INT_P (op1)
5604 && CONST_INT_P (op2)
5605 && is_a <scalar_int_mode> (mode, &int_mode)
5606 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5607 && HWI_COMPUTABLE_MODE_P (int_mode))
5608 {
5609 /* Extracting a bit-field from a constant. */
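/* Illustrative example (assuming !BITS_BIG_ENDIAN):
   (zero_extract:SI (const_int 90) (const_int 4) (const_int 1)) extracts
   bits 1..4 of 0b1011010 and yields (const_int 13).  */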
5610 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5611 HOST_WIDE_INT op1val = INTVAL (op1);
5612 HOST_WIDE_INT op2val = INTVAL (op2);
5613 if (!BITS_BIG_ENDIAN)
5614 val >>= op2val;
5615 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5616 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5617 else
5618 /* Not enough information to calculate the bit position. */
5619 break;
5620
5621 if (HOST_BITS_PER_WIDE_INT != op1val)
5622 {
5623 /* First zero-extend. */
5624 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5625 /* If desired, propagate sign bit. */
5626 if (code == SIGN_EXTRACT
5627 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5628 != 0)
5629 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5630 }
5631
5632 return gen_int_mode (val, int_mode);
5633 }
5634 break;
5635
5636 case IF_THEN_ELSE:
5637 if (CONST_INT_P (op0))
5638 return op0 != const0_rtx ? op1 : op2;
5639
5640 /* Convert c ? a : a into "a". */
5641 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5642 return op1;
5643
5644 /* Convert a != b ? a : b into "a". */
5645 if (GET_CODE (op0) == NE
5646 && ! side_effects_p (op0)
5647 && ! HONOR_NANS (mode)
5648 && ! HONOR_SIGNED_ZEROS (mode)
5649 && ((rtx_equal_p (XEXP (op0, 0), op1)
5650 && rtx_equal_p (XEXP (op0, 1), op2))
5651 || (rtx_equal_p (XEXP (op0, 0), op2)
5652 && rtx_equal_p (XEXP (op0, 1), op1))))
5653 return op1;
5654
5655 /* Convert a == b ? a : b into "b". */
5656 if (GET_CODE (op0) == EQ
5657 && ! side_effects_p (op0)
5658 && ! HONOR_NANS (mode)
5659 && ! HONOR_SIGNED_ZEROS (mode)
5660 && ((rtx_equal_p (XEXP (op0, 0), op1)
5661 && rtx_equal_p (XEXP (op0, 1), op2))
5662 || (rtx_equal_p (XEXP (op0, 0), op2)
5663 && rtx_equal_p (XEXP (op0, 1), op1))))
5664 return op2;
5665
5666 /* Convert (!c) != {0,...,0} ? a : b into
5667 c != {0,...,0} ? b : a for vector modes. */
5668 if (VECTOR_MODE_P (GET_MODE (op1))
5669 && GET_CODE (op0) == NE
5670 && GET_CODE (XEXP (op0, 0)) == NOT
5671 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5672 {
5673 rtx cv = XEXP (op0, 1);
5674 int nunits = CONST_VECTOR_NUNITS (cv);
5675 bool ok = true;
5676 for (int i = 0; i < nunits; ++i)
5677 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5678 {
5679 ok = false;
5680 break;
5681 }
5682 if (ok)
5683 {
5684 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5685 XEXP (XEXP (op0, 0), 0),
5686 XEXP (op0, 1));
5687 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5688 return retval;
5689 }
5690 }
5691
5692 /* Convert x == 0 ? N : clz (x) into clz (x) when
5693 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5694 Similarly for ctz (x). */
5695 if (COMPARISON_P (op0) && !side_effects_p (op0)
5696 && XEXP (op0, 1) == const0_rtx)
5697 {
5698 rtx simplified
5699 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5700 op1, op2);
5701 if (simplified)
5702 return simplified;
5703 }
5704
5705 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5706 {
5707 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5708 ? GET_MODE (XEXP (op0, 1))
5709 : GET_MODE (XEXP (op0, 0)));
5710 rtx temp;
5711
5712 /* Look for happy constants in op1 and op2. */
5713 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5714 {
5715 HOST_WIDE_INT t = INTVAL (op1);
5716 HOST_WIDE_INT f = INTVAL (op2);
5717
5718 if (t == STORE_FLAG_VALUE && f == 0)
5719 code = GET_CODE (op0);
5720 else if (t == 0 && f == STORE_FLAG_VALUE)
5721 {
5722 enum rtx_code tmp;
5723 tmp = reversed_comparison_code (op0, NULL);
5724 if (tmp == UNKNOWN)
5725 break;
5726 code = tmp;
5727 }
5728 else
5729 break;
5730
5731 return simplify_gen_relational (code, mode, cmp_mode,
5732 XEXP (op0, 0), XEXP (op0, 1));
5733 }
5734
5735 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5736 cmp_mode, XEXP (op0, 0),
5737 XEXP (op0, 1));
5738
5739 /* See if any simplifications were possible. */
5740 if (temp)
5741 {
5742 if (CONST_INT_P (temp))
5743 return temp == const0_rtx ? op2 : op1;
5744 else if (temp)
5745 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5746 }
5747 }
5748 break;
5749
5750 case VEC_MERGE:
5751 gcc_assert (GET_MODE (op0) == mode);
5752 gcc_assert (GET_MODE (op1) == mode);
5753 gcc_assert (VECTOR_MODE_P (mode));
5754 trueop2 = avoid_constant_pool_reference (op2);
5755 if (CONST_INT_P (trueop2))
5756 {
5757 unsigned n_elts = GET_MODE_NUNITS (mode);
5758 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5759 unsigned HOST_WIDE_INT mask;
5760 if (n_elts == HOST_BITS_PER_WIDE_INT)
5761 mask = -1;
5762 else
5763 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
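/* Bit I of SEL selects element I of OP0; a clear bit selects the
   corresponding element of OP1.  Illustrative example:
   (vec_merge [a0 a1 a2 a3] [b0 b1 b2 b3] (const_int 5)) gives
   [a0 b1 a2 b3].  */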
5764
5765 if (!(sel & mask) && !side_effects_p (op0))
5766 return op1;
5767 if ((sel & mask) == mask && !side_effects_p (op1))
5768 return op0;
5769
5770 rtx trueop0 = avoid_constant_pool_reference (op0);
5771 rtx trueop1 = avoid_constant_pool_reference (op1);
5772 if (GET_CODE (trueop0) == CONST_VECTOR
5773 && GET_CODE (trueop1) == CONST_VECTOR)
5774 {
5775 rtvec v = rtvec_alloc (n_elts);
5776 unsigned int i;
5777
5778 for (i = 0; i < n_elts; i++)
5779 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5780 ? CONST_VECTOR_ELT (trueop0, i)
5781 : CONST_VECTOR_ELT (trueop1, i));
5782 return gen_rtx_CONST_VECTOR (mode, v);
5783 }
5784
5785 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5786 if no element from a appears in the result. */
5787 if (GET_CODE (op0) == VEC_MERGE)
5788 {
5789 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5790 if (CONST_INT_P (tem))
5791 {
5792 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5793 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5794 return simplify_gen_ternary (code, mode, mode,
5795 XEXP (op0, 1), op1, op2);
5796 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5797 return simplify_gen_ternary (code, mode, mode,
5798 XEXP (op0, 0), op1, op2);
5799 }
5800 }
5801 if (GET_CODE (op1) == VEC_MERGE)
5802 {
5803 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5804 if (CONST_INT_P (tem))
5805 {
5806 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5807 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5808 return simplify_gen_ternary (code, mode, mode,
5809 op0, XEXP (op1, 1), op2);
5810 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5811 return simplify_gen_ternary (code, mode, mode,
5812 op0, XEXP (op1, 0), op2);
5813 }
5814 }
5815
5816 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5817 with a. */
5818 if (GET_CODE (op0) == VEC_DUPLICATE
5819 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5820 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5821 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5822 {
5823 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5824 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5825 {
5826 if (XEXP (XEXP (op0, 0), 0) == op1
5827 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5828 return op1;
5829 }
5830 }
5831 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5832 (const_int N))
5833 with (vec_concat (X) (B)) if N == 1 or
5834 (vec_concat (A) (X)) if N == 2. */
5835 if (GET_CODE (op0) == VEC_DUPLICATE
5836 && GET_CODE (op1) == CONST_VECTOR
5837 && CONST_VECTOR_NUNITS (op1) == 2
5838 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5839 && IN_RANGE (sel, 1, 2))
5840 {
5841 rtx newop0 = XEXP (op0, 0);
5842 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
5843 if (sel == 2)
5844 std::swap (newop0, newop1);
5845 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5846 }
5847 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5848 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5849 Only applies for vectors of two elements. */
5850 if (GET_CODE (op0) == VEC_DUPLICATE
5851 && GET_CODE (op1) == VEC_CONCAT
5852 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5853 && GET_MODE_NUNITS (GET_MODE (op1)) == 2
5854 && IN_RANGE (sel, 1, 2))
5855 {
5856 rtx newop0 = XEXP (op0, 0);
5857 rtx newop1 = XEXP (op1, 2 - sel);
5858 rtx otherop = XEXP (op1, sel - 1);
5859 if (sel == 2)
5860 std::swap (newop0, newop1);
5861 /* Don't want to throw away the other part of the vec_concat if
5862 it has side-effects. */
5863 if (!side_effects_p (otherop))
5864 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5865 }
5866
5867 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
5868 (const_int n))
5869 with (vec_concat x y) or (vec_concat y x) depending on value
5870 of N. */
5871 if (GET_CODE (op0) == VEC_DUPLICATE
5872 && GET_CODE (op1) == VEC_DUPLICATE
5873 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5874 && GET_MODE_NUNITS (GET_MODE (op1)) == 2
5875 && IN_RANGE (sel, 1, 2))
5876 {
5877 rtx newop0 = XEXP (op0, 0);
5878 rtx newop1 = XEXP (op1, 0);
5879 if (sel == 2)
5880 std::swap (newop0, newop1);
5881
5882 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5883 }
5884 }
5885
5886 if (rtx_equal_p (op0, op1)
5887 && !side_effects_p (op2) && !side_effects_p (op1))
5888 return op0;
5889
5890 break;
5891
5892 default:
5893 gcc_unreachable ();
5894 }
5895
5896 return 0;
5897 }
5898
5899 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5900 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5901 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5902
5903 Works by unpacking OP into a collection of 8-bit values
5904 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5905 and then repacking them again for OUTERMODE. */
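/* Illustrative example (assuming a little-endian target): evaluating
   (subreg:QI (const_int 0x12345678) 1) with SImode as INNERMODE selects
   the second-lowest byte and returns (const_int 0x56).  */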
5906
5907 static rtx
5908 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
5909 fixed_size_mode innermode, unsigned int byte)
5910 {
5911 enum {
5912 value_bit = 8,
5913 value_mask = (1 << value_bit) - 1
5914 };
5915 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5916 int value_start;
5917 int i;
5918 int elem;
5919
5920 int num_elem;
5921 rtx * elems;
5922 int elem_bitsize;
5923 rtx result_s = NULL;
5924 rtvec result_v = NULL;
5925 enum mode_class outer_class;
5926 scalar_mode outer_submode;
5927 int max_bitsize;
5928
5929 /* Some ports misuse CCmode. */
5930 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5931 return op;
5932
5933 /* We have no way to represent a complex constant at the rtl level. */
5934 if (COMPLEX_MODE_P (outermode))
5935 return NULL_RTX;
5936
5937 /* We support any size mode. */
5938 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5939 GET_MODE_BITSIZE (innermode));
5940
5941 /* Unpack the value. */
5942
5943 if (GET_CODE (op) == CONST_VECTOR)
5944 {
5945 num_elem = CONST_VECTOR_NUNITS (op);
5946 elems = &CONST_VECTOR_ELT (op, 0);
5947 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5948 }
5949 else
5950 {
5951 num_elem = 1;
5952 elems = &op;
5953 elem_bitsize = max_bitsize;
5954 }
5955 /* If this asserts, it is too complicated; reducing value_bit may help. */
5956 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5957 /* I don't know how to handle endianness of sub-units. */
5958 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5959
5960 for (elem = 0; elem < num_elem; elem++)
5961 {
5962 unsigned char * vp;
5963 rtx el = elems[elem];
5964
5965 /* Vectors are kept in target memory order. (This is probably
5966 a mistake.) */
5967 {
5968 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5969 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5970 / BITS_PER_UNIT);
5971 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5972 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5973 unsigned bytele = (subword_byte % UNITS_PER_WORD
5974 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5975 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5976 }
5977
5978 switch (GET_CODE (el))
5979 {
5980 case CONST_INT:
5981 for (i = 0;
5982 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5983 i += value_bit)
5984 *vp++ = INTVAL (el) >> i;
5985 /* CONST_INTs are always logically sign-extended. */
5986 for (; i < elem_bitsize; i += value_bit)
5987 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5988 break;
5989
5990 case CONST_WIDE_INT:
5991 {
5992 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
5993 unsigned char extend = wi::sign_mask (val);
5994 int prec = wi::get_precision (val);
5995
5996 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5997 *vp++ = wi::extract_uhwi (val, i, value_bit);
5998 for (; i < elem_bitsize; i += value_bit)
5999 *vp++ = extend;
6000 }
6001 break;
6002
6003 case CONST_DOUBLE:
6004 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6005 {
6006 unsigned char extend = 0;
6007 /* If this triggers, someone should have generated a
6008 CONST_INT instead. */
6009 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6010
6011 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6012 *vp++ = CONST_DOUBLE_LOW (el) >> i;
6013 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6014 {
6015 *vp++
6016 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6017 i += value_bit;
6018 }
6019
6020 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6021 extend = -1;
6022 for (; i < elem_bitsize; i += value_bit)
6023 *vp++ = extend;
6024 }
6025 else
6026 {
6027 /* This is big enough for anything on the platform. */
6028 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6029 scalar_float_mode el_mode;
6030
6031 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6032 int bitsize = GET_MODE_BITSIZE (el_mode);
6033
6034 gcc_assert (bitsize <= elem_bitsize);
6035 gcc_assert (bitsize % value_bit == 0);
6036
6037 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6038 GET_MODE (el));
6039
6040 /* real_to_target produces its result in words affected by
6041 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6042 and use WORDS_BIG_ENDIAN instead; see the documentation
6043 of SUBREG in rtl.texi. */
6044 for (i = 0; i < bitsize; i += value_bit)
6045 {
6046 int ibase;
6047 if (WORDS_BIG_ENDIAN)
6048 ibase = bitsize - 1 - i;
6049 else
6050 ibase = i;
6051 *vp++ = tmp[ibase / 32] >> i % 32;
6052 }
6053
6054 /* It shouldn't matter what's done here, so fill it with
6055 zero. */
6056 for (; i < elem_bitsize; i += value_bit)
6057 *vp++ = 0;
6058 }
6059 break;
6060
6061 case CONST_FIXED:
6062 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6063 {
6064 for (i = 0; i < elem_bitsize; i += value_bit)
6065 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6066 }
6067 else
6068 {
6069 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6070 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6071 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6072 i += value_bit)
6073 *vp++ = CONST_FIXED_VALUE_HIGH (el)
6074 >> (i - HOST_BITS_PER_WIDE_INT);
6075 for (; i < elem_bitsize; i += value_bit)
6076 *vp++ = 0;
6077 }
6078 break;
6079
6080 default:
6081 gcc_unreachable ();
6082 }
6083 }
6084
6085 /* Now, pick the right byte to start with. */
6086 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6087 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6088 will already have offset 0. */
6089 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
6090 {
6091 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
6092 - byte);
6093 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6094 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6095 byte = (subword_byte % UNITS_PER_WORD
6096 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6097 }
6098
6099 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6100 so if it's become negative it will instead be very large.) */
6101 gcc_assert (byte < GET_MODE_SIZE (innermode));
6102
6103 /* Convert from bytes to chunks of size value_bit. */
6104 value_start = byte * (BITS_PER_UNIT / value_bit);
6105
6106 /* Re-pack the value. */
6107 num_elem = GET_MODE_NUNITS (outermode);
6108
6109 if (VECTOR_MODE_P (outermode))
6110 {
6111 result_v = rtvec_alloc (num_elem);
6112 elems = &RTVEC_ELT (result_v, 0);
6113 }
6114 else
6115 elems = &result_s;
6116
6117 outer_submode = GET_MODE_INNER (outermode);
6118 outer_class = GET_MODE_CLASS (outer_submode);
6119 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6120
6121 gcc_assert (elem_bitsize % value_bit == 0);
6122 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6123
6124 for (elem = 0; elem < num_elem; elem++)
6125 {
6126 unsigned char *vp;
6127
6128 /* Vectors are stored in target memory order. (This is probably
6129 a mistake.) */
6130 {
6131 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6132 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6133 / BITS_PER_UNIT);
6134 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6135 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6136 unsigned bytele = (subword_byte % UNITS_PER_WORD
6137 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6138 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6139 }
6140
6141 switch (outer_class)
6142 {
6143 case MODE_INT:
6144 case MODE_PARTIAL_INT:
6145 {
6146 int u;
6147 int base = 0;
6148 int units
6149 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6150 / HOST_BITS_PER_WIDE_INT;
6151 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6152 wide_int r;
6153
6154 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6155 return NULL_RTX;
6156 for (u = 0; u < units; u++)
6157 {
6158 unsigned HOST_WIDE_INT buf = 0;
6159 for (i = 0;
6160 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6161 i += value_bit)
6162 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6163
6164 tmp[u] = buf;
6165 base += HOST_BITS_PER_WIDE_INT;
6166 }
6167 r = wide_int::from_array (tmp, units,
6168 GET_MODE_PRECISION (outer_submode));
6169 #if TARGET_SUPPORTS_WIDE_INT == 0
6170 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6171 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6172 return NULL_RTX;
6173 #endif
6174 elems[elem] = immed_wide_int_const (r, outer_submode);
6175 }
6176 break;
6177
6178 case MODE_FLOAT:
6179 case MODE_DECIMAL_FLOAT:
6180 {
6181 REAL_VALUE_TYPE r;
6182 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6183
6184 /* real_from_target wants its input in words affected by
6185 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6186 and use WORDS_BIG_ENDIAN instead; see the documentation
6187 of SUBREG in rtl.texi. */
6188 for (i = 0; i < elem_bitsize; i += value_bit)
6189 {
6190 int ibase;
6191 if (WORDS_BIG_ENDIAN)
6192 ibase = elem_bitsize - 1 - i;
6193 else
6194 ibase = i;
6195 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6196 }
6197
6198 real_from_target (&r, tmp, outer_submode);
6199 elems[elem] = const_double_from_real_value (r, outer_submode);
6200 }
6201 break;
6202
6203 case MODE_FRACT:
6204 case MODE_UFRACT:
6205 case MODE_ACCUM:
6206 case MODE_UACCUM:
6207 {
6208 FIXED_VALUE_TYPE f;
6209 f.data.low = 0;
6210 f.data.high = 0;
6211 f.mode = outer_submode;
6212
6213 for (i = 0;
6214 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6215 i += value_bit)
6216 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6217 for (; i < elem_bitsize; i += value_bit)
6218 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6219 << (i - HOST_BITS_PER_WIDE_INT));
6220
6221 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6222 }
6223 break;
6224
6225 default:
6226 gcc_unreachable ();
6227 }
6228 }
6229 if (VECTOR_MODE_P (outermode))
6230 return gen_rtx_CONST_VECTOR (outermode, result_v);
6231 else
6232 return result_s;
6233 }
6234
6235 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6236 Return 0 if no simplifications are possible. */
6237 rtx
6238 simplify_subreg (machine_mode outermode, rtx op,
6239 machine_mode innermode, unsigned int byte)
6240 {
6241 /* Little bit of sanity checking. */
6242 gcc_assert (innermode != VOIDmode);
6243 gcc_assert (outermode != VOIDmode);
6244 gcc_assert (innermode != BLKmode);
6245 gcc_assert (outermode != BLKmode);
6246
6247 gcc_assert (GET_MODE (op) == innermode
6248 || GET_MODE (op) == VOIDmode);
6249
6250 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6251 return NULL_RTX;
6252
6253 if (byte >= GET_MODE_SIZE (innermode))
6254 return NULL_RTX;
6255
6256 if (outermode == innermode && !byte)
6257 return op;
6258
6259 if (byte % GET_MODE_UNIT_SIZE (innermode) == 0)
6260 {
6261 rtx elt;
6262
6263 if (VECTOR_MODE_P (outermode)
6264 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6265 && vec_duplicate_p (op, &elt))
6266 return gen_vec_duplicate (outermode, elt);
6267
6268 if (outermode == GET_MODE_INNER (innermode)
6269 && vec_duplicate_p (op, &elt))
6270 return elt;
6271 }
6272
6273 if (CONST_SCALAR_INT_P (op)
6274 || CONST_DOUBLE_AS_FLOAT_P (op)
6275 || GET_CODE (op) == CONST_FIXED
6276 || GET_CODE (op) == CONST_VECTOR)
6277 {
6278 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6279 the result from bytes, so it only works if the sizes of the modes
6280 are known at compile time. Cases that apply to general modes
6281 should be handled here before calling simplify_immed_subreg. */
6282 fixed_size_mode fs_outermode, fs_innermode;
6283 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6284 && is_a <fixed_size_mode> (innermode, &fs_innermode))
6285 return simplify_immed_subreg (fs_outermode, op, fs_innermode, byte);
6286
6287 return NULL_RTX;
6288 }
6289
6290 /* Changing mode twice with SUBREG => just change it once,
6291 or not at all if changing back to OP's starting mode. */
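  /* Illustrative example: (subreg:HI (subreg:SI (reg:DI r) 0) 0) can be
     rewritten as (subreg:HI (reg:DI r) 0) on a little-endian target.  */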
6292 if (GET_CODE (op) == SUBREG)
6293 {
6294 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6295 rtx newx;
6296
6297 if (outermode == innermostmode
6298 && byte == 0 && SUBREG_BYTE (op) == 0)
6299 return SUBREG_REG (op);
6300
6301 /* Work out the memory offset of the final OUTERMODE value relative
6302 to the inner value of OP. */
6303 HOST_WIDE_INT mem_offset = subreg_memory_offset (outermode,
6304 innermode, byte);
6305 HOST_WIDE_INT op_mem_offset = subreg_memory_offset (op);
6306 HOST_WIDE_INT final_offset = mem_offset + op_mem_offset;
6307
6308 /* See whether resulting subreg will be paradoxical. */
6309 if (!paradoxical_subreg_p (outermode, innermostmode))
6310 {
6311 /* In nonparadoxical subregs we can't handle negative offsets. */
6312 if (final_offset < 0)
6313 return NULL_RTX;
6314 /* Bail out in case resulting subreg would be incorrect. */
6315 if (final_offset % GET_MODE_SIZE (outermode)
6316 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6317 return NULL_RTX;
6318 }
6319 else
6320 {
6321 HOST_WIDE_INT required_offset
6322 = subreg_memory_offset (outermode, innermostmode, 0);
6323 if (final_offset != required_offset)
6324 return NULL_RTX;
6325 /* Paradoxical subregs always have byte offset 0. */
6326 final_offset = 0;
6327 }
6328
6329 /* Recurse for further possible simplifications. */
6330 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6331 final_offset);
6332 if (newx)
6333 return newx;
6334 if (validate_subreg (outermode, innermostmode,
6335 SUBREG_REG (op), final_offset))
6336 {
6337 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6338 if (SUBREG_PROMOTED_VAR_P (op)
6339 && SUBREG_PROMOTED_SIGN (op) >= 0
6340 && GET_MODE_CLASS (outermode) == MODE_INT
6341 && IN_RANGE (GET_MODE_SIZE (outermode),
6342 GET_MODE_SIZE (innermode),
6343 GET_MODE_SIZE (innermostmode))
6344 && subreg_lowpart_p (newx))
6345 {
6346 SUBREG_PROMOTED_VAR_P (newx) = 1;
6347 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6348 }
6349 return newx;
6350 }
6351 return NULL_RTX;
6352 }
6353
6354 /* SUBREG of a hard register => just change the register number
6355 and/or mode. If the hard register is not valid in that mode,
6356 suppress this simplification. If the hard register is the stack,
6357 frame, or argument pointer, leave this as a SUBREG. */
6358
6359 if (REG_P (op) && HARD_REGISTER_P (op))
6360 {
6361 unsigned int regno, final_regno;
6362
6363 regno = REGNO (op);
6364 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6365 if (HARD_REGISTER_NUM_P (final_regno))
6366 {
6367 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6368 subreg_memory_offset (outermode,
6369 innermode, byte));
6370
6371 /* Propagate the original regno. We don't have any way to specify
6372 the offset inside the original regno, so do so only for the lowpart.
6373 The information is used only by alias analysis, which cannot
6374 grok partial registers anyway. */
6375
6376 if (subreg_lowpart_offset (outermode, innermode) == byte)
6377 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6378 return x;
6379 }
6380 }
6381
6382 /* If we have a SUBREG of a register that we are replacing and we are
6383 replacing it with a MEM, make a new MEM and try replacing the
6384 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6385 or if we would be widening it. */
6386
6387 if (MEM_P (op)
6388 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6389 /* Allow splitting of volatile memory references in case we don't
6390 have an instruction to move the whole thing. */
6391 && (! MEM_VOLATILE_P (op)
6392 || ! have_insn_for (SET, innermode))
6393 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6394 return adjust_address_nv (op, outermode, byte);
6395
6396 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6397 of two parts. */
6398 if (GET_CODE (op) == CONCAT
6399 || GET_CODE (op) == VEC_CONCAT)
6400 {
6401 unsigned int part_size, final_offset;
6402 rtx part, res;
6403
6404 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6405 if (part_mode == VOIDmode)
6406 part_mode = GET_MODE_INNER (GET_MODE (op));
6407 part_size = GET_MODE_SIZE (part_mode);
6408 if (byte < part_size)
6409 {
6410 part = XEXP (op, 0);
6411 final_offset = byte;
6412 }
6413 else
6414 {
6415 part = XEXP (op, 1);
6416 final_offset = byte - part_size;
6417 }
6418
6419 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6420 return NULL_RTX;
6421
6422 part_mode = GET_MODE (part);
6423 if (part_mode == VOIDmode)
6424 part_mode = GET_MODE_INNER (GET_MODE (op));
6425 res = simplify_subreg (outermode, part, part_mode, final_offset);
6426 if (res)
6427 return res;
6428 if (validate_subreg (outermode, part_mode, part, final_offset))
6429 return gen_rtx_SUBREG (outermode, part, final_offset);
6430 return NULL_RTX;
6431 }
6432
6433 /* A SUBREG resulting from a zero extension may fold to zero if
6434 it extracts higher bits than the ZERO_EXTEND's source provides. */
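  /* Illustrative example (assuming a little-endian target):
     (subreg:HI (zero_extend:SI (reg:QI x)) 2) selects bits 16..31, which
     the zero extension guarantees are zero, so it folds to (const_int 0).  */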
6435 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6436 {
6437 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6438 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6439 return CONST0_RTX (outermode);
6440 }
6441
6442 scalar_int_mode int_outermode, int_innermode;
6443 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6444 && is_a <scalar_int_mode> (innermode, &int_innermode)
6445 && byte == subreg_lowpart_offset (int_outermode, int_innermode))
6446 {
6447 /* Handle polynomial integers. The upper bits of a paradoxical
6448 subreg are undefined, so this is safe regardless of whether
6449 we're truncating or extending. */
6450 if (CONST_POLY_INT_P (op))
6451 {
6452 poly_wide_int val
6453 = poly_wide_int::from (const_poly_int_value (op),
6454 GET_MODE_PRECISION (int_outermode),
6455 SIGNED);
6456 return immed_wide_int_const (val, int_outermode);
6457 }
6458
6459 if (GET_MODE_PRECISION (int_outermode)
6460 < GET_MODE_PRECISION (int_innermode))
6461 {
6462 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6463 if (tem)
6464 return tem;
6465 }
6466 }
6467
6468 return NULL_RTX;
6469 }
6470
6471 /* Make a SUBREG operation or equivalent if it folds. */
6472
6473 rtx
6474 simplify_gen_subreg (machine_mode outermode, rtx op,
6475 machine_mode innermode, unsigned int byte)
6476 {
6477 rtx newx;
6478
6479 newx = simplify_subreg (outermode, op, innermode, byte);
6480 if (newx)
6481 return newx;
6482
6483 if (GET_CODE (op) == SUBREG
6484 || GET_CODE (op) == CONCAT
6485 || GET_MODE (op) == VOIDmode)
6486 return NULL_RTX;
6487
6488 if (validate_subreg (outermode, innermode, op, byte))
6489 return gen_rtx_SUBREG (outermode, op, byte);
6490
6491 return NULL_RTX;
6492 }
6493
6494 /* Generate a subreg to get the least significant part of EXPR (in mode
6495 INNER_MODE) in OUTER_MODE. */
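/* Illustrative example: lowpart_subreg (QImode, x, SImode) yields
   (subreg:QI x 0) on a little-endian target and (subreg:QI x 3) on a
   big-endian one, or a simplified equivalent when x is constant.  */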
6496
6497 rtx
6498 lowpart_subreg (machine_mode outer_mode, rtx expr,
6499 machine_mode inner_mode)
6500 {
6501 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6502 subreg_lowpart_offset (outer_mode, inner_mode));
6503 }
6504
6505 /* Simplify X, an rtx expression.
6506
6507 Return the simplified expression or NULL if no simplifications
6508 were possible.
6509
6510 This is the preferred entry point into the simplification routines;
6511 however, we still allow passes to call the more specific routines.
6512
6513 Right now GCC has three (yes, three) major bodies of RTL simplification
6514 code that need to be unified.
6515
6516 1. fold_rtx in cse.c. This code uses various CSE specific
6517 information to aid in RTL simplification.
6518
6519 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6520 it uses combine specific information to aid in RTL
6521 simplification.
6522
6523 3. The routines in this file.
6524
6525
6526 Long term we want to only have one body of simplification code; to
6527 get to that state I recommend the following steps:
6528
6529 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6530 that do not depend on pass-specific state into these routines.
6531
6532 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6533 use this routine whenever possible.
6534
6535 3. Allow for pass dependent state to be provided to these
6536 routines and add simplifications based on the pass dependent
6537 state. Remove code from cse.c & combine.c that becomes
6538 redundant/dead.
6539
6540 It will take time, but ultimately the compiler will be easier to
6541 maintain and improve. It's totally silly that when we add a
6542 simplification it needs to be added to 4 places (3 for RTL
6543 simplification and 1 for tree simplification). */
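/* Illustrative examples: simplify_rtx turns (not:SI (not:SI (reg:SI x)))
   into (reg:SI x) and (plus:SI (reg:SI x) (const_int 0)) into (reg:SI x);
   it returns NULL when no simplification applies.  */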
6544
6545 rtx
6546 simplify_rtx (const_rtx x)
6547 {
6548 const enum rtx_code code = GET_CODE (x);
6549 const machine_mode mode = GET_MODE (x);
6550
6551 switch (GET_RTX_CLASS (code))
6552 {
6553 case RTX_UNARY:
6554 return simplify_unary_operation (code, mode,
6555 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6556 case RTX_COMM_ARITH:
6557 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6558 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6559
6560 /* Fall through. */
6561
6562 case RTX_BIN_ARITH:
6563 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6564
6565 case RTX_TERNARY:
6566 case RTX_BITFIELD_OPS:
6567 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6568 XEXP (x, 0), XEXP (x, 1),
6569 XEXP (x, 2));
6570
6571 case RTX_COMPARE:
6572 case RTX_COMM_COMPARE:
6573 return simplify_relational_operation (code, mode,
6574 ((GET_MODE (XEXP (x, 0))
6575 != VOIDmode)
6576 ? GET_MODE (XEXP (x, 0))
6577 : GET_MODE (XEXP (x, 1))),
6578 XEXP (x, 0),
6579 XEXP (x, 1));
6580
6581 case RTX_EXTRA:
6582 if (code == SUBREG)
6583 return simplify_subreg (mode, SUBREG_REG (x),
6584 GET_MODE (SUBREG_REG (x)),
6585 SUBREG_BYTE (x));
6586 break;
6587
6588 case RTX_OBJ:
6589 if (code == LO_SUM)
6590 {
6591 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6592 if (GET_CODE (XEXP (x, 0)) == HIGH
6593 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6594 return XEXP (x, 1);
6595 }
6596 break;
6597
6598 default:
6599 break;
6600 }
6601 return NULL;
6602 }
6603
6604 #if CHECKING_P
6605
6606 namespace selftest {
6607
6608 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6609
6610 static rtx
6611 make_test_reg (machine_mode mode)
6612 {
6613 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6614
6615 return gen_rtx_REG (mode, test_reg_num++);
6616 }
6617
6618 /* Test vector simplifications involving VEC_DUPLICATE in which the
6619 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6620 register that holds one element of MODE. */
6621
6622 static void
6623 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6624 {
6625 scalar_mode inner_mode = GET_MODE_INNER (mode);
6626 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6627 unsigned int nunits = GET_MODE_NUNITS (mode);
6628 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6629 {
6630 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6631 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6632 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6633 ASSERT_RTX_EQ (duplicate,
6634 simplify_unary_operation (NOT, mode,
6635 duplicate_not, mode));
6636
6637 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6638 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6639 ASSERT_RTX_EQ (duplicate,
6640 simplify_unary_operation (NEG, mode,
6641 duplicate_neg, mode));
6642
6643 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6644 ASSERT_RTX_EQ (duplicate,
6645 simplify_binary_operation (PLUS, mode, duplicate,
6646 CONST0_RTX (mode)));
6647
6648 ASSERT_RTX_EQ (duplicate,
6649 simplify_binary_operation (MINUS, mode, duplicate,
6650 CONST0_RTX (mode)));
6651
6652 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6653 simplify_binary_operation (MINUS, mode, duplicate,
6654 duplicate));
6655 }
6656
6657 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6658 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6659 ASSERT_RTX_PTR_EQ (scalar_reg,
6660 simplify_binary_operation (VEC_SELECT, inner_mode,
6661 duplicate, zero_par));
6662
6663 /* And again with the final element. */
6664 rtx last_index = gen_int_mode (GET_MODE_NUNITS (mode) - 1, word_mode);
6665 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6666 ASSERT_RTX_PTR_EQ (scalar_reg,
6667 simplify_binary_operation (VEC_SELECT, inner_mode,
6668 duplicate, last_par));
6669
6670 /* Test a scalar subreg of a VEC_DUPLICATE. */
6671 unsigned int offset = subreg_lowpart_offset (inner_mode, mode);
6672 ASSERT_RTX_EQ (scalar_reg,
6673 simplify_gen_subreg (inner_mode, duplicate,
6674 mode, offset));
6675
6676 machine_mode narrower_mode;
6677 if (nunits > 2
6678 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6679 && VECTOR_MODE_P (narrower_mode))
6680 {
6681 /* Test VEC_SELECT of a vector. */
6682 rtx vec_par
6683 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6684 rtx narrower_duplicate
6685 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6686 ASSERT_RTX_EQ (narrower_duplicate,
6687 simplify_binary_operation (VEC_SELECT, narrower_mode,
6688 duplicate, vec_par));
6689
6690 /* Test a vector subreg of a VEC_DUPLICATE. */
6691 unsigned int offset = subreg_lowpart_offset (narrower_mode, mode);
6692 ASSERT_RTX_EQ (narrower_duplicate,
6693 simplify_gen_subreg (narrower_mode, duplicate,
6694 mode, offset));
6695 }
6696 }
6697
6698 /* Test vector simplifications involving VEC_SERIES in which the
6699 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6700 register that holds one element of MODE. */
6701
6702 static void
6703 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6704 {
6705 /* Test unary cases with VEC_SERIES arguments. */
6706 scalar_mode inner_mode = GET_MODE_INNER (mode);
6707 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6708 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6709 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6710 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6711 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6712 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6713 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6714 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6715 neg_scalar_reg);
6716 ASSERT_RTX_EQ (series_0_r,
6717 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6718 ASSERT_RTX_EQ (series_r_m1,
6719 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6720 ASSERT_RTX_EQ (series_r_r,
6721 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6722
6723 /* Test that a VEC_SERIES with a zero step is simplified away. */
6724 ASSERT_RTX_EQ (duplicate,
6725 simplify_binary_operation (VEC_SERIES, mode,
6726 scalar_reg, const0_rtx));
6727
6728 /* Test PLUS and MINUS with VEC_SERIES. */
6729 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6730 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6731 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6732 ASSERT_RTX_EQ (series_r_r,
6733 simplify_binary_operation (PLUS, mode, series_0_r,
6734 duplicate));
6735 ASSERT_RTX_EQ (series_r_1,
6736 simplify_binary_operation (PLUS, mode, duplicate,
6737 series_0_1));
6738 ASSERT_RTX_EQ (series_r_m1,
6739 simplify_binary_operation (PLUS, mode, duplicate,
6740 series_0_m1));
6741 ASSERT_RTX_EQ (series_0_r,
6742 simplify_binary_operation (MINUS, mode, series_r_r,
6743 duplicate));
6744 ASSERT_RTX_EQ (series_r_m1,
6745 simplify_binary_operation (MINUS, mode, duplicate,
6746 series_0_1));
6747 ASSERT_RTX_EQ (series_r_1,
6748 simplify_binary_operation (MINUS, mode, duplicate,
6749 series_0_m1));
6750 ASSERT_RTX_EQ (series_0_m1,
6751 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
6752 constm1_rtx));
6753 }
6754
6755 /* Verify some simplifications involving vectors. */
6756
6757 static void
6758 test_vector_ops ()
6759 {
6760 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
6761 {
6762 machine_mode mode = (machine_mode) i;
6763 if (VECTOR_MODE_P (mode))
6764 {
6765 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
6766 test_vector_ops_duplicate (mode, scalar_reg);
6767 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6768 && GET_MODE_NUNITS (mode) > 2)
6769 test_vector_ops_series (mode, scalar_reg);
6770 }
6771 }
6772 }
6773
6774 template<unsigned int N>
6775 struct simplify_const_poly_int_tests
6776 {
6777 static void run ();
6778 };
6779
6780 template<>
6781 struct simplify_const_poly_int_tests<1>
6782 {
6783 static void run () {}
6784 };
6785
6786 /* Test various CONST_POLY_INT properties. */
6787
6788 template<unsigned int N>
6789 void
6790 simplify_const_poly_int_tests<N>::run ()
6791 {
6792 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
6793 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
6794 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
6795 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
6796 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
6797 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
6798 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
6799 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
6800 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
6801 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
6802 rtx two = GEN_INT (2);
6803 rtx six = GEN_INT (6);
6804 HOST_WIDE_INT offset = subreg_lowpart_offset (QImode, HImode);
6805
6806 /* These tests only try limited operation combinations. Fuller arithmetic
6807 testing is done directly on poly_ints. */
6808 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
6809 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
6810 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
6811 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
6812 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
6813 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
6814 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
6815 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
6816 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
6817 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
6818 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
6819 }
6820
6821 /* Run all of the selftests within this file. */
6822
6823 void
6824 simplify_rtx_c_tests ()
6825 {
6826 test_vector_ops ();
6827 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
6828 }
6829
6830 } // namespace selftest
6831
6832 #endif /* CHECKING_P */