gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "alias.h"
27 #include "symtab.h"
28 #include "tree.h"
29 #include "fold-const.h"
30 #include "varasm.h"
31 #include "tm_p.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "recog.h"
37 #include "function.h"
38 #include "insn-codes.h"
39 #include "optabs.h"
40 #include "expmed.h"
41 #include "dojump.h"
42 #include "explow.h"
43 #include "calls.h"
44 #include "emit-rtl.h"
45 #include "stmt.h"
46 #include "expr.h"
47 #include "diagnostic-core.h"
48 #include "target.h"
49 #include "predict.h"
50
51 /* Simplification and canonicalization of RTL. */
52
53 /* Much code operates on (low, high) pairs; the low value is an
54 unsigned wide int, the high value a signed wide int. We
55 occasionally need to sign extend from low to high as if low were a
56 signed wide int. */
57 #define HWI_SIGN_EXTEND(low) \
58 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
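
/* Editorial sketch (not part of the original file, not compiled): how the
   macro above supplies the high half of a (low, high) pair.  The helper
   name is hypothetical.  */
#if 0
static void
example_hwi_pair (HOST_WIDE_INT v,
                  unsigned HOST_WIDE_INT *low, HOST_WIDE_INT *high)
{
  *low = (unsigned HOST_WIDE_INT) v;   /* Low half is unsigned.  */
  *high = HWI_SIGN_EXTEND (v);         /* -1 if V is negative, else 0.  */
}
#endif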
59
60 static rtx neg_const_int (machine_mode, const_rtx);
61 static bool plus_minus_operand_p (const_rtx);
62 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
63 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
64 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
65 unsigned int);
66 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
67 rtx, rtx);
68 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
69 machine_mode, rtx, rtx);
70 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
71 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
72 rtx, rtx, rtx, rtx);
73 \f
74 /* Negate a CONST_INT rtx, truncating (because a conversion from a
75 maximally negative number can overflow). */
76 static rtx
77 neg_const_int (machine_mode mode, const_rtx i)
78 {
79 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
80 }
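
/* Editorial illustration (not part of the original file): negating the most
   negative QImode value.  The arithmetic is done on the unsigned type, so
   there is no signed overflow, and gen_int_mode truncates and
   re-canonicalizes the result for the mode:

     INTVAL (i) == -128
       -> -(unsigned HOST_WIDE_INT) -128 == 128
       -> gen_int_mode (128, QImode) == (const_int -128)

   i.e. (neg:QI (const_int -128)) folds back to (const_int -128).  */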
81
82 /* Test whether expression, X, is an immediate constant that represents
83 the most significant bit of machine mode MODE. */
84
85 bool
86 mode_signbit_p (machine_mode mode, const_rtx x)
87 {
88 unsigned HOST_WIDE_INT val;
89 unsigned int width;
90
91 if (GET_MODE_CLASS (mode) != MODE_INT)
92 return false;
93
94 width = GET_MODE_PRECISION (mode);
95 if (width == 0)
96 return false;
97
98 if (width <= HOST_BITS_PER_WIDE_INT
99 && CONST_INT_P (x))
100 val = INTVAL (x);
101 #if TARGET_SUPPORTS_WIDE_INT
102 else if (CONST_WIDE_INT_P (x))
103 {
104 unsigned int i;
105 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
106 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
107 return false;
108 for (i = 0; i < elts - 1; i++)
109 if (CONST_WIDE_INT_ELT (x, i) != 0)
110 return false;
111 val = CONST_WIDE_INT_ELT (x, elts - 1);
112 width %= HOST_BITS_PER_WIDE_INT;
113 if (width == 0)
114 width = HOST_BITS_PER_WIDE_INT;
115 }
116 #else
117 else if (width <= HOST_BITS_PER_DOUBLE_INT
118 && CONST_DOUBLE_AS_INT_P (x)
119 && CONST_DOUBLE_LOW (x) == 0)
120 {
121 val = CONST_DOUBLE_HIGH (x);
122 width -= HOST_BITS_PER_WIDE_INT;
123 }
124 #endif
125 else
126 /* X is not an integer constant. */
127 return false;
128
129 if (width < HOST_BITS_PER_WIDE_INT)
130 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
131 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
132 }
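
/* Editorial illustration (not part of the original file), assuming a 32-bit
   SImode: the only value mode_signbit_p accepts is the sign bit itself,

     mode_signbit_p (SImode, gen_int_mode (0x80000000, SImode))  -> true
     mode_signbit_p (SImode, const1_rtx)                         -> false

   Wider constants go through the CONST_WIDE_INT or CONST_DOUBLE paths
   above.  */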
133
134 /* Test whether VAL is equal to the most significant bit of mode MODE
135 (after masking with the mode mask of MODE). Returns false if the
136 precision of MODE is too large to handle. */
137
138 bool
139 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
140 {
141 unsigned int width;
142
143 if (GET_MODE_CLASS (mode) != MODE_INT)
144 return false;
145
146 width = GET_MODE_PRECISION (mode);
147 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
148 return false;
149
150 val &= GET_MODE_MASK (mode);
151 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
152 }
153
154 /* Test whether the most significant bit of mode MODE is set in VAL.
155 Returns false if the precision of MODE is too large to handle. */
156 bool
157 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
158 {
159 unsigned int width;
160
161 if (GET_MODE_CLASS (mode) != MODE_INT)
162 return false;
163
164 width = GET_MODE_PRECISION (mode);
165 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
166 return false;
167
168 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
169 return val != 0;
170 }
171
172 /* Test whether the most significant bit of mode MODE is clear in VAL.
173 Returns false if the precision of MODE is too large to handle. */
174 bool
175 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
176 {
177 unsigned int width;
178
179 if (GET_MODE_CLASS (mode) != MODE_INT)
180 return false;
181
182 width = GET_MODE_PRECISION (mode);
183 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
184 return false;
185
186 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
187 return val == 0;
188 }
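
/* Editorial illustration (not part of the original file): the three helpers
   above differ only in how they compare against the sign bit, e.g. for
   QImode (sign bit 0x80):

     val_signbit_p (QImode, 0x80)              -> true  (exactly the bit)
     val_signbit_known_set_p (QImode, 0xff)    -> true  (bit is set)
     val_signbit_known_clear_p (QImode, 0x7f)  -> true  (bit is clear)  */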
189 \f
190 /* Make a binary operation by properly ordering the operands and
191 seeing if the expression folds. */
192
193 rtx
194 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
195 rtx op1)
196 {
197 rtx tem;
198
199 /* If this simplifies, do it. */
200 tem = simplify_binary_operation (code, mode, op0, op1);
201 if (tem)
202 return tem;
203
204 /* Put complex operands first and constants second if commutative. */
205 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
206 && swap_commutative_operands_p (op0, op1))
207 std::swap (op0, op1);
208
209 return gen_rtx_fmt_ee (code, mode, op0, op1);
210 }
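
/* Editorial sketch (not part of the original file, not compiled): a typical
   use of simplify_gen_binary.  When the operands fold, a constant comes
   back; otherwise a canonically ordered PLUS rtx is built.  */
#if 0
static rtx
example_add_one (rtx x)
{
  /* (plus:SI (const_int 2) (const_int 3)) folds to (const_int 5);
     (plus:SI (reg) (const_int 1)) is returned as a PLUS with the register
     first, since constants are ordered second for commutative codes.  */
  return simplify_gen_binary (PLUS, SImode, x, const1_rtx);
}
#endif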
211 \f
212 /* If X is a MEM referencing the constant pool, return the real value.
213 Otherwise return X. */
214 rtx
215 avoid_constant_pool_reference (rtx x)
216 {
217 rtx c, tmp, addr;
218 machine_mode cmode;
219 HOST_WIDE_INT offset = 0;
220
221 switch (GET_CODE (x))
222 {
223 case MEM:
224 break;
225
226 case FLOAT_EXTEND:
227 /* Handle float extensions of constant pool references. */
228 tmp = XEXP (x, 0);
229 c = avoid_constant_pool_reference (tmp);
230 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
231 {
232 REAL_VALUE_TYPE d;
233
234 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
235 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
236 }
237 return x;
238
239 default:
240 return x;
241 }
242
243 if (GET_MODE (x) == BLKmode)
244 return x;
245
246 addr = XEXP (x, 0);
247
248 /* Call target hook to avoid the effects of -fpic etc.... */
249 addr = targetm.delegitimize_address (addr);
250
251 /* Split the address into a base and integer offset. */
252 if (GET_CODE (addr) == CONST
253 && GET_CODE (XEXP (addr, 0)) == PLUS
254 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
255 {
256 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
257 addr = XEXP (XEXP (addr, 0), 0);
258 }
259
260 if (GET_CODE (addr) == LO_SUM)
261 addr = XEXP (addr, 1);
262
263 /* If this is a constant pool reference, we can turn it into its
264 constant and hope that simplifications happen. */
265 if (GET_CODE (addr) == SYMBOL_REF
266 && CONSTANT_POOL_ADDRESS_P (addr))
267 {
268 c = get_pool_constant (addr);
269 cmode = get_pool_mode (addr);
270
271 /* If we're accessing the constant in a different mode than it was
272 originally stored, attempt to fix that up via subreg simplifications.
273 If that fails we have no choice but to return the original memory. */
274 if ((offset != 0 || cmode != GET_MODE (x))
275 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
276 {
277 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
278 if (tem && CONSTANT_P (tem))
279 return tem;
280 }
281 else
282 return c;
283 }
284
285 return x;
286 }
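
/* Editorial sketch (not part of the original file, not compiled): callers
   typically filter operands through avoid_constant_pool_reference before
   constant folding, much as simplify_unary_operation does below.  */
#if 0
static rtx
example_fold_operand (enum rtx_code code, machine_mode mode,
                      rtx op, machine_mode op_mode)
{
  /* If OP is a MEM into the constant pool, TRUE_OP becomes the pooled
     constant itself, which the constant folders can handle.  */
  rtx true_op = avoid_constant_pool_reference (op);
  return simplify_const_unary_operation (code, mode, true_op, op_mode);
}
#endif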
287 \f
288 /* Simplify a MEM based on its attributes. This is the default
289 delegitimize_address target hook, and it's recommended that every
290 overrider call it. */
291
292 rtx
293 delegitimize_mem_from_attrs (rtx x)
294 {
295 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
296 use their base addresses as equivalent. */
297 if (MEM_P (x)
298 && MEM_EXPR (x)
299 && MEM_OFFSET_KNOWN_P (x))
300 {
301 tree decl = MEM_EXPR (x);
302 machine_mode mode = GET_MODE (x);
303 HOST_WIDE_INT offset = 0;
304
305 switch (TREE_CODE (decl))
306 {
307 default:
308 decl = NULL;
309 break;
310
311 case VAR_DECL:
312 break;
313
314 case ARRAY_REF:
315 case ARRAY_RANGE_REF:
316 case COMPONENT_REF:
317 case BIT_FIELD_REF:
318 case REALPART_EXPR:
319 case IMAGPART_EXPR:
320 case VIEW_CONVERT_EXPR:
321 {
322 HOST_WIDE_INT bitsize, bitpos;
323 tree toffset;
324 int unsignedp, volatilep = 0;
325
326 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
327 &mode, &unsignedp, &volatilep, false);
328 if (bitsize != GET_MODE_BITSIZE (mode)
329 || (bitpos % BITS_PER_UNIT)
330 || (toffset && !tree_fits_shwi_p (toffset)))
331 decl = NULL;
332 else
333 {
334 offset += bitpos / BITS_PER_UNIT;
335 if (toffset)
336 offset += tree_to_shwi (toffset);
337 }
338 break;
339 }
340 }
341
342 if (decl
343 && mode == GET_MODE (x)
344 && TREE_CODE (decl) == VAR_DECL
345 && (TREE_STATIC (decl)
346 || DECL_THREAD_LOCAL_P (decl))
347 && DECL_RTL_SET_P (decl)
348 && MEM_P (DECL_RTL (decl)))
349 {
350 rtx newx;
351
352 offset += MEM_OFFSET (x);
353
354 newx = DECL_RTL (decl);
355
356 if (MEM_P (newx))
357 {
358 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
359
 360              /* Avoid creating a new MEM needlessly if we already had
 361                 the same address.  That is the case if there is no OFFSET
 362                 and the old address X is identical to NEWX, or if X is of
 363                 the form (plus NEWX OFFSET), or NEWX is of the form
 364                 (plus Y (const_int Z)) and X is that with the offset
 365                 added: (plus Y (const_int Z+OFFSET)).  */
366 if (!((offset == 0
367 || (GET_CODE (o) == PLUS
368 && GET_CODE (XEXP (o, 1)) == CONST_INT
369 && (offset == INTVAL (XEXP (o, 1))
370 || (GET_CODE (n) == PLUS
371 && GET_CODE (XEXP (n, 1)) == CONST_INT
372 && (INTVAL (XEXP (n, 1)) + offset
373 == INTVAL (XEXP (o, 1)))
374 && (n = XEXP (n, 0))))
375 && (o = XEXP (o, 0))))
376 && rtx_equal_p (o, n)))
377 x = adjust_address_nv (newx, mode, offset);
378 }
379 else if (GET_MODE (x) == GET_MODE (newx)
380 && offset == 0)
381 x = newx;
382 }
383 }
384
385 return x;
386 }
387 \f
388 /* Make a unary operation by first seeing if it folds and otherwise making
389 the specified operation. */
390
391 rtx
392 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
393 machine_mode op_mode)
394 {
395 rtx tem;
396
397 /* If this simplifies, use it. */
398 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
399 return tem;
400
401 return gen_rtx_fmt_e (code, mode, op);
402 }
403
404 /* Likewise for ternary operations. */
405
406 rtx
407 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
408 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
409 {
410 rtx tem;
411
412 /* If this simplifies, use it. */
413 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
414 op0, op1, op2)))
415 return tem;
416
417 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
418 }
419
420 /* Likewise, for relational operations.
421 CMP_MODE specifies mode comparison is done in. */
422
423 rtx
424 simplify_gen_relational (enum rtx_code code, machine_mode mode,
425 machine_mode cmp_mode, rtx op0, rtx op1)
426 {
427 rtx tem;
428
429 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
430 op0, op1)))
431 return tem;
432
433 return gen_rtx_fmt_ee (code, mode, op0, op1);
434 }
435 \f
436 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
437 and simplify the result. If FN is non-NULL, call this callback on each
438 X, if it returns non-NULL, replace X with its return value and simplify the
439 result. */
440
441 rtx
442 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
443 rtx (*fn) (rtx, const_rtx, void *), void *data)
444 {
445 enum rtx_code code = GET_CODE (x);
446 machine_mode mode = GET_MODE (x);
447 machine_mode op_mode;
448 const char *fmt;
449 rtx op0, op1, op2, newx, op;
450 rtvec vec, newvec;
451 int i, j;
452
453 if (__builtin_expect (fn != NULL, 0))
454 {
455 newx = fn (x, old_rtx, data);
456 if (newx)
457 return newx;
458 }
459 else if (rtx_equal_p (x, old_rtx))
460 return copy_rtx ((rtx) data);
461
462 switch (GET_RTX_CLASS (code))
463 {
464 case RTX_UNARY:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 if (op0 == XEXP (x, 0))
469 return x;
470 return simplify_gen_unary (code, mode, op0, op_mode);
471
472 case RTX_BIN_ARITH:
473 case RTX_COMM_ARITH:
474 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
475 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
476 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
477 return x;
478 return simplify_gen_binary (code, mode, op0, op1);
479
480 case RTX_COMPARE:
481 case RTX_COMM_COMPARE:
482 op0 = XEXP (x, 0);
483 op1 = XEXP (x, 1);
484 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
487 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
488 return x;
489 return simplify_gen_relational (code, mode, op_mode, op0, op1);
490
491 case RTX_TERNARY:
492 case RTX_BITFIELD_OPS:
493 op0 = XEXP (x, 0);
494 op_mode = GET_MODE (op0);
495 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
496 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
497 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
498 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
499 return x;
500 if (op_mode == VOIDmode)
501 op_mode = GET_MODE (op0);
502 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
503
504 case RTX_EXTRA:
505 if (code == SUBREG)
506 {
507 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
508 if (op0 == SUBREG_REG (x))
509 return x;
510 op0 = simplify_gen_subreg (GET_MODE (x), op0,
511 GET_MODE (SUBREG_REG (x)),
512 SUBREG_BYTE (x));
513 return op0 ? op0 : x;
514 }
515 break;
516
517 case RTX_OBJ:
518 if (code == MEM)
519 {
520 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
521 if (op0 == XEXP (x, 0))
522 return x;
523 return replace_equiv_address_nv (x, op0);
524 }
525 else if (code == LO_SUM)
526 {
527 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
528 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
529
530 /* (lo_sum (high x) y) -> y where x and y have the same base. */
531 if (GET_CODE (op0) == HIGH)
532 {
533 rtx base0, base1, offset0, offset1;
534 split_const (XEXP (op0, 0), &base0, &offset0);
535 split_const (op1, &base1, &offset1);
536 if (rtx_equal_p (base0, base1))
537 return op1;
538 }
539
540 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
541 return x;
542 return gen_rtx_LO_SUM (mode, op0, op1);
543 }
544 break;
545
546 default:
547 break;
548 }
549
550 newx = x;
551 fmt = GET_RTX_FORMAT (code);
552 for (i = 0; fmt[i]; i++)
553 switch (fmt[i])
554 {
555 case 'E':
556 vec = XVEC (x, i);
557 newvec = XVEC (newx, i);
558 for (j = 0; j < GET_NUM_ELEM (vec); j++)
559 {
560 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
561 old_rtx, fn, data);
562 if (op != RTVEC_ELT (vec, j))
563 {
564 if (newvec == vec)
565 {
566 newvec = shallow_copy_rtvec (vec);
567 if (x == newx)
568 newx = shallow_copy_rtx (x);
569 XVEC (newx, i) = newvec;
570 }
571 RTVEC_ELT (newvec, j) = op;
572 }
573 }
574 break;
575
576 case 'e':
577 if (XEXP (x, i))
578 {
579 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
580 if (op != XEXP (x, i))
581 {
582 if (x == newx)
583 newx = shallow_copy_rtx (x);
584 XEXP (newx, i) = op;
585 }
586 }
587 break;
588 }
589 return newx;
590 }
591
592 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
593 resulting RTX. Return a new RTX which is as simplified as possible. */
594
595 rtx
596 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
597 {
598 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
599 }
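
/* Editorial sketch (not part of the original file, not compiled): using the
   two entry points above.  The helper name is hypothetical.  */
#if 0
static rtx
example_substitute (rtx expr, rtx reg)
{
  /* Replace every occurrence of REG in EXPR with (const_int 4) and let the
     simplifiers fold the result, e.g. if EXPR is
     (plus:SI (reg) (const_int 1)) the result is (const_int 5).  */
  return simplify_replace_rtx (expr, reg, GEN_INT (4));
}
#endif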
600 \f
601 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
602 Only handle cases where the truncated value is inherently an rvalue.
603
604 RTL provides two ways of truncating a value:
605
606 1. a lowpart subreg. This form is only a truncation when both
607 the outer and inner modes (here MODE and OP_MODE respectively)
608 are scalar integers, and only then when the subreg is used as
609 an rvalue.
610
611 It is only valid to form such truncating subregs if the
612 truncation requires no action by the target. The onus for
613 proving this is on the creator of the subreg -- e.g. the
614 caller to simplify_subreg or simplify_gen_subreg -- and typically
615 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
616
617 2. a TRUNCATE. This form handles both scalar and compound integers.
618
619 The first form is preferred where valid. However, the TRUNCATE
620 handling in simplify_unary_operation turns the second form into the
621 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
622 so it is generally safe to form rvalue truncations using:
623
624 simplify_gen_unary (TRUNCATE, ...)
625
626 and leave simplify_unary_operation to work out which representation
627 should be used.
628
629 Because of the proof requirements on (1), simplify_truncation must
630 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
631 regardless of whether the outer truncation came from a SUBREG or a
632 TRUNCATE. For example, if the caller has proven that an SImode
633 truncation of:
634
635 (and:DI X Y)
636
637 is a no-op and can be represented as a subreg, it does not follow
638 that SImode truncations of X and Y are also no-ops. On a target
639 like 64-bit MIPS that requires SImode values to be stored in
640 sign-extended form, an SImode truncation of:
641
642 (and:DI (reg:DI X) (const_int 63))
643
644 is trivially a no-op because only the lower 6 bits can be set.
645 However, X is still an arbitrary 64-bit number and so we cannot
646 assume that truncating it too is a no-op. */
647
648 static rtx
649 simplify_truncation (machine_mode mode, rtx op,
650 machine_mode op_mode)
651 {
652 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
653 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
654 gcc_assert (precision <= op_precision);
655
656 /* Optimize truncations of zero and sign extended values. */
657 if (GET_CODE (op) == ZERO_EXTEND
658 || GET_CODE (op) == SIGN_EXTEND)
659 {
660 /* There are three possibilities. If MODE is the same as the
661 origmode, we can omit both the extension and the subreg.
662 If MODE is not larger than the origmode, we can apply the
663 truncation without the extension. Finally, if the outermode
664 is larger than the origmode, we can just extend to the appropriate
665 mode. */
666 machine_mode origmode = GET_MODE (XEXP (op, 0));
667 if (mode == origmode)
668 return XEXP (op, 0);
669 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
670 return simplify_gen_unary (TRUNCATE, mode,
671 XEXP (op, 0), origmode);
672 else
673 return simplify_gen_unary (GET_CODE (op), mode,
674 XEXP (op, 0), origmode);
675 }
676
677 /* If the machine can perform operations in the truncated mode, distribute
678 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
679 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
680 if (1
681 #ifdef WORD_REGISTER_OPERATIONS
682 && precision >= BITS_PER_WORD
683 #endif
684 && (GET_CODE (op) == PLUS
685 || GET_CODE (op) == MINUS
686 || GET_CODE (op) == MULT))
687 {
688 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
689 if (op0)
690 {
691 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
692 if (op1)
693 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
694 }
695 }
696
 697   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 698      (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
 699      the outer subreg is effectively a truncation to the original mode.  */
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 /* Ensure that OP_MODE is at least twice as wide as MODE
703 to avoid the possibility that an outer LSHIFTRT shifts by more
704 than the sign extension's sign_bit_copies and introduces zeros
705 into the high bits of the result. */
706 && 2 * precision <= op_precision
707 && CONST_INT_P (XEXP (op, 1))
708 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
709 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
710 && UINTVAL (XEXP (op, 1)) < precision)
711 return simplify_gen_binary (ASHIFTRT, mode,
712 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
713
 714   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 715      (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
 716      the outer subreg is effectively a truncation to the original mode.  */
717 if ((GET_CODE (op) == LSHIFTRT
718 || GET_CODE (op) == ASHIFTRT)
719 && CONST_INT_P (XEXP (op, 1))
720 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
721 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
722 && UINTVAL (XEXP (op, 1)) < precision)
723 return simplify_gen_binary (LSHIFTRT, mode,
724 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
725
 726   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 727      (ashift:QI (x:QI) C), where C is a suitable small constant and
 728      the outer subreg is effectively a truncation to the original mode.  */
729 if (GET_CODE (op) == ASHIFT
730 && CONST_INT_P (XEXP (op, 1))
731 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
732 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
733 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
734 && UINTVAL (XEXP (op, 1)) < precision)
735 return simplify_gen_binary (ASHIFT, mode,
736 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
737
738 /* Recognize a word extraction from a multi-word subreg. */
739 if ((GET_CODE (op) == LSHIFTRT
740 || GET_CODE (op) == ASHIFTRT)
741 && SCALAR_INT_MODE_P (mode)
742 && SCALAR_INT_MODE_P (op_mode)
743 && precision >= BITS_PER_WORD
744 && 2 * precision <= op_precision
745 && CONST_INT_P (XEXP (op, 1))
746 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
747 && UINTVAL (XEXP (op, 1)) < op_precision)
748 {
749 int byte = subreg_lowpart_offset (mode, op_mode);
750 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
751 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
752 (WORDS_BIG_ENDIAN
753 ? byte - shifted_bytes
754 : byte + shifted_bytes));
755 }
756
757 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
758 and try replacing the TRUNCATE and shift with it. Don't do this
759 if the MEM has a mode-dependent address. */
760 if ((GET_CODE (op) == LSHIFTRT
761 || GET_CODE (op) == ASHIFTRT)
762 && SCALAR_INT_MODE_P (op_mode)
763 && MEM_P (XEXP (op, 0))
764 && CONST_INT_P (XEXP (op, 1))
765 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
766 && INTVAL (XEXP (op, 1)) > 0
767 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
768 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
769 MEM_ADDR_SPACE (XEXP (op, 0)))
770 && ! MEM_VOLATILE_P (XEXP (op, 0))
771 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
772 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
773 {
774 int byte = subreg_lowpart_offset (mode, op_mode);
775 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
776 return adjust_address_nv (XEXP (op, 0), mode,
777 (WORDS_BIG_ENDIAN
778 ? byte - shifted_bytes
779 : byte + shifted_bytes));
780 }
781
782 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
783 (OP:SI foo:SI) if OP is NEG or ABS. */
784 if ((GET_CODE (op) == ABS
785 || GET_CODE (op) == NEG)
786 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
787 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
788 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
789 return simplify_gen_unary (GET_CODE (op), mode,
790 XEXP (XEXP (op, 0), 0), mode);
791
792 /* (truncate:A (subreg:B (truncate:C X) 0)) is
793 (truncate:A X). */
794 if (GET_CODE (op) == SUBREG
795 && SCALAR_INT_MODE_P (mode)
796 && SCALAR_INT_MODE_P (op_mode)
797 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
798 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
799 && subreg_lowpart_p (op))
800 {
801 rtx inner = XEXP (SUBREG_REG (op), 0);
802 if (GET_MODE_PRECISION (mode)
803 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
804 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
805 else
806 /* If subreg above is paradoxical and C is narrower
807 than A, return (subreg:A (truncate:C X) 0). */
808 return simplify_gen_subreg (mode, SUBREG_REG (op),
809 GET_MODE (SUBREG_REG (op)), 0);
810 }
811
812 /* (truncate:A (truncate:B X)) is (truncate:A X). */
813 if (GET_CODE (op) == TRUNCATE)
814 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
815 GET_MODE (XEXP (op, 0)));
816
817 return NULL_RTX;
818 }
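
/* Editorial sketch (not part of the original file, not compiled): forming an
   rvalue truncation the way the comment before simplify_truncation
   recommends, and letting simplify_unary_operation pick the final
   representation (a lowpart subreg or a TRUNCATE).  */
#if 0
static rtx
example_truncate_to_si (rtx op)
{
  /* OP is assumed to be a DImode value.  */
  return simplify_gen_unary (TRUNCATE, SImode, op, DImode);
}
#endif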
819 \f
820 /* Try to simplify a unary operation CODE whose output mode is to be
821 MODE with input operand OP whose mode was originally OP_MODE.
822 Return zero if no simplification can be made. */
823 rtx
824 simplify_unary_operation (enum rtx_code code, machine_mode mode,
825 rtx op, machine_mode op_mode)
826 {
827 rtx trueop, tem;
828
829 trueop = avoid_constant_pool_reference (op);
830
831 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
832 if (tem)
833 return tem;
834
835 return simplify_unary_operation_1 (code, mode, op);
836 }
837
838 /* Perform some simplifications we can do even if the operands
839 aren't constant. */
840 static rtx
841 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
842 {
843 enum rtx_code reversed;
844 rtx temp;
845
846 switch (code)
847 {
848 case NOT:
849 /* (not (not X)) == X. */
850 if (GET_CODE (op) == NOT)
851 return XEXP (op, 0);
852
853 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
854 comparison is all ones. */
855 if (COMPARISON_P (op)
856 && (mode == BImode || STORE_FLAG_VALUE == -1)
857 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
858 return simplify_gen_relational (reversed, mode, VOIDmode,
859 XEXP (op, 0), XEXP (op, 1));
860
861 /* (not (plus X -1)) can become (neg X). */
862 if (GET_CODE (op) == PLUS
863 && XEXP (op, 1) == constm1_rtx)
864 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
865
866 /* Similarly, (not (neg X)) is (plus X -1). */
867 if (GET_CODE (op) == NEG)
868 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
869 CONSTM1_RTX (mode));
870
871 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
872 if (GET_CODE (op) == XOR
873 && CONST_INT_P (XEXP (op, 1))
874 && (temp = simplify_unary_operation (NOT, mode,
875 XEXP (op, 1), mode)) != 0)
876 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
877
878 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
879 if (GET_CODE (op) == PLUS
880 && CONST_INT_P (XEXP (op, 1))
881 && mode_signbit_p (mode, XEXP (op, 1))
882 && (temp = simplify_unary_operation (NOT, mode,
883 XEXP (op, 1), mode)) != 0)
884 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
885
886
887 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
888 operands other than 1, but that is not valid. We could do a
889 similar simplification for (not (lshiftrt C X)) where C is
890 just the sign bit, but this doesn't seem common enough to
891 bother with. */
892 if (GET_CODE (op) == ASHIFT
893 && XEXP (op, 0) == const1_rtx)
894 {
895 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
896 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
897 }
898
899 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
900 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
901 so we can perform the above simplification. */
902 if (STORE_FLAG_VALUE == -1
903 && GET_CODE (op) == ASHIFTRT
904 && CONST_INT_P (XEXP (op, 1))
905 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
906 return simplify_gen_relational (GE, mode, VOIDmode,
907 XEXP (op, 0), const0_rtx);
908
909
910 if (GET_CODE (op) == SUBREG
911 && subreg_lowpart_p (op)
912 && (GET_MODE_SIZE (GET_MODE (op))
913 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
914 && GET_CODE (SUBREG_REG (op)) == ASHIFT
915 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
916 {
917 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
918 rtx x;
919
920 x = gen_rtx_ROTATE (inner_mode,
921 simplify_gen_unary (NOT, inner_mode, const1_rtx,
922 inner_mode),
923 XEXP (SUBREG_REG (op), 1));
924 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
925 if (temp)
926 return temp;
927 }
928
929 /* Apply De Morgan's laws to reduce number of patterns for machines
930 with negating logical insns (and-not, nand, etc.). If result has
931 only one NOT, put it first, since that is how the patterns are
932 coded. */
933 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
934 {
935 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
936 machine_mode op_mode;
937
938 op_mode = GET_MODE (in1);
939 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
940
941 op_mode = GET_MODE (in2);
942 if (op_mode == VOIDmode)
943 op_mode = mode;
944 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
945
946 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
947 {
948 rtx tem = in2;
949 in2 = in1; in1 = tem;
950 }
951
952 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
953 mode, in1, in2);
954 }
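
      /* Editorial illustration (not part of the original file): the
         De Morgan rewrite above in RTL form, with the single NOT first:

           (not (and X Y))        ->  (ior (not X) (not Y))
           (not (ior X (not Y)))  ->  (and (not X) Y)  */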
955
956 /* (not (bswap x)) -> (bswap (not x)). */
957 if (GET_CODE (op) == BSWAP)
958 {
959 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
960 return simplify_gen_unary (BSWAP, mode, x, mode);
961 }
962 break;
963
964 case NEG:
965 /* (neg (neg X)) == X. */
966 if (GET_CODE (op) == NEG)
967 return XEXP (op, 0);
968
969 /* (neg (plus X 1)) can become (not X). */
970 if (GET_CODE (op) == PLUS
971 && XEXP (op, 1) == const1_rtx)
972 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
973
974 /* Similarly, (neg (not X)) is (plus X 1). */
975 if (GET_CODE (op) == NOT)
976 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
977 CONST1_RTX (mode));
978
979 /* (neg (minus X Y)) can become (minus Y X). This transformation
980 isn't safe for modes with signed zeros, since if X and Y are
981 both +0, (minus Y X) is the same as (minus X Y). If the
982 rounding mode is towards +infinity (or -infinity) then the two
983 expressions will be rounded differently. */
984 if (GET_CODE (op) == MINUS
985 && !HONOR_SIGNED_ZEROS (mode)
986 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
987 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
988
989 if (GET_CODE (op) == PLUS
990 && !HONOR_SIGNED_ZEROS (mode)
991 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
992 {
993 /* (neg (plus A C)) is simplified to (minus -C A). */
994 if (CONST_SCALAR_INT_P (XEXP (op, 1))
995 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
996 {
997 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
998 if (temp)
999 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1000 }
1001
1002 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1003 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1004 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1005 }
1006
1007 /* (neg (mult A B)) becomes (mult A (neg B)).
1008 This works even for floating-point values. */
1009 if (GET_CODE (op) == MULT
1010 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1011 {
1012 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1013 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1014 }
1015
1016 /* NEG commutes with ASHIFT since it is multiplication. Only do
1017 this if we can then eliminate the NEG (e.g., if the operand
1018 is a constant). */
1019 if (GET_CODE (op) == ASHIFT)
1020 {
1021 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1022 if (temp)
1023 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1024 }
1025
1026 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1027 C is equal to the width of MODE minus 1. */
1028 if (GET_CODE (op) == ASHIFTRT
1029 && CONST_INT_P (XEXP (op, 1))
1030 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1031 return simplify_gen_binary (LSHIFTRT, mode,
1032 XEXP (op, 0), XEXP (op, 1));
1033
1034 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1035 C is equal to the width of MODE minus 1. */
1036 if (GET_CODE (op) == LSHIFTRT
1037 && CONST_INT_P (XEXP (op, 1))
1038 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1039 return simplify_gen_binary (ASHIFTRT, mode,
1040 XEXP (op, 0), XEXP (op, 1));
1041
1042 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1043 if (GET_CODE (op) == XOR
1044 && XEXP (op, 1) == const1_rtx
1045 && nonzero_bits (XEXP (op, 0), mode) == 1)
1046 return plus_constant (mode, XEXP (op, 0), -1);
1047
1048 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1049 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1050 if (GET_CODE (op) == LT
1051 && XEXP (op, 1) == const0_rtx
1052 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1053 {
1054 machine_mode inner = GET_MODE (XEXP (op, 0));
1055 int isize = GET_MODE_PRECISION (inner);
1056 if (STORE_FLAG_VALUE == 1)
1057 {
1058 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1059 GEN_INT (isize - 1));
1060 if (mode == inner)
1061 return temp;
1062 if (GET_MODE_PRECISION (mode) > isize)
1063 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1064 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1065 }
1066 else if (STORE_FLAG_VALUE == -1)
1067 {
1068 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1069 GEN_INT (isize - 1));
1070 if (mode == inner)
1071 return temp;
1072 if (GET_MODE_PRECISION (mode) > isize)
1073 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1074 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1075 }
1076 }
1077 break;
1078
1079 case TRUNCATE:
1080 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1081 with the umulXi3_highpart patterns. */
1082 if (GET_CODE (op) == LSHIFTRT
1083 && GET_CODE (XEXP (op, 0)) == MULT)
1084 break;
1085
1086 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1087 {
1088 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1089 {
1090 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1091 if (temp)
1092 return temp;
1093 }
1094 /* We can't handle truncation to a partial integer mode here
1095 because we don't know the real bitsize of the partial
1096 integer mode. */
1097 break;
1098 }
1099
1100 if (GET_MODE (op) != VOIDmode)
1101 {
1102 temp = simplify_truncation (mode, op, GET_MODE (op));
1103 if (temp)
1104 return temp;
1105 }
1106
1107 /* If we know that the value is already truncated, we can
1108 replace the TRUNCATE with a SUBREG. */
1109 if (GET_MODE_NUNITS (mode) == 1
1110 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1111 || truncated_to_mode (mode, op)))
1112 {
1113 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1114 if (temp)
1115 return temp;
1116 }
1117
1118 /* A truncate of a comparison can be replaced with a subreg if
1119 STORE_FLAG_VALUE permits. This is like the previous test,
1120 but it works even if the comparison is done in a mode larger
1121 than HOST_BITS_PER_WIDE_INT. */
1122 if (HWI_COMPUTABLE_MODE_P (mode)
1123 && COMPARISON_P (op)
1124 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1125 {
1126 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1127 if (temp)
1128 return temp;
1129 }
1130
1131 /* A truncate of a memory is just loading the low part of the memory
1132 if we are not changing the meaning of the address. */
1133 if (GET_CODE (op) == MEM
1134 && !VECTOR_MODE_P (mode)
1135 && !MEM_VOLATILE_P (op)
1136 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1137 {
1138 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1139 if (temp)
1140 return temp;
1141 }
1142
1143 break;
1144
1145 case FLOAT_TRUNCATE:
1146 if (DECIMAL_FLOAT_MODE_P (mode))
1147 break;
1148
1149 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1150 if (GET_CODE (op) == FLOAT_EXTEND
1151 && GET_MODE (XEXP (op, 0)) == mode)
1152 return XEXP (op, 0);
1153
1154 /* (float_truncate:SF (float_truncate:DF foo:XF))
1155 = (float_truncate:SF foo:XF).
 1156          This may eliminate double rounding, so it is unsafe and is only
               done with flag_unsafe_math_optimizations.
1157
1158 (float_truncate:SF (float_extend:XF foo:DF))
1159 = (float_truncate:SF foo:DF).
1160
1161 (float_truncate:DF (float_extend:XF foo:SF))
1162 = (float_extend:DF foo:SF). */
1163 if ((GET_CODE (op) == FLOAT_TRUNCATE
1164 && flag_unsafe_math_optimizations)
1165 || GET_CODE (op) == FLOAT_EXTEND)
1166 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1167 0)))
1168 > GET_MODE_SIZE (mode)
1169 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1170 mode,
1171 XEXP (op, 0), mode);
1172
1173 /* (float_truncate (float x)) is (float x) */
1174 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1175 && (flag_unsafe_math_optimizations
1176 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1177 && ((unsigned)significand_size (GET_MODE (op))
1178 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1179 - num_sign_bit_copies (XEXP (op, 0),
1180 GET_MODE (XEXP (op, 0))))))))
1181 return simplify_gen_unary (GET_CODE (op), mode,
1182 XEXP (op, 0),
1183 GET_MODE (XEXP (op, 0)));
1184
1185 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1186 (OP:SF foo:SF) if OP is NEG or ABS. */
1187 if ((GET_CODE (op) == ABS
1188 || GET_CODE (op) == NEG)
1189 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1190 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1191 return simplify_gen_unary (GET_CODE (op), mode,
1192 XEXP (XEXP (op, 0), 0), mode);
1193
1194 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1195 is (float_truncate:SF x). */
1196 if (GET_CODE (op) == SUBREG
1197 && subreg_lowpart_p (op)
1198 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1199 return SUBREG_REG (op);
1200 break;
1201
1202 case FLOAT_EXTEND:
1203 if (DECIMAL_FLOAT_MODE_P (mode))
1204 break;
1205
1206 /* (float_extend (float_extend x)) is (float_extend x)
1207
1208 (float_extend (float x)) is (float x) assuming that double
1209 rounding can't happen.
1210 */
1211 if (GET_CODE (op) == FLOAT_EXTEND
1212 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1213 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1214 && ((unsigned)significand_size (GET_MODE (op))
1215 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1216 - num_sign_bit_copies (XEXP (op, 0),
1217 GET_MODE (XEXP (op, 0)))))))
1218 return simplify_gen_unary (GET_CODE (op), mode,
1219 XEXP (op, 0),
1220 GET_MODE (XEXP (op, 0)));
1221
1222 break;
1223
1224 case ABS:
1225 /* (abs (neg <foo>)) -> (abs <foo>) */
1226 if (GET_CODE (op) == NEG)
1227 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1228 GET_MODE (XEXP (op, 0)));
1229
1230 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1231 do nothing. */
1232 if (GET_MODE (op) == VOIDmode)
1233 break;
1234
1235 /* If operand is something known to be positive, ignore the ABS. */
1236 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1237 || val_signbit_known_clear_p (GET_MODE (op),
1238 nonzero_bits (op, GET_MODE (op))))
1239 return op;
1240
1241 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1242 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1243 return gen_rtx_NEG (mode, op);
1244
1245 break;
1246
1247 case FFS:
1248 /* (ffs (*_extend <X>)) = (ffs <X>) */
1249 if (GET_CODE (op) == SIGN_EXTEND
1250 || GET_CODE (op) == ZERO_EXTEND)
1251 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1252 GET_MODE (XEXP (op, 0)));
1253 break;
1254
1255 case POPCOUNT:
1256 switch (GET_CODE (op))
1257 {
1258 case BSWAP:
1259 case ZERO_EXTEND:
1260 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1261 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1262 GET_MODE (XEXP (op, 0)));
1263
1264 case ROTATE:
1265 case ROTATERT:
1266 /* Rotations don't affect popcount. */
1267 if (!side_effects_p (XEXP (op, 1)))
1268 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1269 GET_MODE (XEXP (op, 0)));
1270 break;
1271
1272 default:
1273 break;
1274 }
1275 break;
1276
1277 case PARITY:
1278 switch (GET_CODE (op))
1279 {
1280 case NOT:
1281 case BSWAP:
1282 case ZERO_EXTEND:
1283 case SIGN_EXTEND:
1284 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1285 GET_MODE (XEXP (op, 0)));
1286
1287 case ROTATE:
1288 case ROTATERT:
1289 /* Rotations don't affect parity. */
1290 if (!side_effects_p (XEXP (op, 1)))
1291 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1292 GET_MODE (XEXP (op, 0)));
1293 break;
1294
1295 default:
1296 break;
1297 }
1298 break;
1299
1300 case BSWAP:
1301 /* (bswap (bswap x)) -> x. */
1302 if (GET_CODE (op) == BSWAP)
1303 return XEXP (op, 0);
1304 break;
1305
1306 case FLOAT:
1307 /* (float (sign_extend <X>)) = (float <X>). */
1308 if (GET_CODE (op) == SIGN_EXTEND)
1309 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1310 GET_MODE (XEXP (op, 0)));
1311 break;
1312
1313 case SIGN_EXTEND:
1314 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1315 becomes just the MINUS if its mode is MODE. This allows
1316 folding switch statements on machines using casesi (such as
1317 the VAX). */
1318 if (GET_CODE (op) == TRUNCATE
1319 && GET_MODE (XEXP (op, 0)) == mode
1320 && GET_CODE (XEXP (op, 0)) == MINUS
1321 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1322 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1323 return XEXP (op, 0);
1324
1325 /* Extending a widening multiplication should be canonicalized to
1326 a wider widening multiplication. */
1327 if (GET_CODE (op) == MULT)
1328 {
1329 rtx lhs = XEXP (op, 0);
1330 rtx rhs = XEXP (op, 1);
1331 enum rtx_code lcode = GET_CODE (lhs);
1332 enum rtx_code rcode = GET_CODE (rhs);
1333
1334 /* Widening multiplies usually extend both operands, but sometimes
1335 they use a shift to extract a portion of a register. */
1336 if ((lcode == SIGN_EXTEND
1337 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1338 && (rcode == SIGN_EXTEND
1339 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1340 {
1341 machine_mode lmode = GET_MODE (lhs);
1342 machine_mode rmode = GET_MODE (rhs);
1343 int bits;
1344
1345 if (lcode == ASHIFTRT)
1346 /* Number of bits not shifted off the end. */
1347 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1348 else /* lcode == SIGN_EXTEND */
1349 /* Size of inner mode. */
1350 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1351
1352 if (rcode == ASHIFTRT)
1353 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1354 else /* rcode == SIGN_EXTEND */
1355 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1356
 1357             /* We can only widen multiplies if the result is mathematically
 1358                equivalent, i.e. if overflow was impossible.  */
1359 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1360 return simplify_gen_binary
1361 (MULT, mode,
1362 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1363 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1364 }
1365 }
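
      /* Editorial illustration (not part of the original file): the
         canonicalization above in RTL form.  A HImode x HImode widening
         multiply that is sign-extended again becomes a single wider widening
         multiply, because a 16-bit by 16-bit product fits in 32 bits and so
         cannot overflow SImode, let alone DImode:

           (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
                                    (sign_extend:SI y:HI)))
             -> (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI))  */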
1366
1367 /* Check for a sign extension of a subreg of a promoted
1368 variable, where the promotion is sign-extended, and the
1369 target mode is the same as the variable's promotion. */
1370 if (GET_CODE (op) == SUBREG
1371 && SUBREG_PROMOTED_VAR_P (op)
1372 && SUBREG_PROMOTED_SIGNED_P (op)
1373 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1374 {
1375 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1376 if (temp)
1377 return temp;
1378 }
1379
1380 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1381 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1382 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1383 {
1384 gcc_assert (GET_MODE_PRECISION (mode)
1385 > GET_MODE_PRECISION (GET_MODE (op)));
1386 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1387 GET_MODE (XEXP (op, 0)));
1388 }
1389
1390 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1391 is (sign_extend:M (subreg:O <X>)) if there is mode with
1392 GET_MODE_BITSIZE (N) - I bits.
1393 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1394 is similarly (zero_extend:M (subreg:O <X>)). */
1395 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1396 && GET_CODE (XEXP (op, 0)) == ASHIFT
1397 && CONST_INT_P (XEXP (op, 1))
1398 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1399 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1400 {
1401 machine_mode tmode
1402 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1403 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1404 gcc_assert (GET_MODE_BITSIZE (mode)
1405 > GET_MODE_BITSIZE (GET_MODE (op)));
1406 if (tmode != BLKmode)
1407 {
1408 rtx inner =
1409 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1410 if (inner)
1411 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1412 ? SIGN_EXTEND : ZERO_EXTEND,
1413 mode, inner, tmode);
1414 }
1415 }
1416
1417 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1418 /* As we do not know which address space the pointer is referring to,
1419 we can do this only if the target does not support different pointer
1420 or address modes depending on the address space. */
1421 if (target_default_pointer_address_modes_p ()
1422 && ! POINTERS_EXTEND_UNSIGNED
1423 && mode == Pmode && GET_MODE (op) == ptr_mode
1424 && (CONSTANT_P (op)
1425 || (GET_CODE (op) == SUBREG
1426 && REG_P (SUBREG_REG (op))
1427 && REG_POINTER (SUBREG_REG (op))
1428 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1429 return convert_memory_address (Pmode, op);
1430 #endif
1431 break;
1432
1433 case ZERO_EXTEND:
1434 /* Check for a zero extension of a subreg of a promoted
1435 variable, where the promotion is zero-extended, and the
1436 target mode is the same as the variable's promotion. */
1437 if (GET_CODE (op) == SUBREG
1438 && SUBREG_PROMOTED_VAR_P (op)
1439 && SUBREG_PROMOTED_UNSIGNED_P (op)
1440 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1441 {
1442 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1443 if (temp)
1444 return temp;
1445 }
1446
1447 /* Extending a widening multiplication should be canonicalized to
1448 a wider widening multiplication. */
1449 if (GET_CODE (op) == MULT)
1450 {
1451 rtx lhs = XEXP (op, 0);
1452 rtx rhs = XEXP (op, 1);
1453 enum rtx_code lcode = GET_CODE (lhs);
1454 enum rtx_code rcode = GET_CODE (rhs);
1455
1456 /* Widening multiplies usually extend both operands, but sometimes
1457 they use a shift to extract a portion of a register. */
1458 if ((lcode == ZERO_EXTEND
1459 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1460 && (rcode == ZERO_EXTEND
1461 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1462 {
1463 machine_mode lmode = GET_MODE (lhs);
1464 machine_mode rmode = GET_MODE (rhs);
1465 int bits;
1466
1467 if (lcode == LSHIFTRT)
1468 /* Number of bits not shifted off the end. */
1469 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1470 else /* lcode == ZERO_EXTEND */
1471 /* Size of inner mode. */
1472 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1473
1474 if (rcode == LSHIFTRT)
1475 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1476 else /* rcode == ZERO_EXTEND */
1477 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1478
 1479             /* We can only widen multiplies if the result is mathematically
 1480                equivalent, i.e. if overflow was impossible.  */
1481 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1482 return simplify_gen_binary
1483 (MULT, mode,
1484 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1485 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1486 }
1487 }
1488
1489 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1490 if (GET_CODE (op) == ZERO_EXTEND)
1491 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1492 GET_MODE (XEXP (op, 0)));
1493
1494 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1495 is (zero_extend:M (subreg:O <X>)) if there is mode with
1496 GET_MODE_PRECISION (N) - I bits. */
1497 if (GET_CODE (op) == LSHIFTRT
1498 && GET_CODE (XEXP (op, 0)) == ASHIFT
1499 && CONST_INT_P (XEXP (op, 1))
1500 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1501 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1502 {
1503 machine_mode tmode
1504 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1505 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1506 if (tmode != BLKmode)
1507 {
1508 rtx inner =
1509 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1510 if (inner)
1511 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1512 }
1513 }
1514
1515 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1516 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1517 of mode N. E.g.
1518 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1519 (and:SI (reg:SI) (const_int 63)). */
1520 if (GET_CODE (op) == SUBREG
1521 && GET_MODE_PRECISION (GET_MODE (op))
1522 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1523 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1524 <= HOST_BITS_PER_WIDE_INT
1525 && GET_MODE_PRECISION (mode)
1526 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1527 && subreg_lowpart_p (op)
1528 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1529 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1530 {
1531 if (GET_MODE_PRECISION (mode)
1532 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1533 return SUBREG_REG (op);
1534 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1535 GET_MODE (SUBREG_REG (op)));
1536 }
1537
1538 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1539 /* As we do not know which address space the pointer is referring to,
1540 we can do this only if the target does not support different pointer
1541 or address modes depending on the address space. */
1542 if (target_default_pointer_address_modes_p ()
1543 && POINTERS_EXTEND_UNSIGNED > 0
1544 && mode == Pmode && GET_MODE (op) == ptr_mode
1545 && (CONSTANT_P (op)
1546 || (GET_CODE (op) == SUBREG
1547 && REG_P (SUBREG_REG (op))
1548 && REG_POINTER (SUBREG_REG (op))
1549 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1550 return convert_memory_address (Pmode, op);
1551 #endif
1552 break;
1553
1554 default:
1555 break;
1556 }
1557
1558 return 0;
1559 }
1560
1561 /* Try to compute the value of a unary operation CODE whose output mode is to
1562 be MODE with input operand OP whose mode was originally OP_MODE.
1563 Return zero if the value cannot be computed. */
1564 rtx
1565 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1566 rtx op, machine_mode op_mode)
1567 {
1568 unsigned int width = GET_MODE_PRECISION (mode);
1569
1570 if (code == VEC_DUPLICATE)
1571 {
1572 gcc_assert (VECTOR_MODE_P (mode));
1573 if (GET_MODE (op) != VOIDmode)
1574 {
1575 if (!VECTOR_MODE_P (GET_MODE (op)))
1576 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1577 else
1578 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1579 (GET_MODE (op)));
1580 }
1581 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1582 || GET_CODE (op) == CONST_VECTOR)
1583 {
1584 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1585 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1586 rtvec v = rtvec_alloc (n_elts);
1587 unsigned int i;
1588
1589 if (GET_CODE (op) != CONST_VECTOR)
1590 for (i = 0; i < n_elts; i++)
1591 RTVEC_ELT (v, i) = op;
1592 else
1593 {
1594 machine_mode inmode = GET_MODE (op);
1595 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1596 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1597
1598 gcc_assert (in_n_elts < n_elts);
1599 gcc_assert ((n_elts % in_n_elts) == 0);
1600 for (i = 0; i < n_elts; i++)
1601 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1602 }
1603 return gen_rtx_CONST_VECTOR (mode, v);
1604 }
1605 }
1606
1607 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1608 {
1609 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1610 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1611 machine_mode opmode = GET_MODE (op);
1612 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1613 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1614 rtvec v = rtvec_alloc (n_elts);
1615 unsigned int i;
1616
1617 gcc_assert (op_n_elts == n_elts);
1618 for (i = 0; i < n_elts; i++)
1619 {
1620 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1621 CONST_VECTOR_ELT (op, i),
1622 GET_MODE_INNER (opmode));
1623 if (!x)
1624 return 0;
1625 RTVEC_ELT (v, i) = x;
1626 }
1627 return gen_rtx_CONST_VECTOR (mode, v);
1628 }
1629
1630 /* The order of these tests is critical so that, for example, we don't
1631 check the wrong mode (input vs. output) for a conversion operation,
1632 such as FIX. At some point, this should be simplified. */
1633
1634 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1635 {
1636 REAL_VALUE_TYPE d;
1637
1638 if (op_mode == VOIDmode)
1639 {
 1640          /* CONST_INTs have VOIDmode as their mode.  We assume that all
 1641             the bits of the constant are significant, though this is
 1642             a dangerous assumption, as CONST_INTs are often
 1643             created and used with garbage in the bits outside of the
 1644             precision of the implied mode of the const_int.  */
1645 op_mode = MAX_MODE_INT;
1646 }
1647
1648 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1649 d = real_value_truncate (mode, d);
1650 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1651 }
1652 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1653 {
1654 REAL_VALUE_TYPE d;
1655
1656 if (op_mode == VOIDmode)
1657 {
 1658          /* CONST_INTs have VOIDmode as their mode.  We assume that all
 1659             the bits of the constant are significant, though this is
 1660             a dangerous assumption, as CONST_INTs are often
 1661             created and used with garbage in the bits outside of the
 1662             precision of the implied mode of the const_int.  */
1663 op_mode = MAX_MODE_INT;
1664 }
1665
1666 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1667 d = real_value_truncate (mode, d);
1668 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1669 }
1670
1671 if (CONST_SCALAR_INT_P (op) && width > 0)
1672 {
1673 wide_int result;
1674 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1675 rtx_mode_t op0 = std::make_pair (op, imode);
1676 int int_value;
1677
1678 #if TARGET_SUPPORTS_WIDE_INT == 0
 1679       /* This assert keeps the simplification from producing a result
 1680          that cannot be represented in a CONST_DOUBLE, but a lot of
 1681          upstream callers expect that this function never fails to
 1682          simplify something, so if this condition were added to the test
 1683          above, the code would die later anyway.  If this assert
 1684          fires, you just need to make the port support wide int.  */
1685 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1686 #endif
1687
1688 switch (code)
1689 {
1690 case NOT:
1691 result = wi::bit_not (op0);
1692 break;
1693
1694 case NEG:
1695 result = wi::neg (op0);
1696 break;
1697
1698 case ABS:
1699 result = wi::abs (op0);
1700 break;
1701
1702 case FFS:
1703 result = wi::shwi (wi::ffs (op0), mode);
1704 break;
1705
1706 case CLZ:
1707 if (wi::ne_p (op0, 0))
1708 int_value = wi::clz (op0);
1709 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1710 int_value = GET_MODE_PRECISION (mode);
1711 result = wi::shwi (int_value, mode);
1712 break;
1713
1714 case CLRSB:
1715 result = wi::shwi (wi::clrsb (op0), mode);
1716 break;
1717
1718 case CTZ:
1719 if (wi::ne_p (op0, 0))
1720 int_value = wi::ctz (op0);
1721 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1722 int_value = GET_MODE_PRECISION (mode);
1723 result = wi::shwi (int_value, mode);
1724 break;
1725
1726 case POPCOUNT:
1727 result = wi::shwi (wi::popcount (op0), mode);
1728 break;
1729
1730 case PARITY:
1731 result = wi::shwi (wi::parity (op0), mode);
1732 break;
1733
1734 case BSWAP:
1735 result = wide_int (op0).bswap ();
1736 break;
1737
1738 case TRUNCATE:
1739 case ZERO_EXTEND:
1740 result = wide_int::from (op0, width, UNSIGNED);
1741 break;
1742
1743 case SIGN_EXTEND:
1744 result = wide_int::from (op0, width, SIGNED);
1745 break;
1746
1747 case SQRT:
1748 default:
1749 return 0;
1750 }
1751
1752 return immed_wide_int_const (result, mode);
1753 }
1754
1755 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1756 && SCALAR_FLOAT_MODE_P (mode)
1757 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1758 {
1759 REAL_VALUE_TYPE d;
1760 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1761
1762 switch (code)
1763 {
1764 case SQRT:
1765 return 0;
1766 case ABS:
1767 d = real_value_abs (&d);
1768 break;
1769 case NEG:
1770 d = real_value_negate (&d);
1771 break;
1772 case FLOAT_TRUNCATE:
1773 d = real_value_truncate (mode, d);
1774 break;
1775 case FLOAT_EXTEND:
1776 /* All this does is change the mode, unless changing
1777 mode class. */
1778 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1779 real_convert (&d, mode, &d);
1780 break;
1781 case FIX:
1782 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1783 break;
1784 case NOT:
1785 {
1786 long tmp[4];
1787 int i;
1788
1789 real_to_target (tmp, &d, GET_MODE (op));
1790 for (i = 0; i < 4; i++)
1791 tmp[i] = ~tmp[i];
1792 real_from_target (&d, tmp, mode);
1793 break;
1794 }
1795 default:
1796 gcc_unreachable ();
1797 }
1798 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1799 }
1800 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1801 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1802 && GET_MODE_CLASS (mode) == MODE_INT
1803 && width > 0)
1804 {
1805 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1806 operators are intentionally left unspecified (to ease implementation
1807 by target backends), for consistency, this routine implements the
1808 same semantics for constant folding as used by the middle-end. */
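 /* For example, assuming a 32-bit SImode target, the code below
 folds (fix:SI 3.0e9) to 2147483647 (saturating at the signed
 maximum), (unsigned_fix:SI -1.0) to 0, and any NaN operand to 0. */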
1809
1810 /* This was formerly used only for non-IEEE float.
1811 eggert@twinsun.com says it is safe for IEEE also. */
1812 REAL_VALUE_TYPE x, t;
1813 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1814 wide_int wmax, wmin;
1815 /* This is part of the ABI of real_to_integer, but we check
1816 the range before making this call. */
1817 bool fail;
1818
1819 switch (code)
1820 {
1821 case FIX:
1822 if (REAL_VALUE_ISNAN (x))
1823 return const0_rtx;
1824
1825 /* Test against the signed upper bound. */
1826 wmax = wi::max_value (width, SIGNED);
1827 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1828 if (REAL_VALUES_LESS (t, x))
1829 return immed_wide_int_const (wmax, mode);
1830
1831 /* Test against the signed lower bound. */
1832 wmin = wi::min_value (width, SIGNED);
1833 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1834 if (REAL_VALUES_LESS (x, t))
1835 return immed_wide_int_const (wmin, mode);
1836
1837 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1838 break;
1839
1840 case UNSIGNED_FIX:
1841 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1842 return const0_rtx;
1843
1844 /* Test against the unsigned upper bound. */
1845 wmax = wi::max_value (width, UNSIGNED);
1846 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1847 if (REAL_VALUES_LESS (t, x))
1848 return immed_wide_int_const (wmax, mode);
1849
1850 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1851 mode);
1852 break;
1853
1854 default:
1855 gcc_unreachable ();
1856 }
1857 }
1858
1859 return NULL_RTX;
1860 }
1861 \f
1862 /* Subroutine of simplify_binary_operation to simplify a binary operation
1863 CODE that can commute with byte swapping, with result mode MODE and
1864 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1865 Return zero if no simplification or canonicalization is possible. */
1866
1867 static rtx
1868 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1869 rtx op0, rtx op1)
1870 {
1871 rtx tem;
1872
1873 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1874 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1875 {
1876 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1877 simplify_gen_unary (BSWAP, mode, op1, mode));
1878 return simplify_gen_unary (BSWAP, mode, tem, mode);
1879 }
1880
1881 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1882 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1883 {
1884 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1885 return simplify_gen_unary (BSWAP, mode, tem, mode);
1886 }
1887
1888 return NULL_RTX;
1889 }
1890
1891 /* Subroutine of simplify_binary_operation to simplify a commutative,
1892 associative binary operation CODE with result mode MODE, operating
1893 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1894 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1895 canonicalization is possible. */
1896
1897 static rtx
1898 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1899 rtx op0, rtx op1)
1900 {
1901 rtx tem;
1902
1903 /* Linearize the operator to the left. */
1904 if (GET_CODE (op1) == code)
1905 {
1906 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1907 if (GET_CODE (op0) == code)
1908 {
1909 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1910 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1911 }
1912
1913 /* "a op (b op c)" becomes "(b op c) op a". */
1914 if (! swap_commutative_operands_p (op1, op0))
1915 return simplify_gen_binary (code, mode, op1, op0);
1916
1917 std::swap (op0, op1);
1918 }
1919
1920 if (GET_CODE (op0) == code)
1921 {
1922 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1923 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1924 {
1925 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1926 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1927 }
1928
1929 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1930 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1931 if (tem != 0)
1932 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1933
1934 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1935 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1936 if (tem != 0)
1937 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1938 }
1939
1940 return 0;
1941 }
1942
1943
1944 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1945 and OP1. Return 0 if no simplification is possible.
1946
1947 Don't use this for relational operations such as EQ or LT.
1948 Use simplify_relational_operation instead. */
1949 rtx
1950 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1951 rtx op0, rtx op1)
1952 {
1953 rtx trueop0, trueop1;
1954 rtx tem;
1955
1956 /* Relational operations don't work here. We must know the mode
1957 of the operands in order to do the comparison correctly.
1958 Assuming a full word can give incorrect results.
1959 Consider comparing 128 with -128 in QImode. */
1960 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1961 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1962
1963 /* Make sure the constant is second. */
1964 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1965 && swap_commutative_operands_p (op0, op1))
1966 std::swap (op0, op1);
1967
1968 trueop0 = avoid_constant_pool_reference (op0);
1969 trueop1 = avoid_constant_pool_reference (op1);
1970
1971 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1972 if (tem)
1973 return tem;
1974 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1975 }
1976
1977 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1978 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1979 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1980 actual constants. */
1981
1982 static rtx
1983 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1984 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1985 {
1986 rtx tem, reversed, opleft, opright;
1987 HOST_WIDE_INT val;
1988 unsigned int width = GET_MODE_PRECISION (mode);
1989
1990 /* Even if we can't compute a constant result,
1991 there are some cases worth simplifying. */
1992
1993 switch (code)
1994 {
1995 case PLUS:
1996 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1997 when x is NaN, infinite, or finite and nonzero. They aren't
1998 when x is -0 and the rounding mode is not towards -infinity,
1999 since (-0) + 0 is then 0. */
2000 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2001 return op0;
2002
2003 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2004 transformations are safe even for IEEE. */
2005 if (GET_CODE (op0) == NEG)
2006 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2007 else if (GET_CODE (op1) == NEG)
2008 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2009
2010 /* (~a) + 1 -> -a */
2011 if (INTEGRAL_MODE_P (mode)
2012 && GET_CODE (op0) == NOT
2013 && trueop1 == const1_rtx)
2014 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2015
2016 /* Handle both-operands-constant cases. We can only add
2017 CONST_INTs to constants since the sum of relocatable symbols
2018 can't be handled by most assemblers. Don't add CONST_INT
2019 to CONST_INT since overflow won't be computed properly if wider
2020 than HOST_BITS_PER_WIDE_INT. */
2021
2022 if ((GET_CODE (op0) == CONST
2023 || GET_CODE (op0) == SYMBOL_REF
2024 || GET_CODE (op0) == LABEL_REF)
2025 && CONST_INT_P (op1))
2026 return plus_constant (mode, op0, INTVAL (op1));
2027 else if ((GET_CODE (op1) == CONST
2028 || GET_CODE (op1) == SYMBOL_REF
2029 || GET_CODE (op1) == LABEL_REF)
2030 && CONST_INT_P (op0))
2031 return plus_constant (mode, op1, INTVAL (op0));
2032
2033 /* See if this is something like X * C - X or vice versa or
2034 if the multiplication is written as a shift. If so, we can
2035 distribute and make a new multiply, shift, or maybe just
2036 have X (if C is 2 in the example above). But don't make
2037 something more expensive than we had before. */
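 /* For instance, (plus (mult X 3) X) can become (mult X 4), and
 (plus (ashift X 2) X) can become (mult X 5), provided the new form
 is no costlier than the original. */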
2038
2039 if (SCALAR_INT_MODE_P (mode))
2040 {
2041 rtx lhs = op0, rhs = op1;
2042
2043 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2044 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2045
2046 if (GET_CODE (lhs) == NEG)
2047 {
2048 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2049 lhs = XEXP (lhs, 0);
2050 }
2051 else if (GET_CODE (lhs) == MULT
2052 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2053 {
2054 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2055 lhs = XEXP (lhs, 0);
2056 }
2057 else if (GET_CODE (lhs) == ASHIFT
2058 && CONST_INT_P (XEXP (lhs, 1))
2059 && INTVAL (XEXP (lhs, 1)) >= 0
2060 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2061 {
2062 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2063 GET_MODE_PRECISION (mode));
2064 lhs = XEXP (lhs, 0);
2065 }
2066
2067 if (GET_CODE (rhs) == NEG)
2068 {
2069 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2070 rhs = XEXP (rhs, 0);
2071 }
2072 else if (GET_CODE (rhs) == MULT
2073 && CONST_INT_P (XEXP (rhs, 1)))
2074 {
2075 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2076 rhs = XEXP (rhs, 0);
2077 }
2078 else if (GET_CODE (rhs) == ASHIFT
2079 && CONST_INT_P (XEXP (rhs, 1))
2080 && INTVAL (XEXP (rhs, 1)) >= 0
2081 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2082 {
2083 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2084 GET_MODE_PRECISION (mode));
2085 rhs = XEXP (rhs, 0);
2086 }
2087
2088 if (rtx_equal_p (lhs, rhs))
2089 {
2090 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2091 rtx coeff;
2092 bool speed = optimize_function_for_speed_p (cfun);
2093
2094 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2095
2096 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2097 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2098 ? tem : 0;
2099 }
2100 }
2101
2102 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2103 if (CONST_SCALAR_INT_P (op1)
2104 && GET_CODE (op0) == XOR
2105 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2106 && mode_signbit_p (mode, op1))
2107 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2108 simplify_gen_binary (XOR, mode, op1,
2109 XEXP (op0, 1)));
2110
2111 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2112 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2113 && GET_CODE (op0) == MULT
2114 && GET_CODE (XEXP (op0, 0)) == NEG)
2115 {
2116 rtx in1, in2;
2117
2118 in1 = XEXP (XEXP (op0, 0), 0);
2119 in2 = XEXP (op0, 1);
2120 return simplify_gen_binary (MINUS, mode, op1,
2121 simplify_gen_binary (MULT, mode,
2122 in1, in2));
2123 }
2124
2125 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2126 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2127 is 1. */
2128 if (COMPARISON_P (op0)
2129 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2130 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2131 && (reversed = reversed_comparison (op0, mode)))
2132 return
2133 simplify_gen_unary (NEG, mode, reversed, mode);
2134
2135 /* If one of the operands is a PLUS or a MINUS, see if we can
2136 simplify this by the associative law.
2137 Don't use the associative law for floating point.
2138 The inaccuracy makes it nonassociative,
2139 and subtle programs can break if operations are associated. */
2140
2141 if (INTEGRAL_MODE_P (mode)
2142 && (plus_minus_operand_p (op0)
2143 || plus_minus_operand_p (op1))
2144 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2145 return tem;
2146
2147 /* Reassociate floating point addition only when the user
2148 specifies associative math operations. */
2149 if (FLOAT_MODE_P (mode)
2150 && flag_associative_math)
2151 {
2152 tem = simplify_associative_operation (code, mode, op0, op1);
2153 if (tem)
2154 return tem;
2155 }
2156 break;
2157
2158 case COMPARE:
2159 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2160 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2161 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2162 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2163 {
2164 rtx xop00 = XEXP (op0, 0);
2165 rtx xop10 = XEXP (op1, 0);
2166
2167 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2168 return xop00;
2169
2170 if (REG_P (xop00) && REG_P (xop10)
2171 && GET_MODE (xop00) == GET_MODE (xop10)
2172 && REGNO (xop00) == REGNO (xop10)
2173 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2174 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2175 return xop00;
2176 }
2177 break;
2178
2179 case MINUS:
2180 /* We can't assume x-x is 0 even with non-IEEE floating point,
2181 but since it is zero except in very strange circumstances, we
2182 will treat it as zero with -ffinite-math-only. */
2183 if (rtx_equal_p (trueop0, trueop1)
2184 && ! side_effects_p (op0)
2185 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2186 return CONST0_RTX (mode);
2187
2188 /* Change subtraction from zero into negation. (0 - x) is the
2189 same as -x when x is NaN, infinite, or finite and nonzero.
2190 But if the mode has signed zeros, and does not round towards
2191 -infinity, then 0 - 0 is 0, not -0. */
2192 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2193 return simplify_gen_unary (NEG, mode, op1, mode);
2194
2195 /* (-1 - a) is ~a. */
2196 if (trueop0 == constm1_rtx)
2197 return simplify_gen_unary (NOT, mode, op1, mode);
2198
2199 /* Subtracting 0 has no effect unless the mode has signed zeros
2200 and supports rounding towards -infinity. In such a case,
2201 0 - 0 is -0. */
2202 if (!(HONOR_SIGNED_ZEROS (mode)
2203 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2204 && trueop1 == CONST0_RTX (mode))
2205 return op0;
2206
2207 /* See if this is something like X * C - X or vice versa or
2208 if the multiplication is written as a shift. If so, we can
2209 distribute and make a new multiply, shift, or maybe just
2210 have X (if C is 2 in the example above). But don't make
2211 something more expensive than we had before. */
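 /* For instance, (minus (mult X 3) X) can become (mult X 2), and
 (minus (ashift X 3) X) can become (mult X 7), provided the new form
 is no costlier than the original. */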
2212
2213 if (SCALAR_INT_MODE_P (mode))
2214 {
2215 rtx lhs = op0, rhs = op1;
2216
2217 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2218 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2219
2220 if (GET_CODE (lhs) == NEG)
2221 {
2222 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2223 lhs = XEXP (lhs, 0);
2224 }
2225 else if (GET_CODE (lhs) == MULT
2226 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2227 {
2228 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2229 lhs = XEXP (lhs, 0);
2230 }
2231 else if (GET_CODE (lhs) == ASHIFT
2232 && CONST_INT_P (XEXP (lhs, 1))
2233 && INTVAL (XEXP (lhs, 1)) >= 0
2234 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2235 {
2236 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2237 GET_MODE_PRECISION (mode));
2238 lhs = XEXP (lhs, 0);
2239 }
2240
2241 if (GET_CODE (rhs) == NEG)
2242 {
2243 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2244 rhs = XEXP (rhs, 0);
2245 }
2246 else if (GET_CODE (rhs) == MULT
2247 && CONST_INT_P (XEXP (rhs, 1)))
2248 {
2249 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2250 rhs = XEXP (rhs, 0);
2251 }
2252 else if (GET_CODE (rhs) == ASHIFT
2253 && CONST_INT_P (XEXP (rhs, 1))
2254 && INTVAL (XEXP (rhs, 1)) >= 0
2255 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2256 {
2257 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2258 GET_MODE_PRECISION (mode));
2259 negcoeff1 = -negcoeff1;
2260 rhs = XEXP (rhs, 0);
2261 }
2262
2263 if (rtx_equal_p (lhs, rhs))
2264 {
2265 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2266 rtx coeff;
2267 bool speed = optimize_function_for_speed_p (cfun);
2268
2269 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2270
2271 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2272 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2273 ? tem : 0;
2274 }
2275 }
2276
2277 /* (a - (-b)) -> (a + b). True even for IEEE. */
2278 if (GET_CODE (op1) == NEG)
2279 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2280
2281 /* (-x - c) may be simplified as (-c - x). */
2282 if (GET_CODE (op0) == NEG
2283 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2284 {
2285 tem = simplify_unary_operation (NEG, mode, op1, mode);
2286 if (tem)
2287 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2288 }
2289
2290 /* Don't let a relocatable value get a negative coeff. */
2291 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2292 return simplify_gen_binary (PLUS, mode,
2293 op0,
2294 neg_const_int (mode, op1));
2295
2296 /* (x - (x & y)) -> (x & ~y) */
2297 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2298 {
2299 if (rtx_equal_p (op0, XEXP (op1, 0)))
2300 {
2301 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2302 GET_MODE (XEXP (op1, 1)));
2303 return simplify_gen_binary (AND, mode, op0, tem);
2304 }
2305 if (rtx_equal_p (op0, XEXP (op1, 1)))
2306 {
2307 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2308 GET_MODE (XEXP (op1, 0)));
2309 return simplify_gen_binary (AND, mode, op0, tem);
2310 }
2311 }
2312
2313 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2314 by reversing the comparison code if valid. */
2315 if (STORE_FLAG_VALUE == 1
2316 && trueop0 == const1_rtx
2317 && COMPARISON_P (op1)
2318 && (reversed = reversed_comparison (op1, mode)))
2319 return reversed;
2320
2321 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2322 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2323 && GET_CODE (op1) == MULT
2324 && GET_CODE (XEXP (op1, 0)) == NEG)
2325 {
2326 rtx in1, in2;
2327
2328 in1 = XEXP (XEXP (op1, 0), 0);
2329 in2 = XEXP (op1, 1);
2330 return simplify_gen_binary (PLUS, mode,
2331 simplify_gen_binary (MULT, mode,
2332 in1, in2),
2333 op0);
2334 }
2335
2336 /* Canonicalize (minus (neg A) (mult B C)) to
2337 (minus (mult (neg B) C) A). */
2338 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2339 && GET_CODE (op1) == MULT
2340 && GET_CODE (op0) == NEG)
2341 {
2342 rtx in1, in2;
2343
2344 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2345 in2 = XEXP (op1, 1);
2346 return simplify_gen_binary (MINUS, mode,
2347 simplify_gen_binary (MULT, mode,
2348 in1, in2),
2349 XEXP (op0, 0));
2350 }
2351
2352 /* If one of the operands is a PLUS or a MINUS, see if we can
2353 simplify this by the associative law. This will, for example,
2354 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2355 Don't use the associative law for floating point.
2356 The inaccuracy makes it nonassociative,
2357 and subtle programs can break if operations are associated. */
2358
2359 if (INTEGRAL_MODE_P (mode)
2360 && (plus_minus_operand_p (op0)
2361 || plus_minus_operand_p (op1))
2362 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2363 return tem;
2364 break;
2365
2366 case MULT:
2367 if (trueop1 == constm1_rtx)
2368 return simplify_gen_unary (NEG, mode, op0, mode);
2369
2370 if (GET_CODE (op0) == NEG)
2371 {
2372 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2373 /* If op1 is a MULT as well and simplify_unary_operation
2374 just moved the NEG to the second operand, simplify_gen_binary
2375 below could, through simplify_associative_operation, move
2376 the NEG around again and recurse endlessly. */
2377 if (temp
2378 && GET_CODE (op1) == MULT
2379 && GET_CODE (temp) == MULT
2380 && XEXP (op1, 0) == XEXP (temp, 0)
2381 && GET_CODE (XEXP (temp, 1)) == NEG
2382 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2383 temp = NULL_RTX;
2384 if (temp)
2385 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2386 }
2387 if (GET_CODE (op1) == NEG)
2388 {
2389 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2390 /* If op0 is a MULT as well and simplify_unary_operation
2391 just moved the NEG to the second operand, simplify_gen_binary
2392 below could, through simplify_associative_operation, move
2393 the NEG around again and recurse endlessly. */
2394 if (temp
2395 && GET_CODE (op0) == MULT
2396 && GET_CODE (temp) == MULT
2397 && XEXP (op0, 0) == XEXP (temp, 0)
2398 && GET_CODE (XEXP (temp, 1)) == NEG
2399 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2400 temp = NULL_RTX;
2401 if (temp)
2402 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2403 }
2404
2405 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2406 x is NaN, since x * 0 is then also NaN. Nor is it valid
2407 when the mode has signed zeros, since multiplying a negative
2408 number by 0 will give -0, not 0. */
2409 if (!HONOR_NANS (mode)
2410 && !HONOR_SIGNED_ZEROS (mode)
2411 && trueop1 == CONST0_RTX (mode)
2412 && ! side_effects_p (op0))
2413 return op1;
2414
2415 /* In IEEE floating point, x*1 is not equivalent to x for
2416 signalling NaNs. */
2417 if (!HONOR_SNANS (mode)
2418 && trueop1 == CONST1_RTX (mode))
2419 return op0;
2420
2421 /* Convert multiply by constant power of two into shift. */
2422 if (CONST_SCALAR_INT_P (trueop1))
2423 {
2424 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2425 if (val >= 0)
2426 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2427 }
2428
2429 /* x*2 is x+x and x*(-1) is -x */
2430 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2431 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2432 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2433 && GET_MODE (op0) == mode)
2434 {
2435 REAL_VALUE_TYPE d;
2436 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2437
2438 if (REAL_VALUES_EQUAL (d, dconst2))
2439 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2440
2441 if (!HONOR_SNANS (mode)
2442 && REAL_VALUES_EQUAL (d, dconstm1))
2443 return simplify_gen_unary (NEG, mode, op0, mode);
2444 }
2445
2446 /* Optimize -x * -x as x * x. */
2447 if (FLOAT_MODE_P (mode)
2448 && GET_CODE (op0) == NEG
2449 && GET_CODE (op1) == NEG
2450 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2451 && !side_effects_p (XEXP (op0, 0)))
2452 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2453
2454 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2455 if (SCALAR_FLOAT_MODE_P (mode)
2456 && GET_CODE (op0) == ABS
2457 && GET_CODE (op1) == ABS
2458 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2459 && !side_effects_p (XEXP (op0, 0)))
2460 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2461
2462 /* Reassociate multiplication, but for floating point MULTs
2463 only when the user specifies unsafe math optimizations. */
2464 if (! FLOAT_MODE_P (mode)
2465 || flag_unsafe_math_optimizations)
2466 {
2467 tem = simplify_associative_operation (code, mode, op0, op1);
2468 if (tem)
2469 return tem;
2470 }
2471 break;
2472
2473 case IOR:
2474 if (trueop1 == CONST0_RTX (mode))
2475 return op0;
2476 if (INTEGRAL_MODE_P (mode)
2477 && trueop1 == CONSTM1_RTX (mode)
2478 && !side_effects_p (op0))
2479 return op1;
2480 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2481 return op0;
2482 /* A | (~A) -> -1 */
2483 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2484 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2485 && ! side_effects_p (op0)
2486 && SCALAR_INT_MODE_P (mode))
2487 return constm1_rtx;
2488
2489 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2490 if (CONST_INT_P (op1)
2491 && HWI_COMPUTABLE_MODE_P (mode)
2492 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2493 && !side_effects_p (op0))
2494 return op1;
2495
2496 /* Canonicalize (X & C1) | C2. */
2497 if (GET_CODE (op0) == AND
2498 && CONST_INT_P (trueop1)
2499 && CONST_INT_P (XEXP (op0, 1)))
2500 {
2501 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2502 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2503 HOST_WIDE_INT c2 = INTVAL (trueop1);
2504
2505 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2506 if ((c1 & c2) == c1
2507 && !side_effects_p (XEXP (op0, 0)))
2508 return trueop1;
2509
2510 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2511 if (((c1|c2) & mask) == mask)
2512 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2513
2514 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2515 if (((c1 & ~c2) & mask) != (c1 & mask))
2516 {
2517 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2518 gen_int_mode (c1 & ~c2, mode));
2519 return simplify_gen_binary (IOR, mode, tem, op1);
2520 }
2521 }
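 /* For example, (ior (and X 3) 7) simplifies to just 7, and
 (ior (and X 0x3c) 0x0f) is canonicalized to (ior (and X 0x30) 0x0f),
 dropping the C1 bits that C2 already covers. */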
2522
2523 /* Convert (A & B) | A to A. */
2524 if (GET_CODE (op0) == AND
2525 && (rtx_equal_p (XEXP (op0, 0), op1)
2526 || rtx_equal_p (XEXP (op0, 1), op1))
2527 && ! side_effects_p (XEXP (op0, 0))
2528 && ! side_effects_p (XEXP (op0, 1)))
2529 return op1;
2530
2531 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2532 mode size to (rotate A CX). */
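 /* For example, in SImode (ior (ashift A (const_int 24))
 (lshiftrt A (const_int 8))) becomes (rotate A (const_int 24)). */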
2533
2534 if (GET_CODE (op1) == ASHIFT
2535 || GET_CODE (op1) == SUBREG)
2536 {
2537 opleft = op1;
2538 opright = op0;
2539 }
2540 else
2541 {
2542 opright = op1;
2543 opleft = op0;
2544 }
2545
2546 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2547 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2548 && CONST_INT_P (XEXP (opleft, 1))
2549 && CONST_INT_P (XEXP (opright, 1))
2550 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2551 == GET_MODE_PRECISION (mode)))
2552 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2553
2554 /* Same, but for ashift that has been "simplified" to a wider mode
2555 by simplify_shift_const. */
2556
2557 if (GET_CODE (opleft) == SUBREG
2558 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2559 && GET_CODE (opright) == LSHIFTRT
2560 && GET_CODE (XEXP (opright, 0)) == SUBREG
2561 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2562 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2563 && (GET_MODE_SIZE (GET_MODE (opleft))
2564 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2565 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2566 SUBREG_REG (XEXP (opright, 0)))
2567 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2568 && CONST_INT_P (XEXP (opright, 1))
2569 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2570 == GET_MODE_PRECISION (mode)))
2571 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2572 XEXP (SUBREG_REG (opleft), 1));
2573
2574 /* If we have (ior (and X C1) C2), simplify this by making
2575 C1 as small as possible if C1 actually changes. */
2576 if (CONST_INT_P (op1)
2577 && (HWI_COMPUTABLE_MODE_P (mode)
2578 || INTVAL (op1) > 0)
2579 && GET_CODE (op0) == AND
2580 && CONST_INT_P (XEXP (op0, 1))
2581 && CONST_INT_P (op1)
2582 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2583 {
2584 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2585 gen_int_mode (UINTVAL (XEXP (op0, 1))
2586 & ~UINTVAL (op1),
2587 mode));
2588 return simplify_gen_binary (IOR, mode, tmp, op1);
2589 }
2590
2591 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2592 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2593 the PLUS does not affect any of the bits in OP1: then we can do
2594 the IOR as a PLUS and we can associate. This is valid if OP1
2595 can be safely shifted left C bits. */
2596 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2597 && GET_CODE (XEXP (op0, 0)) == PLUS
2598 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2599 && CONST_INT_P (XEXP (op0, 1))
2600 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2601 {
2602 int count = INTVAL (XEXP (op0, 1));
2603 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2604
2605 if (mask >> count == INTVAL (trueop1)
2606 && trunc_int_for_mode (mask, mode) == mask
2607 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2608 return simplify_gen_binary (ASHIFTRT, mode,
2609 plus_constant (mode, XEXP (op0, 0),
2610 mask),
2611 XEXP (op0, 1));
2612 }
2613
2614 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2615 if (tem)
2616 return tem;
2617
2618 tem = simplify_associative_operation (code, mode, op0, op1);
2619 if (tem)
2620 return tem;
2621 break;
2622
2623 case XOR:
2624 if (trueop1 == CONST0_RTX (mode))
2625 return op0;
2626 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2627 return simplify_gen_unary (NOT, mode, op0, mode);
2628 if (rtx_equal_p (trueop0, trueop1)
2629 && ! side_effects_p (op0)
2630 && GET_MODE_CLASS (mode) != MODE_CC)
2631 return CONST0_RTX (mode);
2632
2633 /* Canonicalize XOR of the most significant bit to PLUS. */
2634 if (CONST_SCALAR_INT_P (op1)
2635 && mode_signbit_p (mode, op1))
2636 return simplify_gen_binary (PLUS, mode, op0, op1);
2637 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2638 if (CONST_SCALAR_INT_P (op1)
2639 && GET_CODE (op0) == PLUS
2640 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2641 && mode_signbit_p (mode, XEXP (op0, 1)))
2642 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2643 simplify_gen_binary (XOR, mode, op1,
2644 XEXP (op0, 1)));
2645
2646 /* If we are XORing two things that have no bits in common,
2647 convert them into an IOR. This helps to detect rotation encoded
2648 using those methods and possibly other simplifications. */
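 /* For instance, in SImode (xor (ashift A (const_int 24))
 (lshiftrt A (const_int 8))) has no overlapping nonzero bits, so it
 becomes the equivalent IOR, which the IOR case above can then turn
 into (rotate A (const_int 24)). */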
2649
2650 if (HWI_COMPUTABLE_MODE_P (mode)
2651 && (nonzero_bits (op0, mode)
2652 & nonzero_bits (op1, mode)) == 0)
2653 return (simplify_gen_binary (IOR, mode, op0, op1));
2654
2655 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2656 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2657 (NOT y). */
2658 {
2659 int num_negated = 0;
2660
2661 if (GET_CODE (op0) == NOT)
2662 num_negated++, op0 = XEXP (op0, 0);
2663 if (GET_CODE (op1) == NOT)
2664 num_negated++, op1 = XEXP (op1, 0);
2665
2666 if (num_negated == 2)
2667 return simplify_gen_binary (XOR, mode, op0, op1);
2668 else if (num_negated == 1)
2669 return simplify_gen_unary (NOT, mode,
2670 simplify_gen_binary (XOR, mode, op0, op1),
2671 mode);
2672 }
2673
2674 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2675 correspond to a machine insn or result in further simplifications
2676 if B is a constant. */
2677
2678 if (GET_CODE (op0) == AND
2679 && rtx_equal_p (XEXP (op0, 1), op1)
2680 && ! side_effects_p (op1))
2681 return simplify_gen_binary (AND, mode,
2682 simplify_gen_unary (NOT, mode,
2683 XEXP (op0, 0), mode),
2684 op1);
2685
2686 else if (GET_CODE (op0) == AND
2687 && rtx_equal_p (XEXP (op0, 0), op1)
2688 && ! side_effects_p (op1))
2689 return simplify_gen_binary (AND, mode,
2690 simplify_gen_unary (NOT, mode,
2691 XEXP (op0, 1), mode),
2692 op1);
2693
2694 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2695 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2696 out bits inverted twice and not set by C. Similarly, given
2697 (xor (and (xor A B) C) D), simplify without inverting C in
2698 the xor operand: (xor (and A C) (B&C)^D).
2699 */
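 /* A per-bit check of the first identity: where a bit of C is set,
 both sides reduce to ~D; where it is clear, both reduce to A^B^D,
 so B's bits under C are cancelled by the IOR with C. */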
2700 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2701 && GET_CODE (XEXP (op0, 0)) == XOR
2702 && CONST_INT_P (op1)
2703 && CONST_INT_P (XEXP (op0, 1))
2704 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2705 {
2706 enum rtx_code op = GET_CODE (op0);
2707 rtx a = XEXP (XEXP (op0, 0), 0);
2708 rtx b = XEXP (XEXP (op0, 0), 1);
2709 rtx c = XEXP (op0, 1);
2710 rtx d = op1;
2711 HOST_WIDE_INT bval = INTVAL (b);
2712 HOST_WIDE_INT cval = INTVAL (c);
2713 HOST_WIDE_INT dval = INTVAL (d);
2714 HOST_WIDE_INT xcval;
2715
2716 if (op == IOR)
2717 xcval = ~cval;
2718 else
2719 xcval = cval;
2720
2721 return simplify_gen_binary (XOR, mode,
2722 simplify_gen_binary (op, mode, a, c),
2723 gen_int_mode ((bval & xcval) ^ dval,
2724 mode));
2725 }
2726
2727 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2728 we can transform like this:
2729 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2730 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2731 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2732 Attempt a few simplifications when B and C are both constants. */
2733 if (GET_CODE (op0) == AND
2734 && CONST_INT_P (op1)
2735 && CONST_INT_P (XEXP (op0, 1)))
2736 {
2737 rtx a = XEXP (op0, 0);
2738 rtx b = XEXP (op0, 1);
2739 rtx c = op1;
2740 HOST_WIDE_INT bval = INTVAL (b);
2741 HOST_WIDE_INT cval = INTVAL (c);
2742
2743 /* Instead of computing ~A&C, we compute its complement,
2744 (A|~C). If it yields -1, ~A&C is zero, so we can
2745 optimize for sure. If it does not simplify, we still try
2746 to compute ~A&C below, but since that always allocates
2747 RTL, we don't try that before committing to returning a
2748 simplified expression. */
2749 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2750 GEN_INT (~cval));
2751
2752 if ((~cval & bval) == 0)
2753 {
2754 rtx na_c = NULL_RTX;
2755 if (n_na_c)
2756 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2757 else
2758 {
2759 /* If ~A does not simplify, don't bother: we don't
2760 want to simplify 2 operations into 3, and if na_c
2761 were to simplify with na, n_na_c would have
2762 simplified as well. */
2763 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2764 if (na)
2765 na_c = simplify_gen_binary (AND, mode, na, c);
2766 }
2767
2768 /* Try to simplify ~A&C | ~B&C. */
2769 if (na_c != NULL_RTX)
2770 return simplify_gen_binary (IOR, mode, na_c,
2771 gen_int_mode (~bval & cval, mode));
2772 }
2773 else
2774 {
2775 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2776 if (n_na_c == CONSTM1_RTX (mode))
2777 {
2778 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2779 gen_int_mode (~cval & bval,
2780 mode));
2781 return simplify_gen_binary (IOR, mode, a_nc_b,
2782 gen_int_mode (~bval & cval,
2783 mode));
2784 }
2785 }
2786 }
2787
2788 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2789 comparison if STORE_FLAG_VALUE is 1. */
2790 if (STORE_FLAG_VALUE == 1
2791 && trueop1 == const1_rtx
2792 && COMPARISON_P (op0)
2793 && (reversed = reversed_comparison (op0, mode)))
2794 return reversed;
2795
2796 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2797 is (lt foo (const_int 0)), so we can perform the above
2798 simplification if STORE_FLAG_VALUE is 1. */
2799
2800 if (STORE_FLAG_VALUE == 1
2801 && trueop1 == const1_rtx
2802 && GET_CODE (op0) == LSHIFTRT
2803 && CONST_INT_P (XEXP (op0, 1))
2804 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2805 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2806
2807 /* (xor (comparison foo bar) (const_int sign-bit))
2808 when STORE_FLAG_VALUE is the sign bit. */
2809 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2810 && trueop1 == const_true_rtx
2811 && COMPARISON_P (op0)
2812 && (reversed = reversed_comparison (op0, mode)))
2813 return reversed;
2814
2815 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2816 if (tem)
2817 return tem;
2818
2819 tem = simplify_associative_operation (code, mode, op0, op1);
2820 if (tem)
2821 return tem;
2822 break;
2823
2824 case AND:
2825 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2826 return trueop1;
2827 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2828 return op0;
2829 if (HWI_COMPUTABLE_MODE_P (mode))
2830 {
2831 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2832 HOST_WIDE_INT nzop1;
2833 if (CONST_INT_P (trueop1))
2834 {
2835 HOST_WIDE_INT val1 = INTVAL (trueop1);
2836 /* If we are turning off bits already known off in OP0, we need
2837 not do an AND. */
2838 if ((nzop0 & ~val1) == 0)
2839 return op0;
2840 }
2841 nzop1 = nonzero_bits (trueop1, mode);
2842 /* If we are clearing all the nonzero bits, the result is zero. */
2843 if ((nzop1 & nzop0) == 0
2844 && !side_effects_p (op0) && !side_effects_p (op1))
2845 return CONST0_RTX (mode);
2846 }
2847 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2848 && GET_MODE_CLASS (mode) != MODE_CC)
2849 return op0;
2850 /* A & (~A) -> 0 */
2851 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2852 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2853 && ! side_effects_p (op0)
2854 && GET_MODE_CLASS (mode) != MODE_CC)
2855 return CONST0_RTX (mode);
2856
2857 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2858 there are no nonzero bits of C outside of X's mode. */
2859 if ((GET_CODE (op0) == SIGN_EXTEND
2860 || GET_CODE (op0) == ZERO_EXTEND)
2861 && CONST_INT_P (trueop1)
2862 && HWI_COMPUTABLE_MODE_P (mode)
2863 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2864 & UINTVAL (trueop1)) == 0)
2865 {
2866 machine_mode imode = GET_MODE (XEXP (op0, 0));
2867 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2868 gen_int_mode (INTVAL (trueop1),
2869 imode));
2870 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2871 }
2872
2873 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2874 we might be able to further simplify the AND with X and potentially
2875 remove the truncation altogether. */
2876 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2877 {
2878 rtx x = XEXP (op0, 0);
2879 machine_mode xmode = GET_MODE (x);
2880 tem = simplify_gen_binary (AND, xmode, x,
2881 gen_int_mode (INTVAL (trueop1), xmode));
2882 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2883 }
2884
2885 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2886 if (GET_CODE (op0) == IOR
2887 && CONST_INT_P (trueop1)
2888 && CONST_INT_P (XEXP (op0, 1)))
2889 {
2890 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2891 return simplify_gen_binary (IOR, mode,
2892 simplify_gen_binary (AND, mode,
2893 XEXP (op0, 0), op1),
2894 gen_int_mode (tmp, mode));
2895 }
2896
2897 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2898 insn (and may simplify more). */
2899 if (GET_CODE (op0) == XOR
2900 && rtx_equal_p (XEXP (op0, 0), op1)
2901 && ! side_effects_p (op1))
2902 return simplify_gen_binary (AND, mode,
2903 simplify_gen_unary (NOT, mode,
2904 XEXP (op0, 1), mode),
2905 op1);
2906
2907 if (GET_CODE (op0) == XOR
2908 && rtx_equal_p (XEXP (op0, 1), op1)
2909 && ! side_effects_p (op1))
2910 return simplify_gen_binary (AND, mode,
2911 simplify_gen_unary (NOT, mode,
2912 XEXP (op0, 0), mode),
2913 op1);
2914
2915 /* Similarly for (~(A ^ B)) & A. */
2916 if (GET_CODE (op0) == NOT
2917 && GET_CODE (XEXP (op0, 0)) == XOR
2918 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2919 && ! side_effects_p (op1))
2920 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2921
2922 if (GET_CODE (op0) == NOT
2923 && GET_CODE (XEXP (op0, 0)) == XOR
2924 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2925 && ! side_effects_p (op1))
2926 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2927
2928 /* Convert (A | B) & A to A. */
2929 if (GET_CODE (op0) == IOR
2930 && (rtx_equal_p (XEXP (op0, 0), op1)
2931 || rtx_equal_p (XEXP (op0, 1), op1))
2932 && ! side_effects_p (XEXP (op0, 0))
2933 && ! side_effects_p (XEXP (op0, 1)))
2934 return op1;
2935
2936 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2937 ((A & N) + B) & M -> (A + B) & M
2938 Similarly if (N & M) == 0,
2939 ((A | N) + B) & M -> (A + B) & M
2940 and for - instead of + and/or ^ instead of |.
2941 Also, if (N & M) == 0, then
2942 (A +- N) & M -> A & M. */
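 /* For example, with M = 0xff, ((A & 0x1ff) + B) & 0xff can become
 (A + B) & 0xff, and (A + 0x100) & 0xff can become A & 0xff: carries
 propagate only upward, so the masked low bits of the sum depend only
 on the low bits of the operands. */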
2943 if (CONST_INT_P (trueop1)
2944 && HWI_COMPUTABLE_MODE_P (mode)
2945 && ~UINTVAL (trueop1)
2946 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2947 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2948 {
2949 rtx pmop[2];
2950 int which;
2951
2952 pmop[0] = XEXP (op0, 0);
2953 pmop[1] = XEXP (op0, 1);
2954
2955 if (CONST_INT_P (pmop[1])
2956 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2957 return simplify_gen_binary (AND, mode, pmop[0], op1);
2958
2959 for (which = 0; which < 2; which++)
2960 {
2961 tem = pmop[which];
2962 switch (GET_CODE (tem))
2963 {
2964 case AND:
2965 if (CONST_INT_P (XEXP (tem, 1))
2966 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2967 == UINTVAL (trueop1))
2968 pmop[which] = XEXP (tem, 0);
2969 break;
2970 case IOR:
2971 case XOR:
2972 if (CONST_INT_P (XEXP (tem, 1))
2973 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2974 pmop[which] = XEXP (tem, 0);
2975 break;
2976 default:
2977 break;
2978 }
2979 }
2980
2981 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2982 {
2983 tem = simplify_gen_binary (GET_CODE (op0), mode,
2984 pmop[0], pmop[1]);
2985 return simplify_gen_binary (code, mode, tem, op1);
2986 }
2987 }
2988
2989 /* (and X (ior (not X) Y)) -> (and X Y) */
2990 if (GET_CODE (op1) == IOR
2991 && GET_CODE (XEXP (op1, 0)) == NOT
2992 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
2993 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2994
2995 /* (and (ior (not X) Y) X) -> (and X Y) */
2996 if (GET_CODE (op0) == IOR
2997 && GET_CODE (XEXP (op0, 0)) == NOT
2998 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
2999 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3000
3001 /* (and X (ior Y (not X))) -> (and X Y) */
3002 if (GET_CODE (op1) == IOR
3003 && GET_CODE (XEXP (op1, 1)) == NOT
3004 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3005 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3006
3007 /* (and (ior Y (not X)) X) -> (and X Y) */
3008 if (GET_CODE (op0) == IOR
3009 && GET_CODE (XEXP (op0, 1)) == NOT
3010 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3011 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3012
3013 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3014 if (tem)
3015 return tem;
3016
3017 tem = simplify_associative_operation (code, mode, op0, op1);
3018 if (tem)
3019 return tem;
3020 break;
3021
3022 case UDIV:
3023 /* 0/x is 0 (or x&0 if x has side-effects). */
3024 if (trueop0 == CONST0_RTX (mode))
3025 {
3026 if (side_effects_p (op1))
3027 return simplify_gen_binary (AND, mode, op1, trueop0);
3028 return trueop0;
3029 }
3030 /* x/1 is x. */
3031 if (trueop1 == CONST1_RTX (mode))
3032 {
3033 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3034 if (tem)
3035 return tem;
3036 }
3037 /* Convert divide by power of two into shift. */
3038 if (CONST_INT_P (trueop1)
3039 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3040 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3041 break;
3042
3043 case DIV:
3044 /* Handle floating point and integers separately. */
3045 if (SCALAR_FLOAT_MODE_P (mode))
3046 {
3047 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3048 safe for modes with NaNs, since 0.0 / 0.0 will then be
3049 NaN rather than 0.0. Nor is it safe for modes with signed
3050 zeros, since dividing 0 by a negative number gives -0.0 */
3051 if (trueop0 == CONST0_RTX (mode)
3052 && !HONOR_NANS (mode)
3053 && !HONOR_SIGNED_ZEROS (mode)
3054 && ! side_effects_p (op1))
3055 return op0;
3056 /* x/1.0 is x. */
3057 if (trueop1 == CONST1_RTX (mode)
3058 && !HONOR_SNANS (mode))
3059 return op0;
3060
3061 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3062 && trueop1 != CONST0_RTX (mode))
3063 {
3064 REAL_VALUE_TYPE d;
3065 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3066
3067 /* x/-1.0 is -x. */
3068 if (REAL_VALUES_EQUAL (d, dconstm1)
3069 && !HONOR_SNANS (mode))
3070 return simplify_gen_unary (NEG, mode, op0, mode);
3071
3072 /* Change FP division by a constant into multiplication.
3073 Only do this with -freciprocal-math. */
3074 if (flag_reciprocal_math
3075 && !REAL_VALUES_EQUAL (d, dconst0))
3076 {
3077 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3078 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3079 return simplify_gen_binary (MULT, mode, op0, tem);
3080 }
3081 }
3082 }
3083 else if (SCALAR_INT_MODE_P (mode))
3084 {
3085 /* 0/x is 0 (or x&0 if x has side-effects). */
3086 if (trueop0 == CONST0_RTX (mode)
3087 && !cfun->can_throw_non_call_exceptions)
3088 {
3089 if (side_effects_p (op1))
3090 return simplify_gen_binary (AND, mode, op1, trueop0);
3091 return trueop0;
3092 }
3093 /* x/1 is x. */
3094 if (trueop1 == CONST1_RTX (mode))
3095 {
3096 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3097 if (tem)
3098 return tem;
3099 }
3100 /* x/-1 is -x. */
3101 if (trueop1 == constm1_rtx)
3102 {
3103 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3104 if (x)
3105 return simplify_gen_unary (NEG, mode, x, mode);
3106 }
3107 }
3108 break;
3109
3110 case UMOD:
3111 /* 0%x is 0 (or x&0 if x has side-effects). */
3112 if (trueop0 == CONST0_RTX (mode))
3113 {
3114 if (side_effects_p (op1))
3115 return simplify_gen_binary (AND, mode, op1, trueop0);
3116 return trueop0;
3117 }
3118 /* x%1 is 0 (or x&0 if x has side-effects). */
3119 if (trueop1 == CONST1_RTX (mode))
3120 {
3121 if (side_effects_p (op0))
3122 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3123 return CONST0_RTX (mode);
3124 }
3125 /* Implement modulus by power of two as AND. */
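 /* For example, (umod X 8) becomes (and X 7). */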
3126 if (CONST_INT_P (trueop1)
3127 && exact_log2 (UINTVAL (trueop1)) > 0)
3128 return simplify_gen_binary (AND, mode, op0,
3129 gen_int_mode (INTVAL (op1) - 1, mode));
3130 break;
3131
3132 case MOD:
3133 /* 0%x is 0 (or x&0 if x has side-effects). */
3134 if (trueop0 == CONST0_RTX (mode))
3135 {
3136 if (side_effects_p (op1))
3137 return simplify_gen_binary (AND, mode, op1, trueop0);
3138 return trueop0;
3139 }
3140 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3141 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3142 {
3143 if (side_effects_p (op0))
3144 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3145 return CONST0_RTX (mode);
3146 }
3147 break;
3148
3149 case ROTATERT:
3150 case ROTATE:
3151 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3152 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3153 bitsize - 1, use the other direction of rotate, with an amount of
3154 1 .. bitsize / 2 - 1 instead. */
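 /* For example, in SImode, and provided the target has both rotate
 patterns, (rotatert X (const_int 31)) becomes (rotate X (const_int 1))
 and (rotate X (const_int 20)) becomes (rotatert X (const_int 12)). */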
3155 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3156 if (CONST_INT_P (trueop1)
3157 && IN_RANGE (INTVAL (trueop1),
3158 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3159 GET_MODE_PRECISION (mode) - 1))
3160 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3161 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3162 - INTVAL (trueop1)));
3163 #endif
3164 /* FALLTHRU */
3165 case ASHIFTRT:
3166 if (trueop1 == CONST0_RTX (mode))
3167 return op0;
3168 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3169 return op0;
3170 /* Rotating ~0 always results in ~0. */
3171 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3172 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3173 && ! side_effects_p (op1))
3174 return op0;
3175 /* Given:
3176 scalar modes M1, M2
3177 scalar constants c1, c2
3178 size (M2) > size (M1)
3179 c1 == size (M2) - size (M1)
3180 optimize:
3181 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3182 <low_part>)
3183 (const_int <c2>))
3184 to:
3185 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3186 <low_part>). */
3187 if (code == ASHIFTRT
3188 && !VECTOR_MODE_P (mode)
3189 && SUBREG_P (op0)
3190 && CONST_INT_P (op1)
3191 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3192 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3193 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3194 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3195 > GET_MODE_BITSIZE (mode))
3196 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3197 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3198 - GET_MODE_BITSIZE (mode)))
3199 && subreg_lowpart_p (op0))
3200 {
3201 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3202 + INTVAL (op1));
3203 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3204 tmp = simplify_gen_binary (ASHIFTRT,
3205 GET_MODE (SUBREG_REG (op0)),
3206 XEXP (SUBREG_REG (op0), 0),
3207 tmp);
3208 return simplify_gen_subreg (mode, tmp, inner_mode,
3209 subreg_lowpart_offset (mode,
3210 inner_mode));
3211 }
3212 canonicalize_shift:
3213 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3214 {
3215 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3216 if (val != INTVAL (op1))
3217 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3218 }
3219 break;
3220
3221 case ASHIFT:
3222 case SS_ASHIFT:
3223 case US_ASHIFT:
3224 if (trueop1 == CONST0_RTX (mode))
3225 return op0;
3226 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3227 return op0;
3228 goto canonicalize_shift;
3229
3230 case LSHIFTRT:
3231 if (trueop1 == CONST0_RTX (mode))
3232 return op0;
3233 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3234 return op0;
3235 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
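 /* E.g. if CLZ of zero is defined to be 32 in a 32-bit mode, the CLZ
 result has bit 5 set only when X is zero, so (lshiftrt (clz X) 5)
 is 1 exactly when X is 0. */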
3236 if (GET_CODE (op0) == CLZ
3237 && CONST_INT_P (trueop1)
3238 && STORE_FLAG_VALUE == 1
3239 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3240 {
3241 machine_mode imode = GET_MODE (XEXP (op0, 0));
3242 unsigned HOST_WIDE_INT zero_val = 0;
3243
3244 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3245 && zero_val == GET_MODE_PRECISION (imode)
3246 && INTVAL (trueop1) == exact_log2 (zero_val))
3247 return simplify_gen_relational (EQ, mode, imode,
3248 XEXP (op0, 0), const0_rtx);
3249 }
3250 goto canonicalize_shift;
3251
3252 case SMIN:
3253 if (width <= HOST_BITS_PER_WIDE_INT
3254 && mode_signbit_p (mode, trueop1)
3255 && ! side_effects_p (op0))
3256 return op1;
3257 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3258 return op0;
3259 tem = simplify_associative_operation (code, mode, op0, op1);
3260 if (tem)
3261 return tem;
3262 break;
3263
3264 case SMAX:
3265 if (width <= HOST_BITS_PER_WIDE_INT
3266 && CONST_INT_P (trueop1)
3267 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3268 && ! side_effects_p (op0))
3269 return op1;
3270 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3271 return op0;
3272 tem = simplify_associative_operation (code, mode, op0, op1);
3273 if (tem)
3274 return tem;
3275 break;
3276
3277 case UMIN:
3278 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3279 return op1;
3280 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3281 return op0;
3282 tem = simplify_associative_operation (code, mode, op0, op1);
3283 if (tem)
3284 return tem;
3285 break;
3286
3287 case UMAX:
3288 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3289 return op1;
3290 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3291 return op0;
3292 tem = simplify_associative_operation (code, mode, op0, op1);
3293 if (tem)
3294 return tem;
3295 break;
3296
3297 case SS_PLUS:
3298 case US_PLUS:
3299 case SS_MINUS:
3300 case US_MINUS:
3301 case SS_MULT:
3302 case US_MULT:
3303 case SS_DIV:
3304 case US_DIV:
3305 /* ??? There are simplifications that can be done. */
3306 return 0;
3307
3308 case VEC_SELECT:
3309 if (!VECTOR_MODE_P (mode))
3310 {
3311 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3312 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3313 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3314 gcc_assert (XVECLEN (trueop1, 0) == 1);
3315 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3316
3317 if (GET_CODE (trueop0) == CONST_VECTOR)
3318 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3319 (trueop1, 0, 0)));
3320
3321 /* Extract a scalar element from a nested VEC_SELECT expression
3322 (with an optional nested VEC_CONCAT expression). Some targets
3323 (i386) extract a scalar element from a vector using a chain of
3324 nested VEC_SELECT expressions. When the input operand is a memory
3325 operand, this operation can be simplified to a simple scalar
3326 load from the appropriately offset memory address. */
3327 if (GET_CODE (trueop0) == VEC_SELECT)
3328 {
3329 rtx op0 = XEXP (trueop0, 0);
3330 rtx op1 = XEXP (trueop0, 1);
3331
3332 machine_mode opmode = GET_MODE (op0);
3333 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3334 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3335
3336 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3337 int elem;
3338
3339 rtvec vec;
3340 rtx tmp_op, tmp;
3341
3342 gcc_assert (GET_CODE (op1) == PARALLEL);
3343 gcc_assert (i < n_elts);
3344
3345 /* Select the element pointed to by the nested selector. */
3346 elem = INTVAL (XVECEXP (op1, 0, i));
3347
3348 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3349 if (GET_CODE (op0) == VEC_CONCAT)
3350 {
3351 rtx op00 = XEXP (op0, 0);
3352 rtx op01 = XEXP (op0, 1);
3353
3354 machine_mode mode00, mode01;
3355 int n_elts00, n_elts01;
3356
3357 mode00 = GET_MODE (op00);
3358 mode01 = GET_MODE (op01);
3359
3360 /* Find out number of elements of each operand. */
3361 if (VECTOR_MODE_P (mode00))
3362 {
3363 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3364 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3365 }
3366 else
3367 n_elts00 = 1;
3368
3369 if (VECTOR_MODE_P (mode01))
3370 {
3371 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3372 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3373 }
3374 else
3375 n_elts01 = 1;
3376
3377 gcc_assert (n_elts == n_elts00 + n_elts01);
3378
3379 /* Select correct operand of VEC_CONCAT
3380 and adjust selector. */
3381 if (elem < n_elts00)
3382 tmp_op = op00;
3383 else
3384 {
3385 tmp_op = op01;
3386 elem -= n_elts00;
3387 }
3388 }
3389 else
3390 tmp_op = op0;
3391
3392 vec = rtvec_alloc (1);
3393 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3394
3395 tmp = gen_rtx_fmt_ee (code, mode,
3396 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3397 return tmp;
3398 }
3399 if (GET_CODE (trueop0) == VEC_DUPLICATE
3400 && GET_MODE (XEXP (trueop0, 0)) == mode)
3401 return XEXP (trueop0, 0);
3402 }
3403 else
3404 {
3405 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3406 gcc_assert (GET_MODE_INNER (mode)
3407 == GET_MODE_INNER (GET_MODE (trueop0)));
3408 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3409
3410 if (GET_CODE (trueop0) == CONST_VECTOR)
3411 {
3412 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3413 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3414 rtvec v = rtvec_alloc (n_elts);
3415 unsigned int i;
3416
3417 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3418 for (i = 0; i < n_elts; i++)
3419 {
3420 rtx x = XVECEXP (trueop1, 0, i);
3421
3422 gcc_assert (CONST_INT_P (x));
3423 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3424 INTVAL (x));
3425 }
3426
3427 return gen_rtx_CONST_VECTOR (mode, v);
3428 }
3429
3430 /* Recognize the identity. */
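 /* E.g. (vec_select:V4SI X (parallel [(const_int 0) (const_int 1)
 (const_int 2) (const_int 3)])) selects every element of a V4SI X
 in order and is just X. */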
3431 if (GET_MODE (trueop0) == mode)
3432 {
3433 bool maybe_ident = true;
3434 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3435 {
3436 rtx j = XVECEXP (trueop1, 0, i);
3437 if (!CONST_INT_P (j) || INTVAL (j) != i)
3438 {
3439 maybe_ident = false;
3440 break;
3441 }
3442 }
3443 if (maybe_ident)
3444 return trueop0;
3445 }
3446
3447 /* If we build {a,b} then permute it, build the result directly. */
3448 if (XVECLEN (trueop1, 0) == 2
3449 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3450 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3451 && GET_CODE (trueop0) == VEC_CONCAT
3452 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3453 && GET_MODE (XEXP (trueop0, 0)) == mode
3454 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3455 && GET_MODE (XEXP (trueop0, 1)) == mode)
3456 {
3457 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3458 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3459 rtx subop0, subop1;
3460
3461 gcc_assert (i0 < 4 && i1 < 4);
3462 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3463 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3464
3465 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3466 }
3467
3468 if (XVECLEN (trueop1, 0) == 2
3469 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3470 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3471 && GET_CODE (trueop0) == VEC_CONCAT
3472 && GET_MODE (trueop0) == mode)
3473 {
3474 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3475 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3476 rtx subop0, subop1;
3477
3478 gcc_assert (i0 < 2 && i1 < 2);
3479 subop0 = XEXP (trueop0, i0);
3480 subop1 = XEXP (trueop0, i1);
3481
3482 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3483 }
3484
3485 /* If we select one half of a vec_concat, return that. */
3486 if (GET_CODE (trueop0) == VEC_CONCAT
3487 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3488 {
3489 rtx subop0 = XEXP (trueop0, 0);
3490 rtx subop1 = XEXP (trueop0, 1);
3491 machine_mode mode0 = GET_MODE (subop0);
3492 machine_mode mode1 = GET_MODE (subop1);
3493 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3494 int l0 = GET_MODE_SIZE (mode0) / li;
3495 int l1 = GET_MODE_SIZE (mode1) / li;
3496 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3497 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3498 {
3499 bool success = true;
3500 for (int i = 1; i < l0; ++i)
3501 {
3502 rtx j = XVECEXP (trueop1, 0, i);
3503 if (!CONST_INT_P (j) || INTVAL (j) != i)
3504 {
3505 success = false;
3506 break;
3507 }
3508 }
3509 if (success)
3510 return subop0;
3511 }
3512 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3513 {
3514 bool success = true;
3515 for (int i = 1; i < l1; ++i)
3516 {
3517 rtx j = XVECEXP (trueop1, 0, i);
3518 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3519 {
3520 success = false;
3521 break;
3522 }
3523 }
3524 if (success)
3525 return subop1;
3526 }
3527 }
3528 }
3529
3530 if (XVECLEN (trueop1, 0) == 1
3531 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3532 && GET_CODE (trueop0) == VEC_CONCAT)
3533 {
3534 rtx vec = trueop0;
3535 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3536
3537 /* Try to find the element in the VEC_CONCAT. */
3538 while (GET_MODE (vec) != mode
3539 && GET_CODE (vec) == VEC_CONCAT)
3540 {
3541 HOST_WIDE_INT vec_size;
3542
3543 if (CONST_INT_P (XEXP (vec, 0)))
3544 {
3545 /* vec_concat of two const_ints doesn't make sense with
3546 respect to modes. */
3547 if (CONST_INT_P (XEXP (vec, 1)))
3548 return 0;
3549
3550 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3551 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3552 }
3553 else
3554 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3555
3556 if (offset < vec_size)
3557 vec = XEXP (vec, 0);
3558 else
3559 {
3560 offset -= vec_size;
3561 vec = XEXP (vec, 1);
3562 }
3563 vec = avoid_constant_pool_reference (vec);
3564 }
3565
3566 if (GET_MODE (vec) == mode)
3567 return vec;
3568 }
3569
3570 /* If we select elements in a vec_merge that all come from the same
3571 operand, select from that operand directly. */
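/* E.g. a merge mask of 5 (binary 101) takes lanes 0 and 2 from the
   first operand, so selecting exactly those two lanes from
   (vec_merge x y (const_int 5)) reads only from x and becomes a
   vec_select on x itself. */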
3572 if (GET_CODE (op0) == VEC_MERGE)
3573 {
3574 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3575 if (CONST_INT_P (trueop02))
3576 {
3577 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3578 bool all_operand0 = true;
3579 bool all_operand1 = true;
3580 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3581 {
3582 rtx j = XVECEXP (trueop1, 0, i);
3583 if (sel & (1 << UINTVAL (j)))
3584 all_operand1 = false;
3585 else
3586 all_operand0 = false;
3587 }
3588 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3589 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3590 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3591 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3592 }
3593 }
3594
3595 /* If we have two nested selects that are inverses of each
3596 other, replace them with the source operand. */
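/* E.g. selecting elements (parallel [1 0]) of a vector that was itself
   produced by selecting (parallel [1 0]) restores the original order,
   so the pair of selects collapses to the inner source operand. */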
3597 if (GET_CODE (trueop0) == VEC_SELECT
3598 && GET_MODE (XEXP (trueop0, 0)) == mode)
3599 {
3600 rtx op0_subop1 = XEXP (trueop0, 1);
3601 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3602 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3603
3604 /* Apply the outer ordering vector to the inner one. (The inner
3605 ordering vector is expressly permitted to be of a different
3606 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3607 then the two VEC_SELECTs cancel. */
3608 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3609 {
3610 rtx x = XVECEXP (trueop1, 0, i);
3611 if (!CONST_INT_P (x))
3612 return 0;
3613 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3614 if (!CONST_INT_P (y) || i != INTVAL (y))
3615 return 0;
3616 }
3617 return XEXP (trueop0, 0);
3618 }
3619
3620 return 0;
3621 case VEC_CONCAT:
3622 {
3623 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3624 ? GET_MODE (trueop0)
3625 : GET_MODE_INNER (mode));
3626 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3627 ? GET_MODE (trueop1)
3628 : GET_MODE_INNER (mode));
3629
3630 gcc_assert (VECTOR_MODE_P (mode));
3631 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3632 == GET_MODE_SIZE (mode));
3633
3634 if (VECTOR_MODE_P (op0_mode))
3635 gcc_assert (GET_MODE_INNER (mode)
3636 == GET_MODE_INNER (op0_mode));
3637 else
3638 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3639
3640 if (VECTOR_MODE_P (op1_mode))
3641 gcc_assert (GET_MODE_INNER (mode)
3642 == GET_MODE_INNER (op1_mode));
3643 else
3644 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3645
3646 if ((GET_CODE (trueop0) == CONST_VECTOR
3647 || CONST_SCALAR_INT_P (trueop0)
3648 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3649 && (GET_CODE (trueop1) == CONST_VECTOR
3650 || CONST_SCALAR_INT_P (trueop1)
3651 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3652 {
3653 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3654 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3655 rtvec v = rtvec_alloc (n_elts);
3656 unsigned int i;
3657 unsigned in_n_elts = 1;
3658
3659 if (VECTOR_MODE_P (op0_mode))
3660 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3661 for (i = 0; i < n_elts; i++)
3662 {
3663 if (i < in_n_elts)
3664 {
3665 if (!VECTOR_MODE_P (op0_mode))
3666 RTVEC_ELT (v, i) = trueop0;
3667 else
3668 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3669 }
3670 else
3671 {
3672 if (!VECTOR_MODE_P (op1_mode))
3673 RTVEC_ELT (v, i) = trueop1;
3674 else
3675 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3676 i - in_n_elts);
3677 }
3678 }
3679
3680 return gen_rtx_CONST_VECTOR (mode, v);
3681 }
3682
3683 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3684 Restrict the transformation to avoid generating a VEC_SELECT with a
3685 mode unrelated to its operand. */
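/* E.g. (vec_concat:V2SI (vec_select:SI x (parallel [(const_int 1)]))
                         (vec_select:SI x (parallel [(const_int 0)])))
   with x of mode V2SI becomes
   (vec_select:V2SI x (parallel [(const_int 1) (const_int 0)])). */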
3686 if (GET_CODE (trueop0) == VEC_SELECT
3687 && GET_CODE (trueop1) == VEC_SELECT
3688 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3689 && GET_MODE (XEXP (trueop0, 0)) == mode)
3690 {
3691 rtx par0 = XEXP (trueop0, 1);
3692 rtx par1 = XEXP (trueop1, 1);
3693 int len0 = XVECLEN (par0, 0);
3694 int len1 = XVECLEN (par1, 0);
3695 rtvec vec = rtvec_alloc (len0 + len1);
3696 for (int i = 0; i < len0; i++)
3697 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3698 for (int i = 0; i < len1; i++)
3699 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3700 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3701 gen_rtx_PARALLEL (VOIDmode, vec));
3702 }
3703 }
3704 return 0;
3705
3706 default:
3707 gcc_unreachable ();
3708 }
3709
3710 return 0;
3711 }
3712
3713 rtx
3714 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3715 rtx op0, rtx op1)
3716 {
3717 unsigned int width = GET_MODE_PRECISION (mode);
3718
3719 if (VECTOR_MODE_P (mode)
3720 && code != VEC_CONCAT
3721 && GET_CODE (op0) == CONST_VECTOR
3722 && GET_CODE (op1) == CONST_VECTOR)
3723 {
3724 unsigned n_elts = GET_MODE_NUNITS (mode);
3725 machine_mode op0mode = GET_MODE (op0);
3726 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3727 machine_mode op1mode = GET_MODE (op1);
3728 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3729 rtvec v = rtvec_alloc (n_elts);
3730 unsigned int i;
3731
3732 gcc_assert (op0_n_elts == n_elts);
3733 gcc_assert (op1_n_elts == n_elts);
3734 for (i = 0; i < n_elts; i++)
3735 {
3736 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3737 CONST_VECTOR_ELT (op0, i),
3738 CONST_VECTOR_ELT (op1, i));
3739 if (!x)
3740 return 0;
3741 RTVEC_ELT (v, i) = x;
3742 }
3743
3744 return gen_rtx_CONST_VECTOR (mode, v);
3745 }
3746
3747 if (VECTOR_MODE_P (mode)
3748 && code == VEC_CONCAT
3749 && (CONST_SCALAR_INT_P (op0)
3750 || GET_CODE (op0) == CONST_FIXED
3751 || CONST_DOUBLE_AS_FLOAT_P (op0))
3752 && (CONST_SCALAR_INT_P (op1)
3753 || CONST_DOUBLE_AS_FLOAT_P (op1)
3754 || GET_CODE (op1) == CONST_FIXED))
3755 {
3756 unsigned n_elts = GET_MODE_NUNITS (mode);
3757 rtvec v = rtvec_alloc (n_elts);
3758
3759 gcc_assert (n_elts >= 2);
3760 if (n_elts == 2)
3761 {
3762 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3763 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3764
3765 RTVEC_ELT (v, 0) = op0;
3766 RTVEC_ELT (v, 1) = op1;
3767 }
3768 else
3769 {
3770 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3771 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3772 unsigned i;
3773
3774 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3775 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3776 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3777
3778 for (i = 0; i < op0_n_elts; ++i)
3779 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3780 for (i = 0; i < op1_n_elts; ++i)
3781 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3782 }
3783
3784 return gen_rtx_CONST_VECTOR (mode, v);
3785 }
3786
3787 if (SCALAR_FLOAT_MODE_P (mode)
3788 && CONST_DOUBLE_AS_FLOAT_P (op0)
3789 && CONST_DOUBLE_AS_FLOAT_P (op1)
3790 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3791 {
3792 if (code == AND
3793 || code == IOR
3794 || code == XOR)
3795 {
3796 long tmp0[4];
3797 long tmp1[4];
3798 REAL_VALUE_TYPE r;
3799 int i;
3800
3801 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3802 GET_MODE (op0));
3803 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3804 GET_MODE (op1));
3805 for (i = 0; i < 4; i++)
3806 {
3807 switch (code)
3808 {
3809 case AND:
3810 tmp0[i] &= tmp1[i];
3811 break;
3812 case IOR:
3813 tmp0[i] |= tmp1[i];
3814 break;
3815 case XOR:
3816 tmp0[i] ^= tmp1[i];
3817 break;
3818 default:
3819 gcc_unreachable ();
3820 }
3821 }
3822 real_from_target (&r, tmp0, mode);
3823 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3824 }
3825 else
3826 {
3827 REAL_VALUE_TYPE f0, f1, value, result;
3828 bool inexact;
3829
3830 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3831 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3832 real_convert (&f0, mode, &f0);
3833 real_convert (&f1, mode, &f1);
3834
3835 if (HONOR_SNANS (mode)
3836 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3837 return 0;
3838
3839 if (code == DIV
3840 && REAL_VALUES_EQUAL (f1, dconst0)
3841 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3842 return 0;
3843
3844 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3845 && flag_trapping_math
3846 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3847 {
3848 int s0 = REAL_VALUE_NEGATIVE (f0);
3849 int s1 = REAL_VALUE_NEGATIVE (f1);
3850
3851 switch (code)
3852 {
3853 case PLUS:
3854 /* Inf + -Inf = NaN plus exception. */
3855 if (s0 != s1)
3856 return 0;
3857 break;
3858 case MINUS:
3859 /* Inf - Inf = NaN plus exception. */
3860 if (s0 == s1)
3861 return 0;
3862 break;
3863 case DIV:
3864 /* Inf / Inf = NaN plus exception. */
3865 return 0;
3866 default:
3867 break;
3868 }
3869 }
3870
3871 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3872 && flag_trapping_math
3873 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3874 || (REAL_VALUE_ISINF (f1)
3875 && REAL_VALUES_EQUAL (f0, dconst0))))
3876 /* Inf * 0 = NaN plus exception. */
3877 return 0;
3878
3879 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3880 &f0, &f1);
3881 real_convert (&result, mode, &value);
3882
3883 /* Don't constant fold this floating point operation if
3884 the result has overflowed and flag_trapping_math is set. */
3885
3886 if (flag_trapping_math
3887 && MODE_HAS_INFINITIES (mode)
3888 && REAL_VALUE_ISINF (result)
3889 && !REAL_VALUE_ISINF (f0)
3890 && !REAL_VALUE_ISINF (f1))
3891 /* Overflow plus exception. */
3892 return 0;
3893
3894 /* Don't constant fold this floating point operation if the
3895 result may depend upon the run-time rounding mode and
3896 flag_rounding_math is set, or if GCC's software emulation
3897 is unable to accurately represent the result. */
3898
3899 if ((flag_rounding_math
3900 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3901 && (inexact || !real_identical (&result, &value)))
3902 return NULL_RTX;
3903
3904 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3905 }
3906 }
3907
3908 /* We can fold some multi-word operations. */
3909 if ((GET_MODE_CLASS (mode) == MODE_INT
3910 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3911 && CONST_SCALAR_INT_P (op0)
3912 && CONST_SCALAR_INT_P (op1))
3913 {
3914 wide_int result;
3915 bool overflow;
3916 rtx_mode_t pop0 = std::make_pair (op0, mode);
3917 rtx_mode_t pop1 = std::make_pair (op1, mode);
3918
3919 #if TARGET_SUPPORTS_WIDE_INT == 0
3920 /* This assert keeps the simplification from producing a result
3921 that cannot be represented in a CONST_DOUBLE. A lot of
3922 upstream callers expect that this function never fails to
3923 simplify something, so if this check were added to the test
3924 above, the code would die later anyway. If this assert
3925 fires, you just need to make the port support wide int. */
3926 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3927 #endif
3928 switch (code)
3929 {
3930 case MINUS:
3931 result = wi::sub (pop0, pop1);
3932 break;
3933
3934 case PLUS:
3935 result = wi::add (pop0, pop1);
3936 break;
3937
3938 case MULT:
3939 result = wi::mul (pop0, pop1);
3940 break;
3941
3942 case DIV:
3943 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3944 if (overflow)
3945 return NULL_RTX;
3946 break;
3947
3948 case MOD:
3949 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3950 if (overflow)
3951 return NULL_RTX;
3952 break;
3953
3954 case UDIV:
3955 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3956 if (overflow)
3957 return NULL_RTX;
3958 break;
3959
3960 case UMOD:
3961 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3962 if (overflow)
3963 return NULL_RTX;
3964 break;
3965
3966 case AND:
3967 result = wi::bit_and (pop0, pop1);
3968 break;
3969
3970 case IOR:
3971 result = wi::bit_or (pop0, pop1);
3972 break;
3973
3974 case XOR:
3975 result = wi::bit_xor (pop0, pop1);
3976 break;
3977
3978 case SMIN:
3979 result = wi::smin (pop0, pop1);
3980 break;
3981
3982 case SMAX:
3983 result = wi::smax (pop0, pop1);
3984 break;
3985
3986 case UMIN:
3987 result = wi::umin (pop0, pop1);
3988 break;
3989
3990 case UMAX:
3991 result = wi::umax (pop0, pop1);
3992 break;
3993
3994 case LSHIFTRT:
3995 case ASHIFTRT:
3996 case ASHIFT:
3997 {
3998 wide_int wop1 = pop1;
3999 if (SHIFT_COUNT_TRUNCATED)
4000 wop1 = wi::umod_trunc (wop1, width);
4001 else if (wi::geu_p (wop1, width))
4002 return NULL_RTX;
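/* E.g. for a 32-bit mode a constant shift count of 33 is reduced to
   33 % 32 == 1 when SHIFT_COUNT_TRUNCATED; otherwise the out-of-range
   count makes the whole fold fail and NULL_RTX is returned. */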
4003
4004 switch (code)
4005 {
4006 case LSHIFTRT:
4007 result = wi::lrshift (pop0, wop1);
4008 break;
4009
4010 case ASHIFTRT:
4011 result = wi::arshift (pop0, wop1);
4012 break;
4013
4014 case ASHIFT:
4015 result = wi::lshift (pop0, wop1);
4016 break;
4017
4018 default:
4019 gcc_unreachable ();
4020 }
4021 break;
4022 }
4023 case ROTATE:
4024 case ROTATERT:
4025 {
4026 if (wi::neg_p (pop1))
4027 return NULL_RTX;
4028
4029 switch (code)
4030 {
4031 case ROTATE:
4032 result = wi::lrotate (pop0, pop1);
4033 break;
4034
4035 case ROTATERT:
4036 result = wi::rrotate (pop0, pop1);
4037 break;
4038
4039 default:
4040 gcc_unreachable ();
4041 }
4042 break;
4043 }
4044 default:
4045 return NULL_RTX;
4046 }
4047 return immed_wide_int_const (result, mode);
4048 }
4049
4050 return NULL_RTX;
4051 }
4052
4053
4054 \f
4055 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4056 PLUS or MINUS.
4057
4058 Rather than testing for specific cases, we do this by a brute-force method
4059 and do all possible simplifications until no more changes occur. Then
4060 we rebuild the operation. */
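/* For example, (minus (plus (reg X) (const_int 4)) (const_int 1)) is
   expanded into the operand list { X, +4, -1 }; the two constants are
   combined into +3 and the result is rebuilt as
   (plus (reg X) (const_int 3)). */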
4061
4062 struct simplify_plus_minus_op_data
4063 {
4064 rtx op;
4065 short neg;
4066 };
4067
4068 static bool
4069 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4070 {
4071 int result;
4072
4073 result = (commutative_operand_precedence (y)
4074 - commutative_operand_precedence (x));
4075 if (result)
4076 return result > 0;
4077
4078 /* Group together equal REGs to do more simplification. */
4079 if (REG_P (x) && REG_P (y))
4080 return REGNO (x) > REGNO (y);
4081 else
4082 return false;
4083 }
4084
4085 static rtx
4086 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4087 rtx op1)
4088 {
4089 struct simplify_plus_minus_op_data ops[16];
4090 rtx result, tem;
4091 int n_ops = 2;
4092 int changed, n_constants, canonicalized = 0;
4093 int i, j;
4094
4095 memset (ops, 0, sizeof ops);
4096
4097 /* Set up the two operands and then expand them until nothing has been
4098 changed. If we run out of room in our array, give up; this should
4099 almost never happen. */
4100
4101 ops[0].op = op0;
4102 ops[0].neg = 0;
4103 ops[1].op = op1;
4104 ops[1].neg = (code == MINUS);
4105
4106 do
4107 {
4108 changed = 0;
4109 n_constants = 0;
4110
4111 for (i = 0; i < n_ops; i++)
4112 {
4113 rtx this_op = ops[i].op;
4114 int this_neg = ops[i].neg;
4115 enum rtx_code this_code = GET_CODE (this_op);
4116
4117 switch (this_code)
4118 {
4119 case PLUS:
4120 case MINUS:
4121 if (n_ops == ARRAY_SIZE (ops))
4122 return NULL_RTX;
4123
4124 ops[n_ops].op = XEXP (this_op, 1);
4125 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4126 n_ops++;
4127
4128 ops[i].op = XEXP (this_op, 0);
4129 changed = 1;
4130 canonicalized |= this_neg || i != n_ops - 2;
4131 break;
4132
4133 case NEG:
4134 ops[i].op = XEXP (this_op, 0);
4135 ops[i].neg = ! this_neg;
4136 changed = 1;
4137 canonicalized = 1;
4138 break;
4139
4140 case CONST:
4141 if (n_ops != ARRAY_SIZE (ops)
4142 && GET_CODE (XEXP (this_op, 0)) == PLUS
4143 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4144 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4145 {
4146 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4147 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4148 ops[n_ops].neg = this_neg;
4149 n_ops++;
4150 changed = 1;
4151 canonicalized = 1;
4152 }
4153 break;
4154
4155 case NOT:
4156 /* ~a -> (-a - 1) */
4157 if (n_ops != ARRAY_SIZE (ops))
4158 {
4159 ops[n_ops].op = CONSTM1_RTX (mode);
4160 ops[n_ops++].neg = this_neg;
4161 ops[i].op = XEXP (this_op, 0);
4162 ops[i].neg = !this_neg;
4163 changed = 1;
4164 canonicalized = 1;
4165 }
4166 break;
4167
4168 case CONST_INT:
4169 n_constants++;
4170 if (this_neg)
4171 {
4172 ops[i].op = neg_const_int (mode, this_op);
4173 ops[i].neg = 0;
4174 changed = 1;
4175 canonicalized = 1;
4176 }
4177 break;
4178
4179 default:
4180 break;
4181 }
4182 }
4183 }
4184 while (changed);
4185
4186 if (n_constants > 1)
4187 canonicalized = 1;
4188
4189 gcc_assert (n_ops >= 2);
4190
4191 /* If we only have two operands, we can avoid the loops. */
4192 if (n_ops == 2)
4193 {
4194 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4195 rtx lhs, rhs;
4196
4197 /* Get the two operands. Be careful with the order, especially for
4198 the cases where code == MINUS. */
4199 if (ops[0].neg && ops[1].neg)
4200 {
4201 lhs = gen_rtx_NEG (mode, ops[0].op);
4202 rhs = ops[1].op;
4203 }
4204 else if (ops[0].neg)
4205 {
4206 lhs = ops[1].op;
4207 rhs = ops[0].op;
4208 }
4209 else
4210 {
4211 lhs = ops[0].op;
4212 rhs = ops[1].op;
4213 }
4214
4215 return simplify_const_binary_operation (code, mode, lhs, rhs);
4216 }
4217
4218 /* Now simplify each pair of operands until nothing changes. */
4219 do
4220 {
4221 /* Insertion sort is good enough for a small array. */
4222 for (i = 1; i < n_ops; i++)
4223 {
4224 struct simplify_plus_minus_op_data save;
4225 j = i - 1;
4226 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4227 continue;
4228
4229 canonicalized = 1;
4230 save = ops[i];
4231 do
4232 ops[j + 1] = ops[j];
4233 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4234 ops[j + 1] = save;
4235 }
4236
4237 changed = 0;
4238 for (i = n_ops - 1; i > 0; i--)
4239 for (j = i - 1; j >= 0; j--)
4240 {
4241 rtx lhs = ops[j].op, rhs = ops[i].op;
4242 int lneg = ops[j].neg, rneg = ops[i].neg;
4243
4244 if (lhs != 0 && rhs != 0)
4245 {
4246 enum rtx_code ncode = PLUS;
4247
4248 if (lneg != rneg)
4249 {
4250 ncode = MINUS;
4251 if (lneg)
4252 std::swap (lhs, rhs);
4253 }
4254 else if (swap_commutative_operands_p (lhs, rhs))
4255 std::swap (lhs, rhs);
4256
4257 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4258 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4259 {
4260 rtx tem_lhs, tem_rhs;
4261
4262 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4263 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4264 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4265
4266 if (tem && !CONSTANT_P (tem))
4267 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4268 }
4269 else
4270 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4271
4272 if (tem)
4273 {
4274 /* Reject "simplifications" that just wrap the two
4275 arguments in a CONST. Failure to do so can result
4276 in infinite recursion with simplify_binary_operation
4277 when it calls us to simplify CONST operations.
4278 Also, if we find such a simplification, don't try
4279 any more combinations with this rhs: we must have
4280 something like symbol+offset, i.e. one of the
4281 trivial CONST expressions we handle later. */
4282 if (GET_CODE (tem) == CONST
4283 && GET_CODE (XEXP (tem, 0)) == ncode
4284 && XEXP (XEXP (tem, 0), 0) == lhs
4285 && XEXP (XEXP (tem, 0), 1) == rhs)
4286 break;
4287 lneg &= rneg;
4288 if (GET_CODE (tem) == NEG)
4289 tem = XEXP (tem, 0), lneg = !lneg;
4290 if (CONST_INT_P (tem) && lneg)
4291 tem = neg_const_int (mode, tem), lneg = 0;
4292
4293 ops[i].op = tem;
4294 ops[i].neg = lneg;
4295 ops[j].op = NULL_RTX;
4296 changed = 1;
4297 canonicalized = 1;
4298 }
4299 }
4300 }
4301
4302 /* If nothing changed, fail. */
4303 if (!canonicalized)
4304 return NULL_RTX;
4305
4306 /* Pack all the operands to the lower-numbered entries. */
4307 for (i = 0, j = 0; j < n_ops; j++)
4308 if (ops[j].op)
4309 {
4310 ops[i] = ops[j];
4311 i++;
4312 }
4313 n_ops = i;
4314 }
4315 while (changed);
4316
4317 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4318 if (n_ops == 2
4319 && CONST_INT_P (ops[1].op)
4320 && CONSTANT_P (ops[0].op)
4321 && ops[0].neg)
4322 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4323
4324 /* We suppressed creation of trivial CONST expressions in the
4325 combination loop to avoid recursion. Create one manually now.
4326 The combination loop should have ensured that there is exactly
4327 one CONST_INT, and the sort will have ensured that it is last
4328 in the array and that any other constant will be next-to-last. */
4329
4330 if (n_ops > 1
4331 && CONST_INT_P (ops[n_ops - 1].op)
4332 && CONSTANT_P (ops[n_ops - 2].op))
4333 {
4334 rtx value = ops[n_ops - 1].op;
4335 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4336 value = neg_const_int (mode, value);
4337 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4338 INTVAL (value));
4339 n_ops--;
4340 }
4341
4342 /* Put a non-negated operand first, if possible. */
4343
4344 for (i = 0; i < n_ops && ops[i].neg; i++)
4345 continue;
4346 if (i == n_ops)
4347 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4348 else if (i != 0)
4349 {
4350 tem = ops[0].op;
4351 ops[0] = ops[i];
4352 ops[i].op = tem;
4353 ops[i].neg = 1;
4354 }
4355
4356 /* Now make the result by performing the requested operations. */
4357 result = ops[0].op;
4358 for (i = 1; i < n_ops; i++)
4359 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4360 mode, result, ops[i].op);
4361
4362 return result;
4363 }
4364
4365 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4366 static bool
4367 plus_minus_operand_p (const_rtx x)
4368 {
4369 return GET_CODE (x) == PLUS
4370 || GET_CODE (x) == MINUS
4371 || (GET_CODE (x) == CONST
4372 && GET_CODE (XEXP (x, 0)) == PLUS
4373 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4374 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4375 }
4376
4377 /* Like simplify_binary_operation except used for relational operators.
4378 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4379 not also be VOIDmode.
4380
4381 CMP_MODE specifies in which mode the comparison is done in, so it is
4382 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4383 the operands or, if both are VOIDmode, the operands are compared in
4384 "infinite precision". */
4385 rtx
4386 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4387 machine_mode cmp_mode, rtx op0, rtx op1)
4388 {
4389 rtx tem, trueop0, trueop1;
4390
4391 if (cmp_mode == VOIDmode)
4392 cmp_mode = GET_MODE (op0);
4393 if (cmp_mode == VOIDmode)
4394 cmp_mode = GET_MODE (op1);
4395
4396 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4397 if (tem)
4398 {
4399 if (SCALAR_FLOAT_MODE_P (mode))
4400 {
4401 if (tem == const0_rtx)
4402 return CONST0_RTX (mode);
4403 #ifdef FLOAT_STORE_FLAG_VALUE
4404 {
4405 REAL_VALUE_TYPE val;
4406 val = FLOAT_STORE_FLAG_VALUE (mode);
4407 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4408 }
4409 #else
4410 return NULL_RTX;
4411 #endif
4412 }
4413 if (VECTOR_MODE_P (mode))
4414 {
4415 if (tem == const0_rtx)
4416 return CONST0_RTX (mode);
4417 #ifdef VECTOR_STORE_FLAG_VALUE
4418 {
4419 int i, units;
4420 rtvec v;
4421
4422 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4423 if (val == NULL_RTX)
4424 return NULL_RTX;
4425 if (val == const1_rtx)
4426 return CONST1_RTX (mode);
4427
4428 units = GET_MODE_NUNITS (mode);
4429 v = rtvec_alloc (units);
4430 for (i = 0; i < units; i++)
4431 RTVEC_ELT (v, i) = val;
4432 return gen_rtx_raw_CONST_VECTOR (mode, v);
4433 }
4434 #else
4435 return NULL_RTX;
4436 #endif
4437 }
4438
4439 return tem;
4440 }
4441
4442 /* For the following tests, ensure const0_rtx is op1. */
4443 if (swap_commutative_operands_p (op0, op1)
4444 || (op0 == const0_rtx && op1 != const0_rtx))
4445 std::swap (op0, op1), code = swap_condition (code);
4446
4447 /* If op0 is a compare, extract the comparison arguments from it. */
4448 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4449 return simplify_gen_relational (code, mode, VOIDmode,
4450 XEXP (op0, 0), XEXP (op0, 1));
4451
4452 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4453 || CC0_P (op0))
4454 return NULL_RTX;
4455
4456 trueop0 = avoid_constant_pool_reference (op0);
4457 trueop1 = avoid_constant_pool_reference (op1);
4458 return simplify_relational_operation_1 (code, mode, cmp_mode,
4459 trueop0, trueop1);
4460 }
4461
4462 /* This part of simplify_relational_operation is only used when CMP_MODE
4463 is not in class MODE_CC (i.e. it is a real comparison).
4464
4465 MODE is the mode of the result, while CMP_MODE specifies the mode
4466 in which the comparison is done, so it is the mode of the operands. */
4467
4468 static rtx
4469 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4470 machine_mode cmp_mode, rtx op0, rtx op1)
4471 {
4472 enum rtx_code op0code = GET_CODE (op0);
4473
4474 if (op1 == const0_rtx && COMPARISON_P (op0))
4475 {
4476 /* If op0 is a comparison, extract the comparison arguments
4477 from it. */
4478 if (code == NE)
4479 {
4480 if (GET_MODE (op0) == mode)
4481 return simplify_rtx (op0);
4482 else
4483 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4484 XEXP (op0, 0), XEXP (op0, 1));
4485 }
4486 else if (code == EQ)
4487 {
4488 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4489 if (new_code != UNKNOWN)
4490 return simplify_gen_relational (new_code, mode, VOIDmode,
4491 XEXP (op0, 0), XEXP (op0, 1));
4492 }
4493 }
4494
4495 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4496 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
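/* E.g. (ltu:SI (plus:SI a (const_int 4)) (const_int 4)) becomes
   (geu:SI a (const_int -4)). */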
4497 if ((code == LTU || code == GEU)
4498 && GET_CODE (op0) == PLUS
4499 && CONST_INT_P (XEXP (op0, 1))
4500 && (rtx_equal_p (op1, XEXP (op0, 0))
4501 || rtx_equal_p (op1, XEXP (op0, 1)))
4502 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4503 && XEXP (op0, 1) != const0_rtx)
4504 {
4505 rtx new_cmp
4506 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4507 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4508 cmp_mode, XEXP (op0, 0), new_cmp);
4509 }
4510
4511 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4512 if ((code == LTU || code == GEU)
4513 && GET_CODE (op0) == PLUS
4514 && rtx_equal_p (op1, XEXP (op0, 1))
4515 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4516 && !rtx_equal_p (op1, XEXP (op0, 0)))
4517 return simplify_gen_relational (code, mode, cmp_mode, op0,
4518 copy_rtx (XEXP (op0, 0)));
4519
4520 if (op1 == const0_rtx)
4521 {
4522 /* Canonicalize (GTU x 0) as (NE x 0). */
4523 if (code == GTU)
4524 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4525 /* Canonicalize (LEU x 0) as (EQ x 0). */
4526 if (code == LEU)
4527 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4528 }
4529 else if (op1 == const1_rtx)
4530 {
4531 switch (code)
4532 {
4533 case GE:
4534 /* Canonicalize (GE x 1) as (GT x 0). */
4535 return simplify_gen_relational (GT, mode, cmp_mode,
4536 op0, const0_rtx);
4537 case GEU:
4538 /* Canonicalize (GEU x 1) as (NE x 0). */
4539 return simplify_gen_relational (NE, mode, cmp_mode,
4540 op0, const0_rtx);
4541 case LT:
4542 /* Canonicalize (LT x 1) as (LE x 0). */
4543 return simplify_gen_relational (LE, mode, cmp_mode,
4544 op0, const0_rtx);
4545 case LTU:
4546 /* Canonicalize (LTU x 1) as (EQ x 0). */
4547 return simplify_gen_relational (EQ, mode, cmp_mode,
4548 op0, const0_rtx);
4549 default:
4550 break;
4551 }
4552 }
4553 else if (op1 == constm1_rtx)
4554 {
4555 /* Canonicalize (LE x -1) as (LT x 0). */
4556 if (code == LE)
4557 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4558 /* Canonicalize (GT x -1) as (GE x 0). */
4559 if (code == GT)
4560 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4561 }
4562
4563 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
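/* E.g. (eq (plus x (const_int 3)) (const_int 10)) becomes
   (eq x (const_int 7)). */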
4564 if ((code == EQ || code == NE)
4565 && (op0code == PLUS || op0code == MINUS)
4566 && CONSTANT_P (op1)
4567 && CONSTANT_P (XEXP (op0, 1))
4568 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4569 {
4570 rtx x = XEXP (op0, 0);
4571 rtx c = XEXP (op0, 1);
4572 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4573 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4574
4575 /* Detect an infinite recursion, where we oscillate at this
4576 simplification case between:
4577 A + B == C <---> C - B == A,
4578 where A, B, and C are all non-simplifiable constant expressions,
4579 usually SYMBOL_REFs. */
4580 if (GET_CODE (tem) == invcode
4581 && CONSTANT_P (x)
4582 && rtx_equal_p (c, XEXP (tem, 1)))
4583 return NULL_RTX;
4584
4585 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4586 }
4587
4588 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4589 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4590 if (code == NE
4591 && op1 == const0_rtx
4592 && GET_MODE_CLASS (mode) == MODE_INT
4593 && cmp_mode != VOIDmode
4594 /* ??? Work-around BImode bugs in the ia64 backend. */
4595 && mode != BImode
4596 && cmp_mode != BImode
4597 && nonzero_bits (op0, cmp_mode) == 1
4598 && STORE_FLAG_VALUE == 1)
4599 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4600 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4601 : lowpart_subreg (mode, op0, cmp_mode);
4602
4603 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4604 if ((code == EQ || code == NE)
4605 && op1 == const0_rtx
4606 && op0code == XOR)
4607 return simplify_gen_relational (code, mode, cmp_mode,
4608 XEXP (op0, 0), XEXP (op0, 1));
4609
4610 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4611 if ((code == EQ || code == NE)
4612 && op0code == XOR
4613 && rtx_equal_p (XEXP (op0, 0), op1)
4614 && !side_effects_p (XEXP (op0, 0)))
4615 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4616 CONST0_RTX (mode));
4617
4618 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4619 if ((code == EQ || code == NE)
4620 && op0code == XOR
4621 && rtx_equal_p (XEXP (op0, 1), op1)
4622 && !side_effects_p (XEXP (op0, 1)))
4623 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4624 CONST0_RTX (mode));
4625
4626 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4627 if ((code == EQ || code == NE)
4628 && op0code == XOR
4629 && CONST_SCALAR_INT_P (op1)
4630 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4631 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4632 simplify_gen_binary (XOR, cmp_mode,
4633 XEXP (op0, 1), op1));
4634
4635 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4636 can be implemented with a BICS instruction on some targets, or
4637 constant-folded if y is a constant. */
4638 if ((code == EQ || code == NE)
4639 && op0code == AND
4640 && rtx_equal_p (XEXP (op0, 0), op1)
4641 && !side_effects_p (op1)
4642 && op1 != CONST0_RTX (cmp_mode))
4643 {
4644 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4645 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4646
4647 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4648 CONST0_RTX (cmp_mode));
4649 }
4650
4651 /* Likewise for (eq/ne (and x y) y). */
4652 if ((code == EQ || code == NE)
4653 && op0code == AND
4654 && rtx_equal_p (XEXP (op0, 1), op1)
4655 && !side_effects_p (op1)
4656 && op1 != CONST0_RTX (cmp_mode))
4657 {
4658 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4659 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4660
4661 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4662 CONST0_RTX (cmp_mode));
4663 }
4664
4665 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4666 if ((code == EQ || code == NE)
4667 && GET_CODE (op0) == BSWAP
4668 && CONST_SCALAR_INT_P (op1))
4669 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4670 simplify_gen_unary (BSWAP, cmp_mode,
4671 op1, cmp_mode));
4672
4673 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4674 if ((code == EQ || code == NE)
4675 && GET_CODE (op0) == BSWAP
4676 && GET_CODE (op1) == BSWAP)
4677 return simplify_gen_relational (code, mode, cmp_mode,
4678 XEXP (op0, 0), XEXP (op1, 0));
4679
4680 if (op0code == POPCOUNT && op1 == const0_rtx)
4681 switch (code)
4682 {
4683 case EQ:
4684 case LE:
4685 case LEU:
4686 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4687 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4688 XEXP (op0, 0), const0_rtx);
4689
4690 case NE:
4691 case GT:
4692 case GTU:
4693 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4694 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4695 XEXP (op0, 0), const0_rtx);
4696
4697 default:
4698 break;
4699 }
4700
4701 return NULL_RTX;
4702 }
4703
4704 enum
4705 {
4706 CMP_EQ = 1,
4707 CMP_LT = 2,
4708 CMP_GT = 4,
4709 CMP_LTU = 8,
4710 CMP_GTU = 16
4711 };
4712
4713
4714 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4715 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4716 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4717 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4718 For floating-point comparisons, assume that the operands were ordered. */
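/* E.g. a caller comparing the constants 5 and 7 passes
   CMP_LT | CMP_LTU; with that encoding LE and LEU fold to
   const_true_rtx while GE and GEU fold to const0_rtx. */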
4719
4720 static rtx
4721 comparison_result (enum rtx_code code, int known_results)
4722 {
4723 switch (code)
4724 {
4725 case EQ:
4726 case UNEQ:
4727 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4728 case NE:
4729 case LTGT:
4730 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4731
4732 case LT:
4733 case UNLT:
4734 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4735 case GE:
4736 case UNGE:
4737 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4738
4739 case GT:
4740 case UNGT:
4741 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4742 case LE:
4743 case UNLE:
4744 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4745
4746 case LTU:
4747 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4748 case GEU:
4749 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4750
4751 case GTU:
4752 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4753 case LEU:
4754 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4755
4756 case ORDERED:
4757 return const_true_rtx;
4758 case UNORDERED:
4759 return const0_rtx;
4760 default:
4761 gcc_unreachable ();
4762 }
4763 }
4764
4765 /* Check if the given comparison (done in the given MODE) is actually
4766 a tautology or a contradiction. If the mode is VOIDmode, the
4767 comparison is done in "infinite precision". If no simplification
4768 is possible, this function returns zero. Otherwise, it returns
4769 either const_true_rtx or const0_rtx. */
4770
4771 rtx
4772 simplify_const_relational_operation (enum rtx_code code,
4773 machine_mode mode,
4774 rtx op0, rtx op1)
4775 {
4776 rtx tem;
4777 rtx trueop0;
4778 rtx trueop1;
4779
4780 gcc_assert (mode != VOIDmode
4781 || (GET_MODE (op0) == VOIDmode
4782 && GET_MODE (op1) == VOIDmode));
4783
4784 /* If op0 is a compare, extract the comparison arguments from it. */
4785 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4786 {
4787 op1 = XEXP (op0, 1);
4788 op0 = XEXP (op0, 0);
4789
4790 if (GET_MODE (op0) != VOIDmode)
4791 mode = GET_MODE (op0);
4792 else if (GET_MODE (op1) != VOIDmode)
4793 mode = GET_MODE (op1);
4794 else
4795 return 0;
4796 }
4797
4798 /* We can't simplify MODE_CC values since we don't know what the
4799 actual comparison is. */
4800 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4801 return 0;
4802
4803 /* Make sure the constant is second. */
4804 if (swap_commutative_operands_p (op0, op1))
4805 {
4806 std::swap (op0, op1);
4807 code = swap_condition (code);
4808 }
4809
4810 trueop0 = avoid_constant_pool_reference (op0);
4811 trueop1 = avoid_constant_pool_reference (op1);
4812
4813 /* For integer comparisons of A and B maybe we can simplify A - B and
4814 then simplify a comparison of that with zero. If A and B are both either
4815 a register or a CONST_INT, this can't help; testing for these cases will
4816 prevent infinite recursion here and speed things up.
4817
4818 We can only do this for EQ and NE comparisons as otherwise we may
4819 lose or introduce overflow which we cannot disregard as undefined, since
4820 we do not know the signedness of the operation on either the left or
4821 the right hand side of the comparison. */
4822
4823 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4824 && (code == EQ || code == NE)
4825 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4826 && (REG_P (op1) || CONST_INT_P (trueop1)))
4827 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4828 /* We cannot do this if tem is a nonzero address. */
4829 && ! nonzero_address_p (tem))
4830 return simplify_const_relational_operation (signed_condition (code),
4831 mode, tem, const0_rtx);
4832
4833 if (! HONOR_NANS (mode) && code == ORDERED)
4834 return const_true_rtx;
4835
4836 if (! HONOR_NANS (mode) && code == UNORDERED)
4837 return const0_rtx;
4838
4839 /* For modes without NaNs, if the two operands are equal, we know the
4840 result except if they have side-effects. Even with NaNs we know
4841 the result of unordered comparisons and, if signaling NaNs are
4842 irrelevant, also the result of LT/GT/LTGT. */
4843 if ((! HONOR_NANS (trueop0)
4844 || code == UNEQ || code == UNLE || code == UNGE
4845 || ((code == LT || code == GT || code == LTGT)
4846 && ! HONOR_SNANS (trueop0)))
4847 && rtx_equal_p (trueop0, trueop1)
4848 && ! side_effects_p (trueop0))
4849 return comparison_result (code, CMP_EQ);
4850
4851 /* If the operands are floating-point constants, see if we can fold
4852 the result. */
4853 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4854 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4855 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4856 {
4857 REAL_VALUE_TYPE d0, d1;
4858
4859 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4860 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4861
4862 /* Comparisons are unordered iff at least one of the values is NaN. */
4863 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4864 switch (code)
4865 {
4866 case UNEQ:
4867 case UNLT:
4868 case UNGT:
4869 case UNLE:
4870 case UNGE:
4871 case NE:
4872 case UNORDERED:
4873 return const_true_rtx;
4874 case EQ:
4875 case LT:
4876 case GT:
4877 case LE:
4878 case GE:
4879 case LTGT:
4880 case ORDERED:
4881 return const0_rtx;
4882 default:
4883 return 0;
4884 }
4885
4886 return comparison_result (code,
4887 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4888 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4889 }
4890
4891 /* Otherwise, see if the operands are both integers. */
4892 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4893 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4894 {
4895 /* It would be nice if we really had a mode here. However, the
4896 largest int representable on the target is as good as
4897 infinite. */
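/* For example, for (const_int -1) versus (const_int 1) the signed
   ordering is CMP_LT, but as an unsigned value the all-ones constant
   is the maximum, so the unsigned part is CMP_GTU; recording both
   lets any of LT/GT/LTU/GTU be answered below. */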
4898 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4899 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4900 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4901
4902 if (wi::eq_p (ptrueop0, ptrueop1))
4903 return comparison_result (code, CMP_EQ);
4904 else
4905 {
4906 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4907 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4908 return comparison_result (code, cr);
4909 }
4910 }
4911
4912 /* Optimize comparisons with upper and lower bounds. */
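/* For example, if nonzero_bits shows that only the low eight bits of
   TRUEOP0 can be set, then mmax is at most 255, so
   (gtu x (const_int 255)) folds to const0_rtx and
   (leu x (const_int 255)) folds to const_true_rtx. */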
4913 if (HWI_COMPUTABLE_MODE_P (mode)
4914 && CONST_INT_P (trueop1))
4915 {
4916 int sign;
4917 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4918 HOST_WIDE_INT val = INTVAL (trueop1);
4919 HOST_WIDE_INT mmin, mmax;
4920
4921 if (code == GEU
4922 || code == LEU
4923 || code == GTU
4924 || code == LTU)
4925 sign = 0;
4926 else
4927 sign = 1;
4928
4929 /* Get a reduced range if the sign bit is zero. */
4930 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4931 {
4932 mmin = 0;
4933 mmax = nonzero;
4934 }
4935 else
4936 {
4937 rtx mmin_rtx, mmax_rtx;
4938 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4939
4940 mmin = INTVAL (mmin_rtx);
4941 mmax = INTVAL (mmax_rtx);
4942 if (sign)
4943 {
4944 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4945
4946 mmin >>= (sign_copies - 1);
4947 mmax >>= (sign_copies - 1);
4948 }
4949 }
4950
4951 switch (code)
4952 {
4953 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4954 case GEU:
4955 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4956 return const_true_rtx;
4957 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4958 return const0_rtx;
4959 break;
4960 case GE:
4961 if (val <= mmin)
4962 return const_true_rtx;
4963 if (val > mmax)
4964 return const0_rtx;
4965 break;
4966
4967 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4968 case LEU:
4969 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4970 return const_true_rtx;
4971 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4972 return const0_rtx;
4973 break;
4974 case LE:
4975 if (val >= mmax)
4976 return const_true_rtx;
4977 if (val < mmin)
4978 return const0_rtx;
4979 break;
4980
4981 case EQ:
4982 /* x == y is always false for y out of range. */
4983 if (val < mmin || val > mmax)
4984 return const0_rtx;
4985 break;
4986
4987 /* x > y is always false for y >= mmax, always true for y < mmin. */
4988 case GTU:
4989 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4990 return const0_rtx;
4991 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4992 return const_true_rtx;
4993 break;
4994 case GT:
4995 if (val >= mmax)
4996 return const0_rtx;
4997 if (val < mmin)
4998 return const_true_rtx;
4999 break;
5000
5001 /* x < y is always false for y <= mmin, always true for y > mmax. */
5002 case LTU:
5003 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5004 return const0_rtx;
5005 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5006 return const_true_rtx;
5007 break;
5008 case LT:
5009 if (val <= mmin)
5010 return const0_rtx;
5011 if (val > mmax)
5012 return const_true_rtx;
5013 break;
5014
5015 case NE:
5016 /* x != y is always true for y out of range. */
5017 if (val < mmin || val > mmax)
5018 return const_true_rtx;
5019 break;
5020
5021 default:
5022 break;
5023 }
5024 }
5025
5026 /* Optimize integer comparisons with zero. */
5027 if (trueop1 == const0_rtx)
5028 {
5029 /* Some addresses are known to be nonzero. We don't know
5030 their sign, but equality comparisons are known. */
5031 if (nonzero_address_p (trueop0))
5032 {
5033 if (code == EQ || code == LEU)
5034 return const0_rtx;
5035 if (code == NE || code == GTU)
5036 return const_true_rtx;
5037 }
5038
5039 /* See if the first operand is an IOR with a constant. If so, we
5040 may be able to determine the result of this comparison. */
5041 if (GET_CODE (op0) == IOR)
5042 {
5043 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5044 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5045 {
5046 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5047 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5048 && (UINTVAL (inner_const)
5049 & ((unsigned HOST_WIDE_INT) 1
5050 << sign_bitnum)));
5051
5052 switch (code)
5053 {
5054 case EQ:
5055 case LEU:
5056 return const0_rtx;
5057 case NE:
5058 case GTU:
5059 return const_true_rtx;
5060 case LT:
5061 case LE:
5062 if (has_sign)
5063 return const_true_rtx;
5064 break;
5065 case GT:
5066 case GE:
5067 if (has_sign)
5068 return const0_rtx;
5069 break;
5070 default:
5071 break;
5072 }
5073 }
5074 }
5075 }
5076
5077 /* Optimize comparison of ABS with zero. */
5078 if (trueop1 == CONST0_RTX (mode)
5079 && (GET_CODE (trueop0) == ABS
5080 || (GET_CODE (trueop0) == FLOAT_EXTEND
5081 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5082 {
5083 switch (code)
5084 {
5085 case LT:
5086 /* Optimize abs(x) < 0.0. */
5087 if (!HONOR_SNANS (mode)
5088 && (!INTEGRAL_MODE_P (mode)
5089 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5090 {
5091 if (INTEGRAL_MODE_P (mode)
5092 && (issue_strict_overflow_warning
5093 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5094 warning (OPT_Wstrict_overflow,
5095 ("assuming signed overflow does not occur when "
5096 "assuming abs (x) < 0 is false"));
5097 return const0_rtx;
5098 }
5099 break;
5100
5101 case GE:
5102 /* Optimize abs(x) >= 0.0. */
5103 if (!HONOR_NANS (mode)
5104 && (!INTEGRAL_MODE_P (mode)
5105 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5106 {
5107 if (INTEGRAL_MODE_P (mode)
5108 && (issue_strict_overflow_warning
5109 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5110 warning (OPT_Wstrict_overflow,
5111 ("assuming signed overflow does not occur when "
5112 "assuming abs (x) >= 0 is true"));
5113 return const_true_rtx;
5114 }
5115 break;
5116
5117 case UNGE:
5118 /* Optimize ! (abs(x) < 0.0). */
5119 return const_true_rtx;
5120
5121 default:
5122 break;
5123 }
5124 }
5125
5126 return 0;
5127 }
5128 \f
5129 /* Simplify CODE, an operation with result mode MODE and three operands,
5130 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5131 a constant. Return 0 if no simplification is possible. */
5132
5133 rtx
5134 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5135 machine_mode op0_mode, rtx op0, rtx op1,
5136 rtx op2)
5137 {
5138 unsigned int width = GET_MODE_PRECISION (mode);
5139 bool any_change = false;
5140 rtx tem, trueop2;
5141
5142 /* VOIDmode means "infinite" precision. */
5143 if (width == 0)
5144 width = HOST_BITS_PER_WIDE_INT;
5145
5146 switch (code)
5147 {
5148 case FMA:
5149 /* Simplify negations around the multiplication. */
5150 /* -a * -b + c => a * b + c. */
5151 if (GET_CODE (op0) == NEG)
5152 {
5153 tem = simplify_unary_operation (NEG, mode, op1, mode);
5154 if (tem)
5155 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5156 }
5157 else if (GET_CODE (op1) == NEG)
5158 {
5159 tem = simplify_unary_operation (NEG, mode, op0, mode);
5160 if (tem)
5161 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5162 }
5163
5164 /* Canonicalize the two multiplication operands. */
5165 /* a * -b + c => -b * a + c. */
5166 if (swap_commutative_operands_p (op0, op1))
5167 std::swap (op0, op1), any_change = true;
5168
5169 if (any_change)
5170 return gen_rtx_FMA (mode, op0, op1, op2);
5171 return NULL_RTX;
5172
5173 case SIGN_EXTRACT:
5174 case ZERO_EXTRACT:
5175 if (CONST_INT_P (op0)
5176 && CONST_INT_P (op1)
5177 && CONST_INT_P (op2)
5178 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5179 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5180 {
5181 /* Extracting a bit-field from a constant. */
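/* E.g. (zero_extract (const_int 0xb4) (const_int 4) (const_int 4))
   with !BITS_BIG_ENDIAN shifts the value right by 4 and masks it to
   four bits, giving (const_int 11); the corresponding sign_extract
   propagates bit 3 and gives (const_int -5). */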
5182 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5183 HOST_WIDE_INT op1val = INTVAL (op1);
5184 HOST_WIDE_INT op2val = INTVAL (op2);
5185 if (BITS_BIG_ENDIAN)
5186 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5187 else
5188 val >>= op2val;
5189
5190 if (HOST_BITS_PER_WIDE_INT != op1val)
5191 {
5192 /* First zero-extend. */
5193 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5194 /* If desired, propagate sign bit. */
5195 if (code == SIGN_EXTRACT
5196 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5197 != 0)
5198 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5199 }
5200
5201 return gen_int_mode (val, mode);
5202 }
5203 break;
5204
5205 case IF_THEN_ELSE:
5206 if (CONST_INT_P (op0))
5207 return op0 != const0_rtx ? op1 : op2;
5208
5209 /* Convert c ? a : a into "a". */
5210 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5211 return op1;
5212
5213 /* Convert a != b ? a : b into "a". */
5214 if (GET_CODE (op0) == NE
5215 && ! side_effects_p (op0)
5216 && ! HONOR_NANS (mode)
5217 && ! HONOR_SIGNED_ZEROS (mode)
5218 && ((rtx_equal_p (XEXP (op0, 0), op1)
5219 && rtx_equal_p (XEXP (op0, 1), op2))
5220 || (rtx_equal_p (XEXP (op0, 0), op2)
5221 && rtx_equal_p (XEXP (op0, 1), op1))))
5222 return op1;
5223
5224 /* Convert a == b ? a : b into "b". */
5225 if (GET_CODE (op0) == EQ
5226 && ! side_effects_p (op0)
5227 && ! HONOR_NANS (mode)
5228 && ! HONOR_SIGNED_ZEROS (mode)
5229 && ((rtx_equal_p (XEXP (op0, 0), op1)
5230 && rtx_equal_p (XEXP (op0, 1), op2))
5231 || (rtx_equal_p (XEXP (op0, 0), op2)
5232 && rtx_equal_p (XEXP (op0, 1), op1))))
5233 return op2;
5234
5235 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5236 {
5237 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5238 ? GET_MODE (XEXP (op0, 1))
5239 : GET_MODE (XEXP (op0, 0)));
5240 rtx temp;
5241
5242 /* Look for the constants STORE_FLAG_VALUE and zero in op1 and op2. */
5243 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5244 {
5245 HOST_WIDE_INT t = INTVAL (op1);
5246 HOST_WIDE_INT f = INTVAL (op2);
5247
5248 if (t == STORE_FLAG_VALUE && f == 0)
5249 code = GET_CODE (op0);
5250 else if (t == 0 && f == STORE_FLAG_VALUE)
5251 {
5252 enum rtx_code tmp;
5253 tmp = reversed_comparison_code (op0, NULL_RTX);
5254 if (tmp == UNKNOWN)
5255 break;
5256 code = tmp;
5257 }
5258 else
5259 break;
5260
5261 return simplify_gen_relational (code, mode, cmp_mode,
5262 XEXP (op0, 0), XEXP (op0, 1));
5263 }
5264
5265 if (cmp_mode == VOIDmode)
5266 cmp_mode = op0_mode;
5267 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5268 cmp_mode, XEXP (op0, 0),
5269 XEXP (op0, 1));
5270
5271 /* See if any simplifications were possible. */
5272 if (temp)
5273 {
5274 if (CONST_INT_P (temp))
5275 return temp == const0_rtx ? op2 : op1;
5276 else if (temp)
5277 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5278 }
5279 }
5280 break;
5281
5282 case VEC_MERGE:
5283 gcc_assert (GET_MODE (op0) == mode);
5284 gcc_assert (GET_MODE (op1) == mode);
5285 gcc_assert (VECTOR_MODE_P (mode));
5286 trueop2 = avoid_constant_pool_reference (op2);
5287 if (CONST_INT_P (trueop2))
5288 {
5289 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5290 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5291 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5292 unsigned HOST_WIDE_INT mask;
5293 if (n_elts == HOST_BITS_PER_WIDE_INT)
5294 mask = -1;
5295 else
5296 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5297
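/* A set bit in SEL takes the corresponding element from op0, a clear
   bit takes it from op1; e.g. with four elements, sel == 0 yields op1
   unchanged and sel == 0xf yields op0 unchanged, which the two tests
   below exploit. */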
5298 if (!(sel & mask) && !side_effects_p (op0))
5299 return op1;
5300 if ((sel & mask) == mask && !side_effects_p (op1))
5301 return op0;
5302
5303 rtx trueop0 = avoid_constant_pool_reference (op0);
5304 rtx trueop1 = avoid_constant_pool_reference (op1);
5305 if (GET_CODE (trueop0) == CONST_VECTOR
5306 && GET_CODE (trueop1) == CONST_VECTOR)
5307 {
5308 rtvec v = rtvec_alloc (n_elts);
5309 unsigned int i;
5310
5311 for (i = 0; i < n_elts; i++)
5312 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5313 ? CONST_VECTOR_ELT (trueop0, i)
5314 : CONST_VECTOR_ELT (trueop1, i));
5315 return gen_rtx_CONST_VECTOR (mode, v);
5316 }
5317
5318 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5319 if no element from a appears in the result. */
5320 if (GET_CODE (op0) == VEC_MERGE)
5321 {
5322 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5323 if (CONST_INT_P (tem))
5324 {
5325 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5326 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5327 return simplify_gen_ternary (code, mode, mode,
5328 XEXP (op0, 1), op1, op2);
5329 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5330 return simplify_gen_ternary (code, mode, mode,
5331 XEXP (op0, 0), op1, op2);
5332 }
5333 }
5334 if (GET_CODE (op1) == VEC_MERGE)
5335 {
5336 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5337 if (CONST_INT_P (tem))
5338 {
5339 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5340 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5341 return simplify_gen_ternary (code, mode, mode,
5342 op0, XEXP (op1, 1), op2);
5343 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5344 return simplify_gen_ternary (code, mode, mode,
5345 op0, XEXP (op1, 0), op2);
5346 }
5347 }
5348
5349 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5350 with a. */
5351 if (GET_CODE (op0) == VEC_DUPLICATE
5352 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5353 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5354 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5355 {
5356 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5357 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5358 {
5359 if (XEXP (XEXP (op0, 0), 0) == op1
5360 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5361 return op1;
5362 }
5363 }
5364 }
5365
5366 if (rtx_equal_p (op0, op1)
5367 && !side_effects_p (op2) && !side_effects_p (op1))
5368 return op0;
5369
5370 break;
5371
5372 default:
5373 gcc_unreachable ();
5374 }
5375
5376 return 0;
5377 }
5378
5379 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5380 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5381 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5382
5383 Works by unpacking OP into a collection of 8-bit values
5384 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5385 and then repacking them again for OUTERMODE. */
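/* For example, (const_int 0x1234) viewed in HImode unpacks into the
   little-endian byte array { 0x34, 0x12 }, from which the bytes of
   OUTERMODE at offset BYTE are taken and reassembled into the new
   constant. */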
5386
5387 static rtx
5388 simplify_immed_subreg (machine_mode outermode, rtx op,
5389 machine_mode innermode, unsigned int byte)
5390 {
5391 enum {
5392 value_bit = 8,
5393 value_mask = (1 << value_bit) - 1
5394 };
5395 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5396 int value_start;
5397 int i;
5398 int elem;
5399
5400 int num_elem;
5401 rtx * elems;
5402 int elem_bitsize;
5403 rtx result_s;
5404 rtvec result_v = NULL;
5405 enum mode_class outer_class;
5406 machine_mode outer_submode;
5407 int max_bitsize;
5408
5409 /* Some ports misuse CCmode. */
5410 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5411 return op;
5412
5413 /* We have no way to represent a complex constant at the rtl level. */
5414 if (COMPLEX_MODE_P (outermode))
5415 return NULL_RTX;
5416
5417 /* We support any size mode. */
5418 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5419 GET_MODE_BITSIZE (innermode));
5420
5421 /* Unpack the value. */
5422
5423 if (GET_CODE (op) == CONST_VECTOR)
5424 {
5425 num_elem = CONST_VECTOR_NUNITS (op);
5426 elems = &CONST_VECTOR_ELT (op, 0);
5427 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5428 }
5429 else
5430 {
5431 num_elem = 1;
5432 elems = &op;
5433 elem_bitsize = max_bitsize;
5434 }
5435 /* If this asserts, it is too complicated; reducing value_bit may help. */
5436 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5437 /* I don't know how to handle endianness of sub-units. */
5438 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5439
5440 for (elem = 0; elem < num_elem; elem++)
5441 {
5442 unsigned char * vp;
5443 rtx el = elems[elem];
5444
5445 /* Vectors are kept in target memory order. (This is probably
5446 a mistake.) */
5447 {
5448 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5449 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5450 / BITS_PER_UNIT);
5451 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5452 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5453 unsigned bytele = (subword_byte % UNITS_PER_WORD
5454 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5455 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5456 }
5457
5458 switch (GET_CODE (el))
5459 {
5460 case CONST_INT:
5461 for (i = 0;
5462 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5463 i += value_bit)
5464 *vp++ = INTVAL (el) >> i;
5465 /* CONST_INTs are always logically sign-extended. */
5466 for (; i < elem_bitsize; i += value_bit)
5467 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5468 break;
5469
5470 case CONST_WIDE_INT:
5471 {
5472 rtx_mode_t val = std::make_pair (el, innermode);
5473 unsigned char extend = wi::sign_mask (val);
5474 int prec = wi::get_precision (val);
5475 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5476 *vp++ = wi::extract_uhwi (val, i, value_bit);
5477 for (; i < elem_bitsize; i += value_bit)
5478 *vp++ = extend;
5479 }
5480 break;
5481
5482 case CONST_DOUBLE:
5483 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5484 {
5485 unsigned char extend = 0;
5486 /* If this triggers, someone should have generated a
5487 CONST_INT instead. */
5488 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5489
5490 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5491 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5492 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5493 {
5494 *vp++
5495 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5496 i += value_bit;
5497 }
5498
5499 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5500 extend = -1;
5501 for (; i < elem_bitsize; i += value_bit)
5502 *vp++ = extend;
5503 }
5504 else
5505 {
5506 /* This is big enough for anything on the platform. */
5507 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5508 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5509
5510 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5511 gcc_assert (bitsize <= elem_bitsize);
5512 gcc_assert (bitsize % value_bit == 0);
5513
5514 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5515 GET_MODE (el));
5516
5517 /* real_to_target produces its result in words affected by
5518 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5519 and use WORDS_BIG_ENDIAN instead; see the documentation
5520 of SUBREG in rtl.texi. */
5521 for (i = 0; i < bitsize; i += value_bit)
5522 {
5523 int ibase;
5524 if (WORDS_BIG_ENDIAN)
5525 ibase = bitsize - 1 - i;
5526 else
5527 ibase = i;
5528 *vp++ = tmp[ibase / 32] >> i % 32;
5529 }
5530
5531 /* It shouldn't matter what's done here, so fill it with
5532 zero. */
5533 for (; i < elem_bitsize; i += value_bit)
5534 *vp++ = 0;
5535 }
5536 break;
5537
5538 case CONST_FIXED:
5539 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5540 {
5541 for (i = 0; i < elem_bitsize; i += value_bit)
5542 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5543 }
5544 else
5545 {
5546 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5547 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5548 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5549 i += value_bit)
5550 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5551 >> (i - HOST_BITS_PER_WIDE_INT);
5552 for (; i < elem_bitsize; i += value_bit)
5553 *vp++ = 0;
5554 }
5555 break;
5556
5557 default:
5558 gcc_unreachable ();
5559 }
5560 }
5561
5562 /* Now, pick the right byte to start with. */
5563 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5564 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5565 will already have offset 0. */
5566 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5567 {
5568 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5569 - byte);
5570 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5571 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5572 byte = (subword_byte % UNITS_PER_WORD
5573 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5574 }
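  /* Illustrative example: on a target with both WORDS_BIG_ENDIAN and
     BYTES_BIG_ENDIAN set, an SImode subreg of a DImode constant at
     memory offset 0 is renumbered to byte 4 here, since memory offset 0
     on big-endian holds the most-significant 32 bits.  */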
5575
5576 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5577 so if the adjustment above underflowed, it will be very large.) */
5578 gcc_assert (byte < GET_MODE_SIZE (innermode));
5579
5580 /* Convert from bytes to chunks of size value_bit. */
5581 value_start = byte * (BITS_PER_UNIT / value_bit);
5582
5583 /* Re-pack the value. */
5584
5585 if (VECTOR_MODE_P (outermode))
5586 {
5587 num_elem = GET_MODE_NUNITS (outermode);
5588 result_v = rtvec_alloc (num_elem);
5589 elems = &RTVEC_ELT (result_v, 0);
5590 outer_submode = GET_MODE_INNER (outermode);
5591 }
5592 else
5593 {
5594 num_elem = 1;
5595 elems = &result_s;
5596 outer_submode = outermode;
5597 }
5598
5599 outer_class = GET_MODE_CLASS (outer_submode);
5600 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5601
5602 gcc_assert (elem_bitsize % value_bit == 0);
5603 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5604
5605 for (elem = 0; elem < num_elem; elem++)
5606 {
5607 unsigned char *vp;
5608
5609 /* Vectors are stored in target memory order. (This is probably
5610 a mistake.) */
5611 {
5612 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5613 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5614 / BITS_PER_UNIT);
5615 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5616 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5617 unsigned bytele = (subword_byte % UNITS_PER_WORD
5618 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5619 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5620 }
5621
5622 switch (outer_class)
5623 {
5624 case MODE_INT:
5625 case MODE_PARTIAL_INT:
5626 {
5627 int u;
5628 int base = 0;
5629 int units
5630 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5631 / HOST_BITS_PER_WIDE_INT;
5632 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5633 wide_int r;
5634
5635 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5636 return NULL_RTX;
5637 for (u = 0; u < units; u++)
5638 {
5639 unsigned HOST_WIDE_INT buf = 0;
5640 for (i = 0;
5641 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5642 i += value_bit)
5643 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5644
5645 tmp[u] = buf;
5646 base += HOST_BITS_PER_WIDE_INT;
5647 }
5648 r = wide_int::from_array (tmp, units,
5649 GET_MODE_PRECISION (outer_submode));
5650 #if TARGET_SUPPORTS_WIDE_INT == 0
5651 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5652 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5653 return NULL_RTX;
5654 #endif
5655 elems[elem] = immed_wide_int_const (r, outer_submode);
5656 }
5657 break;
5658
5659 case MODE_FLOAT:
5660 case MODE_DECIMAL_FLOAT:
5661 {
5662 REAL_VALUE_TYPE r;
5663 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5664
5665 /* real_from_target wants its input in words affected by
5666 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5667 and use WORDS_BIG_ENDIAN instead; see the documentation
5668 of SUBREG in rtl.texi. */
5669 for (i = 0; i < max_bitsize / 32; i++)
5670 tmp[i] = 0;
5671 for (i = 0; i < elem_bitsize; i += value_bit)
5672 {
5673 int ibase;
5674 if (WORDS_BIG_ENDIAN)
5675 ibase = elem_bitsize - 1 - i;
5676 else
5677 ibase = i;
5678 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5679 }
5680
5681 real_from_target (&r, tmp, outer_submode);
5682 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5683 }
5684 break;
5685
5686 case MODE_FRACT:
5687 case MODE_UFRACT:
5688 case MODE_ACCUM:
5689 case MODE_UACCUM:
5690 {
5691 FIXED_VALUE_TYPE f;
5692 f.data.low = 0;
5693 f.data.high = 0;
5694 f.mode = outer_submode;
5695
5696 for (i = 0;
5697 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5698 i += value_bit)
5699 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5700 for (; i < elem_bitsize; i += value_bit)
5701 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5702 << (i - HOST_BITS_PER_WIDE_INT));
5703
5704 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5705 }
5706 break;
5707
5708 default:
5709 gcc_unreachable ();
5710 }
5711 }
5712 if (VECTOR_MODE_P (outermode))
5713 return gen_rtx_CONST_VECTOR (outermode, result_v);
5714 else
5715 return result_s;
5716 }
5717
5718 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5719 Return 0 if no simplifications are possible. */
5720 rtx
5721 simplify_subreg (machine_mode outermode, rtx op,
5722 machine_mode innermode, unsigned int byte)
5723 {
5724 /* Little bit of sanity checking. */
5725 gcc_assert (innermode != VOIDmode);
5726 gcc_assert (outermode != VOIDmode);
5727 gcc_assert (innermode != BLKmode);
5728 gcc_assert (outermode != BLKmode);
5729
5730 gcc_assert (GET_MODE (op) == innermode
5731 || GET_MODE (op) == VOIDmode);
5732
5733 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5734 return NULL_RTX;
5735
5736 if (byte >= GET_MODE_SIZE (innermode))
5737 return NULL_RTX;
5738
5739 if (outermode == innermode && !byte)
5740 return op;
5741
5742 if (CONST_SCALAR_INT_P (op)
5743 || CONST_DOUBLE_AS_FLOAT_P (op)
5744 || GET_CODE (op) == CONST_FIXED
5745 || GET_CODE (op) == CONST_VECTOR)
5746 return simplify_immed_subreg (outermode, op, innermode, byte);
5747
5748 /* Changing mode twice with SUBREG => just change it once,
5749 or not at all if changing back to the starting mode. */
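  /* For example, with a hypothetical pseudo R:
       (subreg:QI (subreg:HI (reg:SI R) 0) 0) -> (subreg:QI (reg:SI R) 0)
       (subreg:SI (subreg:HI (reg:SI R) 0) 0) -> (reg:SI R).  */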
5750 if (GET_CODE (op) == SUBREG)
5751 {
5752 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5753 int final_offset = byte + SUBREG_BYTE (op);
5754 rtx newx;
5755
5756 if (outermode == innermostmode
5757 && byte == 0 && SUBREG_BYTE (op) == 0)
5758 return SUBREG_REG (op);
5759
5760 /* The SUBREG_BYTE represents the offset, as if the value were stored
5761 in memory. An irritating exception is a paradoxical subreg, where
5762 we define SUBREG_BYTE to be 0; on big-endian machines the value
5763 would otherwise be negative. Undo this exception for the moment. */
5764 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5765 {
5766 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5767 if (WORDS_BIG_ENDIAN)
5768 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5769 if (BYTES_BIG_ENDIAN)
5770 final_offset += difference % UNITS_PER_WORD;
5771 }
5772 if (SUBREG_BYTE (op) == 0
5773 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5774 {
5775 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5776 if (WORDS_BIG_ENDIAN)
5777 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5778 if (BYTES_BIG_ENDIAN)
5779 final_offset += difference % UNITS_PER_WORD;
5780 }
5781
5782 /* See whether resulting subreg will be paradoxical. */
5783 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5784 {
5785 /* In nonparadoxical subregs we can't handle negative offsets. */
5786 if (final_offset < 0)
5787 return NULL_RTX;
5788 /* Bail out in case resulting subreg would be incorrect. */
5789 if (final_offset % GET_MODE_SIZE (outermode)
5790 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5791 return NULL_RTX;
5792 }
5793 else
5794 {
5795 int offset = 0;
5796 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5797
5798 /* For a paradoxical subreg, see whether we are still looking at the
5799 lower part. If so, our SUBREG_BYTE will be 0. */
5800 if (WORDS_BIG_ENDIAN)
5801 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5802 if (BYTES_BIG_ENDIAN)
5803 offset += difference % UNITS_PER_WORD;
5804 if (offset == final_offset)
5805 final_offset = 0;
5806 else
5807 return NULL_RTX;
5808 }
5809
5810 /* Recurse for further possible simplifications. */
5811 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5812 final_offset);
5813 if (newx)
5814 return newx;
5815 if (validate_subreg (outermode, innermostmode,
5816 SUBREG_REG (op), final_offset))
5817 {
5818 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5819 if (SUBREG_PROMOTED_VAR_P (op)
5820 && SUBREG_PROMOTED_SIGN (op) >= 0
5821 && GET_MODE_CLASS (outermode) == MODE_INT
5822 && IN_RANGE (GET_MODE_SIZE (outermode),
5823 GET_MODE_SIZE (innermode),
5824 GET_MODE_SIZE (innermostmode))
5825 && subreg_lowpart_p (newx))
5826 {
5827 SUBREG_PROMOTED_VAR_P (newx) = 1;
5828 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5829 }
5830 return newx;
5831 }
5832 return NULL_RTX;
5833 }
5834
5835 /* SUBREG of a hard register => just change the register number
5836 and/or mode. If the hard register is not valid in that mode,
5837 suppress this simplification. If the hard register is the stack,
5838 frame, or argument pointer, leave this as a SUBREG. */
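  /* Illustrative example, with hypothetical register numbers: on a
     little-endian target where a DImode value occupies two consecutive
     SImode hard registers, (subreg:SI (reg:DI 10) 4) can become
     (reg:SI 11); the exact mapping is left to simplify_subreg_regno.  */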
5839
5840 if (REG_P (op) && HARD_REGISTER_P (op))
5841 {
5842 unsigned int regno, final_regno;
5843
5844 regno = REGNO (op);
5845 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5846 if (HARD_REGISTER_NUM_P (final_regno))
5847 {
5848 rtx x;
5849 int final_offset = byte;
5850
5851 /* Adjust offset for paradoxical subregs. */
5852 if (byte == 0
5853 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5854 {
5855 int difference = (GET_MODE_SIZE (innermode)
5856 - GET_MODE_SIZE (outermode));
5857 if (WORDS_BIG_ENDIAN)
5858 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5859 if (BYTES_BIG_ENDIAN)
5860 final_offset += difference % UNITS_PER_WORD;
5861 }
5862
5863 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5864
5865 /* Propagate the original regno. We don't have any way to specify
5866 an offset inside the original regno, so do so only for the lowpart.
5867 The information is used only by alias analysis, which cannot
5868 grok partial registers anyway. */
5869
5870 if (subreg_lowpart_offset (outermode, innermode) == byte)
5871 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5872 return x;
5873 }
5874 }
5875
5876 /* If we have a SUBREG of a register that we are replacing with a MEM,
5877 make a new MEM and try replacing the SUBREG with it. Don't do this
5878 if the MEM has a mode-dependent address or if we would be widening
5879 it. */
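  /* For example, (subreg:QI (mem:SI ADDR) 3) becomes a QImode MEM whose
     address is ADDR displaced by 3 bytes (illustrative; adjust_address_nv
     does the actual address arithmetic).  */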
5880
5881 if (MEM_P (op)
5882 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5883 /* Allow splitting of volatile memory references in case we don't
5884 have an instruction to move the whole thing. */
5885 && (! MEM_VOLATILE_P (op)
5886 || ! have_insn_for (SET, innermode))
5887 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5888 return adjust_address_nv (op, outermode, byte);
5889
5890 /* Handle complex values represented as CONCAT
5891 of real and imaginary part. */
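  /* For example, with hypothetical registers A and B:
       (subreg:SF (concat:SC (reg:SF A) (reg:SF B)) 4) -> (reg:SF B),
     since byte 4 falls entirely within the imaginary part.  */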
5892 if (GET_CODE (op) == CONCAT)
5893 {
5894 unsigned int part_size, final_offset;
5895 rtx part, res;
5896
5897 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5898 if (byte < part_size)
5899 {
5900 part = XEXP (op, 0);
5901 final_offset = byte;
5902 }
5903 else
5904 {
5905 part = XEXP (op, 1);
5906 final_offset = byte - part_size;
5907 }
5908
5909 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5910 return NULL_RTX;
5911
5912 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5913 if (res)
5914 return res;
5915 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5916 return gen_rtx_SUBREG (outermode, part, final_offset);
5917 return NULL_RTX;
5918 }
5919
5920 /* A SUBREG resulting from a zero extension may fold to zero if
5921 it extracts higher bits than the ZERO_EXTEND's source provides. */
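  /* For example, on a little-endian target
       (subreg:SI (zero_extend:DI (reg:SI X)) 4) -> (const_int 0),
     because the selected bits start at bit 32, beyond the 32-bit source.  */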
5922 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5923 {
5924 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5925 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5926 return CONST0_RTX (outermode);
5927 }
5928
5929 if (SCALAR_INT_MODE_P (outermode)
5930 && SCALAR_INT_MODE_P (innermode)
5931 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5932 && byte == subreg_lowpart_offset (outermode, innermode))
5933 {
5934 rtx tem = simplify_truncation (outermode, op, innermode);
5935 if (tem)
5936 return tem;
5937 }
5938
5939 return NULL_RTX;
5940 }
5941
5942 /* Make a SUBREG operation or equivalent if it folds. */
5943
5944 rtx
5945 simplify_gen_subreg (machine_mode outermode, rtx op,
5946 machine_mode innermode, unsigned int byte)
5947 {
5948 rtx newx;
5949
5950 newx = simplify_subreg (outermode, op, innermode, byte);
5951 if (newx)
5952 return newx;
5953
5954 if (GET_CODE (op) == SUBREG
5955 || GET_CODE (op) == CONCAT
5956 || GET_MODE (op) == VOIDmode)
5957 return NULL_RTX;
5958
5959 if (validate_subreg (outermode, innermode, op, byte))
5960 return gen_rtx_SUBREG (outermode, op, byte);
5961
5962 return NULL_RTX;
5963 }
5964
5965 /* Simplify X, an rtx expression.
5966
5967 Return the simplified expression or NULL if no simplifications
5968 were possible.
5969
5970 This is the preferred entry point into the simplification routines;
5971 however, we still allow passes to call the more specific routines.
5972
5973 Right now GCC has three (yes, three) major bodies of RTL simplification
5974 code that need to be unified.
5975
5976 1. fold_rtx in cse.c. This code uses various CSE specific
5977 information to aid in RTL simplification.
5978
5979 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5980 it uses combine specific information to aid in RTL
5981 simplification.
5982
5983 3. The routines in this file.
5984
5985
5986 Long term we want to only have one body of simplification code; to
5987 get to that state I recommend the following steps:
5988
5989 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5990 that do not depend on pass-specific state into these routines.
5991
5992 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5993 use this routine whenever possible.
5994
5995 3. Allow for pass dependent state to be provided to these
5996 routines and add simplifications based on the pass dependent
5997 state. Remove code from cse.c & combine.c that becomes
5998 redundant/dead.
5999
6000 It will take time, but ultimately the compiler will be easier to
6001 maintain and improve. It's totally silly that when we add a
6002 simplification it needs to be added in four places (three for RTL
6003 simplification and one for tree simplification). */
6004
6005 rtx
6006 simplify_rtx (const_rtx x)
6007 {
6008 const enum rtx_code code = GET_CODE (x);
6009 const machine_mode mode = GET_MODE (x);
6010
6011 switch (GET_RTX_CLASS (code))
6012 {
6013 case RTX_UNARY:
6014 return simplify_unary_operation (code, mode,
6015 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6016 case RTX_COMM_ARITH:
6017 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6018 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6019
6020 /* Fall through.... */
6021
6022 case RTX_BIN_ARITH:
6023 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6024
6025 case RTX_TERNARY:
6026 case RTX_BITFIELD_OPS:
6027 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6028 XEXP (x, 0), XEXP (x, 1),
6029 XEXP (x, 2));
6030
6031 case RTX_COMPARE:
6032 case RTX_COMM_COMPARE:
6033 return simplify_relational_operation (code, mode,
6034 ((GET_MODE (XEXP (x, 0))
6035 != VOIDmode)
6036 ? GET_MODE (XEXP (x, 0))
6037 : GET_MODE (XEXP (x, 1))),
6038 XEXP (x, 0),
6039 XEXP (x, 1));
6040
6041 case RTX_EXTRA:
6042 if (code == SUBREG)
6043 return simplify_subreg (mode, SUBREG_REG (x),
6044 GET_MODE (SUBREG_REG (x)),
6045 SUBREG_BYTE (x));
6046 break;
6047
6048 case RTX_OBJ:
6049 if (code == LO_SUM)
6050 {
6051 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6052 if (GET_CODE (XEXP (x, 0)) == HIGH
6053 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6054 return XEXP (x, 1);
6055 }
6056 break;
6057
6058 default:
6059 break;
6060 }
6061 return NULL;
6062 }