1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "varasm.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "diagnostic-core.h"
37 #include "ggc.h"
38 #include "target.h"
39
40 /* Simplification and canonicalization of RTL. */
41
42 /* Much code operates on (low, high) pairs; the low value is an
43 unsigned wide int, the high value a signed wide int. We
44 occasionally need to sign extend from low to high as if low were a
45 signed wide int. */
46 #define HWI_SIGN_EXTEND(low) \
47 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
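/* For example, if the sign bit of LOW is set, HWI_SIGN_EXTEND (LOW) is
   (HOST_WIDE_INT) -1, otherwise it is 0; this is the value the high half
   of the pair would need if LOW were treated as signed.  */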
48
49 static rtx neg_const_int (enum machine_mode, const_rtx);
50 static bool plus_minus_operand_p (const_rtx);
51 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
52 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
53 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
54 unsigned int);
55 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
56 rtx, rtx);
57 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
58 enum machine_mode, rtx, rtx);
59 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
60 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
61 rtx, rtx, rtx, rtx);
62 \f
63 /* Negate a CONST_INT rtx, truncating (because a conversion from a
64 maximally negative number can overflow). */
65 static rtx
66 neg_const_int (enum machine_mode mode, const_rtx i)
67 {
68 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
69 }
70
71 /* Test whether the expression X is an immediate constant that represents
72 the most significant bit of machine mode MODE. */
73
74 bool
75 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 {
77 unsigned HOST_WIDE_INT val;
78 unsigned int width;
79
80 if (GET_MODE_CLASS (mode) != MODE_INT)
81 return false;
82
83 width = GET_MODE_PRECISION (mode);
84 if (width == 0)
85 return false;
86
87 if (width <= HOST_BITS_PER_WIDE_INT
88 && CONST_INT_P (x))
89 val = INTVAL (x);
90 #if TARGET_SUPPORTS_WIDE_INT
91 else if (CONST_WIDE_INT_P (x))
92 {
93 unsigned int i;
94 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
95 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
96 return false;
97 for (i = 0; i < elts - 1; i++)
98 if (CONST_WIDE_INT_ELT (x, i) != 0)
99 return false;
100 val = CONST_WIDE_INT_ELT (x, elts - 1);
101 width %= HOST_BITS_PER_WIDE_INT;
102 if (width == 0)
103 width = HOST_BITS_PER_WIDE_INT;
104 }
105 #else
106 else if (width <= HOST_BITS_PER_DOUBLE_INT
107 && CONST_DOUBLE_AS_INT_P (x)
108 && CONST_DOUBLE_LOW (x) == 0)
109 {
110 val = CONST_DOUBLE_HIGH (x);
111 width -= HOST_BITS_PER_WIDE_INT;
112 }
113 #endif
114 else
115 /* X is not an integer constant. */
116 return false;
117
118 if (width < HOST_BITS_PER_WIDE_INT)
119 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
120 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
121 }
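/* For example, in QImode the test holds for (const_int -128), whose low
   eight bits are 0x80, and fails for (const_int 64).  */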
122
123 /* Test whether VAL is equal to the most significant bit of mode MODE
124 (after masking with the mode mask of MODE). Returns false if the
125 precision of MODE is too large to handle. */
126
127 bool
128 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
129 {
130 unsigned int width;
131
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
134
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
138
139 val &= GET_MODE_MASK (mode);
140 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
141 }
142
143 /* Test whether the most significant bit of mode MODE is set in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
147 {
148 unsigned int width;
149
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
152
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
156
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val != 0;
159 }
160
161 /* Test whether the most significant bit of mode MODE is clear in VAL.
162 Returns false if the precision of MODE is too large to handle. */
163 bool
164 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
165 {
166 unsigned int width;
167
168 if (GET_MODE_CLASS (mode) != MODE_INT)
169 return false;
170
171 width = GET_MODE_PRECISION (mode);
172 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
173 return false;
174
175 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
176 return val == 0;
177 }
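/* For example, val_signbit_p (SImode, 0x80000000) holds,
   val_signbit_known_set_p (SImode, 0xffffffff) holds because the top bit
   is set, and val_signbit_known_clear_p (SImode, 0x7fffffff) holds
   because it is not.  */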
178 \f
179 /* Make a binary operation by properly ordering the operands and
180 seeing if the expression folds. */
181
182 rtx
183 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
184 rtx op1)
185 {
186 rtx tem;
187
188 /* If this simplifies, do it. */
189 tem = simplify_binary_operation (code, mode, op0, op1);
190 if (tem)
191 return tem;
192
193 /* Put complex operands first and constants second if commutative. */
194 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
195 && swap_commutative_operands_p (op0, op1))
196 tem = op0, op0 = op1, op1 = tem;
197
198 return gen_rtx_fmt_ee (code, mode, op0, op1);
199 }
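/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, X), with X
   a REG, does not fold but comes back as (plus:SI X (const_int 1)) with
   the constant placed last, whereas two CONST_INT operands fold to a
   single CONST_INT.  */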
200 \f
201 /* If X is a MEM referencing the constant pool, return the real value.
202 Otherwise return X. */
203 rtx
204 avoid_constant_pool_reference (rtx x)
205 {
206 rtx c, tmp, addr;
207 enum machine_mode cmode;
208 HOST_WIDE_INT offset = 0;
209
210 switch (GET_CODE (x))
211 {
212 case MEM:
213 break;
214
215 case FLOAT_EXTEND:
216 /* Handle float extensions of constant pool references. */
217 tmp = XEXP (x, 0);
218 c = avoid_constant_pool_reference (tmp);
219 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
220 {
221 REAL_VALUE_TYPE d;
222
223 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
224 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
225 }
226 return x;
227
228 default:
229 return x;
230 }
231
232 if (GET_MODE (x) == BLKmode)
233 return x;
234
235 addr = XEXP (x, 0);
236
237 /* Call target hook to avoid the effects of -fpic etc.... */
238 addr = targetm.delegitimize_address (addr);
239
240 /* Split the address into a base and integer offset. */
241 if (GET_CODE (addr) == CONST
242 && GET_CODE (XEXP (addr, 0)) == PLUS
243 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
244 {
245 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
246 addr = XEXP (XEXP (addr, 0), 0);
247 }
248
249 if (GET_CODE (addr) == LO_SUM)
250 addr = XEXP (addr, 1);
251
252 /* If this is a constant pool reference, we can turn it into its
253 constant and hope that simplifications happen. */
254 if (GET_CODE (addr) == SYMBOL_REF
255 && CONSTANT_POOL_ADDRESS_P (addr))
256 {
257 c = get_pool_constant (addr);
258 cmode = get_pool_mode (addr);
259
260 /* If we're accessing the constant in a different mode than it was
261 originally stored, attempt to fix that up via subreg simplifications.
262 If that fails we have no choice but to return the original memory. */
263 if ((offset != 0 || cmode != GET_MODE (x))
264 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
265 {
266 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
267 if (tem && CONSTANT_P (tem))
268 return tem;
269 }
270 else
271 return c;
272 }
273
274 return x;
275 }
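/* For example, if X is an SFmode MEM whose address is a SYMBOL_REF into
   the constant pool for the value 1.0, the CONST_DOUBLE for 1.0 is
   returned in place of the MEM; a mismatched mode or nonzero offset is
   first reconciled through simplify_subreg.  */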
276 \f
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
280
281 rtx
282 delegitimize_mem_from_attrs (rtx x)
283 {
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
289 {
290 tree decl = MEM_EXPR (x);
291 enum machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
293
294 switch (TREE_CODE (decl))
295 {
296 default:
297 decl = NULL;
298 break;
299
300 case VAR_DECL:
301 break;
302
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
310 {
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, volatilep = 0;
314
315 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
316 &mode, &unsignedp, &volatilep, false);
317 if (bitsize != GET_MODE_BITSIZE (mode)
318 || (bitpos % BITS_PER_UNIT)
319 || (toffset && !tree_fits_shwi_p (toffset)))
320 decl = NULL;
321 else
322 {
323 offset += bitpos / BITS_PER_UNIT;
324 if (toffset)
325 offset += tree_to_shwi (toffset);
326 }
327 break;
328 }
329 }
330
331 if (decl
332 && mode == GET_MODE (x)
333 && TREE_CODE (decl) == VAR_DECL
334 && (TREE_STATIC (decl)
335 || DECL_THREAD_LOCAL_P (decl))
336 && DECL_RTL_SET_P (decl)
337 && MEM_P (DECL_RTL (decl)))
338 {
339 rtx newx;
340
341 offset += MEM_OFFSET (x);
342
343 newx = DECL_RTL (decl);
344
345 if (MEM_P (newx))
346 {
347 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
348
349 /* Avoid creating a new MEM needlessly if we already had
350               the same address.  We do so if there's no OFFSET and the
351               old address X is identical to NEWX, or if X is of the
352               form (plus NEWX OFFSET), or NEWX is of the form
353 (plus Y (const_int Z)) and X is that with the offset
354 added: (plus Y (const_int Z+OFFSET)). */
355 if (!((offset == 0
356 || (GET_CODE (o) == PLUS
357 && GET_CODE (XEXP (o, 1)) == CONST_INT
358 && (offset == INTVAL (XEXP (o, 1))
359 || (GET_CODE (n) == PLUS
360 && GET_CODE (XEXP (n, 1)) == CONST_INT
361 && (INTVAL (XEXP (n, 1)) + offset
362 == INTVAL (XEXP (o, 1)))
363 && (n = XEXP (n, 0))))
364 && (o = XEXP (o, 0))))
365 && rtx_equal_p (o, n)))
366 x = adjust_address_nv (newx, mode, offset);
367 }
368 else if (GET_MODE (x) == GET_MODE (newx)
369 && offset == 0)
370 x = newx;
371 }
372 }
373
374 return x;
375 }
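/* For example, a MEM whose MEM_EXPR is a TREE_STATIC VAR_DECL V and whose
   MEM_OFFSET is 4 is rewritten, via adjust_address_nv, into a MEM based on
   V's DECL_RTL with the 4-byte offset applied, provided the mode checks
   above succeed.  */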
376 \f
377 /* Make a unary operation by first seeing if it folds and otherwise making
378 the specified operation. */
379
380 rtx
381 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
382 enum machine_mode op_mode)
383 {
384 rtx tem;
385
386 /* If this simplifies, use it. */
387 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
388 return tem;
389
390 return gen_rtx_fmt_e (code, mode, op);
391 }
392
393 /* Likewise for ternary operations. */
394
395 rtx
396 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
398 {
399 rtx tem;
400
401 /* If this simplifies, use it. */
402 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
403 op0, op1, op2)))
404 return tem;
405
406 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
407 }
408
409 /* Likewise, for relational operations.
410 CMP_MODE specifies mode comparison is done in. */
411
412 rtx
413 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
414 enum machine_mode cmp_mode, rtx op0, rtx op1)
415 {
416 rtx tem;
417
418 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
419 op0, op1)))
420 return tem;
421
422 return gen_rtx_fmt_ee (code, mode, op0, op1);
423 }
424 \f
425 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
426 and simplify the result. If FN is non-NULL, call this callback on each
427 X, if it returns non-NULL, replace X with its return value and simplify the
428 result. */
429
430 rtx
431 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
432 rtx (*fn) (rtx, const_rtx, void *), void *data)
433 {
434 enum rtx_code code = GET_CODE (x);
435 enum machine_mode mode = GET_MODE (x);
436 enum machine_mode op_mode;
437 const char *fmt;
438 rtx op0, op1, op2, newx, op;
439 rtvec vec, newvec;
440 int i, j;
441
442 if (__builtin_expect (fn != NULL, 0))
443 {
444 newx = fn (x, old_rtx, data);
445 if (newx)
446 return newx;
447 }
448 else if (rtx_equal_p (x, old_rtx))
449 return copy_rtx ((rtx) data);
450
451 switch (GET_RTX_CLASS (code))
452 {
453 case RTX_UNARY:
454 op0 = XEXP (x, 0);
455 op_mode = GET_MODE (op0);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 if (op0 == XEXP (x, 0))
458 return x;
459 return simplify_gen_unary (code, mode, op0, op_mode);
460
461 case RTX_BIN_ARITH:
462 case RTX_COMM_ARITH:
463 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
464 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
465 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
466 return x;
467 return simplify_gen_binary (code, mode, op0, op1);
468
469 case RTX_COMPARE:
470 case RTX_COMM_COMPARE:
471 op0 = XEXP (x, 0);
472 op1 = XEXP (x, 1);
473 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
474 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
475 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
476 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
477 return x;
478 return simplify_gen_relational (code, mode, op_mode, op0, op1);
479
480 case RTX_TERNARY:
481 case RTX_BITFIELD_OPS:
482 op0 = XEXP (x, 0);
483 op_mode = GET_MODE (op0);
484 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
485 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
486 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
487 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
488 return x;
489 if (op_mode == VOIDmode)
490 op_mode = GET_MODE (op0);
491 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
492
493 case RTX_EXTRA:
494 if (code == SUBREG)
495 {
496 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
497 if (op0 == SUBREG_REG (x))
498 return x;
499 op0 = simplify_gen_subreg (GET_MODE (x), op0,
500 GET_MODE (SUBREG_REG (x)),
501 SUBREG_BYTE (x));
502 return op0 ? op0 : x;
503 }
504 break;
505
506 case RTX_OBJ:
507 if (code == MEM)
508 {
509 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
510 if (op0 == XEXP (x, 0))
511 return x;
512 return replace_equiv_address_nv (x, op0);
513 }
514 else if (code == LO_SUM)
515 {
516 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
517 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
518
519 /* (lo_sum (high x) x) -> x */
520 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
521 return op1;
522
523 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
524 return x;
525 return gen_rtx_LO_SUM (mode, op0, op1);
526 }
527 break;
528
529 default:
530 break;
531 }
532
533 newx = x;
534 fmt = GET_RTX_FORMAT (code);
535 for (i = 0; fmt[i]; i++)
536 switch (fmt[i])
537 {
538 case 'E':
539 vec = XVEC (x, i);
540 newvec = XVEC (newx, i);
541 for (j = 0; j < GET_NUM_ELEM (vec); j++)
542 {
543 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
544 old_rtx, fn, data);
545 if (op != RTVEC_ELT (vec, j))
546 {
547 if (newvec == vec)
548 {
549 newvec = shallow_copy_rtvec (vec);
550 if (x == newx)
551 newx = shallow_copy_rtx (x);
552 XVEC (newx, i) = newvec;
553 }
554 RTVEC_ELT (newvec, j) = op;
555 }
556 }
557 break;
558
559 case 'e':
560 if (XEXP (x, i))
561 {
562 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
563 if (op != XEXP (x, i))
564 {
565 if (x == newx)
566 newx = shallow_copy_rtx (x);
567 XEXP (newx, i) = op;
568 }
569 }
570 break;
571 }
572 return newx;
573 }
574
575 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
576 resulting RTX. Return a new RTX which is as simplified as possible. */
577
578 rtx
579 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
580 {
581 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
582 }
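/* For example, simplify_replace_rtx ((plus:SI (reg:SI R) (const_int 4)),
   (reg:SI R), (const_int 1)) substitutes the register and folds the
   result to (const_int 5).  */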
583 \f
584 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
585 Only handle cases where the truncated value is inherently an rvalue.
586
587 RTL provides two ways of truncating a value:
588
589 1. a lowpart subreg. This form is only a truncation when both
590 the outer and inner modes (here MODE and OP_MODE respectively)
591 are scalar integers, and only then when the subreg is used as
592 an rvalue.
593
594 It is only valid to form such truncating subregs if the
595 truncation requires no action by the target. The onus for
596 proving this is on the creator of the subreg -- e.g. the
597 caller to simplify_subreg or simplify_gen_subreg -- and typically
598 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
599
600 2. a TRUNCATE. This form handles both scalar and compound integers.
601
602 The first form is preferred where valid. However, the TRUNCATE
603 handling in simplify_unary_operation turns the second form into the
604 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
605 so it is generally safe to form rvalue truncations using:
606
607 simplify_gen_unary (TRUNCATE, ...)
608
609 and leave simplify_unary_operation to work out which representation
610 should be used.
611
612 Because of the proof requirements on (1), simplify_truncation must
613 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
614 regardless of whether the outer truncation came from a SUBREG or a
615 TRUNCATE. For example, if the caller has proven that an SImode
616 truncation of:
617
618 (and:DI X Y)
619
620 is a no-op and can be represented as a subreg, it does not follow
621 that SImode truncations of X and Y are also no-ops. On a target
622 like 64-bit MIPS that requires SImode values to be stored in
623 sign-extended form, an SImode truncation of:
624
625 (and:DI (reg:DI X) (const_int 63))
626
627 is trivially a no-op because only the lower 6 bits can be set.
628 However, X is still an arbitrary 64-bit number and so we cannot
629 assume that truncating it too is a no-op. */
630
631 static rtx
632 simplify_truncation (enum machine_mode mode, rtx op,
633 enum machine_mode op_mode)
634 {
635 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
636 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
637 gcc_assert (precision <= op_precision);
638
639 /* Optimize truncations of zero and sign extended values. */
640 if (GET_CODE (op) == ZERO_EXTEND
641 || GET_CODE (op) == SIGN_EXTEND)
642 {
643 /* There are three possibilities. If MODE is the same as the
644 origmode, we can omit both the extension and the subreg.
645 If MODE is not larger than the origmode, we can apply the
646 truncation without the extension. Finally, if the outermode
647 is larger than the origmode, we can just extend to the appropriate
648 mode. */
649 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
650 if (mode == origmode)
651 return XEXP (op, 0);
652 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
653 return simplify_gen_unary (TRUNCATE, mode,
654 XEXP (op, 0), origmode);
655 else
656 return simplify_gen_unary (GET_CODE (op), mode,
657 XEXP (op, 0), origmode);
658 }
659
660 /* If the machine can perform operations in the truncated mode, distribute
661 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
662 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
663 if (1
664 #ifdef WORD_REGISTER_OPERATIONS
665 && precision >= BITS_PER_WORD
666 #endif
667 && (GET_CODE (op) == PLUS
668 || GET_CODE (op) == MINUS
669 || GET_CODE (op) == MULT))
670 {
671 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
672 if (op0)
673 {
674 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
675 if (op1)
676 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
677 }
678 }
679
680   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
681 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
682 the outer subreg is effectively a truncation to the original mode. */
683 if ((GET_CODE (op) == LSHIFTRT
684 || GET_CODE (op) == ASHIFTRT)
685 /* Ensure that OP_MODE is at least twice as wide as MODE
686 to avoid the possibility that an outer LSHIFTRT shifts by more
687 than the sign extension's sign_bit_copies and introduces zeros
688 into the high bits of the result. */
689 && 2 * precision <= op_precision
690 && CONST_INT_P (XEXP (op, 1))
691 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
692 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
693 && UINTVAL (XEXP (op, 1)) < precision)
694 return simplify_gen_binary (ASHIFTRT, mode,
695 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
696
697   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
698 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
699 the outer subreg is effectively a truncation to the original mode. */
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 && CONST_INT_P (XEXP (op, 1))
703 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
704 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
705 && UINTVAL (XEXP (op, 1)) < precision)
706 return simplify_gen_binary (LSHIFTRT, mode,
707 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
708
709   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
710 to (ashift:QI (x:QI) C), where C is a suitable small constant and
711 the outer subreg is effectively a truncation to the original mode. */
712 if (GET_CODE (op) == ASHIFT
713 && CONST_INT_P (XEXP (op, 1))
714 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
715 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
716 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
717 && UINTVAL (XEXP (op, 1)) < precision)
718 return simplify_gen_binary (ASHIFT, mode,
719 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
720
721 /* Recognize a word extraction from a multi-word subreg. */
722 if ((GET_CODE (op) == LSHIFTRT
723 || GET_CODE (op) == ASHIFTRT)
724 && SCALAR_INT_MODE_P (mode)
725 && SCALAR_INT_MODE_P (op_mode)
726 && precision >= BITS_PER_WORD
727 && 2 * precision <= op_precision
728 && CONST_INT_P (XEXP (op, 1))
729 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
730 && UINTVAL (XEXP (op, 1)) < op_precision)
731 {
732 int byte = subreg_lowpart_offset (mode, op_mode);
733 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
734 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
735 (WORDS_BIG_ENDIAN
736 ? byte - shifted_bytes
737 : byte + shifted_bytes));
738 }
739
740 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
741 and try replacing the TRUNCATE and shift with it. Don't do this
742 if the MEM has a mode-dependent address. */
743 if ((GET_CODE (op) == LSHIFTRT
744 || GET_CODE (op) == ASHIFTRT)
745 && SCALAR_INT_MODE_P (op_mode)
746 && MEM_P (XEXP (op, 0))
747 && CONST_INT_P (XEXP (op, 1))
748 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
749 && INTVAL (XEXP (op, 1)) > 0
750 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
751 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
752 MEM_ADDR_SPACE (XEXP (op, 0)))
753 && ! MEM_VOLATILE_P (XEXP (op, 0))
754 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
755 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
756 {
757 int byte = subreg_lowpart_offset (mode, op_mode);
758 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
759 return adjust_address_nv (XEXP (op, 0), mode,
760 (WORDS_BIG_ENDIAN
761 ? byte - shifted_bytes
762 : byte + shifted_bytes));
763 }
764
765 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
766 (OP:SI foo:SI) if OP is NEG or ABS. */
767 if ((GET_CODE (op) == ABS
768 || GET_CODE (op) == NEG)
769 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
770 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
771 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
772 return simplify_gen_unary (GET_CODE (op), mode,
773 XEXP (XEXP (op, 0), 0), mode);
774
775 /* (truncate:A (subreg:B (truncate:C X) 0)) is
776 (truncate:A X). */
777 if (GET_CODE (op) == SUBREG
778 && SCALAR_INT_MODE_P (mode)
779 && SCALAR_INT_MODE_P (op_mode)
780 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
781 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
782 && subreg_lowpart_p (op))
783 {
784 rtx inner = XEXP (SUBREG_REG (op), 0);
785 if (GET_MODE_PRECISION (mode)
786 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
787 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
788 else
789 /* If subreg above is paradoxical and C is narrower
790 than A, return (subreg:A (truncate:C X) 0). */
791 return simplify_gen_subreg (mode, SUBREG_REG (op),
792 GET_MODE (SUBREG_REG (op)), 0);
793 }
794
795 /* (truncate:A (truncate:B X)) is (truncate:A X). */
796 if (GET_CODE (op) == TRUNCATE)
797 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
798 GET_MODE (XEXP (op, 0)));
799
800 return NULL_RTX;
801 }
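/* For example, on a little-endian target with 32-bit words,
   (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32))) matches the
   word-extraction case above and becomes (subreg:SI (reg:DI X) 4),
   a direct reference to the high word.  */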
802 \f
803 /* Try to simplify a unary operation CODE whose output mode is to be
804 MODE with input operand OP whose mode was originally OP_MODE.
805 Return zero if no simplification can be made. */
806 rtx
807 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
808 rtx op, enum machine_mode op_mode)
809 {
810 rtx trueop, tem;
811
812 trueop = avoid_constant_pool_reference (op);
813
814 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
815 if (tem)
816 return tem;
817
818 return simplify_unary_operation_1 (code, mode, op);
819 }
820
821 /* Perform some simplifications we can do even if the operands
822 aren't constant. */
823 static rtx
824 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
825 {
826 enum rtx_code reversed;
827 rtx temp;
828
829 switch (code)
830 {
831 case NOT:
832 /* (not (not X)) == X. */
833 if (GET_CODE (op) == NOT)
834 return XEXP (op, 0);
835
836 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
837 comparison is all ones. */
838 if (COMPARISON_P (op)
839 && (mode == BImode || STORE_FLAG_VALUE == -1)
840 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
841 return simplify_gen_relational (reversed, mode, VOIDmode,
842 XEXP (op, 0), XEXP (op, 1));
843
844 /* (not (plus X -1)) can become (neg X). */
845 if (GET_CODE (op) == PLUS
846 && XEXP (op, 1) == constm1_rtx)
847 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
848
849 /* Similarly, (not (neg X)) is (plus X -1). */
850 if (GET_CODE (op) == NEG)
851 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
852 CONSTM1_RTX (mode));
853
854 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
855 if (GET_CODE (op) == XOR
856 && CONST_INT_P (XEXP (op, 1))
857 && (temp = simplify_unary_operation (NOT, mode,
858 XEXP (op, 1), mode)) != 0)
859 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
860
861 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
862 if (GET_CODE (op) == PLUS
863 && CONST_INT_P (XEXP (op, 1))
864 && mode_signbit_p (mode, XEXP (op, 1))
865 && (temp = simplify_unary_operation (NOT, mode,
866 XEXP (op, 1), mode)) != 0)
867 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
868
869
870 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
871 operands other than 1, but that is not valid. We could do a
872 similar simplification for (not (lshiftrt C X)) where C is
873 just the sign bit, but this doesn't seem common enough to
874 bother with. */
875 if (GET_CODE (op) == ASHIFT
876 && XEXP (op, 0) == const1_rtx)
877 {
878 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
879 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
880 }
881
882 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
883 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
884 so we can perform the above simplification. */
885 if (STORE_FLAG_VALUE == -1
886 && GET_CODE (op) == ASHIFTRT
887 && CONST_INT_P (XEXP (op, 1))
888 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
889 return simplify_gen_relational (GE, mode, VOIDmode,
890 XEXP (op, 0), const0_rtx);
891
892
893 if (GET_CODE (op) == SUBREG
894 && subreg_lowpart_p (op)
895 && (GET_MODE_SIZE (GET_MODE (op))
896 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
897 && GET_CODE (SUBREG_REG (op)) == ASHIFT
898 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
899 {
900 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
901 rtx x;
902
903 x = gen_rtx_ROTATE (inner_mode,
904 simplify_gen_unary (NOT, inner_mode, const1_rtx,
905 inner_mode),
906 XEXP (SUBREG_REG (op), 1));
907 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
908 if (temp)
909 return temp;
910 }
911
912 /* Apply De Morgan's laws to reduce number of patterns for machines
913 with negating logical insns (and-not, nand, etc.). If result has
914 only one NOT, put it first, since that is how the patterns are
915 coded. */
916 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
917 {
918 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
919 enum machine_mode op_mode;
920
921 op_mode = GET_MODE (in1);
922 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
923
924 op_mode = GET_MODE (in2);
925 if (op_mode == VOIDmode)
926 op_mode = mode;
927 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
928
929 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
930 {
931 rtx tem = in2;
932 in2 = in1; in1 = tem;
933 }
934
935 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
936 mode, in1, in2);
937 }
938
939 /* (not (bswap x)) -> (bswap (not x)). */
940 if (GET_CODE (op) == BSWAP)
941 {
942 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
943 return simplify_gen_unary (BSWAP, mode, x, mode);
944 }
945 break;
946
947 case NEG:
948 /* (neg (neg X)) == X. */
949 if (GET_CODE (op) == NEG)
950 return XEXP (op, 0);
951
952 /* (neg (plus X 1)) can become (not X). */
953 if (GET_CODE (op) == PLUS
954 && XEXP (op, 1) == const1_rtx)
955 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
956
957 /* Similarly, (neg (not X)) is (plus X 1). */
958 if (GET_CODE (op) == NOT)
959 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
960 CONST1_RTX (mode));
961
962 /* (neg (minus X Y)) can become (minus Y X). This transformation
963 isn't safe for modes with signed zeros, since if X and Y are
964 both +0, (minus Y X) is the same as (minus X Y). If the
965 rounding mode is towards +infinity (or -infinity) then the two
966 expressions will be rounded differently. */
967 if (GET_CODE (op) == MINUS
968 && !HONOR_SIGNED_ZEROS (mode)
969 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
970 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
971
972 if (GET_CODE (op) == PLUS
973 && !HONOR_SIGNED_ZEROS (mode)
974 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
975 {
976 /* (neg (plus A C)) is simplified to (minus -C A). */
977 if (CONST_SCALAR_INT_P (XEXP (op, 1))
978 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
979 {
980 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
981 if (temp)
982 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
983 }
984
985 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
986 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
987 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
988 }
989
990 /* (neg (mult A B)) becomes (mult A (neg B)).
991 This works even for floating-point values. */
992 if (GET_CODE (op) == MULT
993 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
994 {
995 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
996 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
997 }
998
999 /* NEG commutes with ASHIFT since it is multiplication. Only do
1000 this if we can then eliminate the NEG (e.g., if the operand
1001 is a constant). */
1002 if (GET_CODE (op) == ASHIFT)
1003 {
1004 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1005 if (temp)
1006 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1007 }
1008
1009 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1010 C is equal to the width of MODE minus 1. */
1011 if (GET_CODE (op) == ASHIFTRT
1012 && CONST_INT_P (XEXP (op, 1))
1013 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1014 return simplify_gen_binary (LSHIFTRT, mode,
1015 XEXP (op, 0), XEXP (op, 1));
1016
1017 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op) == LSHIFTRT
1020 && CONST_INT_P (XEXP (op, 1))
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1022 return simplify_gen_binary (ASHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1024
1025 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1026 if (GET_CODE (op) == XOR
1027 && XEXP (op, 1) == const1_rtx
1028 && nonzero_bits (XEXP (op, 0), mode) == 1)
1029 return plus_constant (mode, XEXP (op, 0), -1);
1030
1031 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1032 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1033 if (GET_CODE (op) == LT
1034 && XEXP (op, 1) == const0_rtx
1035 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1036 {
1037 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1038 int isize = GET_MODE_PRECISION (inner);
1039 if (STORE_FLAG_VALUE == 1)
1040 {
1041 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1042 GEN_INT (isize - 1));
1043 if (mode == inner)
1044 return temp;
1045 if (GET_MODE_PRECISION (mode) > isize)
1046 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1047 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1048 }
1049 else if (STORE_FLAG_VALUE == -1)
1050 {
1051 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1052 GEN_INT (isize - 1));
1053 if (mode == inner)
1054 return temp;
1055 if (GET_MODE_PRECISION (mode) > isize)
1056 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1057 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1058 }
1059 }
1060 break;
1061
1062 case TRUNCATE:
1063 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1064 with the umulXi3_highpart patterns. */
1065 if (GET_CODE (op) == LSHIFTRT
1066 && GET_CODE (XEXP (op, 0)) == MULT)
1067 break;
1068
1069 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1070 {
1071 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1072 {
1073 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1074 if (temp)
1075 return temp;
1076 }
1077 /* We can't handle truncation to a partial integer mode here
1078 because we don't know the real bitsize of the partial
1079 integer mode. */
1080 break;
1081 }
1082
1083 if (GET_MODE (op) != VOIDmode)
1084 {
1085 temp = simplify_truncation (mode, op, GET_MODE (op));
1086 if (temp)
1087 return temp;
1088 }
1089
1090 /* If we know that the value is already truncated, we can
1091 replace the TRUNCATE with a SUBREG. */
1092 if (GET_MODE_NUNITS (mode) == 1
1093 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1094 || truncated_to_mode (mode, op)))
1095 {
1096 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1097 if (temp)
1098 return temp;
1099 }
1100
1101 /* A truncate of a comparison can be replaced with a subreg if
1102 STORE_FLAG_VALUE permits. This is like the previous test,
1103 but it works even if the comparison is done in a mode larger
1104 than HOST_BITS_PER_WIDE_INT. */
1105 if (HWI_COMPUTABLE_MODE_P (mode)
1106 && COMPARISON_P (op)
1107 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1108 {
1109 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1110 if (temp)
1111 return temp;
1112 }
1113
1114 /* A truncate of a memory is just loading the low part of the memory
1115 if we are not changing the meaning of the address. */
1116 if (GET_CODE (op) == MEM
1117 && !VECTOR_MODE_P (mode)
1118 && !MEM_VOLATILE_P (op)
1119 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1120 {
1121 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1122 if (temp)
1123 return temp;
1124 }
1125
1126 break;
1127
1128 case FLOAT_TRUNCATE:
1129 if (DECIMAL_FLOAT_MODE_P (mode))
1130 break;
1131
1132 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1133 if (GET_CODE (op) == FLOAT_EXTEND
1134 && GET_MODE (XEXP (op, 0)) == mode)
1135 return XEXP (op, 0);
1136
1137 /* (float_truncate:SF (float_truncate:DF foo:XF))
1138 = (float_truncate:SF foo:XF).
1139 This may eliminate double rounding, so it is unsafe.
1140
1141 (float_truncate:SF (float_extend:XF foo:DF))
1142 = (float_truncate:SF foo:DF).
1143
1144 (float_truncate:DF (float_extend:XF foo:SF))
1145        = (float_extend:DF foo:SF).  */
1146 if ((GET_CODE (op) == FLOAT_TRUNCATE
1147 && flag_unsafe_math_optimizations)
1148 || GET_CODE (op) == FLOAT_EXTEND)
1149 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1150 0)))
1151 > GET_MODE_SIZE (mode)
1152 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1153 mode,
1154 XEXP (op, 0), mode);
1155
1156 /* (float_truncate (float x)) is (float x) */
1157 if (GET_CODE (op) == FLOAT
1158 && (flag_unsafe_math_optimizations
1159 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1160 && ((unsigned)significand_size (GET_MODE (op))
1161 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1162 - num_sign_bit_copies (XEXP (op, 0),
1163 GET_MODE (XEXP (op, 0))))))))
1164 return simplify_gen_unary (FLOAT, mode,
1165 XEXP (op, 0),
1166 GET_MODE (XEXP (op, 0)));
1167
1168     /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
1169 (OP:SF foo:SF) if OP is NEG or ABS. */
1170 if ((GET_CODE (op) == ABS
1171 || GET_CODE (op) == NEG)
1172 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1173 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1174 return simplify_gen_unary (GET_CODE (op), mode,
1175 XEXP (XEXP (op, 0), 0), mode);
1176
1177 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1178 is (float_truncate:SF x). */
1179 if (GET_CODE (op) == SUBREG
1180 && subreg_lowpart_p (op)
1181 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1182 return SUBREG_REG (op);
1183 break;
1184
1185 case FLOAT_EXTEND:
1186 if (DECIMAL_FLOAT_MODE_P (mode))
1187 break;
1188
1189 /* (float_extend (float_extend x)) is (float_extend x)
1190
1191 (float_extend (float x)) is (float x) assuming that double
1192 rounding can't happen.
1193 */
1194 if (GET_CODE (op) == FLOAT_EXTEND
1195 || (GET_CODE (op) == FLOAT
1196 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1197 && ((unsigned)significand_size (GET_MODE (op))
1198 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1199 - num_sign_bit_copies (XEXP (op, 0),
1200 GET_MODE (XEXP (op, 0)))))))
1201 return simplify_gen_unary (GET_CODE (op), mode,
1202 XEXP (op, 0),
1203 GET_MODE (XEXP (op, 0)));
1204
1205 break;
1206
1207 case ABS:
1208 /* (abs (neg <foo>)) -> (abs <foo>) */
1209 if (GET_CODE (op) == NEG)
1210 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1211 GET_MODE (XEXP (op, 0)));
1212
1213 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1214 do nothing. */
1215 if (GET_MODE (op) == VOIDmode)
1216 break;
1217
1218 /* If operand is something known to be positive, ignore the ABS. */
1219 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1220 || val_signbit_known_clear_p (GET_MODE (op),
1221 nonzero_bits (op, GET_MODE (op))))
1222 return op;
1223
1224 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1225 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1226 return gen_rtx_NEG (mode, op);
1227
1228 break;
1229
1230 case FFS:
1231 /* (ffs (*_extend <X>)) = (ffs <X>) */
1232 if (GET_CODE (op) == SIGN_EXTEND
1233 || GET_CODE (op) == ZERO_EXTEND)
1234 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1235 GET_MODE (XEXP (op, 0)));
1236 break;
1237
1238 case POPCOUNT:
1239 switch (GET_CODE (op))
1240 {
1241 case BSWAP:
1242 case ZERO_EXTEND:
1243 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1244 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1245 GET_MODE (XEXP (op, 0)));
1246
1247 case ROTATE:
1248 case ROTATERT:
1249 /* Rotations don't affect popcount. */
1250 if (!side_effects_p (XEXP (op, 1)))
1251 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1252 GET_MODE (XEXP (op, 0)));
1253 break;
1254
1255 default:
1256 break;
1257 }
1258 break;
1259
1260 case PARITY:
1261 switch (GET_CODE (op))
1262 {
1263 case NOT:
1264 case BSWAP:
1265 case ZERO_EXTEND:
1266 case SIGN_EXTEND:
1267 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1269
1270 case ROTATE:
1271 case ROTATERT:
1272 /* Rotations don't affect parity. */
1273 if (!side_effects_p (XEXP (op, 1)))
1274 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1275 GET_MODE (XEXP (op, 0)));
1276 break;
1277
1278 default:
1279 break;
1280 }
1281 break;
1282
1283 case BSWAP:
1284 /* (bswap (bswap x)) -> x. */
1285 if (GET_CODE (op) == BSWAP)
1286 return XEXP (op, 0);
1287 break;
1288
1289 case FLOAT:
1290 /* (float (sign_extend <X>)) = (float <X>). */
1291 if (GET_CODE (op) == SIGN_EXTEND)
1292 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1293 GET_MODE (XEXP (op, 0)));
1294 break;
1295
1296 case SIGN_EXTEND:
1297 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1298 becomes just the MINUS if its mode is MODE. This allows
1299 folding switch statements on machines using casesi (such as
1300 the VAX). */
1301 if (GET_CODE (op) == TRUNCATE
1302 && GET_MODE (XEXP (op, 0)) == mode
1303 && GET_CODE (XEXP (op, 0)) == MINUS
1304 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1305 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1306 return XEXP (op, 0);
1307
1308 /* Extending a widening multiplication should be canonicalized to
1309 a wider widening multiplication. */
1310 if (GET_CODE (op) == MULT)
1311 {
1312 rtx lhs = XEXP (op, 0);
1313 rtx rhs = XEXP (op, 1);
1314 enum rtx_code lcode = GET_CODE (lhs);
1315 enum rtx_code rcode = GET_CODE (rhs);
1316
1317 /* Widening multiplies usually extend both operands, but sometimes
1318 they use a shift to extract a portion of a register. */
1319 if ((lcode == SIGN_EXTEND
1320 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1321 && (rcode == SIGN_EXTEND
1322 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1323 {
1324 enum machine_mode lmode = GET_MODE (lhs);
1325 enum machine_mode rmode = GET_MODE (rhs);
1326 int bits;
1327
1328 if (lcode == ASHIFTRT)
1329 /* Number of bits not shifted off the end. */
1330 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1331 else /* lcode == SIGN_EXTEND */
1332 /* Size of inner mode. */
1333 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1334
1335 if (rcode == ASHIFTRT)
1336 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1337 else /* rcode == SIGN_EXTEND */
1338 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1339
1340          /* We can only widen multiplies if the result is mathematically
1341             equivalent, i.e. if overflow was impossible.  */
1342 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1343 return simplify_gen_binary
1344 (MULT, mode,
1345 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1346 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1347 }
1348 }
1349
1350 /* Check for a sign extension of a subreg of a promoted
1351 variable, where the promotion is sign-extended, and the
1352 target mode is the same as the variable's promotion. */
1353 if (GET_CODE (op) == SUBREG
1354 && SUBREG_PROMOTED_VAR_P (op)
1355 && SUBREG_PROMOTED_SIGNED_P (op)
1356 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1357 {
1358 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1359 if (temp)
1360 return temp;
1361 }
1362
1363 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1364 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1365 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1366 {
1367 gcc_assert (GET_MODE_PRECISION (mode)
1368 > GET_MODE_PRECISION (GET_MODE (op)));
1369 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1370 GET_MODE (XEXP (op, 0)));
1371 }
1372
1373 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1374 is (sign_extend:M (subreg:O <X>)) if there is mode with
1375 GET_MODE_BITSIZE (N) - I bits.
1376 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1377 is similarly (zero_extend:M (subreg:O <X>)). */
1378 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1379 && GET_CODE (XEXP (op, 0)) == ASHIFT
1380 && CONST_INT_P (XEXP (op, 1))
1381 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1382 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1383 {
1384 enum machine_mode tmode
1385 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1386 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1387 gcc_assert (GET_MODE_BITSIZE (mode)
1388 > GET_MODE_BITSIZE (GET_MODE (op)));
1389 if (tmode != BLKmode)
1390 {
1391 rtx inner =
1392 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1393 if (inner)
1394 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1395 ? SIGN_EXTEND : ZERO_EXTEND,
1396 mode, inner, tmode);
1397 }
1398 }
1399
1400 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1401 /* As we do not know which address space the pointer is referring to,
1402 we can do this only if the target does not support different pointer
1403 or address modes depending on the address space. */
1404 if (target_default_pointer_address_modes_p ()
1405 && ! POINTERS_EXTEND_UNSIGNED
1406 && mode == Pmode && GET_MODE (op) == ptr_mode
1407 && (CONSTANT_P (op)
1408 || (GET_CODE (op) == SUBREG
1409 && REG_P (SUBREG_REG (op))
1410 && REG_POINTER (SUBREG_REG (op))
1411 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1412 return convert_memory_address (Pmode, op);
1413 #endif
1414 break;
1415
1416 case ZERO_EXTEND:
1417 /* Check for a zero extension of a subreg of a promoted
1418 variable, where the promotion is zero-extended, and the
1419 target mode is the same as the variable's promotion. */
1420 if (GET_CODE (op) == SUBREG
1421 && SUBREG_PROMOTED_VAR_P (op)
1422 && SUBREG_PROMOTED_UNSIGNED_P (op)
1423 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1424 {
1425 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1426 if (temp)
1427 return temp;
1428 }
1429
1430 /* Extending a widening multiplication should be canonicalized to
1431 a wider widening multiplication. */
1432 if (GET_CODE (op) == MULT)
1433 {
1434 rtx lhs = XEXP (op, 0);
1435 rtx rhs = XEXP (op, 1);
1436 enum rtx_code lcode = GET_CODE (lhs);
1437 enum rtx_code rcode = GET_CODE (rhs);
1438
1439 /* Widening multiplies usually extend both operands, but sometimes
1440 they use a shift to extract a portion of a register. */
1441 if ((lcode == ZERO_EXTEND
1442 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1443 && (rcode == ZERO_EXTEND
1444 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1445 {
1446 enum machine_mode lmode = GET_MODE (lhs);
1447 enum machine_mode rmode = GET_MODE (rhs);
1448 int bits;
1449
1450 if (lcode == LSHIFTRT)
1451 /* Number of bits not shifted off the end. */
1452 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1453 else /* lcode == ZERO_EXTEND */
1454 /* Size of inner mode. */
1455 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1456
1457 if (rcode == LSHIFTRT)
1458 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1459 else /* rcode == ZERO_EXTEND */
1460 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1461
1462          /* We can only widen multiplies if the result is mathematically
1463             equivalent, i.e. if overflow was impossible.  */
1464 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1465 return simplify_gen_binary
1466 (MULT, mode,
1467 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1468 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1469 }
1470 }
1471
1472 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1473 if (GET_CODE (op) == ZERO_EXTEND)
1474 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1475 GET_MODE (XEXP (op, 0)));
1476
1477 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1478 is (zero_extend:M (subreg:O <X>)) if there is mode with
1479 GET_MODE_PRECISION (N) - I bits. */
1480 if (GET_CODE (op) == LSHIFTRT
1481 && GET_CODE (XEXP (op, 0)) == ASHIFT
1482 && CONST_INT_P (XEXP (op, 1))
1483 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1484 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1485 {
1486 enum machine_mode tmode
1487 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1488 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1489 if (tmode != BLKmode)
1490 {
1491 rtx inner =
1492 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1493 if (inner)
1494 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1495 }
1496 }
1497
1498 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1499 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1500 of mode N. E.g.
1501 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1502 (and:SI (reg:SI) (const_int 63)). */
1503 if (GET_CODE (op) == SUBREG
1504 && GET_MODE_PRECISION (GET_MODE (op))
1505 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1506 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1507 <= HOST_BITS_PER_WIDE_INT
1508 && GET_MODE_PRECISION (mode)
1509 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1510 && subreg_lowpart_p (op)
1511 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1512 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1513 {
1514 if (GET_MODE_PRECISION (mode)
1515 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1516 return SUBREG_REG (op);
1517 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1518 GET_MODE (SUBREG_REG (op)));
1519 }
1520
1521 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1522 /* As we do not know which address space the pointer is referring to,
1523 we can do this only if the target does not support different pointer
1524 or address modes depending on the address space. */
1525 if (target_default_pointer_address_modes_p ()
1526 && POINTERS_EXTEND_UNSIGNED > 0
1527 && mode == Pmode && GET_MODE (op) == ptr_mode
1528 && (CONSTANT_P (op)
1529 || (GET_CODE (op) == SUBREG
1530 && REG_P (SUBREG_REG (op))
1531 && REG_POINTER (SUBREG_REG (op))
1532 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1533 return convert_memory_address (Pmode, op);
1534 #endif
1535 break;
1536
1537 default:
1538 break;
1539 }
1540
1541 return 0;
1542 }
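/* For example, (neg:SI (plus:SI (reg:SI A) (const_int 4))) is rewritten
   above to (minus:SI (const_int -4) (reg:SI A)), and
   (not:SI (neg:SI (reg:SI A))) becomes (plus:SI (reg:SI A) (const_int -1)).  */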
1543
1544 /* Try to compute the value of a unary operation CODE whose output mode is to
1545 be MODE with input operand OP whose mode was originally OP_MODE.
1546 Return zero if the value cannot be computed. */
1547 rtx
1548 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1549 rtx op, enum machine_mode op_mode)
1550 {
1551 unsigned int width = GET_MODE_PRECISION (mode);
1552
1553 if (code == VEC_DUPLICATE)
1554 {
1555 gcc_assert (VECTOR_MODE_P (mode));
1556 if (GET_MODE (op) != VOIDmode)
1557 {
1558 if (!VECTOR_MODE_P (GET_MODE (op)))
1559 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1560 else
1561 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1562 (GET_MODE (op)));
1563 }
1564 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1565 || GET_CODE (op) == CONST_VECTOR)
1566 {
1567 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1568 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1569 rtvec v = rtvec_alloc (n_elts);
1570 unsigned int i;
1571
1572 if (GET_CODE (op) != CONST_VECTOR)
1573 for (i = 0; i < n_elts; i++)
1574 RTVEC_ELT (v, i) = op;
1575 else
1576 {
1577 enum machine_mode inmode = GET_MODE (op);
1578 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1579 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1580
1581 gcc_assert (in_n_elts < n_elts);
1582 gcc_assert ((n_elts % in_n_elts) == 0);
1583 for (i = 0; i < n_elts; i++)
1584 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1585 }
1586 return gen_rtx_CONST_VECTOR (mode, v);
1587 }
1588 }
1589
1590 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1591 {
1592 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1593 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1594 enum machine_mode opmode = GET_MODE (op);
1595 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1596 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1597 rtvec v = rtvec_alloc (n_elts);
1598 unsigned int i;
1599
1600 gcc_assert (op_n_elts == n_elts);
1601 for (i = 0; i < n_elts; i++)
1602 {
1603 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1604 CONST_VECTOR_ELT (op, i),
1605 GET_MODE_INNER (opmode));
1606 if (!x)
1607 return 0;
1608 RTVEC_ELT (v, i) = x;
1609 }
1610 return gen_rtx_CONST_VECTOR (mode, v);
1611 }
1612
1613 /* The order of these tests is critical so that, for example, we don't
1614 check the wrong mode (input vs. output) for a conversion operation,
1615 such as FIX. At some point, this should be simplified. */
1616
1617 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1618 {
1619 REAL_VALUE_TYPE d;
1620
1621 if (op_mode == VOIDmode)
1622 {
1623          /* CONST_INTs have VOIDmode as the mode.  We assume that all
1624             the bits of the constant are significant, though this is
1625 a dangerous assumption as many times CONST_INTs are
1626 created and used with garbage in the bits outside of the
1627 precision of the implied mode of the const_int. */
1628 op_mode = MAX_MODE_INT;
1629 }
1630
1631 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1632 d = real_value_truncate (mode, d);
1633 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1634 }
1635 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1636 {
1637 REAL_VALUE_TYPE d;
1638
1639 if (op_mode == VOIDmode)
1640 {
1641          /* CONST_INTs have VOIDmode as the mode.  We assume that all
1642             the bits of the constant are significant, though this is
1643 a dangerous assumption as many times CONST_INTs are
1644 created and used with garbage in the bits outside of the
1645 precision of the implied mode of the const_int. */
1646 op_mode = MAX_MODE_INT;
1647 }
1648
1649 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1650 d = real_value_truncate (mode, d);
1651 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1652 }
1653
1654 if (CONST_SCALAR_INT_P (op) && width > 0)
1655 {
1656 wide_int result;
1657 enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1658 rtx_mode_t op0 = std::make_pair (op, imode);
1659 int int_value;
1660
1661 #if TARGET_SUPPORTS_WIDE_INT == 0
1662 /* This assert keeps the simplification from producing a result
1663             that cannot be represented in a CONST_DOUBLE, but a lot of
1664             upstream callers expect that this function never fails to
1665             simplify something, so if you added this to the test
1666             above the code would die later anyway.  If this assert
1667             fires, you just need to make the port support wide int.  */
1668 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1669 #endif
1670
1671 switch (code)
1672 {
1673 case NOT:
1674 result = wi::bit_not (op0);
1675 break;
1676
1677 case NEG:
1678 result = wi::neg (op0);
1679 break;
1680
1681 case ABS:
1682 result = wi::abs (op0);
1683 break;
1684
1685 case FFS:
1686 result = wi::shwi (wi::ffs (op0), mode);
1687 break;
1688
1689 case CLZ:
1690 if (wi::ne_p (op0, 0))
1691 int_value = wi::clz (op0);
1692 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1693 int_value = GET_MODE_PRECISION (mode);
1694 result = wi::shwi (int_value, mode);
1695 break;
1696
1697 case CLRSB:
1698 result = wi::shwi (wi::clrsb (op0), mode);
1699 break;
1700
1701 case CTZ:
1702 if (wi::ne_p (op0, 0))
1703 int_value = wi::ctz (op0);
1704 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1705 int_value = GET_MODE_PRECISION (mode);
1706 result = wi::shwi (int_value, mode);
1707 break;
1708
1709 case POPCOUNT:
1710 result = wi::shwi (wi::popcount (op0), mode);
1711 break;
1712
1713 case PARITY:
1714 result = wi::shwi (wi::parity (op0), mode);
1715 break;
1716
1717 case BSWAP:
1718 result = wide_int (op0).bswap ();
1719 break;
1720
1721 case TRUNCATE:
1722 case ZERO_EXTEND:
1723 result = wide_int::from (op0, width, UNSIGNED);
1724 break;
1725
1726 case SIGN_EXTEND:
1727 result = wide_int::from (op0, width, SIGNED);
1728 break;
1729
1730 case SQRT:
1731 default:
1732 return 0;
1733 }
1734
1735 return immed_wide_int_const (result, mode);
1736 }
1737
1738 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1739 && SCALAR_FLOAT_MODE_P (mode)
1740 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1741 {
1742 REAL_VALUE_TYPE d;
1743 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1744
1745 switch (code)
1746 {
1747 case SQRT:
1748 return 0;
1749 case ABS:
1750 d = real_value_abs (&d);
1751 break;
1752 case NEG:
1753 d = real_value_negate (&d);
1754 break;
1755 case FLOAT_TRUNCATE:
1756 d = real_value_truncate (mode, d);
1757 break;
1758 case FLOAT_EXTEND:
1759 /* All this does is change the mode, unless changing
1760 mode class. */
1761 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1762 real_convert (&d, mode, &d);
1763 break;
1764 case FIX:
1765 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1766 break;
1767 case NOT:
1768 {
1769 long tmp[4];
1770 int i;
1771
1772 real_to_target (tmp, &d, GET_MODE (op));
1773 for (i = 0; i < 4; i++)
1774 tmp[i] = ~tmp[i];
1775 real_from_target (&d, tmp, mode);
1776 break;
1777 }
1778 default:
1779 gcc_unreachable ();
1780 }
1781 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1782 }
1783 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1784 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1785 && GET_MODE_CLASS (mode) == MODE_INT
1786 && width > 0)
1787 {
1788 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1789 operators are intentionally left unspecified (to ease implementation
1790 by target backends), for consistency, this routine implements the
1791 same semantics for constant folding as used by the middle-end. */
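 /* For example, folding (fix:SI (const_double:DF 1.0e10)) clamps the
 out-of-range value to 2147483647, the largest SImode integer, and a
 NaN operand folds to 0. */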
1792
1793 /* This was formerly used only for non-IEEE float.
1794 eggert@twinsun.com says it is safe for IEEE also. */
1795 REAL_VALUE_TYPE x, t;
1796 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1797 wide_int wmax, wmin;
1798 /* This is part of the ABI of real_to_integer, but we check
1799 things before making this call. */
1800 bool fail;
1801
1802 switch (code)
1803 {
1804 case FIX:
1805 if (REAL_VALUE_ISNAN (x))
1806 return const0_rtx;
1807
1808 /* Test against the signed upper bound. */
1809 wmax = wi::max_value (width, SIGNED);
1810 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1811 if (REAL_VALUES_LESS (t, x))
1812 return immed_wide_int_const (wmax, mode);
1813
1814 /* Test against the signed lower bound. */
1815 wmin = wi::min_value (width, SIGNED);
1816 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1817 if (REAL_VALUES_LESS (x, t))
1818 return immed_wide_int_const (wmin, mode);
1819
1820 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1821 break;
1822
1823 case UNSIGNED_FIX:
1824 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1825 return const0_rtx;
1826
1827 /* Test against the unsigned upper bound. */
1828 wmax = wi::max_value (width, UNSIGNED);
1829 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1830 if (REAL_VALUES_LESS (t, x))
1831 return immed_wide_int_const (wmax, mode);
1832
1833 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1834 mode);
1835 break;
1836
1837 default:
1838 gcc_unreachable ();
1839 }
1840 }
1841
1842 return NULL_RTX;
1843 }
1844 \f
1845 /* Subroutine of simplify_binary_operation to simplify a binary operation
1846 CODE that can commute with byte swapping, with result mode MODE and
1847 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1848 Return zero if no simplification or canonicalization is possible. */
1849
1850 static rtx
1851 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
1852 rtx op0, rtx op1)
1853 {
1854 rtx tem;
1855
1856 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 being C1 byte-swapped. */
1857 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1858 {
1859 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1860 simplify_gen_unary (BSWAP, mode, op1, mode));
1861 return simplify_gen_unary (BSWAP, mode, tem, mode);
1862 }
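 /* For example, in SImode (and (bswap x) 0xff) becomes
 (bswap (and x 0xff000000)). */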
1863
1864 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1865 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1866 {
1867 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1868 return simplify_gen_unary (BSWAP, mode, tem, mode);
1869 }
1870
1871 return NULL_RTX;
1872 }
1873
1874 /* Subroutine of simplify_binary_operation to simplify a commutative,
1875 associative binary operation CODE with result mode MODE, operating
1876 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1877 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1878 canonicalization is possible. */
1879
1880 static rtx
1881 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1882 rtx op0, rtx op1)
1883 {
1884 rtx tem;
1885
1886 /* Linearize the operator to the left. */
1887 if (GET_CODE (op1) == code)
1888 {
1889 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1890 if (GET_CODE (op0) == code)
1891 {
1892 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1893 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1894 }
1895
1896 /* "a op (b op c)" becomes "(b op c) op a". */
1897 if (! swap_commutative_operands_p (op1, op0))
1898 return simplify_gen_binary (code, mode, op1, op0);
1899
1900 tem = op0;
1901 op0 = op1;
1902 op1 = tem;
1903 }
1904
1905 if (GET_CODE (op0) == code)
1906 {
1907 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1908 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1909 {
1910 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1911 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1912 }
1913
1914 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1915 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1916 if (tem != 0)
1917 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1918
1919 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1920 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1921 if (tem != 0)
1922 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1923 }
1924
1925 return 0;
1926 }
1927
1928
1929 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1930 and OP1. Return 0 if no simplification is possible.
1931
1932 Don't use this for relational operations such as EQ or LT.
1933 Use simplify_relational_operation instead. */
1934 rtx
1935 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1936 rtx op0, rtx op1)
1937 {
1938 rtx trueop0, trueop1;
1939 rtx tem;
1940
1941 /* Relational operations don't work here. We must know the mode
1942 of the operands in order to do the comparison correctly.
1943 Assuming a full word can give incorrect results.
1944 Consider comparing 128 with -128 in QImode. */
1945 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1946 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1947
1948 /* Make sure the constant is second. */
1949 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1950 && swap_commutative_operands_p (op0, op1))
1951 {
1952 tem = op0, op0 = op1, op1 = tem;
1953 }
1954
1955 trueop0 = avoid_constant_pool_reference (op0);
1956 trueop1 = avoid_constant_pool_reference (op1);
1957
1958 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1959 if (tem)
1960 return tem;
1961 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1962 }
1963
1964 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1965 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1966 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1967 actual constants. */
1968
1969 static rtx
1970 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1971 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1972 {
1973 rtx tem, reversed, opleft, opright;
1974 HOST_WIDE_INT val;
1975 unsigned int width = GET_MODE_PRECISION (mode);
1976
1977 /* Even if we can't compute a constant result,
1978 there are some cases worth simplifying. */
1979
1980 switch (code)
1981 {
1982 case PLUS:
1983 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1984 when x is NaN, infinite, or finite and nonzero. They aren't
1985 when x is -0 and the rounding mode is not towards -infinity,
1986 since (-0) + 0 is then 0. */
1987 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1988 return op0;
1989
1990 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1991 transformations are safe even for IEEE. */
1992 if (GET_CODE (op0) == NEG)
1993 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1994 else if (GET_CODE (op1) == NEG)
1995 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1996
1997 /* (~a) + 1 -> -a */
1998 if (INTEGRAL_MODE_P (mode)
1999 && GET_CODE (op0) == NOT
2000 && trueop1 == const1_rtx)
2001 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2002
2003 /* Handle both-operands-constant cases. We can only add
2004 CONST_INTs to constants since the sum of relocatable symbols
2005 can't be handled by most assemblers. Don't add CONST_INT
2006 to CONST_INT since overflow won't be computed properly if wider
2007 than HOST_BITS_PER_WIDE_INT. */
2008
2009 if ((GET_CODE (op0) == CONST
2010 || GET_CODE (op0) == SYMBOL_REF
2011 || GET_CODE (op0) == LABEL_REF)
2012 && CONST_INT_P (op1))
2013 return plus_constant (mode, op0, INTVAL (op1));
2014 else if ((GET_CODE (op1) == CONST
2015 || GET_CODE (op1) == SYMBOL_REF
2016 || GET_CODE (op1) == LABEL_REF)
2017 && CONST_INT_P (op0))
2018 return plus_constant (mode, op1, INTVAL (op0));
2019
2020 /* See if this is something like X * C - X or vice versa or
2021 if the multiplication is written as a shift. If so, we can
2022 distribute and make a new multiply, shift, or maybe just
2023 have X (if C is 2 in the example above). But don't make
2024 something more expensive than we had before. */
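 /* For example, (plus (mult x 3) x) distributes to (mult x 4), and
 (plus (ashift x 2) x) becomes (mult x 5), provided the new form is
 no more costly than the original. */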
2025
2026 if (SCALAR_INT_MODE_P (mode))
2027 {
2028 rtx lhs = op0, rhs = op1;
2029
2030 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2031 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2032
2033 if (GET_CODE (lhs) == NEG)
2034 {
2035 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2036 lhs = XEXP (lhs, 0);
2037 }
2038 else if (GET_CODE (lhs) == MULT
2039 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2040 {
2041 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2042 lhs = XEXP (lhs, 0);
2043 }
2044 else if (GET_CODE (lhs) == ASHIFT
2045 && CONST_INT_P (XEXP (lhs, 1))
2046 && INTVAL (XEXP (lhs, 1)) >= 0
2047 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2048 {
2049 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2050 GET_MODE_PRECISION (mode));
2051 lhs = XEXP (lhs, 0);
2052 }
2053
2054 if (GET_CODE (rhs) == NEG)
2055 {
2056 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2057 rhs = XEXP (rhs, 0);
2058 }
2059 else if (GET_CODE (rhs) == MULT
2060 && CONST_INT_P (XEXP (rhs, 1)))
2061 {
2062 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2063 rhs = XEXP (rhs, 0);
2064 }
2065 else if (GET_CODE (rhs) == ASHIFT
2066 && CONST_INT_P (XEXP (rhs, 1))
2067 && INTVAL (XEXP (rhs, 1)) >= 0
2068 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2069 {
2070 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2071 GET_MODE_PRECISION (mode));
2072 rhs = XEXP (rhs, 0);
2073 }
2074
2075 if (rtx_equal_p (lhs, rhs))
2076 {
2077 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2078 rtx coeff;
2079 bool speed = optimize_function_for_speed_p (cfun);
2080
2081 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2082
2083 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2084 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2085 ? tem : 0;
2086 }
2087 }
2088
2089 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2090 if (CONST_SCALAR_INT_P (op1)
2091 && GET_CODE (op0) == XOR
2092 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2093 && mode_signbit_p (mode, op1))
2094 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2095 simplify_gen_binary (XOR, mode, op1,
2096 XEXP (op0, 1)));
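 /* For example, in QImode (plus (xor x 0x70) 0x80) folds to
 (xor x 0xf0), since adding the sign bit is the same as XORing it in. */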
2097
2098 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2099 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2100 && GET_CODE (op0) == MULT
2101 && GET_CODE (XEXP (op0, 0)) == NEG)
2102 {
2103 rtx in1, in2;
2104
2105 in1 = XEXP (XEXP (op0, 0), 0);
2106 in2 = XEXP (op0, 1);
2107 return simplify_gen_binary (MINUS, mode, op1,
2108 simplify_gen_binary (MULT, mode,
2109 in1, in2));
2110 }
2111
2112 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2113 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2114 is 1. */
2115 if (COMPARISON_P (op0)
2116 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2117 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2118 && (reversed = reversed_comparison (op0, mode)))
2119 return
2120 simplify_gen_unary (NEG, mode, reversed, mode);
2121
2122 /* If one of the operands is a PLUS or a MINUS, see if we can
2123 simplify this by the associative law.
2124 Don't use the associative law for floating point.
2125 The inaccuracy makes it nonassociative,
2126 and subtle programs can break if operations are associated. */
2127
2128 if (INTEGRAL_MODE_P (mode)
2129 && (plus_minus_operand_p (op0)
2130 || plus_minus_operand_p (op1))
2131 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2132 return tem;
2133
2134 /* Reassociate floating point addition only when the user
2135 specifies associative math operations. */
2136 if (FLOAT_MODE_P (mode)
2137 && flag_associative_math)
2138 {
2139 tem = simplify_associative_operation (code, mode, op0, op1);
2140 if (tem)
2141 return tem;
2142 }
2143 break;
2144
2145 case COMPARE:
2146 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2147 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2148 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2149 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2150 {
2151 rtx xop00 = XEXP (op0, 0);
2152 rtx xop10 = XEXP (op1, 0);
2153
2154 #ifdef HAVE_cc0
2155 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2156 #else
2157 if (REG_P (xop00) && REG_P (xop10)
2158 && GET_MODE (xop00) == GET_MODE (xop10)
2159 && REGNO (xop00) == REGNO (xop10)
2160 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2161 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2162 #endif
2163 return xop00;
2164 }
2165 break;
2166
2167 case MINUS:
2168 /* We can't assume x-x is 0 even with non-IEEE floating point,
2169 but since it is zero except in very strange circumstances, we
2170 will treat it as zero with -ffinite-math-only. */
2171 if (rtx_equal_p (trueop0, trueop1)
2172 && ! side_effects_p (op0)
2173 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2174 return CONST0_RTX (mode);
2175
2176 /* Change subtraction from zero into negation. (0 - x) is the
2177 same as -x when x is NaN, infinite, or finite and nonzero.
2178 But if the mode has signed zeros, and does not round towards
2179 -infinity, then 0 - 0 is 0, not -0. */
2180 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2181 return simplify_gen_unary (NEG, mode, op1, mode);
2182
2183 /* (-1 - a) is ~a. */
2184 if (trueop0 == constm1_rtx)
2185 return simplify_gen_unary (NOT, mode, op1, mode);
2186
2187 /* Subtracting 0 has no effect unless the mode has signed zeros
2188 and supports rounding towards -infinity. In such a case,
2189 0 - 0 is -0. */
2190 if (!(HONOR_SIGNED_ZEROS (mode)
2191 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2192 && trueop1 == CONST0_RTX (mode))
2193 return op0;
2194
2195 /* See if this is something like X * C - X or vice versa or
2196 if the multiplication is written as a shift. If so, we can
2197 distribute and make a new multiply, shift, or maybe just
2198 have X (if C is 2 in the example above). But don't make
2199 something more expensive than we had before. */
2200
2201 if (SCALAR_INT_MODE_P (mode))
2202 {
2203 rtx lhs = op0, rhs = op1;
2204
2205 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2206 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2207
2208 if (GET_CODE (lhs) == NEG)
2209 {
2210 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2211 lhs = XEXP (lhs, 0);
2212 }
2213 else if (GET_CODE (lhs) == MULT
2214 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2215 {
2216 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2217 lhs = XEXP (lhs, 0);
2218 }
2219 else if (GET_CODE (lhs) == ASHIFT
2220 && CONST_INT_P (XEXP (lhs, 1))
2221 && INTVAL (XEXP (lhs, 1)) >= 0
2222 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2223 {
2224 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2225 GET_MODE_PRECISION (mode));
2226 lhs = XEXP (lhs, 0);
2227 }
2228
2229 if (GET_CODE (rhs) == NEG)
2230 {
2231 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2232 rhs = XEXP (rhs, 0);
2233 }
2234 else if (GET_CODE (rhs) == MULT
2235 && CONST_INT_P (XEXP (rhs, 1)))
2236 {
2237 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2238 rhs = XEXP (rhs, 0);
2239 }
2240 else if (GET_CODE (rhs) == ASHIFT
2241 && CONST_INT_P (XEXP (rhs, 1))
2242 && INTVAL (XEXP (rhs, 1)) >= 0
2243 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2244 {
2245 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2246 GET_MODE_PRECISION (mode));
2247 negcoeff1 = -negcoeff1;
2248 rhs = XEXP (rhs, 0);
2249 }
2250
2251 if (rtx_equal_p (lhs, rhs))
2252 {
2253 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2254 rtx coeff;
2255 bool speed = optimize_function_for_speed_p (cfun);
2256
2257 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2258
2259 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2260 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2261 ? tem : 0;
2262 }
2263 }
2264
2265 /* (a - (-b)) -> (a + b). True even for IEEE. */
2266 if (GET_CODE (op1) == NEG)
2267 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2268
2269 /* (-x - c) may be simplified as (-c - x). */
2270 if (GET_CODE (op0) == NEG
2271 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2272 {
2273 tem = simplify_unary_operation (NEG, mode, op1, mode);
2274 if (tem)
2275 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2276 }
2277
2278 /* Don't let a relocatable value get a negative coeff. */
2279 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2280 return simplify_gen_binary (PLUS, mode,
2281 op0,
2282 neg_const_int (mode, op1));
2283
2284 /* (x - (x & y)) -> (x & ~y) */
2285 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2286 {
2287 if (rtx_equal_p (op0, XEXP (op1, 0)))
2288 {
2289 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2290 GET_MODE (XEXP (op1, 1)));
2291 return simplify_gen_binary (AND, mode, op0, tem);
2292 }
2293 if (rtx_equal_p (op0, XEXP (op1, 1)))
2294 {
2295 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2296 GET_MODE (XEXP (op1, 0)));
2297 return simplify_gen_binary (AND, mode, op0, tem);
2298 }
2299 }
2300
2301 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2302 by reversing the comparison code if valid. */
2303 if (STORE_FLAG_VALUE == 1
2304 && trueop0 == const1_rtx
2305 && COMPARISON_P (op1)
2306 && (reversed = reversed_comparison (op1, mode)))
2307 return reversed;
2308
2309 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2310 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2311 && GET_CODE (op1) == MULT
2312 && GET_CODE (XEXP (op1, 0)) == NEG)
2313 {
2314 rtx in1, in2;
2315
2316 in1 = XEXP (XEXP (op1, 0), 0);
2317 in2 = XEXP (op1, 1);
2318 return simplify_gen_binary (PLUS, mode,
2319 simplify_gen_binary (MULT, mode,
2320 in1, in2),
2321 op0);
2322 }
2323
2324 /* Canonicalize (minus (neg A) (mult B C)) to
2325 (minus (mult (neg B) C) A). */
2326 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2327 && GET_CODE (op1) == MULT
2328 && GET_CODE (op0) == NEG)
2329 {
2330 rtx in1, in2;
2331
2332 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2333 in2 = XEXP (op1, 1);
2334 return simplify_gen_binary (MINUS, mode,
2335 simplify_gen_binary (MULT, mode,
2336 in1, in2),
2337 XEXP (op0, 0));
2338 }
2339
2340 /* If one of the operands is a PLUS or a MINUS, see if we can
2341 simplify this by the associative law. This will, for example,
2342 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2343 Don't use the associative law for floating point.
2344 The inaccuracy makes it nonassociative,
2345 and subtle programs can break if operations are associated. */
2346
2347 if (INTEGRAL_MODE_P (mode)
2348 && (plus_minus_operand_p (op0)
2349 || plus_minus_operand_p (op1))
2350 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2351 return tem;
2352 break;
2353
2354 case MULT:
2355 if (trueop1 == constm1_rtx)
2356 return simplify_gen_unary (NEG, mode, op0, mode);
2357
2358 if (GET_CODE (op0) == NEG)
2359 {
2360 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2361 /* If op1 is a MULT as well and simplify_unary_operation
2362 just moved the NEG to the second operand, simplify_gen_binary
2363 below could, through simplify_associative_operation, move
2364 the NEG around again and recurse endlessly. */
2365 if (temp
2366 && GET_CODE (op1) == MULT
2367 && GET_CODE (temp) == MULT
2368 && XEXP (op1, 0) == XEXP (temp, 0)
2369 && GET_CODE (XEXP (temp, 1)) == NEG
2370 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2371 temp = NULL_RTX;
2372 if (temp)
2373 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2374 }
2375 if (GET_CODE (op1) == NEG)
2376 {
2377 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2378 /* If op0 is a MULT as well and simplify_unary_operation
2379 just moved the NEG to the second operand, simplify_gen_binary
2380 below could, through simplify_associative_operation, move
2381 the NEG around again and recurse endlessly. */
2382 if (temp
2383 && GET_CODE (op0) == MULT
2384 && GET_CODE (temp) == MULT
2385 && XEXP (op0, 0) == XEXP (temp, 0)
2386 && GET_CODE (XEXP (temp, 1)) == NEG
2387 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2388 temp = NULL_RTX;
2389 if (temp)
2390 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2391 }
2392
2393 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2394 x is NaN, since x * 0 is then also NaN. Nor is it valid
2395 when the mode has signed zeros, since multiplying a negative
2396 number by 0 will give -0, not 0. */
2397 if (!HONOR_NANS (mode)
2398 && !HONOR_SIGNED_ZEROS (mode)
2399 && trueop1 == CONST0_RTX (mode)
2400 && ! side_effects_p (op0))
2401 return op1;
2402
2403 /* In IEEE floating point, x*1 is not equivalent to x for
2404 signalling NaNs. */
2405 if (!HONOR_SNANS (mode)
2406 && trueop1 == CONST1_RTX (mode))
2407 return op0;
2408
2409 /* Convert multiply by constant power of two into shift. */
2410 if (CONST_SCALAR_INT_P (trueop1))
2411 {
2412 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2413 if (val >= 0)
2414 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2415 }
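 /* For example, (mult x 8) becomes (ashift x 3). */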
2416
2417 /* x*2 is x+x and x*(-1) is -x */
2418 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2419 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2420 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2421 && GET_MODE (op0) == mode)
2422 {
2423 REAL_VALUE_TYPE d;
2424 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2425
2426 if (REAL_VALUES_EQUAL (d, dconst2))
2427 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2428
2429 if (!HONOR_SNANS (mode)
2430 && REAL_VALUES_EQUAL (d, dconstm1))
2431 return simplify_gen_unary (NEG, mode, op0, mode);
2432 }
2433
2434 /* Optimize -x * -x as x * x. */
2435 if (FLOAT_MODE_P (mode)
2436 && GET_CODE (op0) == NEG
2437 && GET_CODE (op1) == NEG
2438 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2439 && !side_effects_p (XEXP (op0, 0)))
2440 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2441
2442 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2443 if (SCALAR_FLOAT_MODE_P (mode)
2444 && GET_CODE (op0) == ABS
2445 && GET_CODE (op1) == ABS
2446 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2447 && !side_effects_p (XEXP (op0, 0)))
2448 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2449
2450 /* Reassociate multiplication, but for floating point MULTs
2451 only when the user specifies unsafe math optimizations. */
2452 if (! FLOAT_MODE_P (mode)
2453 || flag_unsafe_math_optimizations)
2454 {
2455 tem = simplify_associative_operation (code, mode, op0, op1);
2456 if (tem)
2457 return tem;
2458 }
2459 break;
2460
2461 case IOR:
2462 if (trueop1 == CONST0_RTX (mode))
2463 return op0;
2464 if (INTEGRAL_MODE_P (mode)
2465 && trueop1 == CONSTM1_RTX (mode)
2466 && !side_effects_p (op0))
2467 return op1;
2468 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2469 return op0;
2470 /* A | (~A) -> -1 */
2471 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2472 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2473 && ! side_effects_p (op0)
2474 && SCALAR_INT_MODE_P (mode))
2475 return constm1_rtx;
2476
2477 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2478 if (CONST_INT_P (op1)
2479 && HWI_COMPUTABLE_MODE_P (mode)
2480 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2481 && !side_effects_p (op0))
2482 return op1;
2483
2484 /* Canonicalize (X & C1) | C2. */
2485 if (GET_CODE (op0) == AND
2486 && CONST_INT_P (trueop1)
2487 && CONST_INT_P (XEXP (op0, 1)))
2488 {
2489 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2490 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2491 HOST_WIDE_INT c2 = INTVAL (trueop1);
2492
2493 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2494 if ((c1 & c2) == c1
2495 && !side_effects_p (XEXP (op0, 0)))
2496 return trueop1;
2497
2498 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2499 if (((c1|c2) & mask) == mask)
2500 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2501
2502 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2503 if (((c1 & ~c2) & mask) != (c1 & mask))
2504 {
2505 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2506 gen_int_mode (c1 & ~c2, mode));
2507 return simplify_gen_binary (IOR, mode, tem, op1);
2508 }
2509 }
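 /* For example, (ior (and x 0x0f) 0x06) becomes (ior (and x 0x09) 0x06):
 the bits of C1 that are already set in C2 are redundant. */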
2510
2511 /* Convert (A & B) | A to A. */
2512 if (GET_CODE (op0) == AND
2513 && (rtx_equal_p (XEXP (op0, 0), op1)
2514 || rtx_equal_p (XEXP (op0, 1), op1))
2515 && ! side_effects_p (XEXP (op0, 0))
2516 && ! side_effects_p (XEXP (op0, 1)))
2517 return op1;
2518
2519 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2520 mode size to (rotate A CX). */
2521
2522 if (GET_CODE (op1) == ASHIFT
2523 || GET_CODE (op1) == SUBREG)
2524 {
2525 opleft = op1;
2526 opright = op0;
2527 }
2528 else
2529 {
2530 opright = op1;
2531 opleft = op0;
2532 }
2533
2534 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2535 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2536 && CONST_INT_P (XEXP (opleft, 1))
2537 && CONST_INT_P (XEXP (opright, 1))
2538 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2539 == GET_MODE_PRECISION (mode)))
2540 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
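 /* For example, in SImode (ior (ashift x 24) (lshiftrt x 8)) becomes
 (rotate x 24), since the shift counts sum to the precision. */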
2541
2542 /* Same, but for ashift that has been "simplified" to a wider mode
2543 by simplify_shift_const. */
2544
2545 if (GET_CODE (opleft) == SUBREG
2546 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2547 && GET_CODE (opright) == LSHIFTRT
2548 && GET_CODE (XEXP (opright, 0)) == SUBREG
2549 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2550 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2551 && (GET_MODE_SIZE (GET_MODE (opleft))
2552 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2553 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2554 SUBREG_REG (XEXP (opright, 0)))
2555 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2556 && CONST_INT_P (XEXP (opright, 1))
2557 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2558 == GET_MODE_PRECISION (mode)))
2559 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2560 XEXP (SUBREG_REG (opleft), 1));
2561
2562 /* If we have (ior (and X C1) C2), simplify this by making
2563 C1 as small as possible if C1 actually changes. */
2564 if (CONST_INT_P (op1)
2565 && (HWI_COMPUTABLE_MODE_P (mode)
2566 || INTVAL (op1) > 0)
2567 && GET_CODE (op0) == AND
2568 && CONST_INT_P (XEXP (op0, 1))
2569 && CONST_INT_P (op1)
2570 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2571 {
2572 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2573 gen_int_mode (UINTVAL (XEXP (op0, 1))
2574 & ~UINTVAL (op1),
2575 mode));
2576 return simplify_gen_binary (IOR, mode, tmp, op1);
2577 }
2578
2579 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2580 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2581 the PLUS does not affect any of the bits in OP1: then we can do
2582 the IOR as a PLUS and we can associate. This is valid if OP1
2583 can be safely shifted left C bits. */
2584 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2585 && GET_CODE (XEXP (op0, 0)) == PLUS
2586 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2587 && CONST_INT_P (XEXP (op0, 1))
2588 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2589 {
2590 int count = INTVAL (XEXP (op0, 1));
2591 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2592
2593 if (mask >> count == INTVAL (trueop1)
2594 && trunc_int_for_mode (mask, mode) == mask
2595 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2596 return simplify_gen_binary (ASHIFTRT, mode,
2597 plus_constant (mode, XEXP (op0, 0),
2598 mask),
2599 XEXP (op0, 1));
2600 }
2601
2602 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2603 if (tem)
2604 return tem;
2605
2606 tem = simplify_associative_operation (code, mode, op0, op1);
2607 if (tem)
2608 return tem;
2609 break;
2610
2611 case XOR:
2612 if (trueop1 == CONST0_RTX (mode))
2613 return op0;
2614 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2615 return simplify_gen_unary (NOT, mode, op0, mode);
2616 if (rtx_equal_p (trueop0, trueop1)
2617 && ! side_effects_p (op0)
2618 && GET_MODE_CLASS (mode) != MODE_CC)
2619 return CONST0_RTX (mode);
2620
2621 /* Canonicalize XOR of the most significant bit to PLUS. */
2622 if (CONST_SCALAR_INT_P (op1)
2623 && mode_signbit_p (mode, op1))
2624 return simplify_gen_binary (PLUS, mode, op0, op1);
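 /* For example, in QImode (xor x 0x80) is canonicalized to
 (plus x 0x80); flipping the sign bit and adding it coincide modulo 2^8. */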
2625 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2626 if (CONST_SCALAR_INT_P (op1)
2627 && GET_CODE (op0) == PLUS
2628 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2629 && mode_signbit_p (mode, XEXP (op0, 1)))
2630 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2631 simplify_gen_binary (XOR, mode, op1,
2632 XEXP (op0, 1)));
2633
2634 /* If we are XORing two things that have no bits in common,
2635 convert them into an IOR. This helps to detect rotation encoded
2636 using those methods and possibly other simplifications. */
2637
2638 if (HWI_COMPUTABLE_MODE_P (mode)
2639 && (nonzero_bits (op0, mode)
2640 & nonzero_bits (op1, mode)) == 0)
2641 return (simplify_gen_binary (IOR, mode, op0, op1));
2642
2643 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2644 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2645 (NOT y). */
2646 {
2647 int num_negated = 0;
2648
2649 if (GET_CODE (op0) == NOT)
2650 num_negated++, op0 = XEXP (op0, 0);
2651 if (GET_CODE (op1) == NOT)
2652 num_negated++, op1 = XEXP (op1, 0);
2653
2654 if (num_negated == 2)
2655 return simplify_gen_binary (XOR, mode, op0, op1);
2656 else if (num_negated == 1)
2657 return simplify_gen_unary (NOT, mode,
2658 simplify_gen_binary (XOR, mode, op0, op1),
2659 mode);
2660 }
2661
2662 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2663 correspond to a machine insn or result in further simplifications
2664 if B is a constant. */
2665
2666 if (GET_CODE (op0) == AND
2667 && rtx_equal_p (XEXP (op0, 1), op1)
2668 && ! side_effects_p (op1))
2669 return simplify_gen_binary (AND, mode,
2670 simplify_gen_unary (NOT, mode,
2671 XEXP (op0, 0), mode),
2672 op1);
2673
2674 else if (GET_CODE (op0) == AND
2675 && rtx_equal_p (XEXP (op0, 0), op1)
2676 && ! side_effects_p (op1))
2677 return simplify_gen_binary (AND, mode,
2678 simplify_gen_unary (NOT, mode,
2679 XEXP (op0, 1), mode),
2680 op1);
2681
2682 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2683 we can transform like this:
2684 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2685 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2686 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2687 Attempt a few simplifications when B and C are both constants. */
2688 if (GET_CODE (op0) == AND
2689 && CONST_INT_P (op1)
2690 && CONST_INT_P (XEXP (op0, 1)))
2691 {
2692 rtx a = XEXP (op0, 0);
2693 rtx b = XEXP (op0, 1);
2694 rtx c = op1;
2695 HOST_WIDE_INT bval = INTVAL (b);
2696 HOST_WIDE_INT cval = INTVAL (c);
2697
2698 rtx na_c
2699 = simplify_binary_operation (AND, mode,
2700 simplify_gen_unary (NOT, mode, a, mode),
2701 c);
2702 if ((~cval & bval) == 0)
2703 {
2704 /* Try to simplify ~A&C | ~B&C. */
2705 if (na_c != NULL_RTX)
2706 return simplify_gen_binary (IOR, mode, na_c,
2707 gen_int_mode (~bval & cval, mode));
2708 }
2709 else
2710 {
2711 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2712 if (na_c == const0_rtx)
2713 {
2714 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2715 gen_int_mode (~cval & bval,
2716 mode));
2717 return simplify_gen_binary (IOR, mode, a_nc_b,
2718 gen_int_mode (~bval & cval,
2719 mode));
2720 }
2721 }
2722 }
2723
2724 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2725 comparison if STORE_FLAG_VALUE is 1. */
2726 if (STORE_FLAG_VALUE == 1
2727 && trueop1 == const1_rtx
2728 && COMPARISON_P (op0)
2729 && (reversed = reversed_comparison (op0, mode)))
2730 return reversed;
2731
2732 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2733 is (lt foo (const_int 0)), so we can perform the above
2734 simplification if STORE_FLAG_VALUE is 1. */
2735
2736 if (STORE_FLAG_VALUE == 1
2737 && trueop1 == const1_rtx
2738 && GET_CODE (op0) == LSHIFTRT
2739 && CONST_INT_P (XEXP (op0, 1))
2740 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2741 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2742
2743 /* (xor (comparison foo bar) (const_int sign-bit))
2744 when STORE_FLAG_VALUE is the sign bit. */
2745 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2746 && trueop1 == const_true_rtx
2747 && COMPARISON_P (op0)
2748 && (reversed = reversed_comparison (op0, mode)))
2749 return reversed;
2750
2751 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2752 if (tem)
2753 return tem;
2754
2755 tem = simplify_associative_operation (code, mode, op0, op1);
2756 if (tem)
2757 return tem;
2758 break;
2759
2760 case AND:
2761 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2762 return trueop1;
2763 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2764 return op0;
2765 if (HWI_COMPUTABLE_MODE_P (mode))
2766 {
2767 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2768 HOST_WIDE_INT nzop1;
2769 if (CONST_INT_P (trueop1))
2770 {
2771 HOST_WIDE_INT val1 = INTVAL (trueop1);
2772 /* If we are turning off bits already known off in OP0, we need
2773 not do an AND. */
2774 if ((nzop0 & ~val1) == 0)
2775 return op0;
2776 }
2777 nzop1 = nonzero_bits (trueop1, mode);
2778 /* If we are clearing all the nonzero bits, the result is zero. */
2779 if ((nzop1 & nzop0) == 0
2780 && !side_effects_p (op0) && !side_effects_p (op1))
2781 return CONST0_RTX (mode);
2782 }
2783 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2784 && GET_MODE_CLASS (mode) != MODE_CC)
2785 return op0;
2786 /* A & (~A) -> 0 */
2787 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2788 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2789 && ! side_effects_p (op0)
2790 && GET_MODE_CLASS (mode) != MODE_CC)
2791 return CONST0_RTX (mode);
2792
2793 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2794 there are no nonzero bits of C outside of X's mode. */
2795 if ((GET_CODE (op0) == SIGN_EXTEND
2796 || GET_CODE (op0) == ZERO_EXTEND)
2797 && CONST_INT_P (trueop1)
2798 && HWI_COMPUTABLE_MODE_P (mode)
2799 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2800 & UINTVAL (trueop1)) == 0)
2801 {
2802 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2803 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2804 gen_int_mode (INTVAL (trueop1),
2805 imode));
2806 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2807 }
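 /* For example, (and (sign_extend:SI x:QI) 0x7f) becomes
 (zero_extend:SI (and:QI x 0x7f)). */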
2808
2809 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2810 we might be able to further simplify the AND with X and potentially
2811 remove the truncation altogether. */
2812 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2813 {
2814 rtx x = XEXP (op0, 0);
2815 enum machine_mode xmode = GET_MODE (x);
2816 tem = simplify_gen_binary (AND, xmode, x,
2817 gen_int_mode (INTVAL (trueop1), xmode));
2818 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2819 }
2820
2821 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2822 if (GET_CODE (op0) == IOR
2823 && CONST_INT_P (trueop1)
2824 && CONST_INT_P (XEXP (op0, 1)))
2825 {
2826 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2827 return simplify_gen_binary (IOR, mode,
2828 simplify_gen_binary (AND, mode,
2829 XEXP (op0, 0), op1),
2830 gen_int_mode (tmp, mode));
2831 }
2832
2833 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2834 insn (and may simplify more). */
2835 if (GET_CODE (op0) == XOR
2836 && rtx_equal_p (XEXP (op0, 0), op1)
2837 && ! side_effects_p (op1))
2838 return simplify_gen_binary (AND, mode,
2839 simplify_gen_unary (NOT, mode,
2840 XEXP (op0, 1), mode),
2841 op1);
2842
2843 if (GET_CODE (op0) == XOR
2844 && rtx_equal_p (XEXP (op0, 1), op1)
2845 && ! side_effects_p (op1))
2846 return simplify_gen_binary (AND, mode,
2847 simplify_gen_unary (NOT, mode,
2848 XEXP (op0, 0), mode),
2849 op1);
2850
2851 /* Similarly for (~(A ^ B)) & A. */
2852 if (GET_CODE (op0) == NOT
2853 && GET_CODE (XEXP (op0, 0)) == XOR
2854 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2855 && ! side_effects_p (op1))
2856 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2857
2858 if (GET_CODE (op0) == NOT
2859 && GET_CODE (XEXP (op0, 0)) == XOR
2860 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2861 && ! side_effects_p (op1))
2862 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2863
2864 /* Convert (A | B) & A to A. */
2865 if (GET_CODE (op0) == IOR
2866 && (rtx_equal_p (XEXP (op0, 0), op1)
2867 || rtx_equal_p (XEXP (op0, 1), op1))
2868 && ! side_effects_p (XEXP (op0, 0))
2869 && ! side_effects_p (XEXP (op0, 1)))
2870 return op1;
2871
2872 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2873 ((A & N) + B) & M -> (A + B) & M
2874 Similarly if (N & M) == 0,
2875 ((A | N) + B) & M -> (A + B) & M
2876 and for - instead of + and/or ^ instead of |.
2877 Also, if (N & M) == 0, then
2878 (A +- N) & M -> A & M. */
2879 if (CONST_INT_P (trueop1)
2880 && HWI_COMPUTABLE_MODE_P (mode)
2881 && ~UINTVAL (trueop1)
2882 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2883 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2884 {
2885 rtx pmop[2];
2886 int which;
2887
2888 pmop[0] = XEXP (op0, 0);
2889 pmop[1] = XEXP (op0, 1);
2890
2891 if (CONST_INT_P (pmop[1])
2892 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2893 return simplify_gen_binary (AND, mode, pmop[0], op1);
2894
2895 for (which = 0; which < 2; which++)
2896 {
2897 tem = pmop[which];
2898 switch (GET_CODE (tem))
2899 {
2900 case AND:
2901 if (CONST_INT_P (XEXP (tem, 1))
2902 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2903 == UINTVAL (trueop1))
2904 pmop[which] = XEXP (tem, 0);
2905 break;
2906 case IOR:
2907 case XOR:
2908 if (CONST_INT_P (XEXP (tem, 1))
2909 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2910 pmop[which] = XEXP (tem, 0);
2911 break;
2912 default:
2913 break;
2914 }
2915 }
2916
2917 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2918 {
2919 tem = simplify_gen_binary (GET_CODE (op0), mode,
2920 pmop[0], pmop[1]);
2921 return simplify_gen_binary (code, mode, tem, op1);
2922 }
2923 }
2924
2925 /* (and X (ior (not X) Y)) -> (and X Y) */
2926 if (GET_CODE (op1) == IOR
2927 && GET_CODE (XEXP (op1, 0)) == NOT
2928 && op0 == XEXP (XEXP (op1, 0), 0))
2929 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2930
2931 /* (and (ior (not X) Y) X) -> (and X Y) */
2932 if (GET_CODE (op0) == IOR
2933 && GET_CODE (XEXP (op0, 0)) == NOT
2934 && op1 == XEXP (XEXP (op0, 0), 0))
2935 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2936
2937 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2938 if (tem)
2939 return tem;
2940
2941 tem = simplify_associative_operation (code, mode, op0, op1);
2942 if (tem)
2943 return tem;
2944 break;
2945
2946 case UDIV:
2947 /* 0/x is 0 (or x&0 if x has side-effects). */
2948 if (trueop0 == CONST0_RTX (mode))
2949 {
2950 if (side_effects_p (op1))
2951 return simplify_gen_binary (AND, mode, op1, trueop0);
2952 return trueop0;
2953 }
2954 /* x/1 is x. */
2955 if (trueop1 == CONST1_RTX (mode))
2956 {
2957 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2958 if (tem)
2959 return tem;
2960 }
2961 /* Convert divide by power of two into shift. */
2962 if (CONST_INT_P (trueop1)
2963 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2964 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2965 break;
2966
2967 case DIV:
2968 /* Handle floating point and integers separately. */
2969 if (SCALAR_FLOAT_MODE_P (mode))
2970 {
2971 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2972 safe for modes with NaNs, since 0.0 / 0.0 will then be
2973 NaN rather than 0.0. Nor is it safe for modes with signed
2974 zeros, since dividing 0 by a negative number gives -0.0 */
2975 if (trueop0 == CONST0_RTX (mode)
2976 && !HONOR_NANS (mode)
2977 && !HONOR_SIGNED_ZEROS (mode)
2978 && ! side_effects_p (op1))
2979 return op0;
2980 /* x/1.0 is x. */
2981 if (trueop1 == CONST1_RTX (mode)
2982 && !HONOR_SNANS (mode))
2983 return op0;
2984
2985 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2986 && trueop1 != CONST0_RTX (mode))
2987 {
2988 REAL_VALUE_TYPE d;
2989 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2990
2991 /* x/-1.0 is -x. */
2992 if (REAL_VALUES_EQUAL (d, dconstm1)
2993 && !HONOR_SNANS (mode))
2994 return simplify_gen_unary (NEG, mode, op0, mode);
2995
2996 /* Change FP division by a constant into multiplication.
2997 Only do this with -freciprocal-math. */
2998 if (flag_reciprocal_math
2999 && !REAL_VALUES_EQUAL (d, dconst0))
3000 {
3001 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3002 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3003 return simplify_gen_binary (MULT, mode, op0, tem);
3004 }
3005 }
3006 }
3007 else if (SCALAR_INT_MODE_P (mode))
3008 {
3009 /* 0/x is 0 (or x&0 if x has side-effects). */
3010 if (trueop0 == CONST0_RTX (mode)
3011 && !cfun->can_throw_non_call_exceptions)
3012 {
3013 if (side_effects_p (op1))
3014 return simplify_gen_binary (AND, mode, op1, trueop0);
3015 return trueop0;
3016 }
3017 /* x/1 is x. */
3018 if (trueop1 == CONST1_RTX (mode))
3019 {
3020 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3021 if (tem)
3022 return tem;
3023 }
3024 /* x/-1 is -x. */
3025 if (trueop1 == constm1_rtx)
3026 {
3027 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3028 if (x)
3029 return simplify_gen_unary (NEG, mode, x, mode);
3030 }
3031 }
3032 break;
3033
3034 case UMOD:
3035 /* 0%x is 0 (or x&0 if x has side-effects). */
3036 if (trueop0 == CONST0_RTX (mode))
3037 {
3038 if (side_effects_p (op1))
3039 return simplify_gen_binary (AND, mode, op1, trueop0);
3040 return trueop0;
3041 }
3042 /* x%1 is 0 (or x&0 if x has side-effects). */
3043 if (trueop1 == CONST1_RTX (mode))
3044 {
3045 if (side_effects_p (op0))
3046 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3047 return CONST0_RTX (mode);
3048 }
3049 /* Implement modulus by power of two as AND. */
3050 if (CONST_INT_P (trueop1)
3051 && exact_log2 (UINTVAL (trueop1)) > 0)
3052 return simplify_gen_binary (AND, mode, op0,
3053 gen_int_mode (INTVAL (op1) - 1, mode));
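 /* For example, (umod x 16) becomes (and x 15). */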
3054 break;
3055
3056 case MOD:
3057 /* 0%x is 0 (or x&0 if x has side-effects). */
3058 if (trueop0 == CONST0_RTX (mode))
3059 {
3060 if (side_effects_p (op1))
3061 return simplify_gen_binary (AND, mode, op1, trueop0);
3062 return trueop0;
3063 }
3064 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3065 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3066 {
3067 if (side_effects_p (op0))
3068 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3069 return CONST0_RTX (mode);
3070 }
3071 break;
3072
3073 case ROTATERT:
3074 case ROTATE:
3075 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3076 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3077 bitsize - 1, use the other direction of rotate with an amount
3078 of 1 .. bitsize / 2 - 1 instead. */
3079 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3080 if (CONST_INT_P (trueop1)
3081 && IN_RANGE (INTVAL (trueop1),
3082 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3083 GET_MODE_PRECISION (mode) - 1))
3084 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3085 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3086 - INTVAL (trueop1)));
3087 #endif
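 /* For example, when both rotate directions are available, in SImode
 (rotate x 27) is rewritten as (rotatert x 5). */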
3088 /* FALLTHRU */
3089 case ASHIFTRT:
3090 if (trueop1 == CONST0_RTX (mode))
3091 return op0;
3092 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3093 return op0;
3094 /* Rotating ~0 always results in ~0. */
3095 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3096 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3097 && ! side_effects_p (op1))
3098 return op0;
3099 canonicalize_shift:
3100 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3101 {
3102 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3103 if (val != INTVAL (op1))
3104 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3105 }
3106 break;
3107
3108 case ASHIFT:
3109 case SS_ASHIFT:
3110 case US_ASHIFT:
3111 if (trueop1 == CONST0_RTX (mode))
3112 return op0;
3113 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3114 return op0;
3115 goto canonicalize_shift;
3116
3117 case LSHIFTRT:
3118 if (trueop1 == CONST0_RTX (mode))
3119 return op0;
3120 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3121 return op0;
3122 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3123 if (GET_CODE (op0) == CLZ
3124 && CONST_INT_P (trueop1)
3125 && STORE_FLAG_VALUE == 1
3126 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3127 {
3128 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3129 unsigned HOST_WIDE_INT zero_val = 0;
3130
3131 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3132 && zero_val == GET_MODE_PRECISION (imode)
3133 && INTVAL (trueop1) == exact_log2 (zero_val))
3134 return simplify_gen_relational (EQ, mode, imode,
3135 XEXP (op0, 0), const0_rtx);
3136 }
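 /* For example, if CLZ of SImode zero is defined as 32, then
 (lshiftrt (clz:SI x) 5) is nonzero only when x is zero, so it
 folds to (eq x 0). */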
3137 goto canonicalize_shift;
3138
3139 case SMIN:
3140 if (width <= HOST_BITS_PER_WIDE_INT
3141 && mode_signbit_p (mode, trueop1)
3142 && ! side_effects_p (op0))
3143 return op1;
3144 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3145 return op0;
3146 tem = simplify_associative_operation (code, mode, op0, op1);
3147 if (tem)
3148 return tem;
3149 break;
3150
3151 case SMAX:
3152 if (width <= HOST_BITS_PER_WIDE_INT
3153 && CONST_INT_P (trueop1)
3154 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3155 && ! side_effects_p (op0))
3156 return op1;
3157 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3158 return op0;
3159 tem = simplify_associative_operation (code, mode, op0, op1);
3160 if (tem)
3161 return tem;
3162 break;
3163
3164 case UMIN:
3165 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3166 return op1;
3167 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3168 return op0;
3169 tem = simplify_associative_operation (code, mode, op0, op1);
3170 if (tem)
3171 return tem;
3172 break;
3173
3174 case UMAX:
3175 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3176 return op1;
3177 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3178 return op0;
3179 tem = simplify_associative_operation (code, mode, op0, op1);
3180 if (tem)
3181 return tem;
3182 break;
3183
3184 case SS_PLUS:
3185 case US_PLUS:
3186 case SS_MINUS:
3187 case US_MINUS:
3188 case SS_MULT:
3189 case US_MULT:
3190 case SS_DIV:
3191 case US_DIV:
3192 /* ??? There are simplifications that can be done. */
3193 return 0;
3194
3195 case VEC_SELECT:
3196 if (!VECTOR_MODE_P (mode))
3197 {
3198 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3199 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3200 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3201 gcc_assert (XVECLEN (trueop1, 0) == 1);
3202 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3203
3204 if (GET_CODE (trueop0) == CONST_VECTOR)
3205 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3206 (trueop1, 0, 0)));
3207
3208 /* Extract a scalar element from a nested VEC_SELECT expression
3209 (with an optional nested VEC_CONCAT expression). Some targets
3210 (i386) extract a scalar element from a vector using a chain of
3211 nested VEC_SELECT expressions. When the input operand is a
3212 memory operand, this operation can be simplified to a simple
3213 scalar load from an offset memory address. */
3214 if (GET_CODE (trueop0) == VEC_SELECT)
3215 {
3216 rtx op0 = XEXP (trueop0, 0);
3217 rtx op1 = XEXP (trueop0, 1);
3218
3219 enum machine_mode opmode = GET_MODE (op0);
3220 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3221 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3222
3223 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3224 int elem;
3225
3226 rtvec vec;
3227 rtx tmp_op, tmp;
3228
3229 gcc_assert (GET_CODE (op1) == PARALLEL);
3230 gcc_assert (i < n_elts);
3231
3232 /* Select the element pointed to by the nested selector. */
3233 elem = INTVAL (XVECEXP (op1, 0, i));
3234
3235 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3236 if (GET_CODE (op0) == VEC_CONCAT)
3237 {
3238 rtx op00 = XEXP (op0, 0);
3239 rtx op01 = XEXP (op0, 1);
3240
3241 enum machine_mode mode00, mode01;
3242 int n_elts00, n_elts01;
3243
3244 mode00 = GET_MODE (op00);
3245 mode01 = GET_MODE (op01);
3246
3247 /* Find out number of elements of each operand. */
3248 if (VECTOR_MODE_P (mode00))
3249 {
3250 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3251 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3252 }
3253 else
3254 n_elts00 = 1;
3255
3256 if (VECTOR_MODE_P (mode01))
3257 {
3258 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3259 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3260 }
3261 else
3262 n_elts01 = 1;
3263
3264 gcc_assert (n_elts == n_elts00 + n_elts01);
3265
3266 /* Select correct operand of VEC_CONCAT
3267 and adjust selector. */
3268 if (elem < n_elts00)
3269 tmp_op = op00;
3270 else
3271 {
3272 tmp_op = op01;
3273 elem -= n_elts00;
3274 }
3275 }
3276 else
3277 tmp_op = op0;
3278
3279 vec = rtvec_alloc (1);
3280 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3281
3282 tmp = gen_rtx_fmt_ee (code, mode,
3283 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3284 return tmp;
3285 }
3286 if (GET_CODE (trueop0) == VEC_DUPLICATE
3287 && GET_MODE (XEXP (trueop0, 0)) == mode)
3288 return XEXP (trueop0, 0);
3289 }
3290 else
3291 {
3292 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3293 gcc_assert (GET_MODE_INNER (mode)
3294 == GET_MODE_INNER (GET_MODE (trueop0)));
3295 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3296
3297 if (GET_CODE (trueop0) == CONST_VECTOR)
3298 {
3299 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3300 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3301 rtvec v = rtvec_alloc (n_elts);
3302 unsigned int i;
3303
3304 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3305 for (i = 0; i < n_elts; i++)
3306 {
3307 rtx x = XVECEXP (trueop1, 0, i);
3308
3309 gcc_assert (CONST_INT_P (x));
3310 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3311 INTVAL (x));
3312 }
3313
3314 return gen_rtx_CONST_VECTOR (mode, v);
3315 }
3316
3317 /* Recognize the identity. */
3318 if (GET_MODE (trueop0) == mode)
3319 {
3320 bool maybe_ident = true;
3321 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3322 {
3323 rtx j = XVECEXP (trueop1, 0, i);
3324 if (!CONST_INT_P (j) || INTVAL (j) != i)
3325 {
3326 maybe_ident = false;
3327 break;
3328 }
3329 }
3330 if (maybe_ident)
3331 return trueop0;
3332 }
3333
3334 /* If we build {a,b} then permute it, build the result directly. */
3335 if (XVECLEN (trueop1, 0) == 2
3336 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3337 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3338 && GET_CODE (trueop0) == VEC_CONCAT
3339 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3340 && GET_MODE (XEXP (trueop0, 0)) == mode
3341 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3342 && GET_MODE (XEXP (trueop0, 1)) == mode)
3343 {
3344 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3345 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3346 rtx subop0, subop1;
3347
3348 gcc_assert (i0 < 4 && i1 < 4);
3349 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3350 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3351
3352 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3353 }
3354
3355 if (XVECLEN (trueop1, 0) == 2
3356 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3357 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3358 && GET_CODE (trueop0) == VEC_CONCAT
3359 && GET_MODE (trueop0) == mode)
3360 {
3361 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3362 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3363 rtx subop0, subop1;
3364
3365 gcc_assert (i0 < 2 && i1 < 2);
3366 subop0 = XEXP (trueop0, i0);
3367 subop1 = XEXP (trueop0, i1);
3368
3369 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3370 }
3371
3372 /* If we select one half of a vec_concat, return that. */
3373 if (GET_CODE (trueop0) == VEC_CONCAT
3374 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3375 {
3376 rtx subop0 = XEXP (trueop0, 0);
3377 rtx subop1 = XEXP (trueop0, 1);
3378 enum machine_mode mode0 = GET_MODE (subop0);
3379 enum machine_mode mode1 = GET_MODE (subop1);
3380 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3381 int l0 = GET_MODE_SIZE (mode0) / li;
3382 int l1 = GET_MODE_SIZE (mode1) / li;
3383 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3384 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3385 {
3386 bool success = true;
3387 for (int i = 1; i < l0; ++i)
3388 {
3389 rtx j = XVECEXP (trueop1, 0, i);
3390 if (!CONST_INT_P (j) || INTVAL (j) != i)
3391 {
3392 success = false;
3393 break;
3394 }
3395 }
3396 if (success)
3397 return subop0;
3398 }
3399 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3400 {
3401 bool success = true;
3402 for (int i = 1; i < l1; ++i)
3403 {
3404 rtx j = XVECEXP (trueop1, 0, i);
3405 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3406 {
3407 success = false;
3408 break;
3409 }
3410 }
3411 if (success)
3412 return subop1;
3413 }
3414 }
3415 }
3416
3417 if (XVECLEN (trueop1, 0) == 1
3418 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3419 && GET_CODE (trueop0) == VEC_CONCAT)
3420 {
3421 rtx vec = trueop0;
3422 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3423
3424 /* Try to find the element in the VEC_CONCAT. */
3425 while (GET_MODE (vec) != mode
3426 && GET_CODE (vec) == VEC_CONCAT)
3427 {
3428 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3429 if (offset < vec_size)
3430 vec = XEXP (vec, 0);
3431 else
3432 {
3433 offset -= vec_size;
3434 vec = XEXP (vec, 1);
3435 }
3436 vec = avoid_constant_pool_reference (vec);
3437 }
3438
3439 if (GET_MODE (vec) == mode)
3440 return vec;
3441 }
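 /* For example, (vec_select:SI (vec_concat:V2SI x y)
 (parallel [(const_int 1)])) walks past the 4-byte x and returns y. */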
3442
3443 /* If we select elements in a vec_merge that all come from the same
3444 operand, select from that operand directly. */
3445 if (GET_CODE (op0) == VEC_MERGE)
3446 {
3447 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3448 if (CONST_INT_P (trueop02))
3449 {
3450 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3451 bool all_operand0 = true;
3452 bool all_operand1 = true;
3453 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3454 {
3455 rtx j = XVECEXP (trueop1, 0, i);
3456 if (sel & (1 << UINTVAL (j)))
3457 all_operand1 = false;
3458 else
3459 all_operand0 = false;
3460 }
3461 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3462 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3463 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3464 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3465 }
3466 }
3467
3468 /* If we have two nested selects that are inverses of each
3469 other, replace them with the source operand. */
3470 if (GET_CODE (trueop0) == VEC_SELECT
3471 && GET_MODE (XEXP (trueop0, 0)) == mode)
3472 {
3473 rtx op0_subop1 = XEXP (trueop0, 1);
3474 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3475 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3476
3477 /* Apply the outer ordering vector to the inner one. (The inner
3478 ordering vector is expressly permitted to be of a different
3479 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3480 then the two VEC_SELECTs cancel. */
3481 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3482 {
3483 rtx x = XVECEXP (trueop1, 0, i);
3484 if (!CONST_INT_P (x))
3485 return 0;
3486 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3487 if (!CONST_INT_P (y) || i != INTVAL (y))
3488 return 0;
3489 }
3490 return XEXP (trueop0, 0);
3491 }
3492
3493 return 0;
3494 case VEC_CONCAT:
3495 {
3496 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3497 ? GET_MODE (trueop0)
3498 : GET_MODE_INNER (mode));
3499 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3500 ? GET_MODE (trueop1)
3501 : GET_MODE_INNER (mode));
3502
3503 gcc_assert (VECTOR_MODE_P (mode));
3504 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3505 == GET_MODE_SIZE (mode));
3506
3507 if (VECTOR_MODE_P (op0_mode))
3508 gcc_assert (GET_MODE_INNER (mode)
3509 == GET_MODE_INNER (op0_mode));
3510 else
3511 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3512
3513 if (VECTOR_MODE_P (op1_mode))
3514 gcc_assert (GET_MODE_INNER (mode)
3515 == GET_MODE_INNER (op1_mode));
3516 else
3517 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3518
3519 if ((GET_CODE (trueop0) == CONST_VECTOR
3520 || CONST_SCALAR_INT_P (trueop0)
3521 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3522 && (GET_CODE (trueop1) == CONST_VECTOR
3523 || CONST_SCALAR_INT_P (trueop1)
3524 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3525 {
3526 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3527 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3528 rtvec v = rtvec_alloc (n_elts);
3529 unsigned int i;
3530 unsigned in_n_elts = 1;
3531
3532 if (VECTOR_MODE_P (op0_mode))
3533 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3534 for (i = 0; i < n_elts; i++)
3535 {
3536 if (i < in_n_elts)
3537 {
3538 if (!VECTOR_MODE_P (op0_mode))
3539 RTVEC_ELT (v, i) = trueop0;
3540 else
3541 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3542 }
3543 else
3544 {
3545 if (!VECTOR_MODE_P (op1_mode))
3546 RTVEC_ELT (v, i) = trueop1;
3547 else
3548 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3549 i - in_n_elts);
3550 }
3551 }
3552
3553 return gen_rtx_CONST_VECTOR (mode, v);
3554 }
3555
3556 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3557 Restrict the transformation to avoid generating a VEC_SELECT with a
3558 mode unrelated to its operand. */
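/* For example, (vec_concat (vec_select X (parallel [0 1]))
   (vec_select X (parallel [3 2]))) becomes
   (vec_select X (parallel [0 1 3 2])) when X already has mode MODE.  */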
3559 if (GET_CODE (trueop0) == VEC_SELECT
3560 && GET_CODE (trueop1) == VEC_SELECT
3561 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3562 && GET_MODE (XEXP (trueop0, 0)) == mode)
3563 {
3564 rtx par0 = XEXP (trueop0, 1);
3565 rtx par1 = XEXP (trueop1, 1);
3566 int len0 = XVECLEN (par0, 0);
3567 int len1 = XVECLEN (par1, 0);
3568 rtvec vec = rtvec_alloc (len0 + len1);
3569 for (int i = 0; i < len0; i++)
3570 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3571 for (int i = 0; i < len1; i++)
3572 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3573 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3574 gen_rtx_PARALLEL (VOIDmode, vec));
3575 }
3576 }
3577 return 0;
3578
3579 default:
3580 gcc_unreachable ();
3581 }
3582
3583 return 0;
3584 }
3585
3586 rtx
3587 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3588 rtx op0, rtx op1)
3589 {
3590 unsigned int width = GET_MODE_PRECISION (mode);
3591
3592 if (VECTOR_MODE_P (mode)
3593 && code != VEC_CONCAT
3594 && GET_CODE (op0) == CONST_VECTOR
3595 && GET_CODE (op1) == CONST_VECTOR)
3596 {
3597 unsigned n_elts = GET_MODE_NUNITS (mode);
3598 enum machine_mode op0mode = GET_MODE (op0);
3599 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3600 enum machine_mode op1mode = GET_MODE (op1);
3601 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3602 rtvec v = rtvec_alloc (n_elts);
3603 unsigned int i;
3604
3605 gcc_assert (op0_n_elts == n_elts);
3606 gcc_assert (op1_n_elts == n_elts);
3607 for (i = 0; i < n_elts; i++)
3608 {
3609 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3610 CONST_VECTOR_ELT (op0, i),
3611 CONST_VECTOR_ELT (op1, i));
3612 if (!x)
3613 return 0;
3614 RTVEC_ELT (v, i) = x;
3615 }
3616
3617 return gen_rtx_CONST_VECTOR (mode, v);
3618 }
3619
3620 if (VECTOR_MODE_P (mode)
3621 && code == VEC_CONCAT
3622 && (CONST_SCALAR_INT_P (op0)
3623 || GET_CODE (op0) == CONST_FIXED
3624 || CONST_DOUBLE_AS_FLOAT_P (op0))
3625 && (CONST_SCALAR_INT_P (op1)
3626 || CONST_DOUBLE_AS_FLOAT_P (op1)
3627 || GET_CODE (op1) == CONST_FIXED))
3628 {
3629 unsigned n_elts = GET_MODE_NUNITS (mode);
3630 rtvec v = rtvec_alloc (n_elts);
3631
3632 gcc_assert (n_elts >= 2);
3633 if (n_elts == 2)
3634 {
3635 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3636 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3637
3638 RTVEC_ELT (v, 0) = op0;
3639 RTVEC_ELT (v, 1) = op1;
3640 }
3641 else
3642 {
3643 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3644 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3645 unsigned i;
3646
3647 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3648 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3649 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3650
3651 for (i = 0; i < op0_n_elts; ++i)
3652 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3653 for (i = 0; i < op1_n_elts; ++i)
3654 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3655 }
3656
3657 return gen_rtx_CONST_VECTOR (mode, v);
3658 }
3659
3660 if (SCALAR_FLOAT_MODE_P (mode)
3661 && CONST_DOUBLE_AS_FLOAT_P (op0)
3662 && CONST_DOUBLE_AS_FLOAT_P (op1)
3663 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3664 {
3665 if (code == AND
3666 || code == IOR
3667 || code == XOR)
3668 {
3669 long tmp0[4];
3670 long tmp1[4];
3671 REAL_VALUE_TYPE r;
3672 int i;
3673
3674 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3675 GET_MODE (op0));
3676 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3677 GET_MODE (op1));
3678 for (i = 0; i < 4; i++)
3679 {
3680 switch (code)
3681 {
3682 case AND:
3683 tmp0[i] &= tmp1[i];
3684 break;
3685 case IOR:
3686 tmp0[i] |= tmp1[i];
3687 break;
3688 case XOR:
3689 tmp0[i] ^= tmp1[i];
3690 break;
3691 default:
3692 gcc_unreachable ();
3693 }
3694 }
3695 real_from_target (&r, tmp0, mode);
3696 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3697 }
3698 else
3699 {
3700 REAL_VALUE_TYPE f0, f1, value, result;
3701 bool inexact;
3702
3703 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3704 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3705 real_convert (&f0, mode, &f0);
3706 real_convert (&f1, mode, &f1);
3707
3708 if (HONOR_SNANS (mode)
3709 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3710 return 0;
3711
3712 if (code == DIV
3713 && REAL_VALUES_EQUAL (f1, dconst0)
3714 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3715 return 0;
3716
3717 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3718 && flag_trapping_math
3719 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3720 {
3721 int s0 = REAL_VALUE_NEGATIVE (f0);
3722 int s1 = REAL_VALUE_NEGATIVE (f1);
3723
3724 switch (code)
3725 {
3726 case PLUS:
3727 /* Inf + -Inf = NaN plus exception. */
3728 if (s0 != s1)
3729 return 0;
3730 break;
3731 case MINUS:
3732 /* Inf - Inf = NaN plus exception. */
3733 if (s0 == s1)
3734 return 0;
3735 break;
3736 case DIV:
3737 /* Inf / Inf = NaN plus exception. */
3738 return 0;
3739 default:
3740 break;
3741 }
3742 }
3743
3744 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3745 && flag_trapping_math
3746 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3747 || (REAL_VALUE_ISINF (f1)
3748 && REAL_VALUES_EQUAL (f0, dconst0))))
3749 /* Inf * 0 = NaN plus exception. */
3750 return 0;
3751
3752 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3753 &f0, &f1);
3754 real_convert (&result, mode, &value);
3755
3756 /* Don't constant fold this floating point operation if
3757 the result has overflowed and flag_trapping_math is set. */
3758
3759 if (flag_trapping_math
3760 && MODE_HAS_INFINITIES (mode)
3761 && REAL_VALUE_ISINF (result)
3762 && !REAL_VALUE_ISINF (f0)
3763 && !REAL_VALUE_ISINF (f1))
3764 /* Overflow plus exception. */
3765 return 0;
3766
3767 /* Don't constant fold this floating point operation if the
3768 result may depend upon the run-time rounding mode and
3769 flag_rounding_math is set, or if GCC's software emulation
3770 is unable to accurately represent the result. */
3771
3772 if ((flag_rounding_math
3773 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3774 && (inexact || !real_identical (&result, &value)))
3775 return NULL_RTX;
3776
3777 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3778 }
3779 }
3780
3781 /* We can fold some multi-word operations. */
3782 if ((GET_MODE_CLASS (mode) == MODE_INT
3783 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3784 && CONST_SCALAR_INT_P (op0)
3785 && CONST_SCALAR_INT_P (op1))
3786 {
3787 wide_int result;
3788 bool overflow;
3789 rtx_mode_t pop0 = std::make_pair (op0, mode);
3790 rtx_mode_t pop1 = std::make_pair (op1, mode);
3791
3792 #if TARGET_SUPPORTS_WIDE_INT == 0
3793 /* This assert keeps the simplification from producing a result
3794 that cannot be represented in a CONST_DOUBLE but a lot of
3795 upstream callers expect that this function never fails to
3796 simplify something, so if this check were added to the test
3797 above, the code would die later anyway. If this assert
3798 happens, you just need to make the port support wide int. */
3799 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3800 #endif
3801 switch (code)
3802 {
3803 case MINUS:
3804 result = wi::sub (pop0, pop1);
3805 break;
3806
3807 case PLUS:
3808 result = wi::add (pop0, pop1);
3809 break;
3810
3811 case MULT:
3812 result = wi::mul (pop0, pop1);
3813 break;
3814
3815 case DIV:
3816 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3817 if (overflow)
3818 return NULL_RTX;
3819 break;
3820
3821 case MOD:
3822 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3823 if (overflow)
3824 return NULL_RTX;
3825 break;
3826
3827 case UDIV:
3828 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3829 if (overflow)
3830 return NULL_RTX;
3831 break;
3832
3833 case UMOD:
3834 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3835 if (overflow)
3836 return NULL_RTX;
3837 break;
3838
3839 case AND:
3840 result = wi::bit_and (pop0, pop1);
3841 break;
3842
3843 case IOR:
3844 result = wi::bit_or (pop0, pop1);
3845 break;
3846
3847 case XOR:
3848 result = wi::bit_xor (pop0, pop1);
3849 break;
3850
3851 case SMIN:
3852 result = wi::smin (pop0, pop1);
3853 break;
3854
3855 case SMAX:
3856 result = wi::smax (pop0, pop1);
3857 break;
3858
3859 case UMIN:
3860 result = wi::umin (pop0, pop1);
3861 break;
3862
3863 case UMAX:
3864 result = wi::umax (pop0, pop1);
3865 break;
3866
3867 case LSHIFTRT:
3868 case ASHIFTRT:
3869 case ASHIFT:
3870 {
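/* Out-of-range shift counts are truncated modulo the operand width
   when SHIFT_COUNT_TRUNCATED; otherwise such a shift is not folded.  */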
3871 wide_int wop1 = pop1;
3872 if (SHIFT_COUNT_TRUNCATED)
3873 wop1 = wi::umod_trunc (wop1, width);
3874 else if (wi::geu_p (wop1, width))
3875 return NULL_RTX;
3876
3877 switch (code)
3878 {
3879 case LSHIFTRT:
3880 result = wi::lrshift (pop0, wop1);
3881 break;
3882
3883 case ASHIFTRT:
3884 result = wi::arshift (pop0, wop1);
3885 break;
3886
3887 case ASHIFT:
3888 result = wi::lshift (pop0, wop1);
3889 break;
3890
3891 default:
3892 gcc_unreachable ();
3893 }
3894 break;
3895 }
3896 case ROTATE:
3897 case ROTATERT:
3898 {
3899 if (wi::neg_p (pop1))
3900 return NULL_RTX;
3901
3902 switch (code)
3903 {
3904 case ROTATE:
3905 result = wi::lrotate (pop0, pop1);
3906 break;
3907
3908 case ROTATERT:
3909 result = wi::rrotate (pop0, pop1);
3910 break;
3911
3912 default:
3913 gcc_unreachable ();
3914 }
3915 break;
3916 }
3917 default:
3918 return NULL_RTX;
3919 }
3920 return immed_wide_int_const (result, mode);
3921 }
3922
3923 return NULL_RTX;
3924 }
3925
3926
3927 \f
3928 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3929 PLUS or MINUS.
3930
3931 Rather than test for specific cases, we do this by a brute-force method
3932 and do all possible simplifications until no more changes occur. Then
3933 we rebuild the operation. */
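/* For example, (plus (minus A B) (plus B C)) is expanded into the
   operand list {+A, -B, +B, +C}; the -B and +B entries cancel in the
   pairwise-simplification loop and the result is rebuilt as (plus A C).  */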
3934
3935 struct simplify_plus_minus_op_data
3936 {
3937 rtx op;
3938 short neg;
3939 };
3940
3941 static bool
3942 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3943 {
3944 int result;
3945
3946 result = (commutative_operand_precedence (y)
3947 - commutative_operand_precedence (x));
3948 if (result)
3949 return result > 0;
3950
3951 /* Group together equal REGs to do more simplification. */
3952 if (REG_P (x) && REG_P (y))
3953 return REGNO (x) > REGNO (y);
3954 else
3955 return false;
3956 }
3957
3958 static rtx
3959 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3960 rtx op1)
3961 {
3962 struct simplify_plus_minus_op_data ops[8];
3963 rtx result, tem;
3964 int n_ops = 2, input_ops = 2;
3965 int changed, n_constants = 0, canonicalized = 0;
3966 int i, j;
3967
3968 memset (ops, 0, sizeof ops);
3969
3970 /* Set up the two operands and then expand them until nothing has been
3971 changed. If we run out of room in our array, give up; this should
3972 almost never happen. */
3973
3974 ops[0].op = op0;
3975 ops[0].neg = 0;
3976 ops[1].op = op1;
3977 ops[1].neg = (code == MINUS);
3978
3979 do
3980 {
3981 changed = 0;
3982
3983 for (i = 0; i < n_ops; i++)
3984 {
3985 rtx this_op = ops[i].op;
3986 int this_neg = ops[i].neg;
3987 enum rtx_code this_code = GET_CODE (this_op);
3988
3989 switch (this_code)
3990 {
3991 case PLUS:
3992 case MINUS:
3993 if (n_ops == 7)
3994 return NULL_RTX;
3995
3996 ops[n_ops].op = XEXP (this_op, 1);
3997 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3998 n_ops++;
3999
4000 ops[i].op = XEXP (this_op, 0);
4001 input_ops++;
4002 changed = 1;
4003 canonicalized |= this_neg;
4004 break;
4005
4006 case NEG:
4007 ops[i].op = XEXP (this_op, 0);
4008 ops[i].neg = ! this_neg;
4009 changed = 1;
4010 canonicalized = 1;
4011 break;
4012
4013 case CONST:
4014 if (n_ops < 7
4015 && GET_CODE (XEXP (this_op, 0)) == PLUS
4016 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4017 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4018 {
4019 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4020 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4021 ops[n_ops].neg = this_neg;
4022 n_ops++;
4023 changed = 1;
4024 canonicalized = 1;
4025 }
4026 break;
4027
4028 case NOT:
4029 /* ~a -> (-a - 1) */
4030 if (n_ops != 7)
4031 {
4032 ops[n_ops].op = CONSTM1_RTX (mode);
4033 ops[n_ops++].neg = this_neg;
4034 ops[i].op = XEXP (this_op, 0);
4035 ops[i].neg = !this_neg;
4036 changed = 1;
4037 canonicalized = 1;
4038 }
4039 break;
4040
4041 case CONST_INT:
4042 n_constants++;
4043 if (this_neg)
4044 {
4045 ops[i].op = neg_const_int (mode, this_op);
4046 ops[i].neg = 0;
4047 changed = 1;
4048 canonicalized = 1;
4049 }
4050 break;
4051
4052 default:
4053 break;
4054 }
4055 }
4056 }
4057 while (changed);
4058
4059 if (n_constants > 1)
4060 canonicalized = 1;
4061
4062 gcc_assert (n_ops >= 2);
4063
4064 /* If we only have two operands, we can avoid the loops. */
4065 if (n_ops == 2)
4066 {
4067 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4068 rtx lhs, rhs;
4069
4070 /* Get the two operands. Be careful with the order, especially for
4071 the cases where code == MINUS. */
4072 if (ops[0].neg && ops[1].neg)
4073 {
4074 lhs = gen_rtx_NEG (mode, ops[0].op);
4075 rhs = ops[1].op;
4076 }
4077 else if (ops[0].neg)
4078 {
4079 lhs = ops[1].op;
4080 rhs = ops[0].op;
4081 }
4082 else
4083 {
4084 lhs = ops[0].op;
4085 rhs = ops[1].op;
4086 }
4087
4088 return simplify_const_binary_operation (code, mode, lhs, rhs);
4089 }
4090
4091 /* Now simplify each pair of operands until nothing changes. */
4092 do
4093 {
4094 /* Insertion sort is good enough for an eight-element array. */
4095 for (i = 1; i < n_ops; i++)
4096 {
4097 struct simplify_plus_minus_op_data save;
4098 j = i - 1;
4099 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4100 continue;
4101
4102 canonicalized = 1;
4103 save = ops[i];
4104 do
4105 ops[j + 1] = ops[j];
4106 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4107 ops[j + 1] = save;
4108 }
4109
4110 changed = 0;
4111 for (i = n_ops - 1; i > 0; i--)
4112 for (j = i - 1; j >= 0; j--)
4113 {
4114 rtx lhs = ops[j].op, rhs = ops[i].op;
4115 int lneg = ops[j].neg, rneg = ops[i].neg;
4116
4117 if (lhs != 0 && rhs != 0)
4118 {
4119 enum rtx_code ncode = PLUS;
4120
4121 if (lneg != rneg)
4122 {
4123 ncode = MINUS;
4124 if (lneg)
4125 tem = lhs, lhs = rhs, rhs = tem;
4126 }
4127 else if (swap_commutative_operands_p (lhs, rhs))
4128 tem = lhs, lhs = rhs, rhs = tem;
4129
4130 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4131 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4132 {
4133 rtx tem_lhs, tem_rhs;
4134
4135 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4136 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4137 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4138
4139 if (tem && !CONSTANT_P (tem))
4140 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4141 }
4142 else
4143 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4144
4145 /* Reject "simplifications" that just wrap the two
4146 arguments in a CONST. Failure to do so can result
4147 in infinite recursion with simplify_binary_operation
4148 when it calls us to simplify CONST operations. */
4149 if (tem
4150 && ! (GET_CODE (tem) == CONST
4151 && GET_CODE (XEXP (tem, 0)) == ncode
4152 && XEXP (XEXP (tem, 0), 0) == lhs
4153 && XEXP (XEXP (tem, 0), 1) == rhs))
4154 {
4155 lneg &= rneg;
4156 if (GET_CODE (tem) == NEG)
4157 tem = XEXP (tem, 0), lneg = !lneg;
4158 if (CONST_INT_P (tem) && lneg)
4159 tem = neg_const_int (mode, tem), lneg = 0;
4160
4161 ops[i].op = tem;
4162 ops[i].neg = lneg;
4163 ops[j].op = NULL_RTX;
4164 changed = 1;
4165 canonicalized = 1;
4166 }
4167 }
4168 }
4169
4170 /* If nothing changed, fail. */
4171 if (!canonicalized)
4172 return NULL_RTX;
4173
4174 /* Pack all the operands to the lower-numbered entries. */
4175 for (i = 0, j = 0; j < n_ops; j++)
4176 if (ops[j].op)
4177 {
4178 ops[i] = ops[j];
4179 i++;
4180 }
4181 n_ops = i;
4182 }
4183 while (changed);
4184
4185 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4186 if (n_ops == 2
4187 && CONST_INT_P (ops[1].op)
4188 && CONSTANT_P (ops[0].op)
4189 && ops[0].neg)
4190 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4191
4192 /* We suppressed creation of trivial CONST expressions in the
4193 combination loop to avoid recursion. Create one manually now.
4194 The combination loop should have ensured that there is exactly
4195 one CONST_INT, and the sort will have ensured that it is last
4196 in the array and that any other constant will be next-to-last. */
4197
4198 if (n_ops > 1
4199 && CONST_INT_P (ops[n_ops - 1].op)
4200 && CONSTANT_P (ops[n_ops - 2].op))
4201 {
4202 rtx value = ops[n_ops - 1].op;
4203 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4204 value = neg_const_int (mode, value);
4205 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4206 INTVAL (value));
4207 n_ops--;
4208 }
4209
4210 /* Put a non-negated operand first, if possible. */
4211
4212 for (i = 0; i < n_ops && ops[i].neg; i++)
4213 continue;
4214 if (i == n_ops)
4215 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4216 else if (i != 0)
4217 {
4218 tem = ops[0].op;
4219 ops[0] = ops[i];
4220 ops[i].op = tem;
4221 ops[i].neg = 1;
4222 }
4223
4224 /* Now make the result by performing the requested operations. */
4225 result = ops[0].op;
4226 for (i = 1; i < n_ops; i++)
4227 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4228 mode, result, ops[i].op);
4229
4230 return result;
4231 }
4232
4233 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4234 static bool
4235 plus_minus_operand_p (const_rtx x)
4236 {
4237 return GET_CODE (x) == PLUS
4238 || GET_CODE (x) == MINUS
4239 || (GET_CODE (x) == CONST
4240 && GET_CODE (XEXP (x, 0)) == PLUS
4241 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4242 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4243 }
4244
4245 /* Like simplify_binary_operation except used for relational operators.
4246 MODE is the mode of the result. If MODE is VOIDmode, the operands
4247 must not both be VOIDmode as well.
4248
4249 CMP_MODE specifies the mode in which the comparison is done, so it is
4250 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4251 the operands or, if both are VOIDmode, the operands are compared in
4252 "infinite precision". */
4253 rtx
4254 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4255 enum machine_mode cmp_mode, rtx op0, rtx op1)
4256 {
4257 rtx tem, trueop0, trueop1;
4258
4259 if (cmp_mode == VOIDmode)
4260 cmp_mode = GET_MODE (op0);
4261 if (cmp_mode == VOIDmode)
4262 cmp_mode = GET_MODE (op1);
4263
4264 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4265 if (tem)
4266 {
4267 if (SCALAR_FLOAT_MODE_P (mode))
4268 {
4269 if (tem == const0_rtx)
4270 return CONST0_RTX (mode);
4271 #ifdef FLOAT_STORE_FLAG_VALUE
4272 {
4273 REAL_VALUE_TYPE val;
4274 val = FLOAT_STORE_FLAG_VALUE (mode);
4275 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4276 }
4277 #else
4278 return NULL_RTX;
4279 #endif
4280 }
4281 if (VECTOR_MODE_P (mode))
4282 {
4283 if (tem == const0_rtx)
4284 return CONST0_RTX (mode);
4285 #ifdef VECTOR_STORE_FLAG_VALUE
4286 {
4287 int i, units;
4288 rtvec v;
4289
4290 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4291 if (val == NULL_RTX)
4292 return NULL_RTX;
4293 if (val == const1_rtx)
4294 return CONST1_RTX (mode);
4295
4296 units = GET_MODE_NUNITS (mode);
4297 v = rtvec_alloc (units);
4298 for (i = 0; i < units; i++)
4299 RTVEC_ELT (v, i) = val;
4300 return gen_rtx_raw_CONST_VECTOR (mode, v);
4301 }
4302 #else
4303 return NULL_RTX;
4304 #endif
4305 }
4306
4307 return tem;
4308 }
4309
4310 /* For the following tests, ensure const0_rtx is op1. */
4311 if (swap_commutative_operands_p (op0, op1)
4312 || (op0 == const0_rtx && op1 != const0_rtx))
4313 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4314
4315 /* If op0 is a compare, extract the comparison arguments from it. */
4316 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4317 return simplify_gen_relational (code, mode, VOIDmode,
4318 XEXP (op0, 0), XEXP (op0, 1));
4319
4320 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4321 || CC0_P (op0))
4322 return NULL_RTX;
4323
4324 trueop0 = avoid_constant_pool_reference (op0);
4325 trueop1 = avoid_constant_pool_reference (op1);
4326 return simplify_relational_operation_1 (code, mode, cmp_mode,
4327 trueop0, trueop1);
4328 }
4329
4330 /* This part of simplify_relational_operation is only used when CMP_MODE
4331 is not in class MODE_CC (i.e. it is a real comparison).
4332
4333 MODE is the mode of the result, while CMP_MODE specifies the mode
4334 in which the comparison is done, so it is the mode of the operands. */
4335
4336 static rtx
4337 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4338 enum machine_mode cmp_mode, rtx op0, rtx op1)
4339 {
4340 enum rtx_code op0code = GET_CODE (op0);
4341
4342 if (op1 == const0_rtx && COMPARISON_P (op0))
4343 {
4344 /* If op0 is a comparison, extract the comparison arguments
4345 from it. */
4346 if (code == NE)
4347 {
4348 if (GET_MODE (op0) == mode)
4349 return simplify_rtx (op0);
4350 else
4351 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4352 XEXP (op0, 0), XEXP (op0, 1));
4353 }
4354 else if (code == EQ)
4355 {
4356 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4357 if (new_code != UNKNOWN)
4358 return simplify_gen_relational (new_code, mode, VOIDmode,
4359 XEXP (op0, 0), XEXP (op0, 1));
4360 }
4361 }
4362
4363 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4364 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4365 if ((code == LTU || code == GEU)
4366 && GET_CODE (op0) == PLUS
4367 && CONST_INT_P (XEXP (op0, 1))
4368 && (rtx_equal_p (op1, XEXP (op0, 0))
4369 || rtx_equal_p (op1, XEXP (op0, 1)))
4370 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4371 && XEXP (op0, 1) != const0_rtx)
4372 {
4373 rtx new_cmp
4374 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4375 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4376 cmp_mode, XEXP (op0, 0), new_cmp);
4377 }
4378
4379 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4380 if ((code == LTU || code == GEU)
4381 && GET_CODE (op0) == PLUS
4382 && rtx_equal_p (op1, XEXP (op0, 1))
4383 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4384 && !rtx_equal_p (op1, XEXP (op0, 0)))
4385 return simplify_gen_relational (code, mode, cmp_mode, op0,
4386 copy_rtx (XEXP (op0, 0)));
4387
4388 if (op1 == const0_rtx)
4389 {
4390 /* Canonicalize (GTU x 0) as (NE x 0). */
4391 if (code == GTU)
4392 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4393 /* Canonicalize (LEU x 0) as (EQ x 0). */
4394 if (code == LEU)
4395 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4396 }
4397 else if (op1 == const1_rtx)
4398 {
4399 switch (code)
4400 {
4401 case GE:
4402 /* Canonicalize (GE x 1) as (GT x 0). */
4403 return simplify_gen_relational (GT, mode, cmp_mode,
4404 op0, const0_rtx);
4405 case GEU:
4406 /* Canonicalize (GEU x 1) as (NE x 0). */
4407 return simplify_gen_relational (NE, mode, cmp_mode,
4408 op0, const0_rtx);
4409 case LT:
4410 /* Canonicalize (LT x 1) as (LE x 0). */
4411 return simplify_gen_relational (LE, mode, cmp_mode,
4412 op0, const0_rtx);
4413 case LTU:
4414 /* Canonicalize (LTU x 1) as (EQ x 0). */
4415 return simplify_gen_relational (EQ, mode, cmp_mode,
4416 op0, const0_rtx);
4417 default:
4418 break;
4419 }
4420 }
4421 else if (op1 == constm1_rtx)
4422 {
4423 /* Canonicalize (LE x -1) as (LT x 0). */
4424 if (code == LE)
4425 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4426 /* Canonicalize (GT x -1) as (GE x 0). */
4427 if (code == GT)
4428 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4429 }
4430
4431 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4432 if ((code == EQ || code == NE)
4433 && (op0code == PLUS || op0code == MINUS)
4434 && CONSTANT_P (op1)
4435 && CONSTANT_P (XEXP (op0, 1))
4436 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4437 {
4438 rtx x = XEXP (op0, 0);
4439 rtx c = XEXP (op0, 1);
4440 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4441 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4442
4443 /* Detect an infinite recursive condition, where we oscillate at this
4444 simplification case between:
4445 A + B == C <---> C - B == A,
4446 where A, B, and C are all constants with non-simplifiable expressions,
4447 usually SYMBOL_REFs. */
4448 if (GET_CODE (tem) == invcode
4449 && CONSTANT_P (x)
4450 && rtx_equal_p (c, XEXP (tem, 1)))
4451 return NULL_RTX;
4452
4453 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4454 }
4455
4456 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4457 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4458 if (code == NE
4459 && op1 == const0_rtx
4460 && GET_MODE_CLASS (mode) == MODE_INT
4461 && cmp_mode != VOIDmode
4462 /* ??? Work-around BImode bugs in the ia64 backend. */
4463 && mode != BImode
4464 && cmp_mode != BImode
4465 && nonzero_bits (op0, cmp_mode) == 1
4466 && STORE_FLAG_VALUE == 1)
4467 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4468 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4469 : lowpart_subreg (mode, op0, cmp_mode);
4470
4471 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4472 if ((code == EQ || code == NE)
4473 && op1 == const0_rtx
4474 && op0code == XOR)
4475 return simplify_gen_relational (code, mode, cmp_mode,
4476 XEXP (op0, 0), XEXP (op0, 1));
4477
4478 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4479 if ((code == EQ || code == NE)
4480 && op0code == XOR
4481 && rtx_equal_p (XEXP (op0, 0), op1)
4482 && !side_effects_p (XEXP (op0, 0)))
4483 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4484 CONST0_RTX (mode));
4485
4486 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4487 if ((code == EQ || code == NE)
4488 && op0code == XOR
4489 && rtx_equal_p (XEXP (op0, 1), op1)
4490 && !side_effects_p (XEXP (op0, 1)))
4491 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4492 CONST0_RTX (mode));
4493
4494 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4495 if ((code == EQ || code == NE)
4496 && op0code == XOR
4497 && CONST_SCALAR_INT_P (op1)
4498 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4499 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4500 simplify_gen_binary (XOR, cmp_mode,
4501 XEXP (op0, 1), op1));
4502
4503 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2), where C2 is C1 byte-swapped. */
4504 if ((code == EQ || code == NE)
4505 && GET_CODE (op0) == BSWAP
4506 && CONST_SCALAR_INT_P (op1))
4507 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4508 simplify_gen_unary (BSWAP, cmp_mode,
4509 op1, cmp_mode));
4510
4511 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4512 if ((code == EQ || code == NE)
4513 && GET_CODE (op0) == BSWAP
4514 && GET_CODE (op1) == BSWAP)
4515 return simplify_gen_relational (code, mode, cmp_mode,
4516 XEXP (op0, 0), XEXP (op1, 0));
4517
4518 if (op0code == POPCOUNT && op1 == const0_rtx)
4519 switch (code)
4520 {
4521 case EQ:
4522 case LE:
4523 case LEU:
4524 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4525 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4526 XEXP (op0, 0), const0_rtx);
4527
4528 case NE:
4529 case GT:
4530 case GTU:
4531 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4532 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4533 XEXP (op0, 0), const0_rtx);
4534
4535 default:
4536 break;
4537 }
4538
4539 return NULL_RTX;
4540 }
4541
4542 enum
4543 {
4544 CMP_EQ = 1,
4545 CMP_LT = 2,
4546 CMP_GT = 4,
4547 CMP_LTU = 8,
4548 CMP_GTU = 16
4549 };
4550
4551
4552 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4553 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4554 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4555 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4556 For floating-point comparisons, assume that the operands were ordered. */
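/* For example, KNOWN_RESULTS of CMP_LT | CMP_GTU (as for -1 compared
   with 1: less when signed, greater when unsigned) makes LT and GTU
   return const_true_rtx while GT and LTU return const0_rtx.  */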
4557
4558 static rtx
4559 comparison_result (enum rtx_code code, int known_results)
4560 {
4561 switch (code)
4562 {
4563 case EQ:
4564 case UNEQ:
4565 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4566 case NE:
4567 case LTGT:
4568 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4569
4570 case LT:
4571 case UNLT:
4572 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4573 case GE:
4574 case UNGE:
4575 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4576
4577 case GT:
4578 case UNGT:
4579 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4580 case LE:
4581 case UNLE:
4582 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4583
4584 case LTU:
4585 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4586 case GEU:
4587 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4588
4589 case GTU:
4590 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4591 case LEU:
4592 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4593
4594 case ORDERED:
4595 return const_true_rtx;
4596 case UNORDERED:
4597 return const0_rtx;
4598 default:
4599 gcc_unreachable ();
4600 }
4601 }
4602
4603 /* Check if the given comparison (done in the given MODE) is actually
4604 a tautology or a contradiction. If the mode is VOIDmode, the
4605 comparison is done in "infinite precision". If no simplification
4606 is possible, this function returns zero. Otherwise, it returns
4607 either const_true_rtx or const0_rtx. */
4608
4609 rtx
4610 simplify_const_relational_operation (enum rtx_code code,
4611 enum machine_mode mode,
4612 rtx op0, rtx op1)
4613 {
4614 rtx tem;
4615 rtx trueop0;
4616 rtx trueop1;
4617
4618 gcc_assert (mode != VOIDmode
4619 || (GET_MODE (op0) == VOIDmode
4620 && GET_MODE (op1) == VOIDmode));
4621
4622 /* If op0 is a compare, extract the comparison arguments from it. */
4623 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4624 {
4625 op1 = XEXP (op0, 1);
4626 op0 = XEXP (op0, 0);
4627
4628 if (GET_MODE (op0) != VOIDmode)
4629 mode = GET_MODE (op0);
4630 else if (GET_MODE (op1) != VOIDmode)
4631 mode = GET_MODE (op1);
4632 else
4633 return 0;
4634 }
4635
4636 /* We can't simplify MODE_CC values since we don't know what the
4637 actual comparison is. */
4638 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4639 return 0;
4640
4641 /* Make sure the constant is second. */
4642 if (swap_commutative_operands_p (op0, op1))
4643 {
4644 tem = op0, op0 = op1, op1 = tem;
4645 code = swap_condition (code);
4646 }
4647
4648 trueop0 = avoid_constant_pool_reference (op0);
4649 trueop1 = avoid_constant_pool_reference (op1);
4650
4651 /* For integer comparisons of A and B maybe we can simplify A - B and can
4652 then simplify a comparison of that with zero. If A and B are both either
4653 a register or a CONST_INT, this can't help; testing for these cases will
4654 prevent infinite recursion here and speed things up.
4655
4656 We can only do this for EQ and NE comparisons, as otherwise we may
4657 lose or introduce overflow that we cannot disregard as undefined,
4658 since we do not know the signedness of the operation on either the
4659 left or the right hand side of the comparison. */
4660
4661 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4662 && (code == EQ || code == NE)
4663 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4664 && (REG_P (op1) || CONST_INT_P (trueop1)))
4665 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4666 /* We cannot do this if tem is a nonzero address. */
4667 && ! nonzero_address_p (tem))
4668 return simplify_const_relational_operation (signed_condition (code),
4669 mode, tem, const0_rtx);
4670
4671 if (! HONOR_NANS (mode) && code == ORDERED)
4672 return const_true_rtx;
4673
4674 if (! HONOR_NANS (mode) && code == UNORDERED)
4675 return const0_rtx;
4676
4677 /* For modes without NaNs, if the two operands are equal, we know the
4678 result except if they have side-effects. Even with NaNs we know
4679 the result of unordered comparisons and, if signaling NaNs are
4680 irrelevant, also the result of LT/GT/LTGT. */
4681 if ((! HONOR_NANS (GET_MODE (trueop0))
4682 || code == UNEQ || code == UNLE || code == UNGE
4683 || ((code == LT || code == GT || code == LTGT)
4684 && ! HONOR_SNANS (GET_MODE (trueop0))))
4685 && rtx_equal_p (trueop0, trueop1)
4686 && ! side_effects_p (trueop0))
4687 return comparison_result (code, CMP_EQ);
4688
4689 /* If the operands are floating-point constants, see if we can fold
4690 the result. */
4691 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4692 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4693 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4694 {
4695 REAL_VALUE_TYPE d0, d1;
4696
4697 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4698 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4699
4700 /* Comparisons are unordered iff at least one of the values is NaN. */
4701 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4702 switch (code)
4703 {
4704 case UNEQ:
4705 case UNLT:
4706 case UNGT:
4707 case UNLE:
4708 case UNGE:
4709 case NE:
4710 case UNORDERED:
4711 return const_true_rtx;
4712 case EQ:
4713 case LT:
4714 case GT:
4715 case LE:
4716 case GE:
4717 case LTGT:
4718 case ORDERED:
4719 return const0_rtx;
4720 default:
4721 return 0;
4722 }
4723
4724 return comparison_result (code,
4725 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4726 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4727 }
4728
4729 /* Otherwise, see if the operands are both integers. */
4730 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4731 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4732 {
4733 /* It would be nice if we really had a mode here. However, the
4734 largest int representable on the target is as good as
4735 infinite. */
4736 enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4737 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4738 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4739
4740 if (wi::eq_p (ptrueop0, ptrueop1))
4741 return comparison_result (code, CMP_EQ);
4742 else
4743 {
4744 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4745 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4746 return comparison_result (code, cr);
4747 }
4748 }
4749
4750 /* Optimize comparisons with upper and lower bounds. */
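/* nonzero_bits and num_sign_bit_copies give conservative bounds
   [MMIN, MMAX] for TRUEOP0, so comparing against a constant outside
   that range has a known result.  For example, if only the low eight
   bits of TRUEOP0 can be nonzero, (gtu x (const_int 255)) is false.  */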
4751 if (HWI_COMPUTABLE_MODE_P (mode)
4752 && CONST_INT_P (trueop1))
4753 {
4754 int sign;
4755 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4756 HOST_WIDE_INT val = INTVAL (trueop1);
4757 HOST_WIDE_INT mmin, mmax;
4758
4759 if (code == GEU
4760 || code == LEU
4761 || code == GTU
4762 || code == LTU)
4763 sign = 0;
4764 else
4765 sign = 1;
4766
4767 /* Get a reduced range if the sign bit is zero. */
4768 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4769 {
4770 mmin = 0;
4771 mmax = nonzero;
4772 }
4773 else
4774 {
4775 rtx mmin_rtx, mmax_rtx;
4776 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4777
4778 mmin = INTVAL (mmin_rtx);
4779 mmax = INTVAL (mmax_rtx);
4780 if (sign)
4781 {
4782 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4783
4784 mmin >>= (sign_copies - 1);
4785 mmax >>= (sign_copies - 1);
4786 }
4787 }
4788
4789 switch (code)
4790 {
4791 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4792 case GEU:
4793 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4794 return const_true_rtx;
4795 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4796 return const0_rtx;
4797 break;
4798 case GE:
4799 if (val <= mmin)
4800 return const_true_rtx;
4801 if (val > mmax)
4802 return const0_rtx;
4803 break;
4804
4805 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4806 case LEU:
4807 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4808 return const_true_rtx;
4809 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4810 return const0_rtx;
4811 break;
4812 case LE:
4813 if (val >= mmax)
4814 return const_true_rtx;
4815 if (val < mmin)
4816 return const0_rtx;
4817 break;
4818
4819 case EQ:
4820 /* x == y is always false for y out of range. */
4821 if (val < mmin || val > mmax)
4822 return const0_rtx;
4823 break;
4824
4825 /* x > y is always false for y >= mmax, always true for y < mmin. */
4826 case GTU:
4827 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4828 return const0_rtx;
4829 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4830 return const_true_rtx;
4831 break;
4832 case GT:
4833 if (val >= mmax)
4834 return const0_rtx;
4835 if (val < mmin)
4836 return const_true_rtx;
4837 break;
4838
4839 /* x < y is always false for y <= mmin, always true for y > mmax. */
4840 case LTU:
4841 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4842 return const0_rtx;
4843 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4844 return const_true_rtx;
4845 break;
4846 case LT:
4847 if (val <= mmin)
4848 return const0_rtx;
4849 if (val > mmax)
4850 return const_true_rtx;
4851 break;
4852
4853 case NE:
4854 /* x != y is always true for y out of range. */
4855 if (val < mmin || val > mmax)
4856 return const_true_rtx;
4857 break;
4858
4859 default:
4860 break;
4861 }
4862 }
4863
4864 /* Optimize integer comparisons with zero. */
4865 if (trueop1 == const0_rtx)
4866 {
4867 /* Some addresses are known to be nonzero. We don't know
4868 their sign, but equality comparisons are known. */
4869 if (nonzero_address_p (trueop0))
4870 {
4871 if (code == EQ || code == LEU)
4872 return const0_rtx;
4873 if (code == NE || code == GTU)
4874 return const_true_rtx;
4875 }
4876
4877 /* See if the first operand is an IOR with a constant. If so, we
4878 may be able to determine the result of this comparison. */
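/* For example, (ior x (const_int 4)) can never be zero, so (eq ... 0)
   folds to const0_rtx and (ne ... 0) to const_true_rtx; if the constant
   has the sign bit set, the signed comparisons against zero are known
   as well.  */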
4879 if (GET_CODE (op0) == IOR)
4880 {
4881 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4882 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4883 {
4884 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4885 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4886 && (UINTVAL (inner_const)
4887 & ((unsigned HOST_WIDE_INT) 1
4888 << sign_bitnum)));
4889
4890 switch (code)
4891 {
4892 case EQ:
4893 case LEU:
4894 return const0_rtx;
4895 case NE:
4896 case GTU:
4897 return const_true_rtx;
4898 case LT:
4899 case LE:
4900 if (has_sign)
4901 return const_true_rtx;
4902 break;
4903 case GT:
4904 case GE:
4905 if (has_sign)
4906 return const0_rtx;
4907 break;
4908 default:
4909 break;
4910 }
4911 }
4912 }
4913 }
4914
4915 /* Optimize comparison of ABS with zero. */
4916 if (trueop1 == CONST0_RTX (mode)
4917 && (GET_CODE (trueop0) == ABS
4918 || (GET_CODE (trueop0) == FLOAT_EXTEND
4919 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4920 {
4921 switch (code)
4922 {
4923 case LT:
4924 /* Optimize abs(x) < 0.0. */
4925 if (!HONOR_SNANS (mode)
4926 && (!INTEGRAL_MODE_P (mode)
4927 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4928 {
4929 if (INTEGRAL_MODE_P (mode)
4930 && (issue_strict_overflow_warning
4931 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4932 warning (OPT_Wstrict_overflow,
4933 ("assuming signed overflow does not occur when "
4934 "assuming abs (x) < 0 is false"));
4935 return const0_rtx;
4936 }
4937 break;
4938
4939 case GE:
4940 /* Optimize abs(x) >= 0.0. */
4941 if (!HONOR_NANS (mode)
4942 && (!INTEGRAL_MODE_P (mode)
4943 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4944 {
4945 if (INTEGRAL_MODE_P (mode)
4946 && (issue_strict_overflow_warning
4947 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4948 warning (OPT_Wstrict_overflow,
4949 ("assuming signed overflow does not occur when "
4950 "assuming abs (x) >= 0 is true"));
4951 return const_true_rtx;
4952 }
4953 break;
4954
4955 case UNGE:
4956 /* Optimize ! (abs(x) < 0.0). */
4957 return const_true_rtx;
4958
4959 default:
4960 break;
4961 }
4962 }
4963
4964 return 0;
4965 }
4966 \f
4967 /* Simplify CODE, an operation with result mode MODE and three operands,
4968 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4969 a constant. Return 0 if no simplification is possible. */
4970
4971 rtx
4972 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4973 enum machine_mode op0_mode, rtx op0, rtx op1,
4974 rtx op2)
4975 {
4976 unsigned int width = GET_MODE_PRECISION (mode);
4977 bool any_change = false;
4978 rtx tem, trueop2;
4979
4980 /* VOIDmode means "infinite" precision. */
4981 if (width == 0)
4982 width = HOST_BITS_PER_WIDE_INT;
4983
4984 switch (code)
4985 {
4986 case FMA:
4987 /* Simplify negations around the multiplication. */
4988 /* -a * -b + c => a * b + c. */
4989 if (GET_CODE (op0) == NEG)
4990 {
4991 tem = simplify_unary_operation (NEG, mode, op1, mode);
4992 if (tem)
4993 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4994 }
4995 else if (GET_CODE (op1) == NEG)
4996 {
4997 tem = simplify_unary_operation (NEG, mode, op0, mode);
4998 if (tem)
4999 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5000 }
5001
5002 /* Canonicalize the two multiplication operands. */
5003 /* a * -b + c => -b * a + c. */
5004 if (swap_commutative_operands_p (op0, op1))
5005 tem = op0, op0 = op1, op1 = tem, any_change = true;
5006
5007 if (any_change)
5008 return gen_rtx_FMA (mode, op0, op1, op2);
5009 return NULL_RTX;
5010
5011 case SIGN_EXTRACT:
5012 case ZERO_EXTRACT:
5013 if (CONST_INT_P (op0)
5014 && CONST_INT_P (op1)
5015 && CONST_INT_P (op2)
5016 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5017 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5018 {
5019 /* Extracting a bit-field from a constant. */
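/* For example, with !BITS_BIG_ENDIAN,
   (zero_extract (const_int 0xab) (const_int 4) (const_int 4))
   extracts bits 4..7 and yields (const_int 0xa).  */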
5020 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5021 HOST_WIDE_INT op1val = INTVAL (op1);
5022 HOST_WIDE_INT op2val = INTVAL (op2);
5023 if (BITS_BIG_ENDIAN)
5024 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5025 else
5026 val >>= op2val;
5027
5028 if (HOST_BITS_PER_WIDE_INT != op1val)
5029 {
5030 /* First zero-extend. */
5031 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5032 /* If desired, propagate sign bit. */
5033 if (code == SIGN_EXTRACT
5034 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5035 != 0)
5036 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5037 }
5038
5039 return gen_int_mode (val, mode);
5040 }
5041 break;
5042
5043 case IF_THEN_ELSE:
5044 if (CONST_INT_P (op0))
5045 return op0 != const0_rtx ? op1 : op2;
5046
5047 /* Convert c ? a : a into "a". */
5048 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5049 return op1;
5050
5051 /* Convert a != b ? a : b into "a". */
5052 if (GET_CODE (op0) == NE
5053 && ! side_effects_p (op0)
5054 && ! HONOR_NANS (mode)
5055 && ! HONOR_SIGNED_ZEROS (mode)
5056 && ((rtx_equal_p (XEXP (op0, 0), op1)
5057 && rtx_equal_p (XEXP (op0, 1), op2))
5058 || (rtx_equal_p (XEXP (op0, 0), op2)
5059 && rtx_equal_p (XEXP (op0, 1), op1))))
5060 return op1;
5061
5062 /* Convert a == b ? a : b into "b". */
5063 if (GET_CODE (op0) == EQ
5064 && ! side_effects_p (op0)
5065 && ! HONOR_NANS (mode)
5066 && ! HONOR_SIGNED_ZEROS (mode)
5067 && ((rtx_equal_p (XEXP (op0, 0), op1)
5068 && rtx_equal_p (XEXP (op0, 1), op2))
5069 || (rtx_equal_p (XEXP (op0, 0), op2)
5070 && rtx_equal_p (XEXP (op0, 1), op1))))
5071 return op2;
5072
5073 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5074 {
5075 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5076 ? GET_MODE (XEXP (op0, 1))
5077 : GET_MODE (XEXP (op0, 0)));
5078 rtx temp;
5079
5080 /* Look for constants in op1 and op2 that encode the comparison result directly (STORE_FLAG_VALUE and zero). */
5081 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5082 {
5083 HOST_WIDE_INT t = INTVAL (op1);
5084 HOST_WIDE_INT f = INTVAL (op2);
5085
5086 if (t == STORE_FLAG_VALUE && f == 0)
5087 code = GET_CODE (op0);
5088 else if (t == 0 && f == STORE_FLAG_VALUE)
5089 {
5090 enum rtx_code tmp;
5091 tmp = reversed_comparison_code (op0, NULL_RTX);
5092 if (tmp == UNKNOWN)
5093 break;
5094 code = tmp;
5095 }
5096 else
5097 break;
5098
5099 return simplify_gen_relational (code, mode, cmp_mode,
5100 XEXP (op0, 0), XEXP (op0, 1));
5101 }
5102
5103 if (cmp_mode == VOIDmode)
5104 cmp_mode = op0_mode;
5105 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5106 cmp_mode, XEXP (op0, 0),
5107 XEXP (op0, 1));
5108
5109 /* See if any simplifications were possible. */
5110 if (temp)
5111 {
5112 if (CONST_INT_P (temp))
5113 return temp == const0_rtx ? op2 : op1;
5114 else if (temp)
5115 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5116 }
5117 }
5118 break;
5119
5120 case VEC_MERGE:
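/* (vec_merge A B MASK) takes element I from A when bit I of MASK is
   set and from B otherwise, so a constant MASK lets several cases
   fold outright.  */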
5121 gcc_assert (GET_MODE (op0) == mode);
5122 gcc_assert (GET_MODE (op1) == mode);
5123 gcc_assert (VECTOR_MODE_P (mode));
5124 trueop2 = avoid_constant_pool_reference (op2);
5125 if (CONST_INT_P (trueop2))
5126 {
5127 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5128 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5129 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5130 unsigned HOST_WIDE_INT mask;
5131 if (n_elts == HOST_BITS_PER_WIDE_INT)
5132 mask = -1;
5133 else
5134 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5135
5136 if (!(sel & mask) && !side_effects_p (op0))
5137 return op1;
5138 if ((sel & mask) == mask && !side_effects_p (op1))
5139 return op0;
5140
5141 rtx trueop0 = avoid_constant_pool_reference (op0);
5142 rtx trueop1 = avoid_constant_pool_reference (op1);
5143 if (GET_CODE (trueop0) == CONST_VECTOR
5144 && GET_CODE (trueop1) == CONST_VECTOR)
5145 {
5146 rtvec v = rtvec_alloc (n_elts);
5147 unsigned int i;
5148
5149 for (i = 0; i < n_elts; i++)
5150 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5151 ? CONST_VECTOR_ELT (trueop0, i)
5152 : CONST_VECTOR_ELT (trueop1, i));
5153 return gen_rtx_CONST_VECTOR (mode, v);
5154 }
5155
5156 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5157 if no element from a appears in the result. */
5158 if (GET_CODE (op0) == VEC_MERGE)
5159 {
5160 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5161 if (CONST_INT_P (tem))
5162 {
5163 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5164 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5165 return simplify_gen_ternary (code, mode, mode,
5166 XEXP (op0, 1), op1, op2);
5167 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5168 return simplify_gen_ternary (code, mode, mode,
5169 XEXP (op0, 0), op1, op2);
5170 }
5171 }
5172 if (GET_CODE (op1) == VEC_MERGE)
5173 {
5174 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5175 if (CONST_INT_P (tem))
5176 {
5177 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5178 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5179 return simplify_gen_ternary (code, mode, mode,
5180 op0, XEXP (op1, 1), op2);
5181 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5182 return simplify_gen_ternary (code, mode, mode,
5183 op0, XEXP (op1, 0), op2);
5184 }
5185 }
5186 }
5187
5188 if (rtx_equal_p (op0, op1)
5189 && !side_effects_p (op2) && !side_effects_p (op1))
5190 return op0;
5191
5192 break;
5193
5194 default:
5195 gcc_unreachable ();
5196 }
5197
5198 return 0;
5199 }
5200
5201 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5202 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5203 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5204
5205 Works by unpacking OP into a collection of 8-bit values
5206 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5207 and then repacking them again for OUTERMODE. */
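/* For example, a QImode SUBREG of (const_int 0x1234) at byte 0 on a
   little-endian target unpacks the value into the bytes {0x34, 0x12},
   selects byte 0, and repacks it as (const_int 0x34).  */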
5208
5209 static rtx
5210 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5211 enum machine_mode innermode, unsigned int byte)
5212 {
5213 enum {
5214 value_bit = 8,
5215 value_mask = (1 << value_bit) - 1
5216 };
5217 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5218 int value_start;
5219 int i;
5220 int elem;
5221
5222 int num_elem;
5223 rtx * elems;
5224 int elem_bitsize;
5225 rtx result_s;
5226 rtvec result_v = NULL;
5227 enum mode_class outer_class;
5228 enum machine_mode outer_submode;
5229 int max_bitsize;
5230
5231 /* Some ports misuse CCmode. */
5232 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5233 return op;
5234
5235 /* We have no way to represent a complex constant at the rtl level. */
5236 if (COMPLEX_MODE_P (outermode))
5237 return NULL_RTX;
5238
5239 /* We support any size mode. */
5240 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5241 GET_MODE_BITSIZE (innermode));
5242
5243 /* Unpack the value. */
5244
5245 if (GET_CODE (op) == CONST_VECTOR)
5246 {
5247 num_elem = CONST_VECTOR_NUNITS (op);
5248 elems = &CONST_VECTOR_ELT (op, 0);
5249 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5250 }
5251 else
5252 {
5253 num_elem = 1;
5254 elems = &op;
5255 elem_bitsize = max_bitsize;
5256 }
5257 /* If this asserts, it is too complicated; reducing value_bit may help. */
5258 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5259 /* I don't know how to handle endianness of sub-units. */
5260 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5261
5262 for (elem = 0; elem < num_elem; elem++)
5263 {
5264 unsigned char * vp;
5265 rtx el = elems[elem];
5266
5267 /* Vectors are kept in target memory order. (This is probably
5268 a mistake.) */
5269 {
5270 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5271 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5272 / BITS_PER_UNIT);
5273 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5274 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5275 unsigned bytele = (subword_byte % UNITS_PER_WORD
5276 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5277 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5278 }
5279
5280 switch (GET_CODE (el))
5281 {
5282 case CONST_INT:
5283 for (i = 0;
5284 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5285 i += value_bit)
5286 *vp++ = INTVAL (el) >> i;
5287 /* CONST_INTs are always logically sign-extended. */
5288 for (; i < elem_bitsize; i += value_bit)
5289 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5290 break;
5291
5292 case CONST_WIDE_INT:
5293 {
5294 rtx_mode_t val = std::make_pair (el, innermode);
5295 unsigned char extend = wi::sign_mask (val);
5296
5297 for (i = 0; i < elem_bitsize; i += value_bit)
5298 *vp++ = wi::extract_uhwi (val, i, value_bit);
5299 for (; i < elem_bitsize; i += value_bit)
5300 *vp++ = extend;
5301 }
5302 break;
5303
5304 case CONST_DOUBLE:
5305 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5306 {
5307 unsigned char extend = 0;
5308 /* If this triggers, someone should have generated a
5309 CONST_INT instead. */
5310 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5311
5312 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5313 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5314 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5315 {
5316 *vp++
5317 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5318 i += value_bit;
5319 }
5320
5321 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5322 extend = -1;
5323 for (; i < elem_bitsize; i += value_bit)
5324 *vp++ = extend;
5325 }
5326 else
5327 {
5328 /* This is big enough for anything on the platform. */
5329 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5330 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5331
5332 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5333 gcc_assert (bitsize <= elem_bitsize);
5334 gcc_assert (bitsize % value_bit == 0);
5335
5336 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5337 GET_MODE (el));
5338
5339 /* real_to_target produces its result in words affected by
5340 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5341 and use WORDS_BIG_ENDIAN instead; see the documentation
5342 of SUBREG in rtl.texi. */
5343 for (i = 0; i < bitsize; i += value_bit)
5344 {
5345 int ibase;
5346 if (WORDS_BIG_ENDIAN)
5347 ibase = bitsize - 1 - i;
5348 else
5349 ibase = i;
5350 *vp++ = tmp[ibase / 32] >> i % 32;
5351 }
5352
5353 /* It shouldn't matter what's done here, so fill it with
5354 zero. */
5355 for (; i < elem_bitsize; i += value_bit)
5356 *vp++ = 0;
5357 }
5358 break;
5359
5360 case CONST_FIXED:
5361 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5362 {
5363 for (i = 0; i < elem_bitsize; i += value_bit)
5364 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5365 }
5366 else
5367 {
5368 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5369 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5370 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5371 i += value_bit)
5372 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5373 >> (i - HOST_BITS_PER_WIDE_INT);
5374 for (; i < elem_bitsize; i += value_bit)
5375 *vp++ = 0;
5376 }
5377 break;
5378
5379 default:
5380 gcc_unreachable ();
5381 }
5382 }
5383
5384 /* Now, pick the right byte to start with. */
5385 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5386 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5387 will already have offset 0. */
5388 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5389 {
5390 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5391 - byte);
5392 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5393 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5394 byte = (subword_byte % UNITS_PER_WORD
5395 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5396 }
5397
5398 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5399 so if it's become negative it will instead be very large.) */
5400 gcc_assert (byte < GET_MODE_SIZE (innermode));
5401
5402 /* Convert from bytes to chunks of size value_bit. */
5403 value_start = byte * (BITS_PER_UNIT / value_bit);
5404
5405 /* Re-pack the value. */
5406
5407 if (VECTOR_MODE_P (outermode))
5408 {
5409 num_elem = GET_MODE_NUNITS (outermode);
5410 result_v = rtvec_alloc (num_elem);
5411 elems = &RTVEC_ELT (result_v, 0);
5412 outer_submode = GET_MODE_INNER (outermode);
5413 }
5414 else
5415 {
5416 num_elem = 1;
5417 elems = &result_s;
5418 outer_submode = outermode;
5419 }
5420
5421 outer_class = GET_MODE_CLASS (outer_submode);
5422 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5423
5424 gcc_assert (elem_bitsize % value_bit == 0);
5425 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5426
5427 for (elem = 0; elem < num_elem; elem++)
5428 {
5429 unsigned char *vp;
5430
5431 /* Vectors are stored in target memory order. (This is probably
5432 a mistake.) */
5433 {
5434 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5435 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5436 / BITS_PER_UNIT);
5437 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5438 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5439 unsigned bytele = (subword_byte % UNITS_PER_WORD
5440 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5441 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5442 }
5443
5444 switch (outer_class)
5445 {
5446 case MODE_INT:
5447 case MODE_PARTIAL_INT:
5448 {
5449 int u;
5450 int base = 0;
5451 int units
5452 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5453 / HOST_BITS_PER_WIDE_INT;
5454 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5455 wide_int r;
5456
5457 for (u = 0; u < units; u++)
5458 {
5459 unsigned HOST_WIDE_INT buf = 0;
5460 for (i = 0;
5461 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5462 i += value_bit)
5463 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5464
5465 tmp[u] = buf;
5466 base += HOST_BITS_PER_WIDE_INT;
5467 }
5468 gcc_assert (GET_MODE_PRECISION (outer_submode)
5469 <= MAX_BITSIZE_MODE_ANY_INT);
5470 r = wide_int::from_array (tmp, units,
5471 GET_MODE_PRECISION (outer_submode));
5472 elems[elem] = immed_wide_int_const (r, outer_submode);
5473 }
5474 break;
5475
5476 case MODE_FLOAT:
5477 case MODE_DECIMAL_FLOAT:
5478 {
5479 REAL_VALUE_TYPE r;
5480 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5481
5482 /* real_from_target wants its input in words affected by
5483 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5484 and use WORDS_BIG_ENDIAN instead; see the documentation
5485 of SUBREG in rtl.texi. */
5486 for (i = 0; i < max_bitsize / 32; i++)
5487 tmp[i] = 0;
5488 for (i = 0; i < elem_bitsize; i += value_bit)
5489 {
5490 int ibase;
5491 if (WORDS_BIG_ENDIAN)
5492 ibase = elem_bitsize - 1 - i;
5493 else
5494 ibase = i;
5495 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5496 }
5497
5498 real_from_target (&r, tmp, outer_submode);
5499 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5500 }
5501 break;
5502
5503 case MODE_FRACT:
5504 case MODE_UFRACT:
5505 case MODE_ACCUM:
5506 case MODE_UACCUM:
5507 {
5508 FIXED_VALUE_TYPE f;
5509 f.data.low = 0;
5510 f.data.high = 0;
5511 f.mode = outer_submode;
5512
5513 for (i = 0;
5514 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5515 i += value_bit)
5516 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5517 for (; i < elem_bitsize; i += value_bit)
5518 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5519 << (i - HOST_BITS_PER_WIDE_INT));
5520
5521 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5522 }
5523 break;
5524
5525 default:
5526 gcc_unreachable ();
5527 }
5528 }
5529 if (VECTOR_MODE_P (outermode))
5530 return gen_rtx_CONST_VECTOR (outermode, result_v);
5531 else
5532 return result_s;
5533 }
5534
5535 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5536 Return 0 if no simplifications are possible. */
5537 rtx
5538 simplify_subreg (enum machine_mode outermode, rtx op,
5539 enum machine_mode innermode, unsigned int byte)
5540 {
5541 /* Little bit of sanity checking. */
5542 gcc_assert (innermode != VOIDmode);
5543 gcc_assert (outermode != VOIDmode);
5544 gcc_assert (innermode != BLKmode);
5545 gcc_assert (outermode != BLKmode);
5546
5547 gcc_assert (GET_MODE (op) == innermode
5548 || GET_MODE (op) == VOIDmode);
5549
5550 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5551 return NULL_RTX;
5552
5553 if (byte >= GET_MODE_SIZE (innermode))
5554 return NULL_RTX;
5555
5556 if (outermode == innermode && !byte)
5557 return op;
5558
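/* Constant operands are handled by decomposing OP into value_bit-sized
   chunks and re-packing them in OUTERMODE.  For example (illustrative
   only): on a little-endian target,
   simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0) would pick up
   the low-order byte and yield (const_int 0x34).  */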
5559 if (CONST_SCALAR_INT_P (op)
5560 || CONST_DOUBLE_AS_FLOAT_P (op)
5561 || GET_CODE (op) == CONST_FIXED
5562 || GET_CODE (op) == CONST_VECTOR)
5563 return simplify_immed_subreg (outermode, op, innermode, byte);
5564
5565 /* Changing mode twice with SUBREG => just change it once,
5566 or not at all if changing back to op's starting mode. */
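/* For instance, (subreg:SI (subreg:DI (reg:TI x) 8) 0) can usually
   collapse to (subreg:SI (reg:TI x) 8), and when OUTERMODE matches the
   innermost mode with both offsets zero the SUBREG disappears entirely.  */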
5567 if (GET_CODE (op) == SUBREG)
5568 {
5569 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5570 int final_offset = byte + SUBREG_BYTE (op);
5571 rtx newx;
5572
5573 if (outermode == innermostmode
5574 && byte == 0 && SUBREG_BYTE (op) == 0)
5575 return SUBREG_REG (op);
5576
5577 /* The SUBREG_BYTE represents the offset, as if the value were stored
5578 in memory.  The irritating exception is a paradoxical subreg, where
5579 we define SUBREG_BYTE to be 0; on big-endian machines this value
5580 would otherwise be negative.  For a moment, undo this exception. */
5581 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5582 {
5583 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5584 if (WORDS_BIG_ENDIAN)
5585 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5586 if (BYTES_BIG_ENDIAN)
5587 final_offset += difference % UNITS_PER_WORD;
5588 }
5589 if (SUBREG_BYTE (op) == 0
5590 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5591 {
5592 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5593 if (WORDS_BIG_ENDIAN)
5594 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5595 if (BYTES_BIG_ENDIAN)
5596 final_offset += difference % UNITS_PER_WORD;
5597 }
5598
5599 /* See whether resulting subreg will be paradoxical. */
5600 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5601 {
5602 /* In nonparadoxical subregs we can't handle negative offsets. */
5603 if (final_offset < 0)
5604 return NULL_RTX;
5605 /* Bail out if the resulting subreg would be incorrect. */
5606 if (final_offset % GET_MODE_SIZE (outermode)
5607 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5608 return NULL_RTX;
5609 }
5610 else
5611 {
5612 int offset = 0;
5613 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5614
5615 /* In a paradoxical subreg, see whether we are still looking at the
5616 lower part.  If so, our SUBREG_BYTE will be 0. */
5617 if (WORDS_BIG_ENDIAN)
5618 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5619 if (BYTES_BIG_ENDIAN)
5620 offset += difference % UNITS_PER_WORD;
5621 if (offset == final_offset)
5622 final_offset = 0;
5623 else
5624 return NULL_RTX;
5625 }
5626
5627 /* Recurse for further possible simplifications. */
5628 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5629 final_offset);
5630 if (newx)
5631 return newx;
5632 if (validate_subreg (outermode, innermostmode,
5633 SUBREG_REG (op), final_offset))
5634 {
5635 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5636 if (SUBREG_PROMOTED_VAR_P (op)
5637 && SUBREG_PROMOTED_SIGN (op) >= 0
5638 && GET_MODE_CLASS (outermode) == MODE_INT
5639 && IN_RANGE (GET_MODE_SIZE (outermode),
5640 GET_MODE_SIZE (innermode),
5641 GET_MODE_SIZE (innermostmode))
5642 && subreg_lowpart_p (newx))
5643 {
5644 SUBREG_PROMOTED_VAR_P (newx) = 1;
5645 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5646 }
5647 return newx;
5648 }
5649 return NULL_RTX;
5650 }
5651
5652 /* SUBREG of a hard register => just change the register number
5653 and/or mode. If the hard register is not valid in that mode,
5654 suppress this simplification. If the hard register is the stack,
5655 frame, or argument pointer, leave this as a SUBREG. */
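/* For example (assuming the target allows it), (subreg:SI (reg:DI r) 0)
   on a little-endian machine can simply become (reg:SI r) with the same
   hard register number.  */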
5656
5657 if (REG_P (op) && HARD_REGISTER_P (op))
5658 {
5659 unsigned int regno, final_regno;
5660
5661 regno = REGNO (op);
5662 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5663 if (HARD_REGISTER_NUM_P (final_regno))
5664 {
5665 rtx x;
5666 int final_offset = byte;
5667
5668 /* Adjust offset for paradoxical subregs. */
5669 if (byte == 0
5670 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5671 {
5672 int difference = (GET_MODE_SIZE (innermode)
5673 - GET_MODE_SIZE (outermode));
5674 if (WORDS_BIG_ENDIAN)
5675 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5676 if (BYTES_BIG_ENDIAN)
5677 final_offset += difference % UNITS_PER_WORD;
5678 }
5679
5680 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5681
5682 /* Propagate the original regno.  We don't have any way to specify
5683 the offset inside the original regno, so do so only for the lowpart.
5684 The information is used only by alias analysis, which cannot
5685 grok partial registers anyway. */
5686
5687 if (subreg_lowpart_offset (outermode, innermode) == byte)
5688 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5689 return x;
5690 }
5691 }
5692
5693 /* If we have a SUBREG of a register that is being replaced with a
5694 MEM, make a new MEM and try replacing the SUBREG with it.  Don't
5695 do this if the MEM has a mode-dependent address or if we would be
5696 widening it. */
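/* For example, (subreg:SI (mem:DI addr) 4) can be rewritten by
   adjust_address_nv as an SImode MEM at addr + 4, provided the address
   is not mode-dependent and the reference is not being widened.  */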
5697
5698 if (MEM_P (op)
5699 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5700 /* Allow splitting of volatile memory references in case we don't
5701 have an instruction to move the whole thing. */
5702 && (! MEM_VOLATILE_P (op)
5703 || ! have_insn_for (SET, innermode))
5704 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5705 return adjust_address_nv (op, outermode, byte);
5706
5707 /* Handle complex values represented as CONCAT
5708 of real and imaginary part. */
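/* For example, given (concat:SC (reg:SF re) (reg:SF im)) and assuming
   4-byte SFmode, an SFmode SUBREG at BYTE 4 selects the imaginary part
   and simplifies to (reg:SF im).  */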
5709 if (GET_CODE (op) == CONCAT)
5710 {
5711 unsigned int part_size, final_offset;
5712 rtx part, res;
5713
5714 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5715 if (byte < part_size)
5716 {
5717 part = XEXP (op, 0);
5718 final_offset = byte;
5719 }
5720 else
5721 {
5722 part = XEXP (op, 1);
5723 final_offset = byte - part_size;
5724 }
5725
5726 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5727 return NULL_RTX;
5728
5729 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5730 if (res)
5731 return res;
5732 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5733 return gen_rtx_SUBREG (outermode, part, final_offset);
5734 return NULL_RTX;
5735 }
5736
5737 /* A SUBREG resulting from a zero extension may fold to zero if
5738 it extracts higher bits than the ZERO_EXTEND's source bits. */
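/* For example, on a little-endian target
   (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads only the zero-filled
   upper half, so it folds to (const_int 0).  */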
5739 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5740 {
5741 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5742 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5743 return CONST0_RTX (outermode);
5744 }
5745
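/* A lowpart SUBREG to a narrower scalar integer mode behaves like a
   TRUNCATE, so give the truncation simplifications a chance; e.g.
   (subreg:HI (zero_extend:SI (reg:HI x)) 0) can simplify back to
   (reg:HI x).  */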
5746 if (SCALAR_INT_MODE_P (outermode)
5747 && SCALAR_INT_MODE_P (innermode)
5748 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5749 && byte == subreg_lowpart_offset (outermode, innermode))
5750 {
5751 rtx tem = simplify_truncation (outermode, op, innermode);
5752 if (tem)
5753 return tem;
5754 }
5755
5756 return NULL_RTX;
5757 }
5758
5759 /* Make a SUBREG operation or equivalent if it folds. */
5760
5761 rtx
5762 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5763 enum machine_mode innermode, unsigned int byte)
5764 {
5765 rtx newx;
5766
5767 newx = simplify_subreg (outermode, op, innermode, byte);
5768 if (newx)
5769 return newx;
5770
5771 if (GET_CODE (op) == SUBREG
5772 || GET_CODE (op) == CONCAT
5773 || GET_MODE (op) == VOIDmode)
5774 return NULL_RTX;
5775
5776 if (validate_subreg (outermode, innermode, op, byte))
5777 return gen_rtx_SUBREG (outermode, op, byte);
5778
5779 return NULL_RTX;
5780 }
5781
5782 /* Simplify X, an rtx expression.
5783
5784 Return the simplified expression or NULL if no simplifications
5785 were possible.
5786
5787 This is the preferred entry point into the simplification routines;
5788 however, we still allow passes to call the more specific routines.
5789
5790 Right now GCC has three (yes, three) major bodies of RTL simplification
5791 code that need to be unified.
5792
5793 1. fold_rtx in cse.c. This code uses various CSE specific
5794 information to aid in RTL simplification.
5795
5796 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5797 it uses combine specific information to aid in RTL
5798 simplification.
5799
5800 3. The routines in this file.
5801
5802
5803 Long term we want to only have one body of simplification code; to
5804 get to that state I recommend the following steps:
5805
5806 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5807 which do not depend on pass-specific state into these routines.
5808
5809 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5810 use this routine whenever possible.
5811
5812 3. Allow for pass dependent state to be provided to these
5813 routines and add simplifications based on the pass dependent
5814 state. Remove code from cse.c & combine.c that becomes
5815 redundant/dead.
5816
5817 It will take time, but ultimately the compiler will be easier to
5818 maintain and improve.  It's totally silly that when we add a
5819 simplification it needs to be added to 4 places (3 for RTL
5820 simplification and 1 for tree simplification). */
5821
5822 rtx
5823 simplify_rtx (const_rtx x)
5824 {
5825 const enum rtx_code code = GET_CODE (x);
5826 const enum machine_mode mode = GET_MODE (x);
5827
5828 switch (GET_RTX_CLASS (code))
5829 {
5830 case RTX_UNARY:
5831 return simplify_unary_operation (code, mode,
5832 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5833 case RTX_COMM_ARITH:
5834 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5835 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5836
5837 /* Fall through.... */
5838
5839 case RTX_BIN_ARITH:
5840 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5841
5842 case RTX_TERNARY:
5843 case RTX_BITFIELD_OPS:
5844 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5845 XEXP (x, 0), XEXP (x, 1),
5846 XEXP (x, 2));
5847
5848 case RTX_COMPARE:
5849 case RTX_COMM_COMPARE:
5850 return simplify_relational_operation (code, mode,
5851 ((GET_MODE (XEXP (x, 0))
5852 != VOIDmode)
5853 ? GET_MODE (XEXP (x, 0))
5854 : GET_MODE (XEXP (x, 1))),
5855 XEXP (x, 0),
5856 XEXP (x, 1));
5857
5858 case RTX_EXTRA:
5859 if (code == SUBREG)
5860 return simplify_subreg (mode, SUBREG_REG (x),
5861 GET_MODE (SUBREG_REG (x)),
5862 SUBREG_BYTE (x));
5863 break;
5864
5865 case RTX_OBJ:
5866 if (code == LO_SUM)
5867 {
5868 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5869 if (GET_CODE (XEXP (x, 0)) == HIGH
5870 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5871 return XEXP (x, 1);
5872 }
5873 break;
5874
5875 default:
5876 break;
5877 }
5878 return NULL;
5879 }