1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
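/* For example, HWI_SIGN_EXTEND applied to a low word whose top host bit
   is set yields (HOST_WIDE_INT) -1 for the high word, while any
   nonnegative low word yields 0.  */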
49
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
68 {
69 return gen_int_mode (- INTVAL (i), mode);
70 }
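/* For instance, in QImode negating (const_int -128) yields 128, which
   gen_int_mode truncates and sign-extends back to (const_int -128);
   without the truncation the result would not fit the mode.  */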
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && GET_CODE (x) == CONST_DOUBLE
93 && CONST_DOUBLE_LOW (x) == 0)
94 {
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
97 }
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
101
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
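/* For example, in SImode this accepts only the sign-bit constant
   0x80000000, i.e. (const_int -2147483648) on a 64-bit host, and
   rejects every other value.  */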
106
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
110
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114 unsigned int width;
115
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
118
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
122
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
126
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
136
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
140
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
143 }
144
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150 unsigned int width;
151
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
154
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
158
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
161 }
162 \f
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
165
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
169 {
170 rtx tem;
171
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
176
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
181
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
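/* For example, simplify_gen_binary (PLUS, SImode, X, const0_rtx) folds
   straight to X, while an unfoldable commutative case such as adding
   (const_int 4) to a register is emitted as (plus (reg) (const_int 4)),
   with the constant canonically second.  */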
184 \f
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
193
194 switch (GET_CODE (x))
195 {
196 case MEM:
197 break;
198
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
204 {
205 REAL_VALUE_TYPE d;
206
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 }
210 return x;
211
212 default:
213 return x;
214 }
215
216 if (GET_MODE (x) == BLKmode)
217 return x;
218
219 addr = XEXP (x, 0);
220
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
223
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228 {
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
231 }
232
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
235
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
240 {
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
243
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
248 {
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
252 }
253 else
254 return c;
255 }
256
257 return x;
258 }
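/* E.g. a (mem:DF (symbol_ref)) whose address is a constant pool entry
   holding a DFmode constant comes back as that CONST_DOUBLE, as long as
   the access uses the stored mode and a zero offset (otherwise a subreg
   simplification is attempted first).  */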
259 \f
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
263
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
266 {
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
272 {
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
276
277 switch (TREE_CODE (decl))
278 {
279 default:
280 decl = NULL;
281 break;
282
283 case VAR_DECL:
284 break;
285
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
293 {
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
297
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
305 {
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
309 }
310 break;
311 }
312 }
313
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
321 {
322 rtx newx;
323
324 offset += MEM_OFFSET (x);
325
326 newx = DECL_RTL (decl);
327
328 if (MEM_P (newx))
329 {
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
350 }
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
354 }
355 }
356
357 return x;
358 }
359 \f
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
362
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
366 {
367 rtx tem;
368
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
372
373 return gen_rtx_fmt_e (code, mode, op);
374 }
375
376 /* Likewise for ternary operations. */
377
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 {
382 rtx tem;
383
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
388
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 }
391
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
394
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 {
399 rtx tem;
400
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
404
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
406 }
407 \f
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
411 result. */
412
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 {
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
424
425 if (__builtin_expect (fn != NULL, 0))
426 {
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
430 }
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
433
434 switch (GET_RTX_CLASS (code))
435 {
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
443
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
451
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475
476 case RTX_EXTRA:
477 if (code == SUBREG)
478 {
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
486 }
487 break;
488
489 case RTX_OBJ:
490 if (code == MEM)
491 {
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
496 }
497 else if (code == LO_SUM)
498 {
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
505
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
509 }
510 break;
511
512 default:
513 break;
514 }
515
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
520 {
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 {
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
529 {
530 if (newvec == vec)
531 {
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
536 }
537 RTVEC_ELT (newvec, j) = op;
538 }
539 }
540 break;
541
542 case 'e':
543 if (XEXP (x, i))
544 {
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
547 {
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
551 }
552 }
553 break;
554 }
555 return newx;
556 }
557
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
560
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 {
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
565 }
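/* For example, replacing (reg A) with (const_int 8) in
   (plus:SI (reg A) (const_int 4)) yields (const_int 12), because each
   rebuilt subexpression is passed back through the simplify_gen_*
   routines above.  */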
566 \f
567 /* Try to simplify a unary operation CODE whose output mode is to be
568 MODE with input operand OP whose mode was originally OP_MODE.
569 Return zero if no simplification can be made. */
570 rtx
571 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
572 rtx op, enum machine_mode op_mode)
573 {
574 rtx trueop, tem;
575
576 trueop = avoid_constant_pool_reference (op);
577
578 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
579 if (tem)
580 return tem;
581
582 return simplify_unary_operation_1 (code, mode, op);
583 }
584
585 /* Perform some simplifications we can do even if the operands
586 aren't constant. */
587 static rtx
588 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
589 {
590 enum rtx_code reversed;
591 rtx temp;
592
593 switch (code)
594 {
595 case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op) == NOT)
598 return XEXP (op, 0);
599
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op)
603 && (mode == BImode || STORE_FLAG_VALUE == -1)
604 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
605 return simplify_gen_relational (reversed, mode, VOIDmode,
606 XEXP (op, 0), XEXP (op, 1));
607
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op) == PLUS
610 && XEXP (op, 1) == constm1_rtx)
611 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
612
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op) == NEG)
615 return plus_constant (mode, XEXP (op, 0), -1);
616
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op) == XOR
619 && CONST_INT_P (XEXP (op, 1))
620 && (temp = simplify_unary_operation (NOT, mode,
621 XEXP (op, 1), mode)) != 0)
622 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
623
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op) == PLUS
626 && CONST_INT_P (XEXP (op, 1))
627 && mode_signbit_p (mode, XEXP (op, 1))
628 && (temp = simplify_unary_operation (NOT, mode,
629 XEXP (op, 1), mode)) != 0)
630 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
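      /* E.g. in QImode, (not (plus X (const_int -128))) becomes
         (xor X (const_int 127)): adding the sign bit only flips the top
         bit, and the NOT then flips all the others.  */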
631
632
633 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
634 operands other than 1, but that is not valid. We could do a
635 similar simplification for (not (lshiftrt C X)) where C is
636 just the sign bit, but this doesn't seem common enough to
637 bother with. */
638 if (GET_CODE (op) == ASHIFT
639 && XEXP (op, 0) == const1_rtx)
640 {
641 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
642 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
643 }
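      /* E.g. in QImode, (not (ashift 1 X)) becomes (rotate (const_int -2) X);
         rotating the all-ones-except-bit-zero pattern left by X clears
         exactly bit X.  */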
644
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
648
649 if (STORE_FLAG_VALUE == -1
650 && GET_CODE (op) == ASHIFTRT
 651           && CONST_INT_P (XEXP (op, 1))
652 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
653 return simplify_gen_relational (GE, mode, VOIDmode,
654 XEXP (op, 0), const0_rtx);
655
656
657 if (GET_CODE (op) == SUBREG
658 && subreg_lowpart_p (op)
659 && (GET_MODE_SIZE (GET_MODE (op))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
661 && GET_CODE (SUBREG_REG (op)) == ASHIFT
662 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
663 {
664 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
665 rtx x;
666
667 x = gen_rtx_ROTATE (inner_mode,
668 simplify_gen_unary (NOT, inner_mode, const1_rtx,
669 inner_mode),
670 XEXP (SUBREG_REG (op), 1));
671 return rtl_hooks.gen_lowpart_no_emit (mode, x);
672 }
673
674 /* Apply De Morgan's laws to reduce number of patterns for machines
675 with negating logical insns (and-not, nand, etc.). If result has
676 only one NOT, put it first, since that is how the patterns are
677 coded. */
678
679 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
680 {
681 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
682 enum machine_mode op_mode;
683
684 op_mode = GET_MODE (in1);
685 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
686
687 op_mode = GET_MODE (in2);
688 if (op_mode == VOIDmode)
689 op_mode = mode;
690 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
691
692 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
693 {
694 rtx tem = in2;
695 in2 = in1; in1 = tem;
696 }
697
698 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
699 mode, in1, in2);
700 }
701 break;
702
703 case NEG:
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op) == NEG)
706 return XEXP (op, 0);
707
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op) == PLUS
710 && XEXP (op, 1) == const1_rtx)
711 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
712
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op) == NOT)
715 return plus_constant (mode, XEXP (op, 0), 1);
716
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
725 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
726
727 if (GET_CODE (op) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
730 {
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op, 1))
733 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
734 {
735 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
736 if (temp)
737 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
738 }
739
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
742 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
743 }
744
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
749 {
750 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
751 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
752 }
753
754 /* NEG commutes with ASHIFT since it is multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
756 is a constant). */
757 if (GET_CODE (op) == ASHIFT)
758 {
759 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
760 if (temp)
761 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
762 }
763
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op) == ASHIFTRT
767 && CONST_INT_P (XEXP (op, 1))
768 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
769 return simplify_gen_binary (LSHIFTRT, mode,
770 XEXP (op, 0), XEXP (op, 1));
771
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op) == LSHIFTRT
775 && CONST_INT_P (XEXP (op, 1))
776 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
777 return simplify_gen_binary (ASHIFTRT, mode,
778 XEXP (op, 0), XEXP (op, 1));
779
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op) == XOR
782 && XEXP (op, 1) == const1_rtx
783 && nonzero_bits (XEXP (op, 0), mode) == 1)
784 return plus_constant (mode, XEXP (op, 0), -1);
785
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op) == LT
789 && XEXP (op, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
791 {
792 enum machine_mode inner = GET_MODE (XEXP (op, 0));
793 int isize = GET_MODE_PRECISION (inner);
794 if (STORE_FLAG_VALUE == 1)
795 {
796 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
797 GEN_INT (isize - 1));
798 if (mode == inner)
799 return temp;
800 if (GET_MODE_PRECISION (mode) > isize)
801 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
802 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
803 }
804 else if (STORE_FLAG_VALUE == -1)
805 {
806 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
807 GEN_INT (isize - 1));
808 if (mode == inner)
809 return temp;
810 if (GET_MODE_PRECISION (mode) > isize)
811 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
812 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
813 }
814 }
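      /* E.g. for SImode and STORE_FLAG_VALUE == 1, (neg (lt X 0)) becomes
         (ashiftrt X 31), which is -1 when X is negative and 0 otherwise,
         exactly the negated comparison result.  */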
815 break;
816
817 case TRUNCATE:
818 /* We can't handle truncation to a partial integer mode here
819 because we don't know the real bitsize of the partial
820 integer mode. */
821 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
822 break;
823
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op) == SIGN_EXTEND
826 || GET_CODE (op) == ZERO_EXTEND)
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
829
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op) == ABS
833 || GET_CODE (op) == NEG)
834 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
836 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
837 return simplify_gen_unary (GET_CODE (op), mode,
838 XEXP (XEXP (op, 0), 0), mode);
839
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
841 (truncate:A X). */
842 if (GET_CODE (op) == SUBREG
843 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
844 && subreg_lowpart_p (op))
845 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
846 GET_MODE (XEXP (SUBREG_REG (op), 0)));
847
848 /* If we know that the value is already truncated, we can
849 replace the TRUNCATE with a SUBREG. Note that this is also
850 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
851 modes we just have to apply a different definition for
852 truncation. But don't do this for an (LSHIFTRT (MULT ...))
853 since this will cause problems with the umulXi3_highpart
854 patterns. */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
856 ? (num_sign_bit_copies (op, GET_MODE (op))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
858 - GET_MODE_PRECISION (mode)))
859 : truncated_to_mode (mode, op))
860 && ! (GET_CODE (op) == LSHIFTRT
861 && GET_CODE (XEXP (op, 0)) == MULT))
862 return rtl_hooks.gen_lowpart_no_emit (mode, op);
863
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode)
869 && COMPARISON_P (op)
870 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
871 return rtl_hooks.gen_lowpart_no_emit (mode, op);
872 break;
873
874 case FLOAT_TRUNCATE:
875 if (DECIMAL_FLOAT_MODE_P (mode))
876 break;
877
878 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
879 if (GET_CODE (op) == FLOAT_EXTEND
880 && GET_MODE (XEXP (op, 0)) == mode)
881 return XEXP (op, 0);
882
883 /* (float_truncate:SF (float_truncate:DF foo:XF))
884 = (float_truncate:SF foo:XF).
885 This may eliminate double rounding, so it is unsafe.
886
887 (float_truncate:SF (float_extend:XF foo:DF))
888 = (float_truncate:SF foo:DF).
889
890 (float_truncate:DF (float_extend:XF foo:SF))
 891          = (float_extend:DF foo:SF).  */
892 if ((GET_CODE (op) == FLOAT_TRUNCATE
893 && flag_unsafe_math_optimizations)
894 || GET_CODE (op) == FLOAT_EXTEND)
895 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
896 0)))
897 > GET_MODE_SIZE (mode)
898 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
899 mode,
900 XEXP (op, 0), mode);
901
902 /* (float_truncate (float x)) is (float x) */
903 if (GET_CODE (op) == FLOAT
904 && (flag_unsafe_math_optimizations
905 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
906 && ((unsigned)significand_size (GET_MODE (op))
907 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
908 - num_sign_bit_copies (XEXP (op, 0),
909 GET_MODE (XEXP (op, 0))))))))
910 return simplify_gen_unary (FLOAT, mode,
911 XEXP (op, 0),
912 GET_MODE (XEXP (op, 0)));
913
914 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
915 (OP:SF foo:SF) if OP is NEG or ABS. */
916 if ((GET_CODE (op) == ABS
917 || GET_CODE (op) == NEG)
918 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
919 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
920 return simplify_gen_unary (GET_CODE (op), mode,
921 XEXP (XEXP (op, 0), 0), mode);
922
923 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
924 is (float_truncate:SF x). */
925 if (GET_CODE (op) == SUBREG
926 && subreg_lowpart_p (op)
927 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
928 return SUBREG_REG (op);
929 break;
930
931 case FLOAT_EXTEND:
932 if (DECIMAL_FLOAT_MODE_P (mode))
933 break;
934
935 /* (float_extend (float_extend x)) is (float_extend x)
936
937 (float_extend (float x)) is (float x) assuming that double
938 rounding can't happen.
939 */
940 if (GET_CODE (op) == FLOAT_EXTEND
941 || (GET_CODE (op) == FLOAT
942 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
943 && ((unsigned)significand_size (GET_MODE (op))
944 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
945 - num_sign_bit_copies (XEXP (op, 0),
946 GET_MODE (XEXP (op, 0)))))))
947 return simplify_gen_unary (GET_CODE (op), mode,
948 XEXP (op, 0),
949 GET_MODE (XEXP (op, 0)));
950
951 break;
952
953 case ABS:
954 /* (abs (neg <foo>)) -> (abs <foo>) */
955 if (GET_CODE (op) == NEG)
956 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
957 GET_MODE (XEXP (op, 0)));
958
959 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
960 do nothing. */
961 if (GET_MODE (op) == VOIDmode)
962 break;
963
964 /* If operand is something known to be positive, ignore the ABS. */
965 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
966 || val_signbit_known_clear_p (GET_MODE (op),
967 nonzero_bits (op, GET_MODE (op))))
968 return op;
969
970 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
971 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
972 return gen_rtx_NEG (mode, op);
973
974 break;
975
976 case FFS:
977 /* (ffs (*_extend <X>)) = (ffs <X>) */
978 if (GET_CODE (op) == SIGN_EXTEND
979 || GET_CODE (op) == ZERO_EXTEND)
980 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
981 GET_MODE (XEXP (op, 0)));
982 break;
983
984 case POPCOUNT:
985 switch (GET_CODE (op))
986 {
987 case BSWAP:
988 case ZERO_EXTEND:
989 /* (popcount (zero_extend <X>)) = (popcount <X>) */
990 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
991 GET_MODE (XEXP (op, 0)));
992
993 case ROTATE:
994 case ROTATERT:
995 /* Rotations don't affect popcount. */
996 if (!side_effects_p (XEXP (op, 1)))
997 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
998 GET_MODE (XEXP (op, 0)));
999 break;
1000
1001 default:
1002 break;
1003 }
1004 break;
1005
1006 case PARITY:
1007 switch (GET_CODE (op))
1008 {
1009 case NOT:
1010 case BSWAP:
1011 case ZERO_EXTEND:
1012 case SIGN_EXTEND:
1013 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1014 GET_MODE (XEXP (op, 0)));
1015
1016 case ROTATE:
1017 case ROTATERT:
1018 /* Rotations don't affect parity. */
1019 if (!side_effects_p (XEXP (op, 1)))
1020 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1021 GET_MODE (XEXP (op, 0)));
1022 break;
1023
1024 default:
1025 break;
1026 }
1027 break;
1028
1029 case BSWAP:
1030 /* (bswap (bswap x)) -> x. */
1031 if (GET_CODE (op) == BSWAP)
1032 return XEXP (op, 0);
1033 break;
1034
1035 case FLOAT:
1036 /* (float (sign_extend <X>)) = (float <X>). */
1037 if (GET_CODE (op) == SIGN_EXTEND)
1038 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1039 GET_MODE (XEXP (op, 0)));
1040 break;
1041
1042 case SIGN_EXTEND:
1043 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1044 becomes just the MINUS if its mode is MODE. This allows
1045 folding switch statements on machines using casesi (such as
1046 the VAX). */
1047 if (GET_CODE (op) == TRUNCATE
1048 && GET_MODE (XEXP (op, 0)) == mode
1049 && GET_CODE (XEXP (op, 0)) == MINUS
1050 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1051 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1052 return XEXP (op, 0);
1053
1054 /* Extending a widening multiplication should be canonicalized to
1055 a wider widening multiplication. */
1056 if (GET_CODE (op) == MULT)
1057 {
1058 rtx lhs = XEXP (op, 0);
1059 rtx rhs = XEXP (op, 1);
1060 enum rtx_code lcode = GET_CODE (lhs);
1061 enum rtx_code rcode = GET_CODE (rhs);
1062
1063 /* Widening multiplies usually extend both operands, but sometimes
1064 they use a shift to extract a portion of a register. */
1065 if ((lcode == SIGN_EXTEND
1066 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1067 && (rcode == SIGN_EXTEND
1068 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1069 {
1070 enum machine_mode lmode = GET_MODE (lhs);
1071 enum machine_mode rmode = GET_MODE (rhs);
1072 int bits;
1073
1074 if (lcode == ASHIFTRT)
1075 /* Number of bits not shifted off the end. */
1076 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1077 else /* lcode == SIGN_EXTEND */
1078 /* Size of inner mode. */
1079 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1080
1081 if (rcode == ASHIFTRT)
1082 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1083 else /* rcode == SIGN_EXTEND */
1084 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1085
1086               /* We can only widen multiplies if the result is mathematically
1087 equivalent. I.e. if overflow was impossible. */
1088 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1089 return simplify_gen_binary
1090 (MULT, mode,
1091 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1092 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1093 }
1094 }
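      /* E.g. (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
         (sign_extend:SI y:HI))) becomes (mult:DI (sign_extend:DI x:HI)
         (sign_extend:DI y:HI)), since 16 + 16 significant bits fit in the
         32-bit product and no overflow was possible.  */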
1095
1096 /* Check for a sign extension of a subreg of a promoted
1097 variable, where the promotion is sign-extended, and the
1098 target mode is the same as the variable's promotion. */
1099 if (GET_CODE (op) == SUBREG
1100 && SUBREG_PROMOTED_VAR_P (op)
1101 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1102 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1103 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1104
1105 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1106 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1107 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1108 {
1109 gcc_assert (GET_MODE_BITSIZE (mode)
1110 > GET_MODE_BITSIZE (GET_MODE (op)));
1111 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1112 GET_MODE (XEXP (op, 0)));
1113 }
1114
1115 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1116 is (sign_extend:M (subreg:O <X>)) if there is mode with
1117 GET_MODE_BITSIZE (N) - I bits.
1118 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1119 is similarly (zero_extend:M (subreg:O <X>)). */
1120 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1121 && GET_CODE (XEXP (op, 0)) == ASHIFT
1122 && CONST_INT_P (XEXP (op, 1))
1123 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1124 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1125 {
1126 enum machine_mode tmode
1127 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1128 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1129 gcc_assert (GET_MODE_BITSIZE (mode)
1130 > GET_MODE_BITSIZE (GET_MODE (op)));
1131 if (tmode != BLKmode)
1132 {
1133 rtx inner =
1134 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1135 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1136 ? SIGN_EXTEND : ZERO_EXTEND,
1137 mode, inner, tmode);
1138 }
1139 }
1140
1141 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1142 /* As we do not know which address space the pointer is referring to,
1143 we can do this only if the target does not support different pointer
1144 or address modes depending on the address space. */
1145 if (target_default_pointer_address_modes_p ()
1146 && ! POINTERS_EXTEND_UNSIGNED
1147 && mode == Pmode && GET_MODE (op) == ptr_mode
1148 && (CONSTANT_P (op)
1149 || (GET_CODE (op) == SUBREG
1150 && REG_P (SUBREG_REG (op))
1151 && REG_POINTER (SUBREG_REG (op))
1152 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1153 return convert_memory_address (Pmode, op);
1154 #endif
1155 break;
1156
1157 case ZERO_EXTEND:
1158 /* Check for a zero extension of a subreg of a promoted
1159 variable, where the promotion is zero-extended, and the
1160 target mode is the same as the variable's promotion. */
1161 if (GET_CODE (op) == SUBREG
1162 && SUBREG_PROMOTED_VAR_P (op)
1163 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1164 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1165 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1166
1167 /* Extending a widening multiplication should be canonicalized to
1168 a wider widening multiplication. */
1169 if (GET_CODE (op) == MULT)
1170 {
1171 rtx lhs = XEXP (op, 0);
1172 rtx rhs = XEXP (op, 1);
1173 enum rtx_code lcode = GET_CODE (lhs);
1174 enum rtx_code rcode = GET_CODE (rhs);
1175
1176 /* Widening multiplies usually extend both operands, but sometimes
1177 they use a shift to extract a portion of a register. */
1178 if ((lcode == ZERO_EXTEND
1179 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1180 && (rcode == ZERO_EXTEND
1181 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1182 {
1183 enum machine_mode lmode = GET_MODE (lhs);
1184 enum machine_mode rmode = GET_MODE (rhs);
1185 int bits;
1186
1187 if (lcode == LSHIFTRT)
1188 /* Number of bits not shifted off the end. */
1189 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1190 else /* lcode == ZERO_EXTEND */
1191 /* Size of inner mode. */
1192 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1193
1194 if (rcode == LSHIFTRT)
1195 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1196 else /* rcode == ZERO_EXTEND */
1197 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1198
1199               /* We can only widen multiplies if the result is mathematically
1200 equivalent. I.e. if overflow was impossible. */
1201 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1202 return simplify_gen_binary
1203 (MULT, mode,
1204 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1205 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1206 }
1207 }
1208
1209 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1210 if (GET_CODE (op) == ZERO_EXTEND)
1211 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1212 GET_MODE (XEXP (op, 0)));
1213
1214 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1215 is (zero_extend:M (subreg:O <X>)) if there is mode with
1216 GET_MODE_BITSIZE (N) - I bits. */
1217 if (GET_CODE (op) == LSHIFTRT
1218 && GET_CODE (XEXP (op, 0)) == ASHIFT
1219 && CONST_INT_P (XEXP (op, 1))
1220 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1221 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1222 {
1223 enum machine_mode tmode
1224 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1225 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1226 if (tmode != BLKmode)
1227 {
1228 rtx inner =
1229 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1230 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1231 }
1232 }
1233
1234 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1235 /* As we do not know which address space the pointer is referring to,
1236 we can do this only if the target does not support different pointer
1237 or address modes depending on the address space. */
1238 if (target_default_pointer_address_modes_p ()
1239 && POINTERS_EXTEND_UNSIGNED > 0
1240 && mode == Pmode && GET_MODE (op) == ptr_mode
1241 && (CONSTANT_P (op)
1242 || (GET_CODE (op) == SUBREG
1243 && REG_P (SUBREG_REG (op))
1244 && REG_POINTER (SUBREG_REG (op))
1245 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1246 return convert_memory_address (Pmode, op);
1247 #endif
1248 break;
1249
1250 default:
1251 break;
1252 }
1253
1254 return 0;
1255 }
1256
1257 /* Try to compute the value of a unary operation CODE whose output mode is to
1258 be MODE with input operand OP whose mode was originally OP_MODE.
1259 Return zero if the value cannot be computed. */
1260 rtx
1261 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1262 rtx op, enum machine_mode op_mode)
1263 {
1264 unsigned int width = GET_MODE_PRECISION (mode);
1265 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1266
1267 if (code == VEC_DUPLICATE)
1268 {
1269 gcc_assert (VECTOR_MODE_P (mode));
1270 if (GET_MODE (op) != VOIDmode)
1271 {
1272 if (!VECTOR_MODE_P (GET_MODE (op)))
1273 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1274 else
1275 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1276 (GET_MODE (op)));
1277 }
1278 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1279 || GET_CODE (op) == CONST_VECTOR)
1280 {
1281 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1282 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1283 rtvec v = rtvec_alloc (n_elts);
1284 unsigned int i;
1285
1286 if (GET_CODE (op) != CONST_VECTOR)
1287 for (i = 0; i < n_elts; i++)
1288 RTVEC_ELT (v, i) = op;
1289 else
1290 {
1291 enum machine_mode inmode = GET_MODE (op);
1292 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1293 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1294
1295 gcc_assert (in_n_elts < n_elts);
1296 gcc_assert ((n_elts % in_n_elts) == 0);
1297 for (i = 0; i < n_elts; i++)
1298 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1299 }
1300 return gen_rtx_CONST_VECTOR (mode, v);
1301 }
1302 }
1303
1304 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1305 {
1306 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1307 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1308 enum machine_mode opmode = GET_MODE (op);
1309 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1310 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1311 rtvec v = rtvec_alloc (n_elts);
1312 unsigned int i;
1313
1314 gcc_assert (op_n_elts == n_elts);
1315 for (i = 0; i < n_elts; i++)
1316 {
1317 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1318 CONST_VECTOR_ELT (op, i),
1319 GET_MODE_INNER (opmode));
1320 if (!x)
1321 return 0;
1322 RTVEC_ELT (v, i) = x;
1323 }
1324 return gen_rtx_CONST_VECTOR (mode, v);
1325 }
1326
1327 /* The order of these tests is critical so that, for example, we don't
1328 check the wrong mode (input vs. output) for a conversion operation,
1329 such as FIX. At some point, this should be simplified. */
1330
1331 if (code == FLOAT && GET_MODE (op) == VOIDmode
1332 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1333 {
1334 HOST_WIDE_INT hv, lv;
1335 REAL_VALUE_TYPE d;
1336
1337 if (CONST_INT_P (op))
1338 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1339 else
1340 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1341
1342 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1343 d = real_value_truncate (mode, d);
1344 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1345 }
1346 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1347 && (GET_CODE (op) == CONST_DOUBLE
1348 || CONST_INT_P (op)))
1349 {
1350 HOST_WIDE_INT hv, lv;
1351 REAL_VALUE_TYPE d;
1352
1353 if (CONST_INT_P (op))
1354 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1355 else
1356 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1357
1358 if (op_mode == VOIDmode
1359 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1360 /* We should never get a negative number. */
1361 gcc_assert (hv >= 0);
1362 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1363 hv = 0, lv &= GET_MODE_MASK (op_mode);
1364
1365 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1366 d = real_value_truncate (mode, d);
1367 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1368 }
1369
1370 if (CONST_INT_P (op)
1371 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1372 {
1373 HOST_WIDE_INT arg0 = INTVAL (op);
1374 HOST_WIDE_INT val;
1375
1376 switch (code)
1377 {
1378 case NOT:
1379 val = ~ arg0;
1380 break;
1381
1382 case NEG:
1383 val = - arg0;
1384 break;
1385
1386 case ABS:
1387 val = (arg0 >= 0 ? arg0 : - arg0);
1388 break;
1389
1390 case FFS:
1391 arg0 &= GET_MODE_MASK (mode);
1392 val = ffs_hwi (arg0);
1393 break;
1394
1395 case CLZ:
1396 arg0 &= GET_MODE_MASK (mode);
1397 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1398 ;
1399 else
1400 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1401 break;
1402
1403 case CLRSB:
1404 arg0 &= GET_MODE_MASK (mode);
1405 if (arg0 == 0)
1406 val = GET_MODE_PRECISION (mode) - 1;
1407 else if (arg0 >= 0)
1408 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1409 else if (arg0 < 0)
1410 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1411 break;
1412
1413 case CTZ:
1414 arg0 &= GET_MODE_MASK (mode);
1415 if (arg0 == 0)
1416 {
1417 /* Even if the value at zero is undefined, we have to come
1418 up with some replacement. Seems good enough. */
1419 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1420 val = GET_MODE_PRECISION (mode);
1421 }
1422 else
1423 val = ctz_hwi (arg0);
1424 break;
1425
1426 case POPCOUNT:
1427 arg0 &= GET_MODE_MASK (mode);
1428 val = 0;
1429 while (arg0)
1430 val++, arg0 &= arg0 - 1;
1431 break;
1432
1433 case PARITY:
1434 arg0 &= GET_MODE_MASK (mode);
1435 val = 0;
1436 while (arg0)
1437 val++, arg0 &= arg0 - 1;
1438 val &= 1;
1439 break;
1440
1441 case BSWAP:
1442 {
1443 unsigned int s;
1444
1445 val = 0;
1446 for (s = 0; s < width; s += 8)
1447 {
1448 unsigned int d = width - s - 8;
1449 unsigned HOST_WIDE_INT byte;
1450 byte = (arg0 >> s) & 0xff;
1451 val |= byte << d;
1452 }
1453 }
1454 break;
1455
1456 case TRUNCATE:
1457 val = arg0;
1458 break;
1459
1460 case ZERO_EXTEND:
1461 /* When zero-extending a CONST_INT, we need to know its
1462 original mode. */
1463 gcc_assert (op_mode != VOIDmode);
1464 if (op_width == HOST_BITS_PER_WIDE_INT)
1465 {
1466 /* If we were really extending the mode,
1467 we would have to distinguish between zero-extension
1468 and sign-extension. */
1469 gcc_assert (width == op_width);
1470 val = arg0;
1471 }
1472 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1473 val = arg0 & GET_MODE_MASK (op_mode);
1474 else
1475 return 0;
1476 break;
1477
1478 case SIGN_EXTEND:
1479 if (op_mode == VOIDmode)
1480 op_mode = mode;
1481 op_width = GET_MODE_PRECISION (op_mode);
1482 if (op_width == HOST_BITS_PER_WIDE_INT)
1483 {
1484 /* If we were really extending the mode,
1485 we would have to distinguish between zero-extension
1486 and sign-extension. */
1487 gcc_assert (width == op_width);
1488 val = arg0;
1489 }
1490 else if (op_width < HOST_BITS_PER_WIDE_INT)
1491 {
1492 val = arg0 & GET_MODE_MASK (op_mode);
1493 if (val_signbit_known_set_p (op_mode, val))
1494 val |= ~GET_MODE_MASK (op_mode);
1495 }
1496 else
1497 return 0;
1498 break;
1499
1500 case SQRT:
1501 case FLOAT_EXTEND:
1502 case FLOAT_TRUNCATE:
1503 case SS_TRUNCATE:
1504 case US_TRUNCATE:
1505 case SS_NEG:
1506 case US_NEG:
1507 case SS_ABS:
1508 return 0;
1509
1510 default:
1511 gcc_unreachable ();
1512 }
1513
1514 return gen_int_mode (val, mode);
1515 }
1516
1517 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1518 for a DImode operation on a CONST_INT. */
1519 else if (GET_MODE (op) == VOIDmode
1520 && width <= HOST_BITS_PER_DOUBLE_INT
1521 && (GET_CODE (op) == CONST_DOUBLE
1522 || CONST_INT_P (op)))
1523 {
1524 unsigned HOST_WIDE_INT l1, lv;
1525 HOST_WIDE_INT h1, hv;
1526
1527 if (GET_CODE (op) == CONST_DOUBLE)
1528 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1529 else
1530 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1531
1532 switch (code)
1533 {
1534 case NOT:
1535 lv = ~ l1;
1536 hv = ~ h1;
1537 break;
1538
1539 case NEG:
1540 neg_double (l1, h1, &lv, &hv);
1541 break;
1542
1543 case ABS:
1544 if (h1 < 0)
1545 neg_double (l1, h1, &lv, &hv);
1546 else
1547 lv = l1, hv = h1;
1548 break;
1549
1550 case FFS:
1551 hv = 0;
1552 if (l1 != 0)
1553 lv = ffs_hwi (l1);
1554 else if (h1 != 0)
1555 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1556 else
1557 lv = 0;
1558 break;
1559
1560 case CLZ:
1561 hv = 0;
1562 if (h1 != 0)
1563 lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1564 - HOST_BITS_PER_WIDE_INT;
1565 else if (l1 != 0)
1566 lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1567 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1568 lv = GET_MODE_PRECISION (mode);
1569 break;
1570
1571 case CTZ:
1572 hv = 0;
1573 if (l1 != 0)
1574 lv = ctz_hwi (l1);
1575 else if (h1 != 0)
1576 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1577 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1578 lv = GET_MODE_PRECISION (mode);
1579 break;
1580
1581 case POPCOUNT:
1582 hv = 0;
1583 lv = 0;
1584 while (l1)
1585 lv++, l1 &= l1 - 1;
1586 while (h1)
1587 lv++, h1 &= h1 - 1;
1588 break;
1589
1590 case PARITY:
1591 hv = 0;
1592 lv = 0;
1593 while (l1)
1594 lv++, l1 &= l1 - 1;
1595 while (h1)
1596 lv++, h1 &= h1 - 1;
1597 lv &= 1;
1598 break;
1599
1600 case BSWAP:
1601 {
1602 unsigned int s;
1603
1604 hv = 0;
1605 lv = 0;
1606 for (s = 0; s < width; s += 8)
1607 {
1608 unsigned int d = width - s - 8;
1609 unsigned HOST_WIDE_INT byte;
1610
1611 if (s < HOST_BITS_PER_WIDE_INT)
1612 byte = (l1 >> s) & 0xff;
1613 else
1614 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1615
1616 if (d < HOST_BITS_PER_WIDE_INT)
1617 lv |= byte << d;
1618 else
1619 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1620 }
1621 }
1622 break;
1623
1624 case TRUNCATE:
1625 /* This is just a change-of-mode, so do nothing. */
1626 lv = l1, hv = h1;
1627 break;
1628
1629 case ZERO_EXTEND:
1630 gcc_assert (op_mode != VOIDmode);
1631
1632 if (op_width > HOST_BITS_PER_WIDE_INT)
1633 return 0;
1634
1635 hv = 0;
1636 lv = l1 & GET_MODE_MASK (op_mode);
1637 break;
1638
1639 case SIGN_EXTEND:
1640 if (op_mode == VOIDmode
1641 || op_width > HOST_BITS_PER_WIDE_INT)
1642 return 0;
1643 else
1644 {
1645 lv = l1 & GET_MODE_MASK (op_mode);
1646 if (val_signbit_known_set_p (op_mode, lv))
1647 lv |= ~GET_MODE_MASK (op_mode);
1648
1649 hv = HWI_SIGN_EXTEND (lv);
1650 }
1651 break;
1652
1653 case SQRT:
1654 return 0;
1655
1656 default:
1657 return 0;
1658 }
1659
1660 return immed_double_const (lv, hv, mode);
1661 }
1662
1663 else if (GET_CODE (op) == CONST_DOUBLE
1664 && SCALAR_FLOAT_MODE_P (mode)
1665 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1666 {
1667 REAL_VALUE_TYPE d, t;
1668 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1669
1670 switch (code)
1671 {
1672 case SQRT:
1673 if (HONOR_SNANS (mode) && real_isnan (&d))
1674 return 0;
1675 real_sqrt (&t, mode, &d);
1676 d = t;
1677 break;
1678 case ABS:
1679 d = real_value_abs (&d);
1680 break;
1681 case NEG:
1682 d = real_value_negate (&d);
1683 break;
1684 case FLOAT_TRUNCATE:
1685 d = real_value_truncate (mode, d);
1686 break;
1687 case FLOAT_EXTEND:
1688 /* All this does is change the mode, unless changing
1689 mode class. */
1690 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1691 real_convert (&d, mode, &d);
1692 break;
1693 case FIX:
1694 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1695 break;
1696 case NOT:
1697 {
1698 long tmp[4];
1699 int i;
1700
1701 real_to_target (tmp, &d, GET_MODE (op));
1702 for (i = 0; i < 4; i++)
1703 tmp[i] = ~tmp[i];
1704 real_from_target (&d, tmp, mode);
1705 break;
1706 }
1707 default:
1708 gcc_unreachable ();
1709 }
1710 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1711 }
1712
1713 else if (GET_CODE (op) == CONST_DOUBLE
1714 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1715 && GET_MODE_CLASS (mode) == MODE_INT
1716 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1717 {
1718 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1719 operators are intentionally left unspecified (to ease implementation
1720 by target backends), for consistency, this routine implements the
1721 same semantics for constant folding as used by the middle-end. */
1722
1723 /* This was formerly used only for non-IEEE float.
1724 eggert@twinsun.com says it is safe for IEEE also. */
1725 HOST_WIDE_INT xh, xl, th, tl;
1726 REAL_VALUE_TYPE x, t;
1727 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1728 switch (code)
1729 {
1730 case FIX:
1731 if (REAL_VALUE_ISNAN (x))
1732 return const0_rtx;
1733
1734 /* Test against the signed upper bound. */
1735 if (width > HOST_BITS_PER_WIDE_INT)
1736 {
1737 th = ((unsigned HOST_WIDE_INT) 1
1738 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1739 tl = -1;
1740 }
1741 else
1742 {
1743 th = 0;
1744 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1745 }
1746 real_from_integer (&t, VOIDmode, tl, th, 0);
1747 if (REAL_VALUES_LESS (t, x))
1748 {
1749 xh = th;
1750 xl = tl;
1751 break;
1752 }
1753
1754 /* Test against the signed lower bound. */
1755 if (width > HOST_BITS_PER_WIDE_INT)
1756 {
1757 th = (unsigned HOST_WIDE_INT) (-1)
1758 << (width - HOST_BITS_PER_WIDE_INT - 1);
1759 tl = 0;
1760 }
1761 else
1762 {
1763 th = -1;
1764 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1765 }
1766 real_from_integer (&t, VOIDmode, tl, th, 0);
1767 if (REAL_VALUES_LESS (x, t))
1768 {
1769 xh = th;
1770 xl = tl;
1771 break;
1772 }
1773 REAL_VALUE_TO_INT (&xl, &xh, x);
1774 break;
1775
1776 case UNSIGNED_FIX:
1777 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1778 return const0_rtx;
1779
1780 /* Test against the unsigned upper bound. */
1781 if (width == HOST_BITS_PER_DOUBLE_INT)
1782 {
1783 th = -1;
1784 tl = -1;
1785 }
1786 else if (width >= HOST_BITS_PER_WIDE_INT)
1787 {
1788 th = ((unsigned HOST_WIDE_INT) 1
1789 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1790 tl = -1;
1791 }
1792 else
1793 {
1794 th = 0;
1795 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1796 }
1797 real_from_integer (&t, VOIDmode, tl, th, 1);
1798 if (REAL_VALUES_LESS (t, x))
1799 {
1800 xh = th;
1801 xl = tl;
1802 break;
1803 }
1804
1805 REAL_VALUE_TO_INT (&xl, &xh, x);
1806 break;
1807
1808 default:
1809 gcc_unreachable ();
1810 }
1811 return immed_double_const (xl, xh, mode);
1812 }
1813
1814 return NULL_RTX;
1815 }
1816 \f
1817 /* Subroutine of simplify_binary_operation to simplify a commutative,
1818 associative binary operation CODE with result mode MODE, operating
1819 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1820 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1821 canonicalization is possible. */
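/* For example, (plus (plus X (const_int 3)) Y) is rewritten below as
   (plus (plus X Y) (const_int 3)), pushing the constant outermost so a
   later fold can combine it with other constants.  */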
1822
1823 static rtx
1824 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1825 rtx op0, rtx op1)
1826 {
1827 rtx tem;
1828
1829 /* Linearize the operator to the left. */
1830 if (GET_CODE (op1) == code)
1831 {
1832 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1833 if (GET_CODE (op0) == code)
1834 {
1835 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1836 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1837 }
1838
1839 /* "a op (b op c)" becomes "(b op c) op a". */
1840 if (! swap_commutative_operands_p (op1, op0))
1841 return simplify_gen_binary (code, mode, op1, op0);
1842
1843 tem = op0;
1844 op0 = op1;
1845 op1 = tem;
1846 }
1847
1848 if (GET_CODE (op0) == code)
1849 {
1850 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1851 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1852 {
1853 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1854 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1855 }
1856
1857 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1858 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1859 if (tem != 0)
1860 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1861
1862 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1863 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1864 if (tem != 0)
1865 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1866 }
1867
1868 return 0;
1869 }
1870
1871
1872 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1873 and OP1. Return 0 if no simplification is possible.
1874
1875 Don't use this for relational operations such as EQ or LT.
1876 Use simplify_relational_operation instead. */
1877 rtx
1878 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1879 rtx op0, rtx op1)
1880 {
1881 rtx trueop0, trueop1;
1882 rtx tem;
1883
1884 /* Relational operations don't work here. We must know the mode
1885 of the operands in order to do the comparison correctly.
1886 Assuming a full word can give incorrect results.
1887 Consider comparing 128 with -128 in QImode. */
1888 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1889 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1890
1891 /* Make sure the constant is second. */
1892 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1893 && swap_commutative_operands_p (op0, op1))
1894 {
1895 tem = op0, op0 = op1, op1 = tem;
1896 }
1897
1898 trueop0 = avoid_constant_pool_reference (op0);
1899 trueop1 = avoid_constant_pool_reference (op1);
1900
1901 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1902 if (tem)
1903 return tem;
1904 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1905 }
1906
1907 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1908 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1909 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1910 actual constants. */
1911
1912 static rtx
1913 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1914 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1915 {
1916 rtx tem, reversed, opleft, opright;
1917 HOST_WIDE_INT val;
1918 unsigned int width = GET_MODE_PRECISION (mode);
1919
1920 /* Even if we can't compute a constant result,
1921 there are some cases worth simplifying. */
1922
1923 switch (code)
1924 {
1925 case PLUS:
1926 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1927 when x is NaN, infinite, or finite and nonzero. They aren't
1928 when x is -0 and the rounding mode is not towards -infinity,
1929 since (-0) + 0 is then 0. */
1930 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1931 return op0;
1932
1933 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1934 transformations are safe even for IEEE. */
1935 if (GET_CODE (op0) == NEG)
1936 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1937 else if (GET_CODE (op1) == NEG)
1938 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1939
1940 /* (~a) + 1 -> -a */
1941 if (INTEGRAL_MODE_P (mode)
1942 && GET_CODE (op0) == NOT
1943 && trueop1 == const1_rtx)
1944 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1945
1946 /* Handle both-operands-constant cases. We can only add
1947 CONST_INTs to constants since the sum of relocatable symbols
1948 can't be handled by most assemblers. Don't add CONST_INT
1949 to CONST_INT since overflow won't be computed properly if wider
1950 than HOST_BITS_PER_WIDE_INT. */
1951
1952 if ((GET_CODE (op0) == CONST
1953 || GET_CODE (op0) == SYMBOL_REF
1954 || GET_CODE (op0) == LABEL_REF)
1955 && CONST_INT_P (op1))
1956 return plus_constant (mode, op0, INTVAL (op1));
1957 else if ((GET_CODE (op1) == CONST
1958 || GET_CODE (op1) == SYMBOL_REF
1959 || GET_CODE (op1) == LABEL_REF)
1960 && CONST_INT_P (op0))
1961 return plus_constant (mode, op1, INTVAL (op0));
1962
1963 /* See if this is something like X * C - X or vice versa or
1964 if the multiplication is written as a shift. If so, we can
1965 distribute and make a new multiply, shift, or maybe just
1966 have X (if C is 2 in the example above). But don't make
1967 something more expensive than we had before. */
1968
1969 if (SCALAR_INT_MODE_P (mode))
1970 {
1971 double_int coeff0, coeff1;
1972 rtx lhs = op0, rhs = op1;
1973
1974 coeff0 = double_int_one;
1975 coeff1 = double_int_one;
1976
1977 if (GET_CODE (lhs) == NEG)
1978 {
1979 coeff0 = double_int_minus_one;
1980 lhs = XEXP (lhs, 0);
1981 }
1982 else if (GET_CODE (lhs) == MULT
1983 && CONST_INT_P (XEXP (lhs, 1)))
1984 {
1985 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1986 lhs = XEXP (lhs, 0);
1987 }
1988 else if (GET_CODE (lhs) == ASHIFT
1989 && CONST_INT_P (XEXP (lhs, 1))
1990 && INTVAL (XEXP (lhs, 1)) >= 0
1991 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1992 {
1993 coeff0 = double_int_setbit (double_int_zero,
1994 INTVAL (XEXP (lhs, 1)));
1995 lhs = XEXP (lhs, 0);
1996 }
1997
1998 if (GET_CODE (rhs) == NEG)
1999 {
2000 coeff1 = double_int_minus_one;
2001 rhs = XEXP (rhs, 0);
2002 }
2003 else if (GET_CODE (rhs) == MULT
2004 && CONST_INT_P (XEXP (rhs, 1)))
2005 {
2006 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
2007 rhs = XEXP (rhs, 0);
2008 }
2009 else if (GET_CODE (rhs) == ASHIFT
2010 && CONST_INT_P (XEXP (rhs, 1))
2011 && INTVAL (XEXP (rhs, 1)) >= 0
2012 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2013 {
2014 coeff1 = double_int_setbit (double_int_zero,
2015 INTVAL (XEXP (rhs, 1)));
2016 rhs = XEXP (rhs, 0);
2017 }
2018
2019 if (rtx_equal_p (lhs, rhs))
2020 {
2021 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2022 rtx coeff;
2023 double_int val;
2024 bool speed = optimize_function_for_speed_p (cfun);
2025
2026 val = double_int_add (coeff0, coeff1);
2027 coeff = immed_double_int_const (val, mode);
2028
2029 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2030 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2031 ? tem : 0;
2032 }
2033 }
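/* For example, with op0 == (mult x 4) and op1 == (ashift x 3) the
   coefficients are 4 and 2^3 == 8, so the sum becomes (mult x 12);
   the rewrite is kept only when it is no more expensive than the
   original PLUS. */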
2034
2035 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2036 if ((CONST_INT_P (op1)
2037 || GET_CODE (op1) == CONST_DOUBLE)
2038 && GET_CODE (op0) == XOR
2039 && (CONST_INT_P (XEXP (op0, 1))
2040 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2041 && mode_signbit_p (mode, op1))
2042 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2043 simplify_gen_binary (XOR, mode, op1,
2044 XEXP (op0, 1)));
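/* For example, in QImode with C1 == 0x0f and C2 == 0x80 (the sign bit),
   (plus (xor x 15) 128) becomes (xor x 143): adding the sign bit cannot
   carry into any higher bit of the mode, so it is the same as XORing
   it in, and 0x0f ^ 0x80 == 0x8f. */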
2045
2046 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2047 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2048 && GET_CODE (op0) == MULT
2049 && GET_CODE (XEXP (op0, 0)) == NEG)
2050 {
2051 rtx in1, in2;
2052
2053 in1 = XEXP (XEXP (op0, 0), 0);
2054 in2 = XEXP (op0, 1);
2055 return simplify_gen_binary (MINUS, mode, op1,
2056 simplify_gen_binary (MULT, mode,
2057 in1, in2));
2058 }
2059
2060 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2061 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2062 is 1. */
2063 if (COMPARISON_P (op0)
2064 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2065 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2066 && (reversed = reversed_comparison (op0, mode)))
2067 return
2068 simplify_gen_unary (NEG, mode, reversed, mode);
2069
2070 /* If one of the operands is a PLUS or a MINUS, see if we can
2071 simplify this by the associative law.
2072 Don't use the associative law for floating point.
2073 The inaccuracy makes it nonassociative,
2074 and subtle programs can break if operations are associated. */
2075
2076 if (INTEGRAL_MODE_P (mode)
2077 && (plus_minus_operand_p (op0)
2078 || plus_minus_operand_p (op1))
2079 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2080 return tem;
2081
2082 /* Reassociate floating point addition only when the user
2083 specifies associative math operations. */
2084 if (FLOAT_MODE_P (mode)
2085 && flag_associative_math)
2086 {
2087 tem = simplify_associative_operation (code, mode, op0, op1);
2088 if (tem)
2089 return tem;
2090 }
2091 break;
2092
2093 case COMPARE:
2094 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2095 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2096 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2097 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2098 {
2099 rtx xop00 = XEXP (op0, 0);
2100 rtx xop10 = XEXP (op1, 0);
2101
2102 #ifdef HAVE_cc0
2103 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2104 #else
2105 if (REG_P (xop00) && REG_P (xop10)
2106 && GET_MODE (xop00) == GET_MODE (xop10)
2107 && REGNO (xop00) == REGNO (xop10)
2108 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2109 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2110 #endif
2111 return xop00;
2112 }
2113 break;
2114
2115 case MINUS:
2116 /* We can't assume x-x is 0 even with non-IEEE floating point,
2117 but since it is zero except in very strange circumstances, we
2118 will treat it as zero with -ffinite-math-only. */
2119 if (rtx_equal_p (trueop0, trueop1)
2120 && ! side_effects_p (op0)
2121 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2122 return CONST0_RTX (mode);
2123
2124 /* Change subtraction from zero into negation. (0 - x) is the
2125 same as -x when x is NaN, infinite, or finite and nonzero.
2126 But if the mode has signed zeros, and does not round towards
2127 -infinity, then 0 - 0 is 0, not -0. */
2128 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2129 return simplify_gen_unary (NEG, mode, op1, mode);
2130
2131 /* (-1 - a) is ~a. */
2132 if (trueop0 == constm1_rtx)
2133 return simplify_gen_unary (NOT, mode, op1, mode);
2134
2135 /* Subtracting 0 has no effect unless the mode has signed zeros
2136 and supports rounding towards -infinity. In such a case,
2137 0 - 0 is -0. */
2138 if (!(HONOR_SIGNED_ZEROS (mode)
2139 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2140 && trueop1 == CONST0_RTX (mode))
2141 return op0;
2142
2143 /* See if this is something like X * C - X or vice versa or
2144 if the multiplication is written as a shift. If so, we can
2145 distribute and make a new multiply, shift, or maybe just
2146 have X (if C is 2 in the example above). But don't make
2147 something more expensive than we had before. */
2148
2149 if (SCALAR_INT_MODE_P (mode))
2150 {
2151 double_int coeff0, negcoeff1;
2152 rtx lhs = op0, rhs = op1;
2153
2154 coeff0 = double_int_one;
2155 negcoeff1 = double_int_minus_one;
2156
2157 if (GET_CODE (lhs) == NEG)
2158 {
2159 coeff0 = double_int_minus_one;
2160 lhs = XEXP (lhs, 0);
2161 }
2162 else if (GET_CODE (lhs) == MULT
2163 && CONST_INT_P (XEXP (lhs, 1)))
2164 {
2165 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2166 lhs = XEXP (lhs, 0);
2167 }
2168 else if (GET_CODE (lhs) == ASHIFT
2169 && CONST_INT_P (XEXP (lhs, 1))
2170 && INTVAL (XEXP (lhs, 1)) >= 0
2171 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2172 {
2173 coeff0 = double_int_setbit (double_int_zero,
2174 INTVAL (XEXP (lhs, 1)));
2175 lhs = XEXP (lhs, 0);
2176 }
2177
2178 if (GET_CODE (rhs) == NEG)
2179 {
2180 negcoeff1 = double_int_one;
2181 rhs = XEXP (rhs, 0);
2182 }
2183 else if (GET_CODE (rhs) == MULT
2184 && CONST_INT_P (XEXP (rhs, 1)))
2185 {
2186 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2187 rhs = XEXP (rhs, 0);
2188 }
2189 else if (GET_CODE (rhs) == ASHIFT
2190 && CONST_INT_P (XEXP (rhs, 1))
2191 && INTVAL (XEXP (rhs, 1)) >= 0
2192 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2193 {
2194 negcoeff1 = double_int_setbit (double_int_zero,
2195 INTVAL (XEXP (rhs, 1)));
2196 negcoeff1 = double_int_neg (negcoeff1);
2197 rhs = XEXP (rhs, 0);
2198 }
2199
2200 if (rtx_equal_p (lhs, rhs))
2201 {
2202 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2203 rtx coeff;
2204 double_int val;
2205 bool speed = optimize_function_for_speed_p (cfun);
2206
2207 val = double_int_add (coeff0, negcoeff1);
2208 coeff = immed_double_int_const (val, mode);
2209
2210 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2211 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2212 ? tem : 0;
2213 }
2214 }
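/* For example, (minus (mult x 6) x) gives coeff0 == 6 and
   negcoeff1 == -1, so it becomes (mult x 5) when that is no more
   expensive than the original MINUS. */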
2215
2216 /* (a - (-b)) -> (a + b). True even for IEEE. */
2217 if (GET_CODE (op1) == NEG)
2218 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2219
2220 /* (-x - c) may be simplified as (-c - x). */
2221 if (GET_CODE (op0) == NEG
2222 && (CONST_INT_P (op1)
2223 || GET_CODE (op1) == CONST_DOUBLE))
2224 {
2225 tem = simplify_unary_operation (NEG, mode, op1, mode);
2226 if (tem)
2227 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2228 }
2229
2230 /* Don't let a relocatable value get a negative coeff. */
2231 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2232 return simplify_gen_binary (PLUS, mode,
2233 op0,
2234 neg_const_int (mode, op1));
2235
2236 /* (x - (x & y)) -> (x & ~y) */
2237 if (GET_CODE (op1) == AND)
2238 {
2239 if (rtx_equal_p (op0, XEXP (op1, 0)))
2240 {
2241 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2242 GET_MODE (XEXP (op1, 1)));
2243 return simplify_gen_binary (AND, mode, op0, tem);
2244 }
2245 if (rtx_equal_p (op0, XEXP (op1, 1)))
2246 {
2247 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2248 GET_MODE (XEXP (op1, 0)));
2249 return simplify_gen_binary (AND, mode, op0, tem);
2250 }
2251 }
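/* The identity above holds bit by bit: with x == 0b1101 and y == 0b0110,
   x & y == 0b0100 and x - (x & y) == 0b1001 == x & ~y. No borrows can
   occur because every set bit of (x & y) is also set in x. */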
2252
2253 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2254 by reversing the comparison code if valid. */
2255 if (STORE_FLAG_VALUE == 1
2256 && trueop0 == const1_rtx
2257 && COMPARISON_P (op1)
2258 && (reversed = reversed_comparison (op1, mode)))
2259 return reversed;
2260
2261 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2262 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2263 && GET_CODE (op1) == MULT
2264 && GET_CODE (XEXP (op1, 0)) == NEG)
2265 {
2266 rtx in1, in2;
2267
2268 in1 = XEXP (XEXP (op1, 0), 0);
2269 in2 = XEXP (op1, 1);
2270 return simplify_gen_binary (PLUS, mode,
2271 simplify_gen_binary (MULT, mode,
2272 in1, in2),
2273 op0);
2274 }
2275
2276 /* Canonicalize (minus (neg A) (mult B C)) to
2277 (minus (mult (neg B) C) A). */
2278 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2279 && GET_CODE (op1) == MULT
2280 && GET_CODE (op0) == NEG)
2281 {
2282 rtx in1, in2;
2283
2284 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2285 in2 = XEXP (op1, 1);
2286 return simplify_gen_binary (MINUS, mode,
2287 simplify_gen_binary (MULT, mode,
2288 in1, in2),
2289 XEXP (op0, 0));
2290 }
2291
2292 /* If one of the operands is a PLUS or a MINUS, see if we can
2293 simplify this by the associative law. This will, for example,
2294 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2295 Don't use the associative law for floating point.
2296 The inaccuracy makes it nonassociative,
2297 and subtle programs can break if operations are associated. */
2298
2299 if (INTEGRAL_MODE_P (mode)
2300 && (plus_minus_operand_p (op0)
2301 || plus_minus_operand_p (op1))
2302 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2303 return tem;
2304 break;
2305
2306 case MULT:
2307 if (trueop1 == constm1_rtx)
2308 return simplify_gen_unary (NEG, mode, op0, mode);
2309
2310 if (GET_CODE (op0) == NEG)
2311 {
2312 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2313 /* If op1 is a MULT as well and simplify_unary_operation
2314 just moved the NEG to the second operand, simplify_gen_binary
2315 below could, through simplify_associative_operation, move
2316 the NEG around again and recurse endlessly. */
2317 if (temp
2318 && GET_CODE (op1) == MULT
2319 && GET_CODE (temp) == MULT
2320 && XEXP (op1, 0) == XEXP (temp, 0)
2321 && GET_CODE (XEXP (temp, 1)) == NEG
2322 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2323 temp = NULL_RTX;
2324 if (temp)
2325 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2326 }
2327 if (GET_CODE (op1) == NEG)
2328 {
2329 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2330 /* If op0 is a MULT as well and simplify_unary_operation
2331 just moved the NEG to the second operand, simplify_gen_binary
2332 below could, through simplify_associative_operation, move
2333 the NEG around again and recurse endlessly. */
2334 if (temp
2335 && GET_CODE (op0) == MULT
2336 && GET_CODE (temp) == MULT
2337 && XEXP (op0, 0) == XEXP (temp, 0)
2338 && GET_CODE (XEXP (temp, 1)) == NEG
2339 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2340 temp = NULL_RTX;
2341 if (temp)
2342 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2343 }
2344
2345 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2346 x is NaN, since x * 0 is then also NaN. Nor is it valid
2347 when the mode has signed zeros, since multiplying a negative
2348 number by 0 will give -0, not 0. */
2349 if (!HONOR_NANS (mode)
2350 && !HONOR_SIGNED_ZEROS (mode)
2351 && trueop1 == CONST0_RTX (mode)
2352 && ! side_effects_p (op0))
2353 return op1;
2354
2355 /* In IEEE floating point, x*1 is not equivalent to x for
2356 signalling NaNs. */
2357 if (!HONOR_SNANS (mode)
2358 && trueop1 == CONST1_RTX (mode))
2359 return op0;
2360
2361 /* Convert multiply by constant power of two into shift unless
2362 we are still generating RTL. This test is a kludge. */
2363 if (CONST_INT_P (trueop1)
2364 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2365 /* If the mode is larger than the host word size, and the
2366 uppermost bit is set, then this isn't a power of two due
2367 to implicit sign extension. */
2368 && (width <= HOST_BITS_PER_WIDE_INT
2369 || val != HOST_BITS_PER_WIDE_INT - 1))
2370 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2371
2372 /* Likewise for multipliers wider than a word. */
2373 if (GET_CODE (trueop1) == CONST_DOUBLE
2374 && (GET_MODE (trueop1) == VOIDmode
2375 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2376 && GET_MODE (op0) == mode
2377 && CONST_DOUBLE_LOW (trueop1) == 0
2378 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2379 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2380 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2381 return simplify_gen_binary (ASHIFT, mode, op0,
2382 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2383
2384 /* x*2 is x+x and x*(-1) is -x */
2385 if (GET_CODE (trueop1) == CONST_DOUBLE
2386 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2387 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2388 && GET_MODE (op0) == mode)
2389 {
2390 REAL_VALUE_TYPE d;
2391 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2392
2393 if (REAL_VALUES_EQUAL (d, dconst2))
2394 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2395
2396 if (!HONOR_SNANS (mode)
2397 && REAL_VALUES_EQUAL (d, dconstm1))
2398 return simplify_gen_unary (NEG, mode, op0, mode);
2399 }
2400
2401 /* Optimize -x * -x as x * x. */
2402 if (FLOAT_MODE_P (mode)
2403 && GET_CODE (op0) == NEG
2404 && GET_CODE (op1) == NEG
2405 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2406 && !side_effects_p (XEXP (op0, 0)))
2407 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2408
2409 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2410 if (SCALAR_FLOAT_MODE_P (mode)
2411 && GET_CODE (op0) == ABS
2412 && GET_CODE (op1) == ABS
2413 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2414 && !side_effects_p (XEXP (op0, 0)))
2415 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2416
2417 /* Reassociate multiplication, but for floating point MULTs
2418 only when the user specifies unsafe math optimizations. */
2419 if (! FLOAT_MODE_P (mode)
2420 || flag_unsafe_math_optimizations)
2421 {
2422 tem = simplify_associative_operation (code, mode, op0, op1);
2423 if (tem)
2424 return tem;
2425 }
2426 break;
2427
2428 case IOR:
2429 if (trueop1 == CONST0_RTX (mode))
2430 return op0;
2431 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2432 return op1;
2433 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2434 return op0;
2435 /* A | (~A) -> -1 */
2436 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2437 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2438 && ! side_effects_p (op0)
2439 && SCALAR_INT_MODE_P (mode))
2440 return constm1_rtx;
2441
2442 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2443 if (CONST_INT_P (op1)
2444 && HWI_COMPUTABLE_MODE_P (mode)
2445 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2446 return op1;
2447
2448 /* Canonicalize (X & C1) | C2. */
2449 if (GET_CODE (op0) == AND
2450 && CONST_INT_P (trueop1)
2451 && CONST_INT_P (XEXP (op0, 1)))
2452 {
2453 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2454 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2455 HOST_WIDE_INT c2 = INTVAL (trueop1);
2456
2457 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2458 if ((c1 & c2) == c1
2459 && !side_effects_p (XEXP (op0, 0)))
2460 return trueop1;
2461
2462 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2463 if (((c1|c2) & mask) == mask)
2464 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2465
2466 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2467 if (((c1 & ~c2) & mask) != (c1 & mask))
2468 {
2469 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2470 gen_int_mode (c1 & ~c2, mode));
2471 return simplify_gen_binary (IOR, mode, tem, op1);
2472 }
2473 }
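/* For example, (ior (and x 0x3c) 0x0f) becomes (ior (and x 0x30) 0x0f)
   by the last rule, since only the bits of C1 outside C2 matter, while
   (ior (and x 0x03) 0x0f) is simply 0x0f by the first rule. */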
2474
2475 /* Convert (A & B) | A to A. */
2476 if (GET_CODE (op0) == AND
2477 && (rtx_equal_p (XEXP (op0, 0), op1)
2478 || rtx_equal_p (XEXP (op0, 1), op1))
2479 && ! side_effects_p (XEXP (op0, 0))
2480 && ! side_effects_p (XEXP (op0, 1)))
2481 return op1;
2482
2483 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2484 mode size to (rotate A CX). */
2485
2486 if (GET_CODE (op1) == ASHIFT
2487 || GET_CODE (op1) == SUBREG)
2488 {
2489 opleft = op1;
2490 opright = op0;
2491 }
2492 else
2493 {
2494 opright = op1;
2495 opleft = op0;
2496 }
2497
2498 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2499 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2500 && CONST_INT_P (XEXP (opleft, 1))
2501 && CONST_INT_P (XEXP (opright, 1))
2502 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2503 == GET_MODE_PRECISION (mode)))
2504 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
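/* For example, in SImode (ior (ashift x 3) (lshiftrt x 29)) has
   3 + 29 == 32 == the mode precision, so it becomes (rotate x 3). */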
2505
2506 /* Same, but for ashift that has been "simplified" to a wider mode
2507 by simplify_shift_const. */
2508
2509 if (GET_CODE (opleft) == SUBREG
2510 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2511 && GET_CODE (opright) == LSHIFTRT
2512 && GET_CODE (XEXP (opright, 0)) == SUBREG
2513 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2514 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2515 && (GET_MODE_SIZE (GET_MODE (opleft))
2516 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2517 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2518 SUBREG_REG (XEXP (opright, 0)))
2519 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2520 && CONST_INT_P (XEXP (opright, 1))
2521 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2522 == GET_MODE_PRECISION (mode)))
2523 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2524 XEXP (SUBREG_REG (opleft), 1));
2525
2526 /* If we have (ior (and X C1) C2), simplify this by making
2527 C1 as small as possible if C1 actually changes. */
2528 if (CONST_INT_P (op1)
2529 && (HWI_COMPUTABLE_MODE_P (mode)
2530 || INTVAL (op1) > 0)
2531 && GET_CODE (op0) == AND
2532 && CONST_INT_P (XEXP (op0, 1))
2533 && CONST_INT_P (op1)
2534 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2535 return simplify_gen_binary (IOR, mode,
2536 simplify_gen_binary
2537 (AND, mode, XEXP (op0, 0),
2538 GEN_INT (UINTVAL (XEXP (op0, 1))
2539 & ~UINTVAL (op1))),
2540 op1);
2541
2542 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2543 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2544 the PLUS does not affect any of the bits in OP1, we can do
2545 the IOR as a PLUS and we can associate. This is valid if OP1
2546 can be safely shifted left C bits. */
2547 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2548 && GET_CODE (XEXP (op0, 0)) == PLUS
2549 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2550 && CONST_INT_P (XEXP (op0, 1))
2551 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2552 {
2553 int count = INTVAL (XEXP (op0, 1));
2554 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2555
2556 if (mask >> count == INTVAL (trueop1)
2557 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2558 return simplify_gen_binary (ASHIFTRT, mode,
2559 plus_constant (mode, XEXP (op0, 0),
2560 mask),
2561 XEXP (op0, 1));
2562 }
2563
2564 tem = simplify_associative_operation (code, mode, op0, op1);
2565 if (tem)
2566 return tem;
2567 break;
2568
2569 case XOR:
2570 if (trueop1 == CONST0_RTX (mode))
2571 return op0;
2572 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2573 return simplify_gen_unary (NOT, mode, op0, mode);
2574 if (rtx_equal_p (trueop0, trueop1)
2575 && ! side_effects_p (op0)
2576 && GET_MODE_CLASS (mode) != MODE_CC)
2577 return CONST0_RTX (mode);
2578
2579 /* Canonicalize XOR of the most significant bit to PLUS. */
2580 if ((CONST_INT_P (op1)
2581 || GET_CODE (op1) == CONST_DOUBLE)
2582 && mode_signbit_p (mode, op1))
2583 return simplify_gen_binary (PLUS, mode, op0, op1);
2584 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2585 if ((CONST_INT_P (op1)
2586 || GET_CODE (op1) == CONST_DOUBLE)
2587 && GET_CODE (op0) == PLUS
2588 && (CONST_INT_P (XEXP (op0, 1))
2589 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2590 && mode_signbit_p (mode, XEXP (op0, 1)))
2591 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2592 simplify_gen_binary (XOR, mode, op1,
2593 XEXP (op0, 1)));
2594
2595 /* If we are XORing two things that have no bits in common,
2596 convert them into an IOR. This helps to detect rotation encoded
2597 using those methods and possibly other simplifications. */
2598
2599 if (HWI_COMPUTABLE_MODE_P (mode)
2600 && (nonzero_bits (op0, mode)
2601 & nonzero_bits (op1, mode)) == 0)
2602 return (simplify_gen_binary (IOR, mode, op0, op1));
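/* For example, if nonzero_bits shows that op0 can only set 0x0f and op1
   can only set 0xf0, then at most one operand is 1 in any bit position,
   so XOR and IOR compute the same value. */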
2603
2604 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2605 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2606 (NOT y). */
2607 {
2608 int num_negated = 0;
2609
2610 if (GET_CODE (op0) == NOT)
2611 num_negated++, op0 = XEXP (op0, 0);
2612 if (GET_CODE (op1) == NOT)
2613 num_negated++, op1 = XEXP (op1, 0);
2614
2615 if (num_negated == 2)
2616 return simplify_gen_binary (XOR, mode, op0, op1);
2617 else if (num_negated == 1)
2618 return simplify_gen_unary (NOT, mode,
2619 simplify_gen_binary (XOR, mode, op0, op1),
2620 mode);
2621 }
2622
2623 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2624 correspond to a machine insn or result in further simplifications
2625 if B is a constant. */
2626
2627 if (GET_CODE (op0) == AND
2628 && rtx_equal_p (XEXP (op0, 1), op1)
2629 && ! side_effects_p (op1))
2630 return simplify_gen_binary (AND, mode,
2631 simplify_gen_unary (NOT, mode,
2632 XEXP (op0, 0), mode),
2633 op1);
2634
2635 else if (GET_CODE (op0) == AND
2636 && rtx_equal_p (XEXP (op0, 0), op1)
2637 && ! side_effects_p (op1))
2638 return simplify_gen_binary (AND, mode,
2639 simplify_gen_unary (NOT, mode,
2640 XEXP (op0, 1), mode),
2641 op1);
2642
2643 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2644 we can transform like this:
2645 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2646 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2647 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2648 Attempt a few simplifications when B and C are both constants. */
2649 if (GET_CODE (op0) == AND
2650 && CONST_INT_P (op1)
2651 && CONST_INT_P (XEXP (op0, 1)))
2652 {
2653 rtx a = XEXP (op0, 0);
2654 rtx b = XEXP (op0, 1);
2655 rtx c = op1;
2656 HOST_WIDE_INT bval = INTVAL (b);
2657 HOST_WIDE_INT cval = INTVAL (c);
2658
2659 rtx na_c
2660 = simplify_binary_operation (AND, mode,
2661 simplify_gen_unary (NOT, mode, a, mode),
2662 c);
2663 if ((~cval & bval) == 0)
2664 {
2665 /* Try to simplify ~A&C | ~B&C. */
2666 if (na_c != NULL_RTX)
2667 return simplify_gen_binary (IOR, mode, na_c,
2668 GEN_INT (~bval & cval));
2669 }
2670 else
2671 {
2672 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2673 if (na_c == const0_rtx)
2674 {
2675 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2676 GEN_INT (~cval & bval));
2677 return simplify_gen_binary (IOR, mode, a_nc_b,
2678 GEN_INT (~bval & cval));
2679 }
2680 }
2681 }
2682
2683 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2684 comparison if STORE_FLAG_VALUE is 1. */
2685 if (STORE_FLAG_VALUE == 1
2686 && trueop1 == const1_rtx
2687 && COMPARISON_P (op0)
2688 && (reversed = reversed_comparison (op0, mode)))
2689 return reversed;
2690
2691 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2692 is (lt foo (const_int 0)), so we can perform the above
2693 simplification if STORE_FLAG_VALUE is 1. */
2694
2695 if (STORE_FLAG_VALUE == 1
2696 && trueop1 == const1_rtx
2697 && GET_CODE (op0) == LSHIFTRT
2698 && CONST_INT_P (XEXP (op0, 1))
2699 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2700 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2701
2702 /* (xor (comparison foo bar) (const_int sign-bit))
2703 when STORE_FLAG_VALUE is the sign bit. */
2704 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2705 && trueop1 == const_true_rtx
2706 && COMPARISON_P (op0)
2707 && (reversed = reversed_comparison (op0, mode)))
2708 return reversed;
2709
2710 tem = simplify_associative_operation (code, mode, op0, op1);
2711 if (tem)
2712 return tem;
2713 break;
2714
2715 case AND:
2716 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2717 return trueop1;
2718 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2719 return op0;
2720 if (HWI_COMPUTABLE_MODE_P (mode))
2721 {
2722 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2723 HOST_WIDE_INT nzop1;
2724 if (CONST_INT_P (trueop1))
2725 {
2726 HOST_WIDE_INT val1 = INTVAL (trueop1);
2727 /* If we are turning off bits already known off in OP0, we need
2728 not do an AND. */
2729 if ((nzop0 & ~val1) == 0)
2730 return op0;
2731 }
2732 nzop1 = nonzero_bits (trueop1, mode);
2733 /* If we are clearing all the nonzero bits, the result is zero. */
2734 if ((nzop1 & nzop0) == 0
2735 && !side_effects_p (op0) && !side_effects_p (op1))
2736 return CONST0_RTX (mode);
2737 }
2738 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2739 && GET_MODE_CLASS (mode) != MODE_CC)
2740 return op0;
2741 /* A & (~A) -> 0 */
2742 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2743 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2744 && ! side_effects_p (op0)
2745 && GET_MODE_CLASS (mode) != MODE_CC)
2746 return CONST0_RTX (mode);
2747
2748 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2749 there are no nonzero bits of C outside of X's mode. */
2750 if ((GET_CODE (op0) == SIGN_EXTEND
2751 || GET_CODE (op0) == ZERO_EXTEND)
2752 && CONST_INT_P (trueop1)
2753 && HWI_COMPUTABLE_MODE_P (mode)
2754 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2755 & UINTVAL (trueop1)) == 0)
2756 {
2757 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2758 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2759 gen_int_mode (INTVAL (trueop1),
2760 imode));
2761 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2762 }
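/* For example, (and:SI (sign_extend:SI x:QI) (const_int 0x7f)) becomes
   (zero_extend:SI (and:QI x (const_int 0x7f))), because 0x7f has no
   bits outside QImode's mask. */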
2763
2764 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2765 we might be able to further simplify the AND with X and potentially
2766 remove the truncation altogether. */
2767 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2768 {
2769 rtx x = XEXP (op0, 0);
2770 enum machine_mode xmode = GET_MODE (x);
2771 tem = simplify_gen_binary (AND, xmode, x,
2772 gen_int_mode (INTVAL (trueop1), xmode));
2773 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2774 }
2775
2776 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2777 if (GET_CODE (op0) == IOR
2778 && CONST_INT_P (trueop1)
2779 && CONST_INT_P (XEXP (op0, 1)))
2780 {
2781 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2782 return simplify_gen_binary (IOR, mode,
2783 simplify_gen_binary (AND, mode,
2784 XEXP (op0, 0), op1),
2785 gen_int_mode (tmp, mode));
2786 }
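/* For example, (and (ior x 0x0c) 0x0a) becomes (ior (and x 0x0a) 0x08),
   since C1 & C2 == 0x0c & 0x0a == 0x08. */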
2787
2788 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2789 insn (and may simplify more). */
2790 if (GET_CODE (op0) == XOR
2791 && rtx_equal_p (XEXP (op0, 0), op1)
2792 && ! side_effects_p (op1))
2793 return simplify_gen_binary (AND, mode,
2794 simplify_gen_unary (NOT, mode,
2795 XEXP (op0, 1), mode),
2796 op1);
2797
2798 if (GET_CODE (op0) == XOR
2799 && rtx_equal_p (XEXP (op0, 1), op1)
2800 && ! side_effects_p (op1))
2801 return simplify_gen_binary (AND, mode,
2802 simplify_gen_unary (NOT, mode,
2803 XEXP (op0, 0), mode),
2804 op1);
2805
2806 /* Similarly for (~(A ^ B)) & A. */
2807 if (GET_CODE (op0) == NOT
2808 && GET_CODE (XEXP (op0, 0)) == XOR
2809 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2810 && ! side_effects_p (op1))
2811 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2812
2813 if (GET_CODE (op0) == NOT
2814 && GET_CODE (XEXP (op0, 0)) == XOR
2815 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2816 && ! side_effects_p (op1))
2817 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2818
2819 /* Convert (A | B) & A to A. */
2820 if (GET_CODE (op0) == IOR
2821 && (rtx_equal_p (XEXP (op0, 0), op1)
2822 || rtx_equal_p (XEXP (op0, 1), op1))
2823 && ! side_effects_p (XEXP (op0, 0))
2824 && ! side_effects_p (XEXP (op0, 1)))
2825 return op1;
2826
2827 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2828 ((A & N) + B) & M -> (A + B) & M
2829 Similarly if (N & M) == 0,
2830 ((A | N) + B) & M -> (A + B) & M
2831 and for - instead of + and/or ^ instead of |.
2832 Also, if (N & M) == 0, then
2833 (A +- N) & M -> A & M. */
2834 if (CONST_INT_P (trueop1)
2835 && HWI_COMPUTABLE_MODE_P (mode)
2836 && ~UINTVAL (trueop1)
2837 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2838 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2839 {
2840 rtx pmop[2];
2841 int which;
2842
2843 pmop[0] = XEXP (op0, 0);
2844 pmop[1] = XEXP (op0, 1);
2845
2846 if (CONST_INT_P (pmop[1])
2847 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2848 return simplify_gen_binary (AND, mode, pmop[0], op1);
2849
2850 for (which = 0; which < 2; which++)
2851 {
2852 tem = pmop[which];
2853 switch (GET_CODE (tem))
2854 {
2855 case AND:
2856 if (CONST_INT_P (XEXP (tem, 1))
2857 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2858 == UINTVAL (trueop1))
2859 pmop[which] = XEXP (tem, 0);
2860 break;
2861 case IOR:
2862 case XOR:
2863 if (CONST_INT_P (XEXP (tem, 1))
2864 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2865 pmop[which] = XEXP (tem, 0);
2866 break;
2867 default:
2868 break;
2869 }
2870 }
2871
2872 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2873 {
2874 tem = simplify_gen_binary (GET_CODE (op0), mode,
2875 pmop[0], pmop[1]);
2876 return simplify_gen_binary (code, mode, tem, op1);
2877 }
2878 }
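/* For example, with M == 0xff and N == 0xff00 we have (N & M) == 0, so
   (and (plus (ior a 0xff00) b) 0xff) becomes (and (plus a b) 0xff): the
   IOR only changes bits above the mask, and carries propagate upward
   only, so those bits cannot affect the low eight bits of the sum. */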
2879
2880 /* (and X (ior (not X) Y)) -> (and X Y) */
2881 if (GET_CODE (op1) == IOR
2882 && GET_CODE (XEXP (op1, 0)) == NOT
2883 && op0 == XEXP (XEXP (op1, 0), 0))
2884 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2885
2886 /* (and (ior (not X) Y) X) -> (and X Y) */
2887 if (GET_CODE (op0) == IOR
2888 && GET_CODE (XEXP (op0, 0)) == NOT
2889 && op1 == XEXP (XEXP (op0, 0), 0))
2890 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2891
2892 tem = simplify_associative_operation (code, mode, op0, op1);
2893 if (tem)
2894 return tem;
2895 break;
2896
2897 case UDIV:
2898 /* 0/x is 0 (or x&0 if x has side-effects). */
2899 if (trueop0 == CONST0_RTX (mode))
2900 {
2901 if (side_effects_p (op1))
2902 return simplify_gen_binary (AND, mode, op1, trueop0);
2903 return trueop0;
2904 }
2905 /* x/1 is x. */
2906 if (trueop1 == CONST1_RTX (mode))
2907 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2908 /* Convert divide by power of two into shift. */
2909 if (CONST_INT_P (trueop1)
2910 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2911 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
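/* For example, (udiv x 8) becomes (lshiftrt x 3). */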
2912 break;
2913
2914 case DIV:
2915 /* Handle floating point and integers separately. */
2916 if (SCALAR_FLOAT_MODE_P (mode))
2917 {
2918 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2919 safe for modes with NaNs, since 0.0 / 0.0 will then be
2920 NaN rather than 0.0. Nor is it safe for modes with signed
2921 zeros, since dividing 0 by a negative number gives -0.0 */
2922 if (trueop0 == CONST0_RTX (mode)
2923 && !HONOR_NANS (mode)
2924 && !HONOR_SIGNED_ZEROS (mode)
2925 && ! side_effects_p (op1))
2926 return op0;
2927 /* x/1.0 is x. */
2928 if (trueop1 == CONST1_RTX (mode)
2929 && !HONOR_SNANS (mode))
2930 return op0;
2931
2932 if (GET_CODE (trueop1) == CONST_DOUBLE
2933 && trueop1 != CONST0_RTX (mode))
2934 {
2935 REAL_VALUE_TYPE d;
2936 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2937
2938 /* x/-1.0 is -x. */
2939 if (REAL_VALUES_EQUAL (d, dconstm1)
2940 && !HONOR_SNANS (mode))
2941 return simplify_gen_unary (NEG, mode, op0, mode);
2942
2943 /* Change FP division by a constant into multiplication.
2944 Only do this with -freciprocal-math. */
2945 if (flag_reciprocal_math
2946 && !REAL_VALUES_EQUAL (d, dconst0))
2947 {
2948 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2949 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2950 return simplify_gen_binary (MULT, mode, op0, tem);
2951 }
2952 }
2953 }
2954 else if (SCALAR_INT_MODE_P (mode))
2955 {
2956 /* 0/x is 0 (or x&0 if x has side-effects). */
2957 if (trueop0 == CONST0_RTX (mode)
2958 && !cfun->can_throw_non_call_exceptions)
2959 {
2960 if (side_effects_p (op1))
2961 return simplify_gen_binary (AND, mode, op1, trueop0);
2962 return trueop0;
2963 }
2964 /* x/1 is x. */
2965 if (trueop1 == CONST1_RTX (mode))
2966 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2967 /* x/-1 is -x. */
2968 if (trueop1 == constm1_rtx)
2969 {
2970 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2971 return simplify_gen_unary (NEG, mode, x, mode);
2972 }
2973 }
2974 break;
2975
2976 case UMOD:
2977 /* 0%x is 0 (or x&0 if x has side-effects). */
2978 if (trueop0 == CONST0_RTX (mode))
2979 {
2980 if (side_effects_p (op1))
2981 return simplify_gen_binary (AND, mode, op1, trueop0);
2982 return trueop0;
2983 }
2984 /* x%1 is 0 (or x&0 if x has side-effects). */
2985 if (trueop1 == CONST1_RTX (mode))
2986 {
2987 if (side_effects_p (op0))
2988 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2989 return CONST0_RTX (mode);
2990 }
2991 /* Implement modulus by power of two as AND. */
2992 if (CONST_INT_P (trueop1)
2993 && exact_log2 (UINTVAL (trueop1)) > 0)
2994 return simplify_gen_binary (AND, mode, op0,
2995 GEN_INT (INTVAL (op1) - 1));
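/* For example, (umod x 8) becomes (and x 7). */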
2996 break;
2997
2998 case MOD:
2999 /* 0%x is 0 (or x&0 if x has side-effects). */
3000 if (trueop0 == CONST0_RTX (mode))
3001 {
3002 if (side_effects_p (op1))
3003 return simplify_gen_binary (AND, mode, op1, trueop0);
3004 return trueop0;
3005 }
3006 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3007 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3008 {
3009 if (side_effects_p (op0))
3010 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3011 return CONST0_RTX (mode);
3012 }
3013 break;
3014
3015 case ROTATERT:
3016 case ROTATE:
3017 case ASHIFTRT:
3018 if (trueop1 == CONST0_RTX (mode))
3019 return op0;
3020 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3021 return op0;
3022 /* Rotating ~0 always results in ~0. */
3023 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3024 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3025 && ! side_effects_p (op1))
3026 return op0;
3027 canonicalize_shift:
3028 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3029 {
3030 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3031 if (val != INTVAL (op1))
3032 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3033 }
3034 break;
3035
3036 case ASHIFT:
3037 case SS_ASHIFT:
3038 case US_ASHIFT:
3039 if (trueop1 == CONST0_RTX (mode))
3040 return op0;
3041 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3042 return op0;
3043 goto canonicalize_shift;
3044
3045 case LSHIFTRT:
3046 if (trueop1 == CONST0_RTX (mode))
3047 return op0;
3048 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3049 return op0;
3050 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3051 if (GET_CODE (op0) == CLZ
3052 && CONST_INT_P (trueop1)
3053 && STORE_FLAG_VALUE == 1
3054 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3055 {
3056 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3057 unsigned HOST_WIDE_INT zero_val = 0;
3058
3059 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3060 && zero_val == GET_MODE_PRECISION (imode)
3061 && INTVAL (trueop1) == exact_log2 (zero_val))
3062 return simplify_gen_relational (EQ, mode, imode,
3063 XEXP (op0, 0), const0_rtx);
3064 }
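/* For example, on a target where CLZ of zero is defined to be 32 in
   SImode, (lshiftrt (clz:SI x) 5) is nonzero only when the CLZ result
   is 32, which happens only for x == 0, so it becomes
   (eq x (const_int 0)). */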
3065 goto canonicalize_shift;
3066
3067 case SMIN:
3068 if (width <= HOST_BITS_PER_WIDE_INT
3069 && mode_signbit_p (mode, trueop1)
3070 && ! side_effects_p (op0))
3071 return op1;
3072 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3073 return op0;
3074 tem = simplify_associative_operation (code, mode, op0, op1);
3075 if (tem)
3076 return tem;
3077 break;
3078
3079 case SMAX:
3080 if (width <= HOST_BITS_PER_WIDE_INT
3081 && CONST_INT_P (trueop1)
3082 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3083 && ! side_effects_p (op0))
3084 return op1;
3085 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3086 return op0;
3087 tem = simplify_associative_operation (code, mode, op0, op1);
3088 if (tem)
3089 return tem;
3090 break;
3091
3092 case UMIN:
3093 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3094 return op1;
3095 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3096 return op0;
3097 tem = simplify_associative_operation (code, mode, op0, op1);
3098 if (tem)
3099 return tem;
3100 break;
3101
3102 case UMAX:
3103 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3104 return op1;
3105 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3106 return op0;
3107 tem = simplify_associative_operation (code, mode, op0, op1);
3108 if (tem)
3109 return tem;
3110 break;
3111
3112 case SS_PLUS:
3113 case US_PLUS:
3114 case SS_MINUS:
3115 case US_MINUS:
3116 case SS_MULT:
3117 case US_MULT:
3118 case SS_DIV:
3119 case US_DIV:
3120 /* ??? There are simplifications that can be done. */
3121 return 0;
3122
3123 case VEC_SELECT:
3124 if (!VECTOR_MODE_P (mode))
3125 {
3126 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3127 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3128 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3129 gcc_assert (XVECLEN (trueop1, 0) == 1);
3130 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3131
3132 if (GET_CODE (trueop0) == CONST_VECTOR)
3133 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3134 (trueop1, 0, 0)));
3135
3136 /* Extract a scalar element from a nested VEC_SELECT expression
3137 (with an optional nested VEC_CONCAT expression). Some targets
3138 (i386) extract a scalar element from a vector using a chain of
3139 nested VEC_SELECT expressions. When the input operand is a memory
3140 operand, this operation can be simplified to a simple scalar
3141 load from an offset memory address. */
3142 if (GET_CODE (trueop0) == VEC_SELECT)
3143 {
3144 rtx op0 = XEXP (trueop0, 0);
3145 rtx op1 = XEXP (trueop0, 1);
3146
3147 enum machine_mode opmode = GET_MODE (op0);
3148 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3149 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3150
3151 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3152 int elem;
3153
3154 rtvec vec;
3155 rtx tmp_op, tmp;
3156
3157 gcc_assert (GET_CODE (op1) == PARALLEL);
3158 gcc_assert (i < n_elts);
3159
3160 /* Select the element pointed to by the nested selector. */
3161 elem = INTVAL (XVECEXP (op1, 0, i));
3162
3163 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3164 if (GET_CODE (op0) == VEC_CONCAT)
3165 {
3166 rtx op00 = XEXP (op0, 0);
3167 rtx op01 = XEXP (op0, 1);
3168
3169 enum machine_mode mode00, mode01;
3170 int n_elts00, n_elts01;
3171
3172 mode00 = GET_MODE (op00);
3173 mode01 = GET_MODE (op01);
3174
3175 /* Find out the number of elements in each operand. */
3176 if (VECTOR_MODE_P (mode00))
3177 {
3178 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3179 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3180 }
3181 else
3182 n_elts00 = 1;
3183
3184 if (VECTOR_MODE_P (mode01))
3185 {
3186 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3187 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3188 }
3189 else
3190 n_elts01 = 1;
3191
3192 gcc_assert (n_elts == n_elts00 + n_elts01);
3193
3194 /* Select correct operand of VEC_CONCAT
3195 and adjust selector. */
3196 if (elem < n_elts01)
3197 tmp_op = op00;
3198 else
3199 {
3200 tmp_op = op01;
3201 elem -= n_elts00;
3202 }
3203 }
3204 else
3205 tmp_op = op0;
3206
3207 vec = rtvec_alloc (1);
3208 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3209
3210 tmp = gen_rtx_fmt_ee (code, mode,
3211 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3212 return tmp;
3213 }
3214 if (GET_CODE (trueop0) == VEC_DUPLICATE
3215 && GET_MODE (XEXP (trueop0, 0)) == mode)
3216 return XEXP (trueop0, 0);
3217 }
3218 else
3219 {
3220 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3221 gcc_assert (GET_MODE_INNER (mode)
3222 == GET_MODE_INNER (GET_MODE (trueop0)));
3223 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3224
3225 if (GET_CODE (trueop0) == CONST_VECTOR)
3226 {
3227 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3228 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3229 rtvec v = rtvec_alloc (n_elts);
3230 unsigned int i;
3231
3232 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3233 for (i = 0; i < n_elts; i++)
3234 {
3235 rtx x = XVECEXP (trueop1, 0, i);
3236
3237 gcc_assert (CONST_INT_P (x));
3238 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3239 INTVAL (x));
3240 }
3241
3242 return gen_rtx_CONST_VECTOR (mode, v);
3243 }
3244
3245 /* If we build {a,b} then permute it, build the result directly. */
3246 if (XVECLEN (trueop1, 0) == 2
3247 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3248 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3249 && GET_CODE (trueop0) == VEC_CONCAT
3250 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3251 && GET_MODE (XEXP (trueop0, 0)) == mode
3252 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3253 && GET_MODE (XEXP (trueop0, 1)) == mode)
3254 {
3255 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3256 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3257 rtx subop0, subop1;
3258
3259 gcc_assert (i0 < 4 && i1 < 4);
3260 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3261 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3262
3263 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3264 }
3265 }
3266
3267 if (XVECLEN (trueop1, 0) == 1
3268 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3269 && GET_CODE (trueop0) == VEC_CONCAT)
3270 {
3271 rtx vec = trueop0;
3272 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3273
3274 /* Try to find the element in the VEC_CONCAT. */
3275 while (GET_MODE (vec) != mode
3276 && GET_CODE (vec) == VEC_CONCAT)
3277 {
3278 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3279 if (offset < vec_size)
3280 vec = XEXP (vec, 0);
3281 else
3282 {
3283 offset -= vec_size;
3284 vec = XEXP (vec, 1);
3285 }
3286 vec = avoid_constant_pool_reference (vec);
3287 }
3288
3289 if (GET_MODE (vec) == mode)
3290 return vec;
3291 }
3292
3293 return 0;
3294 case VEC_CONCAT:
3295 {
3296 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3297 ? GET_MODE (trueop0)
3298 : GET_MODE_INNER (mode));
3299 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3300 ? GET_MODE (trueop1)
3301 : GET_MODE_INNER (mode));
3302
3303 gcc_assert (VECTOR_MODE_P (mode));
3304 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3305 == GET_MODE_SIZE (mode));
3306
3307 if (VECTOR_MODE_P (op0_mode))
3308 gcc_assert (GET_MODE_INNER (mode)
3309 == GET_MODE_INNER (op0_mode));
3310 else
3311 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3312
3313 if (VECTOR_MODE_P (op1_mode))
3314 gcc_assert (GET_MODE_INNER (mode)
3315 == GET_MODE_INNER (op1_mode));
3316 else
3317 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3318
3319 if ((GET_CODE (trueop0) == CONST_VECTOR
3320 || CONST_INT_P (trueop0)
3321 || GET_CODE (trueop0) == CONST_DOUBLE)
3322 && (GET_CODE (trueop1) == CONST_VECTOR
3323 || CONST_INT_P (trueop1)
3324 || GET_CODE (trueop1) == CONST_DOUBLE))
3325 {
3326 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3327 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3328 rtvec v = rtvec_alloc (n_elts);
3329 unsigned int i;
3330 unsigned in_n_elts = 1;
3331
3332 if (VECTOR_MODE_P (op0_mode))
3333 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3334 for (i = 0; i < n_elts; i++)
3335 {
3336 if (i < in_n_elts)
3337 {
3338 if (!VECTOR_MODE_P (op0_mode))
3339 RTVEC_ELT (v, i) = trueop0;
3340 else
3341 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3342 }
3343 else
3344 {
3345 if (!VECTOR_MODE_P (op1_mode))
3346 RTVEC_ELT (v, i) = trueop1;
3347 else
3348 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3349 i - in_n_elts);
3350 }
3351 }
3352
3353 return gen_rtx_CONST_VECTOR (mode, v);
3354 }
3355 }
3356 return 0;
3357
3358 default:
3359 gcc_unreachable ();
3360 }
3361
3362 return 0;
3363 }
3364
3365 rtx
3366 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3367 rtx op0, rtx op1)
3368 {
3369 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3370 HOST_WIDE_INT val;
3371 unsigned int width = GET_MODE_PRECISION (mode);
3372
3373 if (VECTOR_MODE_P (mode)
3374 && code != VEC_CONCAT
3375 && GET_CODE (op0) == CONST_VECTOR
3376 && GET_CODE (op1) == CONST_VECTOR)
3377 {
3378 unsigned n_elts = GET_MODE_NUNITS (mode);
3379 enum machine_mode op0mode = GET_MODE (op0);
3380 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3381 enum machine_mode op1mode = GET_MODE (op1);
3382 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3383 rtvec v = rtvec_alloc (n_elts);
3384 unsigned int i;
3385
3386 gcc_assert (op0_n_elts == n_elts);
3387 gcc_assert (op1_n_elts == n_elts);
3388 for (i = 0; i < n_elts; i++)
3389 {
3390 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3391 CONST_VECTOR_ELT (op0, i),
3392 CONST_VECTOR_ELT (op1, i));
3393 if (!x)
3394 return 0;
3395 RTVEC_ELT (v, i) = x;
3396 }
3397
3398 return gen_rtx_CONST_VECTOR (mode, v);
3399 }
3400
3401 if (VECTOR_MODE_P (mode)
3402 && code == VEC_CONCAT
3403 && (CONST_INT_P (op0)
3404 || GET_CODE (op0) == CONST_DOUBLE
3405 || GET_CODE (op0) == CONST_FIXED)
3406 && (CONST_INT_P (op1)
3407 || GET_CODE (op1) == CONST_DOUBLE
3408 || GET_CODE (op1) == CONST_FIXED))
3409 {
3410 unsigned n_elts = GET_MODE_NUNITS (mode);
3411 rtvec v = rtvec_alloc (n_elts);
3412
3413 gcc_assert (n_elts >= 2);
3414 if (n_elts == 2)
3415 {
3416 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3417 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3418
3419 RTVEC_ELT (v, 0) = op0;
3420 RTVEC_ELT (v, 1) = op1;
3421 }
3422 else
3423 {
3424 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3425 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3426 unsigned i;
3427
3428 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3429 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3430 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3431
3432 for (i = 0; i < op0_n_elts; ++i)
3433 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3434 for (i = 0; i < op1_n_elts; ++i)
3435 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3436 }
3437
3438 return gen_rtx_CONST_VECTOR (mode, v);
3439 }
3440
3441 if (SCALAR_FLOAT_MODE_P (mode)
3442 && GET_CODE (op0) == CONST_DOUBLE
3443 && GET_CODE (op1) == CONST_DOUBLE
3444 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3445 {
3446 if (code == AND
3447 || code == IOR
3448 || code == XOR)
3449 {
3450 long tmp0[4];
3451 long tmp1[4];
3452 REAL_VALUE_TYPE r;
3453 int i;
3454
3455 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3456 GET_MODE (op0));
3457 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3458 GET_MODE (op1));
3459 for (i = 0; i < 4; i++)
3460 {
3461 switch (code)
3462 {
3463 case AND:
3464 tmp0[i] &= tmp1[i];
3465 break;
3466 case IOR:
3467 tmp0[i] |= tmp1[i];
3468 break;
3469 case XOR:
3470 tmp0[i] ^= tmp1[i];
3471 break;
3472 default:
3473 gcc_unreachable ();
3474 }
3475 }
3476 real_from_target (&r, tmp0, mode);
3477 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3478 }
3479 else
3480 {
3481 REAL_VALUE_TYPE f0, f1, value, result;
3482 bool inexact;
3483
3484 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3485 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3486 real_convert (&f0, mode, &f0);
3487 real_convert (&f1, mode, &f1);
3488
3489 if (HONOR_SNANS (mode)
3490 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3491 return 0;
3492
3493 if (code == DIV
3494 && REAL_VALUES_EQUAL (f1, dconst0)
3495 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3496 return 0;
3497
3498 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3499 && flag_trapping_math
3500 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3501 {
3502 int s0 = REAL_VALUE_NEGATIVE (f0);
3503 int s1 = REAL_VALUE_NEGATIVE (f1);
3504
3505 switch (code)
3506 {
3507 case PLUS:
3508 /* Inf + -Inf = NaN plus exception. */
3509 if (s0 != s1)
3510 return 0;
3511 break;
3512 case MINUS:
3513 /* Inf - Inf = NaN plus exception. */
3514 if (s0 == s1)
3515 return 0;
3516 break;
3517 case DIV:
3518 /* Inf / Inf = NaN plus exception. */
3519 return 0;
3520 default:
3521 break;
3522 }
3523 }
3524
3525 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3526 && flag_trapping_math
3527 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3528 || (REAL_VALUE_ISINF (f1)
3529 && REAL_VALUES_EQUAL (f0, dconst0))))
3530 /* Inf * 0 = NaN plus exception. */
3531 return 0;
3532
3533 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3534 &f0, &f1);
3535 real_convert (&result, mode, &value);
3536
3537 /* Don't constant fold this floating point operation if
3538 the result has overflowed and flag_trapping_math. */
3539
3540 if (flag_trapping_math
3541 && MODE_HAS_INFINITIES (mode)
3542 && REAL_VALUE_ISINF (result)
3543 && !REAL_VALUE_ISINF (f0)
3544 && !REAL_VALUE_ISINF (f1))
3545 /* Overflow plus exception. */
3546 return 0;
3547
3548 /* Don't constant fold this floating point operation if the
3549 result may depend upon the run-time rounding mode and
3550 flag_rounding_math is set, or if GCC's software emulation
3551 is unable to accurately represent the result. */
3552
3553 if ((flag_rounding_math
3554 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3555 && (inexact || !real_identical (&result, &value)))
3556 return NULL_RTX;
3557
3558 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3559 }
3560 }
3561
3562 /* We can fold some multi-word operations. */
3563 if (GET_MODE_CLASS (mode) == MODE_INT
3564 && width == HOST_BITS_PER_DOUBLE_INT
3565 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3566 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3567 {
3568 double_int o0, o1, res, tmp;
3569
3570 o0 = rtx_to_double_int (op0);
3571 o1 = rtx_to_double_int (op1);
3572
3573 switch (code)
3574 {
3575 case MINUS:
3576 /* A - B == A + (-B). */
3577 o1 = double_int_neg (o1);
3578
3579 /* Fall through.... */
3580
3581 case PLUS:
3582 res = double_int_add (o0, o1);
3583 break;
3584
3585 case MULT:
3586 res = double_int_mul (o0, o1);
3587 break;
3588
3589 case DIV:
3590 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3591 o0.low, o0.high, o1.low, o1.high,
3592 &res.low, &res.high,
3593 &tmp.low, &tmp.high))
3594 return 0;
3595 break;
3596
3597 case MOD:
3598 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3599 o0.low, o0.high, o1.low, o1.high,
3600 &tmp.low, &tmp.high,
3601 &res.low, &res.high))
3602 return 0;
3603 break;
3604
3605 case UDIV:
3606 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3607 o0.low, o0.high, o1.low, o1.high,
3608 &res.low, &res.high,
3609 &tmp.low, &tmp.high))
3610 return 0;
3611 break;
3612
3613 case UMOD:
3614 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3615 o0.low, o0.high, o1.low, o1.high,
3616 &tmp.low, &tmp.high,
3617 &res.low, &res.high))
3618 return 0;
3619 break;
3620
3621 case AND:
3622 res = double_int_and (o0, o1);
3623 break;
3624
3625 case IOR:
3626 res = double_int_ior (o0, o1);
3627 break;
3628
3629 case XOR:
3630 res = double_int_xor (o0, o1);
3631 break;
3632
3633 case SMIN:
3634 res = double_int_smin (o0, o1);
3635 break;
3636
3637 case SMAX:
3638 res = double_int_smax (o0, o1);
3639 break;
3640
3641 case UMIN:
3642 res = double_int_umin (o0, o1);
3643 break;
3644
3645 case UMAX:
3646 res = double_int_umax (o0, o1);
3647 break;
3648
3649 case LSHIFTRT: case ASHIFTRT:
3650 case ASHIFT:
3651 case ROTATE: case ROTATERT:
3652 {
3653 unsigned HOST_WIDE_INT cnt;
3654
3655 if (SHIFT_COUNT_TRUNCATED)
3656 o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));
3657
3658 if (!double_int_fits_in_uhwi_p (o1)
3659 || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
3660 return 0;
3661
3662 cnt = double_int_to_uhwi (o1);
3663
3664 if (code == LSHIFTRT || code == ASHIFTRT)
3665 res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
3666 code == ASHIFTRT);
3667 else if (code == ASHIFT)
3668 res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
3669 true);
3670 else if (code == ROTATE)
3671 res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
3672 else /* code == ROTATERT */
3673 res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
3674 }
3675 break;
3676
3677 default:
3678 return 0;
3679 }
3680
3681 return immed_double_int_const (res, mode);
3682 }
3683
3684 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3685 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3686 {
3687 /* Get the integer argument values in two forms:
3688 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3689
3690 arg0 = INTVAL (op0);
3691 arg1 = INTVAL (op1);
3692
3693 if (width < HOST_BITS_PER_WIDE_INT)
3694 {
3695 arg0 &= GET_MODE_MASK (mode);
3696 arg1 &= GET_MODE_MASK (mode);
3697
3698 arg0s = arg0;
3699 if (val_signbit_known_set_p (mode, arg0s))
3700 arg0s |= ~GET_MODE_MASK (mode);
3701
3702 arg1s = arg1;
3703 if (val_signbit_known_set_p (mode, arg1s))
3704 arg1s |= ~GET_MODE_MASK (mode);
3705 }
3706 else
3707 {
3708 arg0s = arg0;
3709 arg1s = arg1;
3710 }
3711
3712 /* Compute the value of the arithmetic. */
3713
3714 switch (code)
3715 {
3716 case PLUS:
3717 val = arg0s + arg1s;
3718 break;
3719
3720 case MINUS:
3721 val = arg0s - arg1s;
3722 break;
3723
3724 case MULT:
3725 val = arg0s * arg1s;
3726 break;
3727
3728 case DIV:
3729 if (arg1s == 0
3730 || ((unsigned HOST_WIDE_INT) arg0s
3731 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3732 && arg1s == -1))
3733 return 0;
3734 val = arg0s / arg1s;
3735 break;
3736
3737 case MOD:
3738 if (arg1s == 0
3739 || ((unsigned HOST_WIDE_INT) arg0s
3740 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3741 && arg1s == -1))
3742 return 0;
3743 val = arg0s % arg1s;
3744 break;
3745
3746 case UDIV:
3747 if (arg1 == 0
3748 || ((unsigned HOST_WIDE_INT) arg0s
3749 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3750 && arg1s == -1))
3751 return 0;
3752 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3753 break;
3754
3755 case UMOD:
3756 if (arg1 == 0
3757 || ((unsigned HOST_WIDE_INT) arg0s
3758 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3759 && arg1s == -1))
3760 return 0;
3761 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3762 break;
3763
3764 case AND:
3765 val = arg0 & arg1;
3766 break;
3767
3768 case IOR:
3769 val = arg0 | arg1;
3770 break;
3771
3772 case XOR:
3773 val = arg0 ^ arg1;
3774 break;
3775
3776 case LSHIFTRT:
3777 case ASHIFT:
3778 case ASHIFTRT:
3779 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3780 the value is in range. We can't return any old value for
3781 out-of-range arguments because either the middle-end (via
3782 shift_truncation_mask) or the back-end might be relying on
3783 target-specific knowledge. Nor can we rely on
3784 shift_truncation_mask, since the shift might not be part of an
3785 ashlM3, lshrM3 or ashrM3 instruction. */
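/* For instance, if SHIFT_COUNT_TRUNCATED holds and the mode is 32 bits wide,
   a shift count of 33 is reduced to 1 below; if it does not hold, any
   out-of-range count makes us give up on folding entirely. */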
3786 if (SHIFT_COUNT_TRUNCATED)
3787 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3788 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3789 return 0;
3790
3791 val = (code == ASHIFT
3792 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3793 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3794
3795 /* Sign-extend the result for arithmetic right shifts. */
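/* A worked example: in QImode, arg0 == 0xf0 (arg0s == -16) shifted right
   arithmetically by 2 first yields 0x3c from the logical shift, and OR-ing
   in the mask (-1 << 6) produces 0xfc, i.e. -4. */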
3796 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3797 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3798 break;
3799
3800 case ROTATERT:
3801 if (arg1 < 0)
3802 return 0;
3803
3804 arg1 %= width;
3805 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3806 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3807 break;
3808
3809 case ROTATE:
3810 if (arg1 < 0)
3811 return 0;
3812
3813 arg1 %= width;
3814 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3815 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3816 break;
3817
3818 case COMPARE:
3819 /* Do nothing here. */
3820 return 0;
3821
3822 case SMIN:
3823 val = arg0s <= arg1s ? arg0s : arg1s;
3824 break;
3825
3826 case UMIN:
3827 val = ((unsigned HOST_WIDE_INT) arg0
3828 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3829 break;
3830
3831 case SMAX:
3832 val = arg0s > arg1s ? arg0s : arg1s;
3833 break;
3834
3835 case UMAX:
3836 val = ((unsigned HOST_WIDE_INT) arg0
3837 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3838 break;
3839
3840 case SS_PLUS:
3841 case US_PLUS:
3842 case SS_MINUS:
3843 case US_MINUS:
3844 case SS_MULT:
3845 case US_MULT:
3846 case SS_DIV:
3847 case US_DIV:
3848 case SS_ASHIFT:
3849 case US_ASHIFT:
3850 /* ??? There are simplifications that can be done. */
3851 return 0;
3852
3853 default:
3854 gcc_unreachable ();
3855 }
3856
3857 return gen_int_mode (val, mode);
3858 }
3859
3860 return NULL_RTX;
3861 }
3862
3863
3864 \f
3865 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3866 PLUS or MINUS.
3867
3868 Rather than test for specific cases, we do this by a brute-force method
3869 and do all possible simplifications until no more changes occur. Then
3870 we rebuild the operation. */
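/* Roughly, an input such as (minus (plus a b) c) is flattened into the
   operand list {a, +} {b, +} {c, -}; each pair of entries is then handed
   to simplify_binary_operation, and whatever survives is reassembled into
   a chain of PLUS/MINUS expressions. */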
3871
3872 struct simplify_plus_minus_op_data
3873 {
3874 rtx op;
3875 short neg;
3876 };
3877
3878 static bool
3879 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3880 {
3881 int result;
3882
3883 result = (commutative_operand_precedence (y)
3884 - commutative_operand_precedence (x));
3885 if (result)
3886 return result > 0;
3887
3888 /* Group together equal REGs to do more simplification. */
3889 if (REG_P (x) && REG_P (y))
3890 return REGNO (x) > REGNO (y);
3891 else
3892 return false;
3893 }
3894
3895 static rtx
3896 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3897 rtx op1)
3898 {
3899 struct simplify_plus_minus_op_data ops[8];
3900 rtx result, tem;
3901 int n_ops = 2, input_ops = 2;
3902 int changed, n_constants = 0, canonicalized = 0;
3903 int i, j;
3904
3905 memset (ops, 0, sizeof ops);
3906
3907 /* Set up the two operands and then expand them until nothing has been
3908 changed. If we run out of room in our array, give up; this should
3909 almost never happen. */
3910
3911 ops[0].op = op0;
3912 ops[0].neg = 0;
3913 ops[1].op = op1;
3914 ops[1].neg = (code == MINUS);
3915
3916 do
3917 {
3918 changed = 0;
3919
3920 for (i = 0; i < n_ops; i++)
3921 {
3922 rtx this_op = ops[i].op;
3923 int this_neg = ops[i].neg;
3924 enum rtx_code this_code = GET_CODE (this_op);
3925
3926 switch (this_code)
3927 {
3928 case PLUS:
3929 case MINUS:
3930 if (n_ops == 7)
3931 return NULL_RTX;
3932
3933 ops[n_ops].op = XEXP (this_op, 1);
3934 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3935 n_ops++;
3936
3937 ops[i].op = XEXP (this_op, 0);
3938 input_ops++;
3939 changed = 1;
3940 canonicalized |= this_neg;
3941 break;
3942
3943 case NEG:
3944 ops[i].op = XEXP (this_op, 0);
3945 ops[i].neg = ! this_neg;
3946 changed = 1;
3947 canonicalized = 1;
3948 break;
3949
3950 case CONST:
3951 if (n_ops < 7
3952 && GET_CODE (XEXP (this_op, 0)) == PLUS
3953 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3954 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3955 {
3956 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3957 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3958 ops[n_ops].neg = this_neg;
3959 n_ops++;
3960 changed = 1;
3961 canonicalized = 1;
3962 }
3963 break;
3964
3965 case NOT:
3966 /* ~a -> (-a - 1) */
3967 if (n_ops != 7)
3968 {
3969 ops[n_ops].op = CONSTM1_RTX (mode);
3970 ops[n_ops++].neg = this_neg;
3971 ops[i].op = XEXP (this_op, 0);
3972 ops[i].neg = !this_neg;
3973 changed = 1;
3974 canonicalized = 1;
3975 }
3976 break;
3977
3978 case CONST_INT:
3979 n_constants++;
3980 if (this_neg)
3981 {
3982 ops[i].op = neg_const_int (mode, this_op);
3983 ops[i].neg = 0;
3984 changed = 1;
3985 canonicalized = 1;
3986 }
3987 break;
3988
3989 default:
3990 break;
3991 }
3992 }
3993 }
3994 while (changed);
3995
3996 if (n_constants > 1)
3997 canonicalized = 1;
3998
3999 gcc_assert (n_ops >= 2);
4000
4001 /* If we only have two operands, we can avoid the loops. */
4002 if (n_ops == 2)
4003 {
4004 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4005 rtx lhs, rhs;
4006
4007 /* Get the two operands. Be careful with the order, especially for
4008 the cases where code == MINUS. */
4009 if (ops[0].neg && ops[1].neg)
4010 {
4011 lhs = gen_rtx_NEG (mode, ops[0].op);
4012 rhs = ops[1].op;
4013 }
4014 else if (ops[0].neg)
4015 {
4016 lhs = ops[1].op;
4017 rhs = ops[0].op;
4018 }
4019 else
4020 {
4021 lhs = ops[0].op;
4022 rhs = ops[1].op;
4023 }
4024
4025 return simplify_const_binary_operation (code, mode, lhs, rhs);
4026 }
4027
4028 /* Now simplify each pair of operands until nothing changes. */
4029 do
4030 {
4031 /* Insertion sort is good enough for an eight-element array. */
4032 for (i = 1; i < n_ops; i++)
4033 {
4034 struct simplify_plus_minus_op_data save;
4035 j = i - 1;
4036 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4037 continue;
4038
4039 canonicalized = 1;
4040 save = ops[i];
4041 do
4042 ops[j + 1] = ops[j];
4043 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4044 ops[j + 1] = save;
4045 }
4046
4047 changed = 0;
4048 for (i = n_ops - 1; i > 0; i--)
4049 for (j = i - 1; j >= 0; j--)
4050 {
4051 rtx lhs = ops[j].op, rhs = ops[i].op;
4052 int lneg = ops[j].neg, rneg = ops[i].neg;
4053
4054 if (lhs != 0 && rhs != 0)
4055 {
4056 enum rtx_code ncode = PLUS;
4057
4058 if (lneg != rneg)
4059 {
4060 ncode = MINUS;
4061 if (lneg)
4062 tem = lhs, lhs = rhs, rhs = tem;
4063 }
4064 else if (swap_commutative_operands_p (lhs, rhs))
4065 tem = lhs, lhs = rhs, rhs = tem;
4066
4067 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4068 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4069 {
4070 rtx tem_lhs, tem_rhs;
4071
4072 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4073 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4074 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4075
4076 if (tem && !CONSTANT_P (tem))
4077 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4078 }
4079 else
4080 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4081
4082 /* Reject "simplifications" that just wrap the two
4083 arguments in a CONST. Failure to do so can result
4084 in infinite recursion with simplify_binary_operation
4085 when it calls us to simplify CONST operations. */
4086 if (tem
4087 && ! (GET_CODE (tem) == CONST
4088 && GET_CODE (XEXP (tem, 0)) == ncode
4089 && XEXP (XEXP (tem, 0), 0) == lhs
4090 && XEXP (XEXP (tem, 0), 1) == rhs))
4091 {
4092 lneg &= rneg;
4093 if (GET_CODE (tem) == NEG)
4094 tem = XEXP (tem, 0), lneg = !lneg;
4095 if (CONST_INT_P (tem) && lneg)
4096 tem = neg_const_int (mode, tem), lneg = 0;
4097
4098 ops[i].op = tem;
4099 ops[i].neg = lneg;
4100 ops[j].op = NULL_RTX;
4101 changed = 1;
4102 canonicalized = 1;
4103 }
4104 }
4105 }
4106
4107 /* If nothing changed, fail. */
4108 if (!canonicalized)
4109 return NULL_RTX;
4110
4111 /* Pack all the operands to the lower-numbered entries. */
4112 for (i = 0, j = 0; j < n_ops; j++)
4113 if (ops[j].op)
4114 {
4115 ops[i] = ops[j];
4116 i++;
4117 }
4118 n_ops = i;
4119 }
4120 while (changed);
4121
4122 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4123 if (n_ops == 2
4124 && CONST_INT_P (ops[1].op)
4125 && CONSTANT_P (ops[0].op)
4126 && ops[0].neg)
4127 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4128
4129 /* We suppressed creation of trivial CONST expressions in the
4130 combination loop to avoid recursion. Create one manually now.
4131 The combination loop should have ensured that there is exactly
4132 one CONST_INT, and the sort will have ensured that it is last
4133 in the array and that any other constant will be next-to-last. */
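/* For instance, if the two trailing entries are a SYMBOL_REF and
   (const_int 12), plus_constant typically folds them into
   (const (plus (symbol_ref ...) 12)). */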
4134
4135 if (n_ops > 1
4136 && CONST_INT_P (ops[n_ops - 1].op)
4137 && CONSTANT_P (ops[n_ops - 2].op))
4138 {
4139 rtx value = ops[n_ops - 1].op;
4140 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4141 value = neg_const_int (mode, value);
4142 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4143 INTVAL (value));
4144 n_ops--;
4145 }
4146
4147 /* Put a non-negated operand first, if possible. */
4148
4149 for (i = 0; i < n_ops && ops[i].neg; i++)
4150 continue;
4151 if (i == n_ops)
4152 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4153 else if (i != 0)
4154 {
4155 tem = ops[0].op;
4156 ops[0] = ops[i];
4157 ops[i].op = tem;
4158 ops[i].neg = 1;
4159 }
4160
4161 /* Now make the result by performing the requested operations. */
4162 result = ops[0].op;
4163 for (i = 1; i < n_ops; i++)
4164 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4165 mode, result, ops[i].op);
4166
4167 return result;
4168 }
4169
4170 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4171 static bool
4172 plus_minus_operand_p (const_rtx x)
4173 {
4174 return GET_CODE (x) == PLUS
4175 || GET_CODE (x) == MINUS
4176 || (GET_CODE (x) == CONST
4177 && GET_CODE (XEXP (x, 0)) == PLUS
4178 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4179 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4180 }
4181
4182 /* Like simplify_binary_operation except used for relational operators.
4183 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4184 not both be VOIDmode as well.
4185
4186 CMP_MODE specifies the mode in which the comparison is done, so it is
4187 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4188 the operands or, if both are VOIDmode, the operands are compared in
4189 "infinite precision". */
4190 rtx
4191 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4192 enum machine_mode cmp_mode, rtx op0, rtx op1)
4193 {
4194 rtx tem, trueop0, trueop1;
4195
4196 if (cmp_mode == VOIDmode)
4197 cmp_mode = GET_MODE (op0);
4198 if (cmp_mode == VOIDmode)
4199 cmp_mode = GET_MODE (op1);
4200
4201 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4202 if (tem)
4203 {
4204 if (SCALAR_FLOAT_MODE_P (mode))
4205 {
4206 if (tem == const0_rtx)
4207 return CONST0_RTX (mode);
4208 #ifdef FLOAT_STORE_FLAG_VALUE
4209 {
4210 REAL_VALUE_TYPE val;
4211 val = FLOAT_STORE_FLAG_VALUE (mode);
4212 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4213 }
4214 #else
4215 return NULL_RTX;
4216 #endif
4217 }
4218 if (VECTOR_MODE_P (mode))
4219 {
4220 if (tem == const0_rtx)
4221 return CONST0_RTX (mode);
4222 #ifdef VECTOR_STORE_FLAG_VALUE
4223 {
4224 int i, units;
4225 rtvec v;
4226
4227 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4228 if (val == NULL_RTX)
4229 return NULL_RTX;
4230 if (val == const1_rtx)
4231 return CONST1_RTX (mode);
4232
4233 units = GET_MODE_NUNITS (mode);
4234 v = rtvec_alloc (units);
4235 for (i = 0; i < units; i++)
4236 RTVEC_ELT (v, i) = val;
4237 return gen_rtx_raw_CONST_VECTOR (mode, v);
4238 }
4239 #else
4240 return NULL_RTX;
4241 #endif
4242 }
4243
4244 return tem;
4245 }
4246
4247 /* For the following tests, ensure const0_rtx is op1. */
4248 if (swap_commutative_operands_p (op0, op1)
4249 || (op0 == const0_rtx && op1 != const0_rtx))
4250 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4251
4252 /* If op0 is a compare, extract the comparison arguments from it. */
4253 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4254 return simplify_gen_relational (code, mode, VOIDmode,
4255 XEXP (op0, 0), XEXP (op0, 1));
4256
4257 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4258 || CC0_P (op0))
4259 return NULL_RTX;
4260
4261 trueop0 = avoid_constant_pool_reference (op0);
4262 trueop1 = avoid_constant_pool_reference (op1);
4263 return simplify_relational_operation_1 (code, mode, cmp_mode,
4264 trueop0, trueop1);
4265 }
4266
4267 /* This part of simplify_relational_operation is only used when CMP_MODE
4268 is not in class MODE_CC (i.e. it is a real comparison).
4269
4270 MODE is the mode of the result, while CMP_MODE specifies the mode in
4271 which the comparison is done, so it is the mode of the operands. */
4272
4273 static rtx
4274 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4275 enum machine_mode cmp_mode, rtx op0, rtx op1)
4276 {
4277 enum rtx_code op0code = GET_CODE (op0);
4278
4279 if (op1 == const0_rtx && COMPARISON_P (op0))
4280 {
4281 /* If op0 is a comparison, extract the comparison arguments
4282 from it. */
4283 if (code == NE)
4284 {
4285 if (GET_MODE (op0) == mode)
4286 return simplify_rtx (op0);
4287 else
4288 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4289 XEXP (op0, 0), XEXP (op0, 1));
4290 }
4291 else if (code == EQ)
4292 {
4293 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4294 if (new_code != UNKNOWN)
4295 return simplify_gen_relational (new_code, mode, VOIDmode,
4296 XEXP (op0, 0), XEXP (op0, 1));
4297 }
4298 }
4299
4300 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4301 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
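/* E.g. (ltu (plus a 5) 5) becomes (geu a -5): the unsigned sum is below 5
   exactly when the addition wrapped around, i.e. when a >= -5 unsigned. */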
4302 if ((code == LTU || code == GEU)
4303 && GET_CODE (op0) == PLUS
4304 && CONST_INT_P (XEXP (op0, 1))
4305 && (rtx_equal_p (op1, XEXP (op0, 0))
4306 || rtx_equal_p (op1, XEXP (op0, 1))))
4307 {
4308 rtx new_cmp
4309 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4310 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4311 cmp_mode, XEXP (op0, 0), new_cmp);
4312 }
4313
4314 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4315 if ((code == LTU || code == GEU)
4316 && GET_CODE (op0) == PLUS
4317 && rtx_equal_p (op1, XEXP (op0, 1))
4318 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4319 && !rtx_equal_p (op1, XEXP (op0, 0)))
4320 return simplify_gen_relational (code, mode, cmp_mode, op0,
4321 copy_rtx (XEXP (op0, 0)));
4322
4323 if (op1 == const0_rtx)
4324 {
4325 /* Canonicalize (GTU x 0) as (NE x 0). */
4326 if (code == GTU)
4327 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4328 /* Canonicalize (LEU x 0) as (EQ x 0). */
4329 if (code == LEU)
4330 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4331 }
4332 else if (op1 == const1_rtx)
4333 {
4334 switch (code)
4335 {
4336 case GE:
4337 /* Canonicalize (GE x 1) as (GT x 0). */
4338 return simplify_gen_relational (GT, mode, cmp_mode,
4339 op0, const0_rtx);
4340 case GEU:
4341 /* Canonicalize (GEU x 1) as (NE x 0). */
4342 return simplify_gen_relational (NE, mode, cmp_mode,
4343 op0, const0_rtx);
4344 case LT:
4345 /* Canonicalize (LT x 1) as (LE x 0). */
4346 return simplify_gen_relational (LE, mode, cmp_mode,
4347 op0, const0_rtx);
4348 case LTU:
4349 /* Canonicalize (LTU x 1) as (EQ x 0). */
4350 return simplify_gen_relational (EQ, mode, cmp_mode,
4351 op0, const0_rtx);
4352 default:
4353 break;
4354 }
4355 }
4356 else if (op1 == constm1_rtx)
4357 {
4358 /* Canonicalize (LE x -1) as (LT x 0). */
4359 if (code == LE)
4360 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4361 /* Canonicalize (GT x -1) as (GE x 0). */
4362 if (code == GT)
4363 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4364 }
4365
4366 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4367 if ((code == EQ || code == NE)
4368 && (op0code == PLUS || op0code == MINUS)
4369 && CONSTANT_P (op1)
4370 && CONSTANT_P (XEXP (op0, 1))
4371 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4372 {
4373 rtx x = XEXP (op0, 0);
4374 rtx c = XEXP (op0, 1);
4375 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4376 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4377
4378 /* Detect an infinite recursion condition, where we would oscillate in
4379 this simplification between:
4380 A + B == C <---> C - B == A,
4381 where A, B, and C are all non-simplifiable constant expressions,
4382 usually SYMBOL_REFs. */
4383 if (GET_CODE (tem) == invcode
4384 && CONSTANT_P (x)
4385 && rtx_equal_p (c, XEXP (tem, 1)))
4386 return NULL_RTX;
4387
4388 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4389 }
4390
4391 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4392 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4393 if (code == NE
4394 && op1 == const0_rtx
4395 && GET_MODE_CLASS (mode) == MODE_INT
4396 && cmp_mode != VOIDmode
4397 /* ??? Work-around BImode bugs in the ia64 backend. */
4398 && mode != BImode
4399 && cmp_mode != BImode
4400 && nonzero_bits (op0, cmp_mode) == 1
4401 && STORE_FLAG_VALUE == 1)
4402 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4403 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4404 : lowpart_subreg (mode, op0, cmp_mode);
4405
4406 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4407 if ((code == EQ || code == NE)
4408 && op1 == const0_rtx
4409 && op0code == XOR)
4410 return simplify_gen_relational (code, mode, cmp_mode,
4411 XEXP (op0, 0), XEXP (op0, 1));
4412
4413 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4414 if ((code == EQ || code == NE)
4415 && op0code == XOR
4416 && rtx_equal_p (XEXP (op0, 0), op1)
4417 && !side_effects_p (XEXP (op0, 0)))
4418 return simplify_gen_relational (code, mode, cmp_mode,
4419 XEXP (op0, 1), const0_rtx);
4420
4421 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4422 if ((code == EQ || code == NE)
4423 && op0code == XOR
4424 && rtx_equal_p (XEXP (op0, 1), op1)
4425 && !side_effects_p (XEXP (op0, 1)))
4426 return simplify_gen_relational (code, mode, cmp_mode,
4427 XEXP (op0, 0), const0_rtx);
4428
4429 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4430 if ((code == EQ || code == NE)
4431 && op0code == XOR
4432 && (CONST_INT_P (op1)
4433 || GET_CODE (op1) == CONST_DOUBLE)
4434 && (CONST_INT_P (XEXP (op0, 1))
4435 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4436 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4437 simplify_gen_binary (XOR, cmp_mode,
4438 XEXP (op0, 1), op1));
4439
4440 if (op0code == POPCOUNT && op1 == const0_rtx)
4441 switch (code)
4442 {
4443 case EQ:
4444 case LE:
4445 case LEU:
4446 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4447 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4448 XEXP (op0, 0), const0_rtx);
4449
4450 case NE:
4451 case GT:
4452 case GTU:
4453 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4454 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4455 XEXP (op0, 0), const0_rtx);
4456
4457 default:
4458 break;
4459 }
4460
4461 return NULL_RTX;
4462 }
4463
4464 enum
4465 {
4466 CMP_EQ = 1,
4467 CMP_LT = 2,
4468 CMP_GT = 4,
4469 CMP_LTU = 8,
4470 CMP_GTU = 16
4471 };
4472
4473
4474 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4475 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4476 For KNOWN_RESULT to make sense, it should be either CMP_EQ, or the
4477 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4478 For floating-point comparisons, assume that the operands were ordered. */
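/* For example, comparing -1 with 1 gives CMP_LT | CMP_GTU (signed less-than,
   unsigned greater-than); with that value LE folds to const_true_rtx while
   LEU folds to const0_rtx. */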
4479
4480 static rtx
4481 comparison_result (enum rtx_code code, int known_results)
4482 {
4483 switch (code)
4484 {
4485 case EQ:
4486 case UNEQ:
4487 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4488 case NE:
4489 case LTGT:
4490 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4491
4492 case LT:
4493 case UNLT:
4494 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4495 case GE:
4496 case UNGE:
4497 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4498
4499 case GT:
4500 case UNGT:
4501 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4502 case LE:
4503 case UNLE:
4504 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4505
4506 case LTU:
4507 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4508 case GEU:
4509 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4510
4511 case GTU:
4512 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4513 case LEU:
4514 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4515
4516 case ORDERED:
4517 return const_true_rtx;
4518 case UNORDERED:
4519 return const0_rtx;
4520 default:
4521 gcc_unreachable ();
4522 }
4523 }
4524
4525 /* Check if the given comparison (done in the given MODE) is actually a
4526 tautology or a contradiction.
4527 If no simplification is possible, this function returns zero.
4528 Otherwise, it returns either const_true_rtx or const0_rtx. */
4529
4530 rtx
4531 simplify_const_relational_operation (enum rtx_code code,
4532 enum machine_mode mode,
4533 rtx op0, rtx op1)
4534 {
4535 rtx tem;
4536 rtx trueop0;
4537 rtx trueop1;
4538
4539 gcc_assert (mode != VOIDmode
4540 || (GET_MODE (op0) == VOIDmode
4541 && GET_MODE (op1) == VOIDmode));
4542
4543 /* If op0 is a compare, extract the comparison arguments from it. */
4544 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4545 {
4546 op1 = XEXP (op0, 1);
4547 op0 = XEXP (op0, 0);
4548
4549 if (GET_MODE (op0) != VOIDmode)
4550 mode = GET_MODE (op0);
4551 else if (GET_MODE (op1) != VOIDmode)
4552 mode = GET_MODE (op1);
4553 else
4554 return 0;
4555 }
4556
4557 /* We can't simplify MODE_CC values since we don't know what the
4558 actual comparison is. */
4559 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4560 return 0;
4561
4562 /* Make sure the constant is second. */
4563 if (swap_commutative_operands_p (op0, op1))
4564 {
4565 tem = op0, op0 = op1, op1 = tem;
4566 code = swap_condition (code);
4567 }
4568
4569 trueop0 = avoid_constant_pool_reference (op0);
4570 trueop1 = avoid_constant_pool_reference (op1);
4571
4572 /* For integer comparisons of A and B, maybe we can simplify A - B and
4573 then simplify a comparison of that with zero. If A and B are both either
4574 a register or a CONST_INT, this can't help; testing for these cases will
4575 prevent infinite recursion here and speed things up.
4576
4577 We can only do this for EQ and NE comparisons as otherwise we may
4578 lose or introduce overflow which we cannot disregard as undefined as
4579 we do not know the signedness of the operation on either the left or
4580 the right hand side of the comparison. */
4581
4582 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4583 && (code == EQ || code == NE)
4584 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4585 && (REG_P (op1) || CONST_INT_P (trueop1)))
4586 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4587 /* We cannot do this if tem is a nonzero address. */
4588 && ! nonzero_address_p (tem))
4589 return simplify_const_relational_operation (signed_condition (code),
4590 mode, tem, const0_rtx);
4591
4592 if (! HONOR_NANS (mode) && code == ORDERED)
4593 return const_true_rtx;
4594
4595 if (! HONOR_NANS (mode) && code == UNORDERED)
4596 return const0_rtx;
4597
4598 /* For modes without NaNs, if the two operands are equal, we know the
4599 result except if they have side-effects. Even with NaNs we know
4600 the result of unordered comparisons and, if signaling NaNs are
4601 irrelevant, also the result of LT/GT/LTGT. */
4602 if ((! HONOR_NANS (GET_MODE (trueop0))
4603 || code == UNEQ || code == UNLE || code == UNGE
4604 || ((code == LT || code == GT || code == LTGT)
4605 && ! HONOR_SNANS (GET_MODE (trueop0))))
4606 && rtx_equal_p (trueop0, trueop1)
4607 && ! side_effects_p (trueop0))
4608 return comparison_result (code, CMP_EQ);
4609
4610 /* If the operands are floating-point constants, see if we can fold
4611 the result. */
4612 if (GET_CODE (trueop0) == CONST_DOUBLE
4613 && GET_CODE (trueop1) == CONST_DOUBLE
4614 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4615 {
4616 REAL_VALUE_TYPE d0, d1;
4617
4618 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4619 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4620
4621 /* Comparisons are unordered iff at least one of the values is NaN. */
4622 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4623 switch (code)
4624 {
4625 case UNEQ:
4626 case UNLT:
4627 case UNGT:
4628 case UNLE:
4629 case UNGE:
4630 case NE:
4631 case UNORDERED:
4632 return const_true_rtx;
4633 case EQ:
4634 case LT:
4635 case GT:
4636 case LE:
4637 case GE:
4638 case LTGT:
4639 case ORDERED:
4640 return const0_rtx;
4641 default:
4642 return 0;
4643 }
4644
4645 return comparison_result (code,
4646 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4647 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4648 }
4649
4650 /* Otherwise, see if the operands are both integers. */
4651 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4652 && (GET_CODE (trueop0) == CONST_DOUBLE
4653 || CONST_INT_P (trueop0))
4654 && (GET_CODE (trueop1) == CONST_DOUBLE
4655 || CONST_INT_P (trueop1)))
4656 {
4657 int width = GET_MODE_PRECISION (mode);
4658 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4659 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4660
4661 /* Get the two words comprising each integer constant. */
4662 if (GET_CODE (trueop0) == CONST_DOUBLE)
4663 {
4664 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4665 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4666 }
4667 else
4668 {
4669 l0u = l0s = INTVAL (trueop0);
4670 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4671 }
4672
4673 if (GET_CODE (trueop1) == CONST_DOUBLE)
4674 {
4675 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4676 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4677 }
4678 else
4679 {
4680 l1u = l1s = INTVAL (trueop1);
4681 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4682 }
4683
4684 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4685 we have to sign or zero-extend the values. */
4686 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4687 {
4688 l0u &= GET_MODE_MASK (mode);
4689 l1u &= GET_MODE_MASK (mode);
4690
4691 if (val_signbit_known_set_p (mode, l0s))
4692 l0s |= ~GET_MODE_MASK (mode);
4693
4694 if (val_signbit_known_set_p (mode, l1s))
4695 l1s |= ~GET_MODE_MASK (mode);
4696 }
4697 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4698 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4699
4700 if (h0u == h1u && l0u == l1u)
4701 return comparison_result (code, CMP_EQ);
4702 else
4703 {
4704 int cr;
4705 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4706 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4707 return comparison_result (code, cr);
4708 }
4709 }
4710
4711 /* Optimize comparisons with upper and lower bounds. */
4712 if (HWI_COMPUTABLE_MODE_P (mode)
4713 && CONST_INT_P (trueop1))
4714 {
4715 int sign;
4716 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4717 HOST_WIDE_INT val = INTVAL (trueop1);
4718 HOST_WIDE_INT mmin, mmax;
4719
4720 if (code == GEU
4721 || code == LEU
4722 || code == GTU
4723 || code == LTU)
4724 sign = 0;
4725 else
4726 sign = 1;
4727
4728 /* Get a reduced range if the sign bit is zero. */
4729 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4730 {
4731 mmin = 0;
4732 mmax = nonzero;
4733 }
4734 else
4735 {
4736 rtx mmin_rtx, mmax_rtx;
4737 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4738
4739 mmin = INTVAL (mmin_rtx);
4740 mmax = INTVAL (mmax_rtx);
4741 if (sign)
4742 {
4743 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4744
4745 mmin >>= (sign_copies - 1);
4746 mmax >>= (sign_copies - 1);
4747 }
4748 }
4749
4750 switch (code)
4751 {
4752 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4753 case GEU:
4754 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4755 return const_true_rtx;
4756 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4757 return const0_rtx;
4758 break;
4759 case GE:
4760 if (val <= mmin)
4761 return const_true_rtx;
4762 if (val > mmax)
4763 return const0_rtx;
4764 break;
4765
4766 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4767 case LEU:
4768 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4769 return const_true_rtx;
4770 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4771 return const0_rtx;
4772 break;
4773 case LE:
4774 if (val >= mmax)
4775 return const_true_rtx;
4776 if (val < mmin)
4777 return const0_rtx;
4778 break;
4779
4780 case EQ:
4781 /* x == y is always false for y out of range. */
4782 if (val < mmin || val > mmax)
4783 return const0_rtx;
4784 break;
4785
4786 /* x > y is always false for y >= mmax, always true for y < mmin. */
4787 case GTU:
4788 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4789 return const0_rtx;
4790 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4791 return const_true_rtx;
4792 break;
4793 case GT:
4794 if (val >= mmax)
4795 return const0_rtx;
4796 if (val < mmin)
4797 return const_true_rtx;
4798 break;
4799
4800 /* x < y is always false for y <= mmin, always true for y > mmax. */
4801 case LTU:
4802 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4803 return const0_rtx;
4804 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4805 return const_true_rtx;
4806 break;
4807 case LT:
4808 if (val <= mmin)
4809 return const0_rtx;
4810 if (val > mmax)
4811 return const_true_rtx;
4812 break;
4813
4814 case NE:
4815 /* x != y is always true for y out of range. */
4816 if (val < mmin || val > mmax)
4817 return const_true_rtx;
4818 break;
4819
4820 default:
4821 break;
4822 }
4823 }
4824
4825 /* Optimize integer comparisons with zero. */
4826 if (trueop1 == const0_rtx)
4827 {
4828 /* Some addresses are known to be nonzero. We don't know
4829 their sign, but equality comparisons are known. */
4830 if (nonzero_address_p (trueop0))
4831 {
4832 if (code == EQ || code == LEU)
4833 return const0_rtx;
4834 if (code == NE || code == GTU)
4835 return const_true_rtx;
4836 }
4837
4838 /* See if the first operand is an IOR with a constant. If so, we
4839 may be able to determine the result of this comparison. */
4840 if (GET_CODE (op0) == IOR)
4841 {
4842 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4843 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4844 {
4845 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4846 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4847 && (UINTVAL (inner_const)
4848 & ((unsigned HOST_WIDE_INT) 1
4849 << sign_bitnum)));
4850
4851 switch (code)
4852 {
4853 case EQ:
4854 case LEU:
4855 return const0_rtx;
4856 case NE:
4857 case GTU:
4858 return const_true_rtx;
4859 case LT:
4860 case LE:
4861 if (has_sign)
4862 return const_true_rtx;
4863 break;
4864 case GT:
4865 case GE:
4866 if (has_sign)
4867 return const0_rtx;
4868 break;
4869 default:
4870 break;
4871 }
4872 }
4873 }
4874 }
4875
4876 /* Optimize comparison of ABS with zero. */
4877 if (trueop1 == CONST0_RTX (mode)
4878 && (GET_CODE (trueop0) == ABS
4879 || (GET_CODE (trueop0) == FLOAT_EXTEND
4880 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4881 {
4882 switch (code)
4883 {
4884 case LT:
4885 /* Optimize abs(x) < 0.0. */
4886 if (!HONOR_SNANS (mode)
4887 && (!INTEGRAL_MODE_P (mode)
4888 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4889 {
4890 if (INTEGRAL_MODE_P (mode)
4891 && (issue_strict_overflow_warning
4892 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4893 warning (OPT_Wstrict_overflow,
4894 ("assuming signed overflow does not occur when "
4895 "assuming abs (x) < 0 is false"));
4896 return const0_rtx;
4897 }
4898 break;
4899
4900 case GE:
4901 /* Optimize abs(x) >= 0.0. */
4902 if (!HONOR_NANS (mode)
4903 && (!INTEGRAL_MODE_P (mode)
4904 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4905 {
4906 if (INTEGRAL_MODE_P (mode)
4907 && (issue_strict_overflow_warning
4908 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4909 warning (OPT_Wstrict_overflow,
4910 ("assuming signed overflow does not occur when "
4911 "assuming abs (x) >= 0 is true"));
4912 return const_true_rtx;
4913 }
4914 break;
4915
4916 case UNGE:
4917 /* Optimize ! (abs(x) < 0.0). */
4918 return const_true_rtx;
4919
4920 default:
4921 break;
4922 }
4923 }
4924
4925 return 0;
4926 }
4927 \f
4928 /* Simplify CODE, an operation with result mode MODE and three operands,
4929 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4930 a constant. Return 0 if no simplification is possible. */
4931
4932 rtx
4933 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4934 enum machine_mode op0_mode, rtx op0, rtx op1,
4935 rtx op2)
4936 {
4937 unsigned int width = GET_MODE_PRECISION (mode);
4938 bool any_change = false;
4939 rtx tem;
4940
4941 /* VOIDmode means "infinite" precision. */
4942 if (width == 0)
4943 width = HOST_BITS_PER_WIDE_INT;
4944
4945 switch (code)
4946 {
4947 case FMA:
4948 /* Simplify negations around the multiplication. */
4949 /* -a * -b + c => a * b + c. */
4950 if (GET_CODE (op0) == NEG)
4951 {
4952 tem = simplify_unary_operation (NEG, mode, op1, mode);
4953 if (tem)
4954 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4955 }
4956 else if (GET_CODE (op1) == NEG)
4957 {
4958 tem = simplify_unary_operation (NEG, mode, op0, mode);
4959 if (tem)
4960 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4961 }
4962
4963 /* Canonicalize the two multiplication operands. */
4964 /* a * -b + c => -b * a + c. */
4965 if (swap_commutative_operands_p (op0, op1))
4966 tem = op0, op0 = op1, op1 = tem, any_change = true;
4967
4968 if (any_change)
4969 return gen_rtx_FMA (mode, op0, op1, op2);
4970 return NULL_RTX;
4971
4972 case SIGN_EXTRACT:
4973 case ZERO_EXTRACT:
4974 if (CONST_INT_P (op0)
4975 && CONST_INT_P (op1)
4976 && CONST_INT_P (op2)
4977 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4978 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4979 {
4980 /* Extracting a bit-field from a constant */
4981 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4982 HOST_WIDE_INT op1val = INTVAL (op1);
4983 HOST_WIDE_INT op2val = INTVAL (op2);
4984 if (BITS_BIG_ENDIAN)
4985 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4986 else
4987 val >>= op2val;
4988
4989 if (HOST_BITS_PER_WIDE_INT != op1val)
4990 {
4991 /* First zero-extend. */
4992 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4993 /* If desired, propagate sign bit. */
4994 if (code == SIGN_EXTRACT
4995 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4996 != 0)
4997 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4998 }
4999
5000 return gen_int_mode (val, mode);
5001 }
5002 break;
5003
5004 case IF_THEN_ELSE:
5005 if (CONST_INT_P (op0))
5006 return op0 != const0_rtx ? op1 : op2;
5007
5008 /* Convert c ? a : a into "a". */
5009 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5010 return op1;
5011
5012 /* Convert a != b ? a : b into "a". */
5013 if (GET_CODE (op0) == NE
5014 && ! side_effects_p (op0)
5015 && ! HONOR_NANS (mode)
5016 && ! HONOR_SIGNED_ZEROS (mode)
5017 && ((rtx_equal_p (XEXP (op0, 0), op1)
5018 && rtx_equal_p (XEXP (op0, 1), op2))
5019 || (rtx_equal_p (XEXP (op0, 0), op2)
5020 && rtx_equal_p (XEXP (op0, 1), op1))))
5021 return op1;
5022
5023 /* Convert a == b ? a : b into "b". */
5024 if (GET_CODE (op0) == EQ
5025 && ! side_effects_p (op0)
5026 && ! HONOR_NANS (mode)
5027 && ! HONOR_SIGNED_ZEROS (mode)
5028 && ((rtx_equal_p (XEXP (op0, 0), op1)
5029 && rtx_equal_p (XEXP (op0, 1), op2))
5030 || (rtx_equal_p (XEXP (op0, 0), op2)
5031 && rtx_equal_p (XEXP (op0, 1), op1))))
5032 return op2;
5033
5034 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5035 {
5036 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5037 ? GET_MODE (XEXP (op0, 1))
5038 : GET_MODE (XEXP (op0, 0)));
5039 rtx temp;
5040
5041 /* Look for happy constants in op1 and op2. */
5042 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5043 {
5044 HOST_WIDE_INT t = INTVAL (op1);
5045 HOST_WIDE_INT f = INTVAL (op2);
5046
5047 if (t == STORE_FLAG_VALUE && f == 0)
5048 code = GET_CODE (op0);
5049 else if (t == 0 && f == STORE_FLAG_VALUE)
5050 {
5051 enum rtx_code tmp;
5052 tmp = reversed_comparison_code (op0, NULL_RTX);
5053 if (tmp == UNKNOWN)
5054 break;
5055 code = tmp;
5056 }
5057 else
5058 break;
5059
5060 return simplify_gen_relational (code, mode, cmp_mode,
5061 XEXP (op0, 0), XEXP (op0, 1));
5062 }
5063
5064 if (cmp_mode == VOIDmode)
5065 cmp_mode = op0_mode;
5066 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5067 cmp_mode, XEXP (op0, 0),
5068 XEXP (op0, 1));
5069
5070 /* See if any simplifications were possible. */
5071 if (temp)
5072 {
5073 if (CONST_INT_P (temp))
5074 return temp == const0_rtx ? op2 : op1;
5075 else if (temp)
5076 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5077 }
5078 }
5079 break;
5080
5081 case VEC_MERGE:
5082 gcc_assert (GET_MODE (op0) == mode);
5083 gcc_assert (GET_MODE (op1) == mode);
5084 gcc_assert (VECTOR_MODE_P (mode));
5085 op2 = avoid_constant_pool_reference (op2);
5086 if (CONST_INT_P (op2))
5087 {
5088 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5089 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5090 int mask = (1 << n_elts) - 1;
5091
5092 if (!(INTVAL (op2) & mask))
5093 return op1;
5094 if ((INTVAL (op2) & mask) == mask)
5095 return op0;
5096
5097 op0 = avoid_constant_pool_reference (op0);
5098 op1 = avoid_constant_pool_reference (op1);
5099 if (GET_CODE (op0) == CONST_VECTOR
5100 && GET_CODE (op1) == CONST_VECTOR)
5101 {
5102 rtvec v = rtvec_alloc (n_elts);
5103 unsigned int i;
5104
5105 for (i = 0; i < n_elts; i++)
5106 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5107 ? CONST_VECTOR_ELT (op0, i)
5108 : CONST_VECTOR_ELT (op1, i));
5109 return gen_rtx_CONST_VECTOR (mode, v);
5110 }
5111 }
5112 break;
5113
5114 default:
5115 gcc_unreachable ();
5116 }
5117
5118 return 0;
5119 }
5120
5121 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5122 or CONST_VECTOR,
5123 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5124
5125 Works by unpacking OP into a collection of 8-bit values
5126 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5127 and then repacking them again for OUTERMODE. */
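/* For instance, on a little-endian target, taking the HImode subreg at byte 0
   of the SImode constant 0x12345678 unpacks it to the byte sequence
   78 56 34 12 and repacks the first two bytes as the HImode constant
   0x5678. */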
5128
5129 static rtx
5130 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5131 enum machine_mode innermode, unsigned int byte)
5132 {
5133 /* We support up to 512-bit values (for V8DFmode). */
5134 enum {
5135 max_bitsize = 512,
5136 value_bit = 8,
5137 value_mask = (1 << value_bit) - 1
5138 };
5139 unsigned char value[max_bitsize / value_bit];
5140 int value_start;
5141 int i;
5142 int elem;
5143
5144 int num_elem;
5145 rtx * elems;
5146 int elem_bitsize;
5147 rtx result_s;
5148 rtvec result_v = NULL;
5149 enum mode_class outer_class;
5150 enum machine_mode outer_submode;
5151
5152 /* Some ports misuse CCmode. */
5153 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5154 return op;
5155
5156 /* We have no way to represent a complex constant at the rtl level. */
5157 if (COMPLEX_MODE_P (outermode))
5158 return NULL_RTX;
5159
5160 /* Unpack the value. */
5161
5162 if (GET_CODE (op) == CONST_VECTOR)
5163 {
5164 num_elem = CONST_VECTOR_NUNITS (op);
5165 elems = &CONST_VECTOR_ELT (op, 0);
5166 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5167 }
5168 else
5169 {
5170 num_elem = 1;
5171 elems = &op;
5172 elem_bitsize = max_bitsize;
5173 }
5174 /* If this asserts, it is too complicated; reducing value_bit may help. */
5175 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5176 /* I don't know how to handle endianness of sub-units. */
5177 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5178
5179 for (elem = 0; elem < num_elem; elem++)
5180 {
5181 unsigned char * vp;
5182 rtx el = elems[elem];
5183
5184 /* Vectors are kept in target memory order. (This is probably
5185 a mistake.) */
5186 {
5187 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5188 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5189 / BITS_PER_UNIT);
5190 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5191 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5192 unsigned bytele = (subword_byte % UNITS_PER_WORD
5193 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5194 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5195 }
5196
5197 switch (GET_CODE (el))
5198 {
5199 case CONST_INT:
5200 for (i = 0;
5201 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5202 i += value_bit)
5203 *vp++ = INTVAL (el) >> i;
5204 /* CONST_INTs are always logically sign-extended. */
5205 for (; i < elem_bitsize; i += value_bit)
5206 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5207 break;
5208
5209 case CONST_DOUBLE:
5210 if (GET_MODE (el) == VOIDmode)
5211 {
5212 unsigned char extend = 0;
5213 /* If this triggers, someone should have generated a
5214 CONST_INT instead. */
5215 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5216
5217 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5218 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5219 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5220 {
5221 *vp++
5222 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5223 i += value_bit;
5224 }
5225
5226 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5227 extend = -1;
5228 for (; i < elem_bitsize; i += value_bit)
5229 *vp++ = extend;
5230 }
5231 else
5232 {
5233 long tmp[max_bitsize / 32];
5234 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5235
5236 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5237 gcc_assert (bitsize <= elem_bitsize);
5238 gcc_assert (bitsize % value_bit == 0);
5239
5240 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5241 GET_MODE (el));
5242
5243 /* real_to_target produces its result in words affected by
5244 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5245 and use WORDS_BIG_ENDIAN instead; see the documentation
5246 of SUBREG in rtl.texi. */
5247 for (i = 0; i < bitsize; i += value_bit)
5248 {
5249 int ibase;
5250 if (WORDS_BIG_ENDIAN)
5251 ibase = bitsize - 1 - i;
5252 else
5253 ibase = i;
5254 *vp++ = tmp[ibase / 32] >> i % 32;
5255 }
5256
5257 /* It shouldn't matter what's done here, so fill it with
5258 zero. */
5259 for (; i < elem_bitsize; i += value_bit)
5260 *vp++ = 0;
5261 }
5262 break;
5263
5264 case CONST_FIXED:
5265 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5266 {
5267 for (i = 0; i < elem_bitsize; i += value_bit)
5268 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5269 }
5270 else
5271 {
5272 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5273 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5274 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5275 i += value_bit)
5276 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5277 >> (i - HOST_BITS_PER_WIDE_INT);
5278 for (; i < elem_bitsize; i += value_bit)
5279 *vp++ = 0;
5280 }
5281 break;
5282
5283 default:
5284 gcc_unreachable ();
5285 }
5286 }
5287
5288 /* Now, pick the right byte to start with. */
5289 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5290 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5291 will already have offset 0. */
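/* As a sketch: with a 4-byte INNERMODE, a 2-byte OUTERMODE and BYTE == 0 on a
   target where both WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN hold (and words are
   at least 4 bytes), BYTE is renumbered to 2, the least significant half. */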
5292 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5293 {
5294 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5295 - byte);
5296 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5297 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5298 byte = (subword_byte % UNITS_PER_WORD
5299 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5300 }
5301
5302 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5303 so if it's become negative it will instead be very large.) */
5304 gcc_assert (byte < GET_MODE_SIZE (innermode));
5305
5306 /* Convert from bytes to chunks of size value_bit. */
5307 value_start = byte * (BITS_PER_UNIT / value_bit);
5308
5309 /* Re-pack the value. */
5310
5311 if (VECTOR_MODE_P (outermode))
5312 {
5313 num_elem = GET_MODE_NUNITS (outermode);
5314 result_v = rtvec_alloc (num_elem);
5315 elems = &RTVEC_ELT (result_v, 0);
5316 outer_submode = GET_MODE_INNER (outermode);
5317 }
5318 else
5319 {
5320 num_elem = 1;
5321 elems = &result_s;
5322 outer_submode = outermode;
5323 }
5324
5325 outer_class = GET_MODE_CLASS (outer_submode);
5326 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5327
5328 gcc_assert (elem_bitsize % value_bit == 0);
5329 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5330
5331 for (elem = 0; elem < num_elem; elem++)
5332 {
5333 unsigned char *vp;
5334
5335 /* Vectors are stored in target memory order. (This is probably
5336 a mistake.) */
5337 {
5338 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5339 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5340 / BITS_PER_UNIT);
5341 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5342 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5343 unsigned bytele = (subword_byte % UNITS_PER_WORD
5344 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5345 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5346 }
5347
5348 switch (outer_class)
5349 {
5350 case MODE_INT:
5351 case MODE_PARTIAL_INT:
5352 {
5353 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5354
5355 for (i = 0;
5356 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5357 i += value_bit)
5358 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5359 for (; i < elem_bitsize; i += value_bit)
5360 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5361 << (i - HOST_BITS_PER_WIDE_INT);
5362
5363 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5364 know why. */
5365 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5366 elems[elem] = gen_int_mode (lo, outer_submode);
5367 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5368 elems[elem] = immed_double_const (lo, hi, outer_submode);
5369 else
5370 return NULL_RTX;
5371 }
5372 break;
5373
5374 case MODE_FLOAT:
5375 case MODE_DECIMAL_FLOAT:
5376 {
5377 REAL_VALUE_TYPE r;
5378 long tmp[max_bitsize / 32];
5379
5380 /* real_from_target wants its input in words affected by
5381 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5382 and use WORDS_BIG_ENDIAN instead; see the documentation
5383 of SUBREG in rtl.texi. */
5384 for (i = 0; i < max_bitsize / 32; i++)
5385 tmp[i] = 0;
5386 for (i = 0; i < elem_bitsize; i += value_bit)
5387 {
5388 int ibase;
5389 if (WORDS_BIG_ENDIAN)
5390 ibase = elem_bitsize - 1 - i;
5391 else
5392 ibase = i;
5393 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5394 }
5395
5396 real_from_target (&r, tmp, outer_submode);
5397 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5398 }
5399 break;
5400
5401 case MODE_FRACT:
5402 case MODE_UFRACT:
5403 case MODE_ACCUM:
5404 case MODE_UACCUM:
5405 {
5406 FIXED_VALUE_TYPE f;
5407 f.data.low = 0;
5408 f.data.high = 0;
5409 f.mode = outer_submode;
5410
5411 for (i = 0;
5412 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5413 i += value_bit)
5414 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5415 for (; i < elem_bitsize; i += value_bit)
5416 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5417 << (i - HOST_BITS_PER_WIDE_INT));
5418
5419 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5420 }
5421 break;
5422
5423 default:
5424 gcc_unreachable ();
5425 }
5426 }
5427 if (VECTOR_MODE_P (outermode))
5428 return gen_rtx_CONST_VECTOR (outermode, result_v);
5429 else
5430 return result_s;
5431 }
5432
5433 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5434 Return 0 if no simplifications are possible. */
5435 rtx
5436 simplify_subreg (enum machine_mode outermode, rtx op,
5437 enum machine_mode innermode, unsigned int byte)
5438 {
5439 /* Little bit of sanity checking. */
5440 gcc_assert (innermode != VOIDmode);
5441 gcc_assert (outermode != VOIDmode);
5442 gcc_assert (innermode != BLKmode);
5443 gcc_assert (outermode != BLKmode);
5444
5445 gcc_assert (GET_MODE (op) == innermode
5446 || GET_MODE (op) == VOIDmode);
5447
5448 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5449 gcc_assert (byte < GET_MODE_SIZE (innermode));
5450
5451 if (outermode == innermode && !byte)
5452 return op;
5453
5454 if (CONST_INT_P (op)
5455 || GET_CODE (op) == CONST_DOUBLE
5456 || GET_CODE (op) == CONST_FIXED
5457 || GET_CODE (op) == CONST_VECTOR)
5458 return simplify_immed_subreg (outermode, op, innermode, byte);
5459
5460 /* Changing mode twice with SUBREG => just change it once,
5461 or not at all if changing back to op's starting mode. */
5462 if (GET_CODE (op) == SUBREG)
5463 {
5464 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5465 int final_offset = byte + SUBREG_BYTE (op);
5466 rtx newx;
5467
5468 if (outermode == innermostmode
5469 && byte == 0 && SUBREG_BYTE (op) == 0)
5470 return SUBREG_REG (op);
5471
5472 /* The SUBREG_BYTE represents the offset, as if the value were stored
5473 in memory. An irritating exception is the paradoxical subreg, where
5474 we define SUBREG_BYTE to be 0. On big-endian machines, this
5475 value should be negative. For a moment, undo this exception. */
5476 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5477 {
5478 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5479 if (WORDS_BIG_ENDIAN)
5480 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5481 if (BYTES_BIG_ENDIAN)
5482 final_offset += difference % UNITS_PER_WORD;
5483 }
5484 if (SUBREG_BYTE (op) == 0
5485 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5486 {
5487 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5488 if (WORDS_BIG_ENDIAN)
5489 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5490 if (BYTES_BIG_ENDIAN)
5491 final_offset += difference % UNITS_PER_WORD;
5492 }
5493
5494 /* See whether resulting subreg will be paradoxical. */
5495 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5496 {
5497 /* In nonparadoxical subregs we can't handle negative offsets. */
5498 if (final_offset < 0)
5499 return NULL_RTX;
5500 /* Bail out in case resulting subreg would be incorrect. */
5501 if (final_offset % GET_MODE_SIZE (outermode)
5502 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5503 return NULL_RTX;
5504 }
5505 else
5506 {
5507 int offset = 0;
5508 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5509
5510 /* In a paradoxical subreg, see if we are still looking at the lower part.
5511 If so, our SUBREG_BYTE will be 0. */
5512 if (WORDS_BIG_ENDIAN)
5513 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5514 if (BYTES_BIG_ENDIAN)
5515 offset += difference % UNITS_PER_WORD;
5516 if (offset == final_offset)
5517 final_offset = 0;
5518 else
5519 return NULL_RTX;
5520 }
5521
5522 /* Recurse for further possible simplifications. */
5523 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5524 final_offset);
5525 if (newx)
5526 return newx;
5527 if (validate_subreg (outermode, innermostmode,
5528 SUBREG_REG (op), final_offset))
5529 {
5530 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5531 if (SUBREG_PROMOTED_VAR_P (op)
5532 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5533 && GET_MODE_CLASS (outermode) == MODE_INT
5534 && IN_RANGE (GET_MODE_SIZE (outermode),
5535 GET_MODE_SIZE (innermode),
5536 GET_MODE_SIZE (innermostmode))
5537 && subreg_lowpart_p (newx))
5538 {
5539 SUBREG_PROMOTED_VAR_P (newx) = 1;
5540 SUBREG_PROMOTED_UNSIGNED_SET
5541 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5542 }
5543 return newx;
5544 }
5545 return NULL_RTX;
5546 }
5547
5548 /* Merge implicit and explicit truncations. */
5549
5550 if (GET_CODE (op) == TRUNCATE
5551 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5552 && subreg_lowpart_offset (outermode, innermode) == byte)
5553 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5554 GET_MODE (XEXP (op, 0)));
5555
5556 /* SUBREG of a hard register => just change the register number
5557 and/or mode. If the hard register is not valid in that mode,
5558 suppress this simplification. If the hard register is the stack,
5559 frame, or argument pointer, leave this as a SUBREG. */
5560
5561 if (REG_P (op) && HARD_REGISTER_P (op))
5562 {
5563 unsigned int regno, final_regno;
5564
5565 regno = REGNO (op);
5566 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5567 if (HARD_REGISTER_NUM_P (final_regno))
5568 {
5569 rtx x;
5570 int final_offset = byte;
5571
5572 /* Adjust offset for paradoxical subregs. */
5573 if (byte == 0
5574 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5575 {
5576 int difference = (GET_MODE_SIZE (innermode)
5577 - GET_MODE_SIZE (outermode));
5578 if (WORDS_BIG_ENDIAN)
5579 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5580 if (BYTES_BIG_ENDIAN)
5581 final_offset += difference % UNITS_PER_WORD;
5582 }
5583
5584 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5585
5586 /* Propagate the original regno. We don't have any way to specify
5587 the offset inside the original regno, so do so only for the lowpart.
5588 The information is used only by alias analysis, which cannot
5589 grok a partial register anyway. */
5590
5591 if (subreg_lowpart_offset (outermode, innermode) == byte)
5592 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5593 return x;
5594 }
5595 }
5596
5597 /* If we have a SUBREG of a register that we are replacing and we are
5598 replacing it with a MEM, make a new MEM and try replacing the
5599 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5600 or if we would be widening it. */
5601
5602 if (MEM_P (op)
5603 && ! mode_dependent_address_p (XEXP (op, 0))
5604 /* Allow splitting of volatile memory references in case we don't
5605 have an instruction to move the whole thing. */
5606 && (! MEM_VOLATILE_P (op)
5607 || ! have_insn_for (SET, innermode))
5608 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5609 return adjust_address_nv (op, outermode, byte);
5610
5611 /* Handle complex values represented as CONCAT
5612 of real and imaginary part. */
5613 if (GET_CODE (op) == CONCAT)
5614 {
5615 unsigned int part_size, final_offset;
5616 rtx part, res;
5617
5618 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5619 if (byte < part_size)
5620 {
5621 part = XEXP (op, 0);
5622 final_offset = byte;
5623 }
5624 else
5625 {
5626 part = XEXP (op, 1);
5627 final_offset = byte - part_size;
5628 }
5629
5630 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5631 return NULL_RTX;
5632
5633 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5634 if (res)
5635 return res;
5636 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5637 return gen_rtx_SUBREG (outermode, part, final_offset);
5638 return NULL_RTX;
5639 }
5640
5641 /* Optimize SUBREG truncations of zero and sign extended values. */
5642 if ((GET_CODE (op) == ZERO_EXTEND
5643 || GET_CODE (op) == SIGN_EXTEND)
5644 && SCALAR_INT_MODE_P (innermode)
5645 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5646 {
5647 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5648
5649 /* If we're requesting the lowpart of a zero or sign extension,
5650 there are three possibilities. If the outermode is the same
5651 as the origmode, we can omit both the extension and the subreg.
5652 If the outermode is not larger than the origmode, we can apply
5653 the truncation without the extension. Finally, if the outermode
5654 is larger than the origmode, but both are integer modes, we
5655 can just extend to the appropriate mode. */
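/* Concretely (little-endian lowpart offsets assumed; the modes are
   only examples):
     (subreg:HI (zero_extend:SI (reg:HI x)) 0) -> (reg:HI x)
     (subreg:QI (zero_extend:SI (reg:HI x)) 0) -> (subreg:QI (reg:HI x) 0)
     (subreg:SI (zero_extend:DI (reg:HI x)) 0) -> (zero_extend:SI (reg:HI x))  */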
5656 if (bitpos == 0)
5657 {
5658 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5659 if (outermode == origmode)
5660 return XEXP (op, 0);
5661 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5662 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5663 subreg_lowpart_offset (outermode,
5664 origmode));
5665 if (SCALAR_INT_MODE_P (outermode))
5666 return simplify_gen_unary (GET_CODE (op), outermode,
5667 XEXP (op, 0), origmode);
5668 }
5669
5670 /* A SUBREG resulting from a zero extension may fold to zero if
5671 it extracts higher bits than the ZERO_EXTEND's source bits.  */
5672 if (GET_CODE (op) == ZERO_EXTEND
5673 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5674 return CONST0_RTX (outermode);
5675 }
5676
5677 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5678 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5679 the outer subreg is effectively a truncation to the original mode. */
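/* E.g. (little-endian lowpart, shift count smaller than the QImode
   precision):
     (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 3)) 0)
   folds to
     (ashiftrt:QI (reg:QI x) (const_int 3)).  */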
5680 if ((GET_CODE (op) == LSHIFTRT
5681 || GET_CODE (op) == ASHIFTRT)
5682 && SCALAR_INT_MODE_P (outermode)
5683 && SCALAR_INT_MODE_P (innermode)
5684 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5685 to avoid the possibility that an outer LSHIFTRT shifts by more
5686 than the sign extension's sign_bit_copies and introduces zeros
5687 into the high bits of the result. */
5688 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5689 && CONST_INT_P (XEXP (op, 1))
5690 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5691 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5692 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5693 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5694 return simplify_gen_binary (ASHIFTRT, outermode,
5695 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5696
5697 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5698 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5699 the outer subreg is effectively a truncation to the original mode. */
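/* E.g. (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI x)) (const_int 3)) 0)
   folds to (lshiftrt:QI (reg:QI x) (const_int 3)) under the same
   lowpart/small-count assumptions as above.  */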
5700 if ((GET_CODE (op) == LSHIFTRT
5701 || GET_CODE (op) == ASHIFTRT)
5702 && SCALAR_INT_MODE_P (outermode)
5703 && SCALAR_INT_MODE_P (innermode)
5704 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5705 && CONST_INT_P (XEXP (op, 1))
5706 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5707 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5708 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5709 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5710 return simplify_gen_binary (LSHIFTRT, outermode,
5711 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5712
5713 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5714 into (ashift:QI (x:QI) C), where C is a suitable small constant and
5715 the outer subreg is effectively a truncation to the original mode. */
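/* E.g. (subreg:QI (ashift:SI (zero_extend:SI (reg:QI x)) (const_int 3)) 0)
   folds to (ashift:QI (reg:QI x) (const_int 3)); only the low QImode
   bits of the shifted value are needed.  */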
5716 if (GET_CODE (op) == ASHIFT
5717 && SCALAR_INT_MODE_P (outermode)
5718 && SCALAR_INT_MODE_P (innermode)
5719 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5720 && CONST_INT_P (XEXP (op, 1))
5721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5724 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5725 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5726 return simplify_gen_binary (ASHIFT, outermode,
5727 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5728
5729 /* Recognize a word extraction from a multi-word subreg. */
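/* Illustration, assuming a 32-bit little-endian target where
   BITS_PER_WORD is 32:
     (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0)
   becomes the plain high-word extraction
     (subreg:SI (reg:DI x) 4).  */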
5730 if ((GET_CODE (op) == LSHIFTRT
5731 || GET_CODE (op) == ASHIFTRT)
5732 && SCALAR_INT_MODE_P (innermode)
5733 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5734 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5735 && CONST_INT_P (XEXP (op, 1))
5736 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5737 && INTVAL (XEXP (op, 1)) >= 0
5738 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5739 && byte == subreg_lowpart_offset (outermode, innermode))
5740 {
5741 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5742 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5743 (WORDS_BIG_ENDIAN
5744 ? byte - shifted_bytes
5745 : byte + shifted_bytes));
5746 }
5747
5748 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5749 and try replacing the SUBREG and shift with it. Don't do this if
5750 the MEM has a mode-dependent address or if we would be widening it. */
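/* Sketch, under the same 32-bit little-endian assumptions as above:
     (subreg:SI (lshiftrt:DI (mem:DI (reg:SI ptr)) (const_int 32)) 0)
   can be replaced by a narrower load of the high word, roughly
     (mem:SI (plus:SI (reg:SI ptr) (const_int 4))).  */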
5751
5752 if ((GET_CODE (op) == LSHIFTRT
5753 || GET_CODE (op) == ASHIFTRT)
5754 && SCALAR_INT_MODE_P (innermode)
5755 && MEM_P (XEXP (op, 0))
5756 && CONST_INT_P (XEXP (op, 1))
5757 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5758 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5759 && INTVAL (XEXP (op, 1)) > 0
5760 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5761 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5762 && ! MEM_VOLATILE_P (XEXP (op, 0))
5763 && byte == subreg_lowpart_offset (outermode, innermode)
5764 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5765 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5766 {
5767 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5768 return adjust_address_nv (XEXP (op, 0), outermode,
5769 (WORDS_BIG_ENDIAN
5770 ? byte - shifted_bytes
5771 : byte + shifted_bytes));
5772 }
5773
5774 return NULL_RTX;
5775 }
5776
5777 /* Make a SUBREG operation or equivalent if it folds. */
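/* A minimal usage sketch for the function below (the caller and the
   operand X are hypothetical):

     rtx lowpart = simplify_gen_subreg (SImode, x, DImode, 0);

   This returns a folded rtx when simplify_subreg succeeds, otherwise a
   fresh (subreg:SI x 0) when such a subreg is valid, and NULL_RTX when
   no correct SUBREG can be formed (e.g. X is itself a SUBREG or a
   CONCAT).  */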
5778
5779 rtx
5780 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5781 enum machine_mode innermode, unsigned int byte)
5782 {
5783 rtx newx;
5784
5785 newx = simplify_subreg (outermode, op, innermode, byte);
5786 if (newx)
5787 return newx;
5788
5789 if (GET_CODE (op) == SUBREG
5790 || GET_CODE (op) == CONCAT
5791 || GET_MODE (op) == VOIDmode)
5792 return NULL_RTX;
5793
5794 if (validate_subreg (outermode, innermode, op, byte))
5795 return gen_rtx_SUBREG (outermode, op, byte);
5796
5797 return NULL_RTX;
5798 }
5799
5800 /* Simplify X, an rtx expression.
5801
5802 Return the simplified expression or NULL if no simplifications
5803 were possible.
5804
5805 This is the preferred entry point into the simplification routines;
5806 however, we still allow passes to call the more specific routines.
5807
5808 Right now GCC has three (yes, three) major bodies of RTL simplification
5809 code that need to be unified.
5810
5811 1. fold_rtx in cse.c. This code uses various CSE specific
5812 information to aid in RTL simplification.
5813
5814 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5815 it uses combine specific information to aid in RTL
5816 simplification.
5817
5818 3. The routines in this file.
5819
5820
5821 Long term we want to only have one body of simplification code; to
5822 get to that state I recommend the following steps:
5823
5824 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5825 that do not depend on pass-specific state into these routines.
5826
5827 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5828 use this routine whenever possible.
5829
5830 3. Allow for pass dependent state to be provided to these
5831 routines and add simplifications based on the pass dependent
5832 state. Remove code from cse.c & combine.c that becomes
5833 redundant/dead.
5834
5835 It will take time, but ultimately the compiler will be easier to
5836 maintain and improve.  It's totally silly that when we add a
5837 simplification it needs to be added to 4 places (3 for RTL
5838 simplification and 1 for tree simplification).  */
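/* As a quick illustration of the entry point below (the operands are
   arbitrary examples): simplify_rtx applied to
     (plus:SI (reg:SI x) (const_int 0))
   would fold to (reg:SI x), while an expression it cannot improve
   yields NULL.  */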
5839
5840 rtx
5841 simplify_rtx (const_rtx x)
5842 {
5843 const enum rtx_code code = GET_CODE (x);
5844 const enum machine_mode mode = GET_MODE (x);
5845
5846 switch (GET_RTX_CLASS (code))
5847 {
5848 case RTX_UNARY:
5849 return simplify_unary_operation (code, mode,
5850 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5851 case RTX_COMM_ARITH:
5852 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5853 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5854
5855 /* Fall through.  */
5856
5857 case RTX_BIN_ARITH:
5858 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5859
5860 case RTX_TERNARY:
5861 case RTX_BITFIELD_OPS:
5862 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5863 XEXP (x, 0), XEXP (x, 1),
5864 XEXP (x, 2));
5865
5866 case RTX_COMPARE:
5867 case RTX_COMM_COMPARE:
5868 return simplify_relational_operation (code, mode,
5869 ((GET_MODE (XEXP (x, 0))
5870 != VOIDmode)
5871 ? GET_MODE (XEXP (x, 0))
5872 : GET_MODE (XEXP (x, 1))),
5873 XEXP (x, 0),
5874 XEXP (x, 1));
5875
5876 case RTX_EXTRA:
5877 if (code == SUBREG)
5878 return simplify_subreg (mode, SUBREG_REG (x),
5879 GET_MODE (SUBREG_REG (x)),
5880 SUBREG_BYTE (x));
5881 break;
5882
5883 case RTX_OBJ:
5884 if (code == LO_SUM)
5885 {
5886 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5887 if (GET_CODE (XEXP (x, 0)) == HIGH
5888 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5889 return XEXP (x, 1);
5890 }
5891 break;
5892
5893 default:
5894 break;
5895 }
5896 return NULL;
5897 }