1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
41
42 /* Simplification and canonicalization of RTL. */
43
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
47 signed wide int. */
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
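/* For illustration, a minimal sketch of the intended use when building a
   (low, high) pair from one signed value (hypothetical variable v, not
   part of this file):

     unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) v;
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (v);

   For v == -5 the macro yields -1 (all bits set); for v == 5 it yields 0.  */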
50
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 rtx, rtx, rtx, rtx);
64 \f
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66 maximally negative number can overflow). */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
69 {
70 return gen_int_mode (- INTVAL (i), mode);
71 }
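/* Illustrative sketch (hypothetical caller): the truncation matters for
   the most negative value of a mode.  In QImode,

     neg_const_int (QImode, GEN_INT (-128))

   negates -128 to +128, which does not fit in 8 signed bits; gen_int_mode
   wraps the result back to (const_int -128) instead of producing an
   out-of-mode constant.  */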
72
73 /* Test whether expression, X, is an immediate constant that represents
74 the most significant bit of machine mode MODE. */
75
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 {
79 unsigned HOST_WIDE_INT val;
80 unsigned int width;
81
82 if (GET_MODE_CLASS (mode) != MODE_INT)
83 return false;
84
85 width = GET_MODE_PRECISION (mode);
86 if (width == 0)
87 return false;
88
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 && GET_CODE (x) == CONST_DOUBLE
94 && CONST_DOUBLE_LOW (x) == 0)
95 {
96 val = CONST_DOUBLE_HIGH (x);
97 width -= HOST_BITS_PER_WIDE_INT;
98 }
99 else
100 return false;
101
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
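/* Illustrative sketch (hypothetical values): for SImode the sign bit is
   0x80000000, so mode_signbit_p (SImode, x) is true when x is a CONST_INT
   with only that bit set, and false for, say, (const_int 0x40000000).
   Constants wider than a host word go through the CONST_DOUBLE branch
   above, which requires the low word of the constant to be zero.  */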
106
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
110
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114 unsigned int width;
115
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
118
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
122
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
126
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
136
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
140
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
143 }
144
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150 unsigned int width;
151
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
154
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
158
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
161 }
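/* Illustrative sketch tying the three predicates together (hypothetical
   SImode values; the sign bit is 0x80000000):

     val_signbit_p             (SImode, 0x80000000)   true, exactly the bit
     val_signbit_known_set_p   (SImode, 0xc0000000)   true, the bit is set
     val_signbit_known_clear_p (SImode, 0x7fffffff)   true, the bit is clear

   All three return false for modes wider than HOST_BITS_PER_WIDE_INT.  */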
162 \f
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
165
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
169 {
170 rtx tem;
171
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
176
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
181
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
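/* A minimal usage sketch (hypothetical operands).  Constant operands fold
   immediately; otherwise the commutative canonicalization puts the
   constant second before a new rtx is built:

     simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
       ==> (const_int 5)
     simplify_gen_binary (PLUS, SImode, GEN_INT (2), reg)
       ==> (plus:SI reg (const_int 2))

   where "reg" stands for some non-constant SImode rtx.  */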
184 \f
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
193
194 switch (GET_CODE (x))
195 {
196 case MEM:
197 break;
198
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
204 {
205 REAL_VALUE_TYPE d;
206
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 }
210 return x;
211
212 default:
213 return x;
214 }
215
216 if (GET_MODE (x) == BLKmode)
217 return x;
218
219 addr = XEXP (x, 0);
220
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
223
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228 {
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
231 }
232
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
235
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
240 {
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
243
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
248 {
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
252 }
253 else
254 return c;
255 }
256
257 return x;
258 }
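/* Illustrative sketch (hypothetical situation): if X is a MEM such as
   (mem/u:SF (symbol_ref:SI ("*.LC0"))) whose symbol addresses a constant
   pool entry holding 1.0, the function returns the CONST_DOUBLE for 1.0,
   so later folding can treat the load as a constant.  When the access
   mode or offset differs from how the entry was stored, a subreg
   simplification is attempted first, and the original MEM is returned if
   that fails.  */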
259 \f
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
263
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
266 {
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET (x))
272 {
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
276
277 switch (TREE_CODE (decl))
278 {
279 default:
280 decl = NULL;
281 break;
282
283 case VAR_DECL:
284 break;
285
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
293 {
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp = 0, volatilep = 0;
297
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
305 {
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
309 }
310 break;
311 }
312 }
313
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
321 {
322 rtx newx;
323
324 offset += INTVAL (MEM_OFFSET (x));
325
326 newx = DECL_RTL (decl);
327
328 if (MEM_P (newx))
329 {
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
350 }
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
354 }
355 }
356
357 return x;
358 }
359 \f
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
362
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
366 {
367 rtx tem;
368
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
372
373 return gen_rtx_fmt_e (code, mode, op);
374 }
375
376 /* Likewise for ternary operations. */
377
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 {
382 rtx tem;
383
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
388
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 }
391
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
394
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 {
399 rtx tem;
400
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
404
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
406 }
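/* A small usage sketch (hypothetical operands).  Comparing two constants
   folds to the mode's "true" or "false" value; otherwise a relational rtx
   is built in MODE with the comparison done in CMP_MODE:

     simplify_gen_relational (EQ, SImode, SImode, GEN_INT (1), GEN_INT (1))
       ==> (const_int 1)               assuming STORE_FLAG_VALUE == 1
     simplify_gen_relational (LT, SImode, SImode, reg, const0_rtx)
       ==> (lt:SI reg (const_int 0))   when nothing simplifies.  */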
407 \f
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X; if it returns non-NULL, replace X with its return value and simplify the
411 result. */
412
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 {
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
424
425 if (__builtin_expect (fn != NULL, 0))
426 {
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
430 }
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
433
434 switch (GET_RTX_CLASS (code))
435 {
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
443
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
451
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475
476 case RTX_EXTRA:
477 if (code == SUBREG)
478 {
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
486 }
487 break;
488
489 case RTX_OBJ:
490 if (code == MEM)
491 {
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
496 }
497 else if (code == LO_SUM)
498 {
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
505
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
509 }
510 break;
511
512 default:
513 break;
514 }
515
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
520 {
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 {
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
529 {
530 if (newvec == vec)
531 {
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
536 }
537 RTVEC_ELT (newvec, j) = op;
538 }
539 }
540 break;
541
542 case 'e':
543 if (XEXP (x, i))
544 {
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
547 {
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
551 }
552 }
553 break;
554 }
555 return newx;
556 }
557
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
560
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 {
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
565 }
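/* A brief usage sketch (hypothetical rtxen).  Replacing a register with a
   constant re-simplifies the surrounding expression:

     x       = (plus:SI (reg:SI 100) (const_int 4))
     old_rtx = (reg:SI 100)
     new_rtx = (const_int 6)

     simplify_replace_rtx (x, old_rtx, new_rtx)  ==>  (const_int 10)

   Subexpressions that do not change are shared rather than copied, as the
   structural-sharing logic in simplify_replace_fn_rtx above shows.  */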
566 \f
567 /* Try to simplify a unary operation CODE whose output mode is to be
568 MODE with input operand OP whose mode was originally OP_MODE.
569 Return zero if no simplification can be made. */
570 rtx
571 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
572 rtx op, enum machine_mode op_mode)
573 {
574 rtx trueop, tem;
575
576 trueop = avoid_constant_pool_reference (op);
577
578 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
579 if (tem)
580 return tem;
581
582 return simplify_unary_operation_1 (code, mode, op);
583 }
584
585 /* Perform some simplifications we can do even if the operands
586 aren't constant. */
587 static rtx
588 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
589 {
590 enum rtx_code reversed;
591 rtx temp;
592
593 switch (code)
594 {
595 case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op) == NOT)
598 return XEXP (op, 0);
599
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op)
603 && (mode == BImode || STORE_FLAG_VALUE == -1)
604 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
605 return simplify_gen_relational (reversed, mode, VOIDmode,
606 XEXP (op, 0), XEXP (op, 1));
607
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op) == PLUS
610 && XEXP (op, 1) == constm1_rtx)
611 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
612
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op) == NEG)
615 return plus_constant (XEXP (op, 0), -1);
616
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op) == XOR
619 && CONST_INT_P (XEXP (op, 1))
620 && (temp = simplify_unary_operation (NOT, mode,
621 XEXP (op, 1), mode)) != 0)
622 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
623
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op) == PLUS
626 && CONST_INT_P (XEXP (op, 1))
627 && mode_signbit_p (mode, XEXP (op, 1))
628 && (temp = simplify_unary_operation (NOT, mode,
629 XEXP (op, 1), mode)) != 0)
630 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
631
632
633 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
634 operands other than 1, but that is not valid. We could do a
635 similar simplification for (not (lshiftrt C X)) where C is
636 just the sign bit, but this doesn't seem common enough to
637 bother with. */
638 if (GET_CODE (op) == ASHIFT
639 && XEXP (op, 0) == const1_rtx)
640 {
641 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
642 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
643 }
644
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
648
649 if (STORE_FLAG_VALUE == -1
650 && GET_CODE (op) == ASHIFTRT
651 && CONST_INT_P (XEXP (op, 1))
652 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
653 return simplify_gen_relational (GE, mode, VOIDmode,
654 XEXP (op, 0), const0_rtx);
655
656
657 if (GET_CODE (op) == SUBREG
658 && subreg_lowpart_p (op)
659 && (GET_MODE_SIZE (GET_MODE (op))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
661 && GET_CODE (SUBREG_REG (op)) == ASHIFT
662 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
663 {
664 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
665 rtx x;
666
667 x = gen_rtx_ROTATE (inner_mode,
668 simplify_gen_unary (NOT, inner_mode, const1_rtx,
669 inner_mode),
670 XEXP (SUBREG_REG (op), 1));
671 return rtl_hooks.gen_lowpart_no_emit (mode, x);
672 }
673
674 /* Apply De Morgan's laws to reduce number of patterns for machines
675 with negating logical insns (and-not, nand, etc.). If result has
676 only one NOT, put it first, since that is how the patterns are
677 coded. */
678
679 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
680 {
681 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
682 enum machine_mode op_mode;
683
684 op_mode = GET_MODE (in1);
685 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
686
687 op_mode = GET_MODE (in2);
688 if (op_mode == VOIDmode)
689 op_mode = mode;
690 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
691
692 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
693 {
694 rtx tem = in2;
695 in2 = in1; in1 = tem;
696 }
697
698 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
699 mode, in1, in2);
700 }
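          /* For illustration: the clause above rewrites (not (ior A B)) as
             (and (not A) (not B)) and (not (and A B)) as (ior (not A)
             (not B)), with any surviving NOT moved to the first operand so
             the result matches the and-not/nand patterns.  */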
701 break;
702
703 case NEG:
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op) == NEG)
706 return XEXP (op, 0);
707
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op) == PLUS
710 && XEXP (op, 1) == const1_rtx)
711 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
712
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op) == NOT)
715 return plus_constant (XEXP (op, 0), 1);
716
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
725 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
726
727 if (GET_CODE (op) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
730 {
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op, 1))
733 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
734 {
735 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
736 if (temp)
737 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
738 }
739
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
742 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
743 }
744
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
749 {
750 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
751 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
752 }
753
754 /* NEG commutes with ASHIFT since it is multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
756 is a constant). */
757 if (GET_CODE (op) == ASHIFT)
758 {
759 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
760 if (temp)
761 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
762 }
763
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op) == ASHIFTRT
767 && CONST_INT_P (XEXP (op, 1))
768 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
769 return simplify_gen_binary (LSHIFTRT, mode,
770 XEXP (op, 0), XEXP (op, 1));
771
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op) == LSHIFTRT
775 && CONST_INT_P (XEXP (op, 1))
776 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
777 return simplify_gen_binary (ASHIFTRT, mode,
778 XEXP (op, 0), XEXP (op, 1));
779
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op) == XOR
782 && XEXP (op, 1) == const1_rtx
783 && nonzero_bits (XEXP (op, 0), mode) == 1)
784 return plus_constant (XEXP (op, 0), -1);
785
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op) == LT
789 && XEXP (op, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
791 {
792 enum machine_mode inner = GET_MODE (XEXP (op, 0));
793 int isize = GET_MODE_PRECISION (inner);
794 if (STORE_FLAG_VALUE == 1)
795 {
796 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
797 GEN_INT (isize - 1));
798 if (mode == inner)
799 return temp;
800 if (GET_MODE_PRECISION (mode) > isize)
801 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
802 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
803 }
804 else if (STORE_FLAG_VALUE == -1)
805 {
806 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
807 GEN_INT (isize - 1));
808 if (mode == inner)
809 return temp;
810 if (GET_MODE_PRECISION (mode) > isize)
811 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
812 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
813 }
814 }
815 break;
816
817 case TRUNCATE:
818 /* We can't handle truncation to a partial integer mode here
819 because we don't know the real bitsize of the partial
820 integer mode. */
821 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
822 break;
823
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op) == SIGN_EXTEND
826 || GET_CODE (op) == ZERO_EXTEND)
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
829
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op) == ABS
833 || GET_CODE (op) == NEG)
834 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
836 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
837 return simplify_gen_unary (GET_CODE (op), mode,
838 XEXP (XEXP (op, 0), 0), mode);
839
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
841 (truncate:A X). */
842 if (GET_CODE (op) == SUBREG
843 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
844 && subreg_lowpart_p (op))
845 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
846 GET_MODE (XEXP (SUBREG_REG (op), 0)));
847
848 /* If we know that the value is already truncated, we can
849 replace the TRUNCATE with a SUBREG. Note that this is also
850 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
851 modes we just have to apply a different definition for
852 truncation. But don't do this for an (LSHIFTRT (MULT ...))
853 since this will cause problems with the umulXi3_highpart
854 patterns. */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
856 ? (num_sign_bit_copies (op, GET_MODE (op))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
858 - GET_MODE_PRECISION (mode)))
859 : truncated_to_mode (mode, op))
860 && ! (GET_CODE (op) == LSHIFTRT
861 && GET_CODE (XEXP (op, 0)) == MULT))
862 return rtl_hooks.gen_lowpart_no_emit (mode, op);
863
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode)
869 && COMPARISON_P (op)
870 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
871 return rtl_hooks.gen_lowpart_no_emit (mode, op);
872 break;
873
874 case FLOAT_TRUNCATE:
875 if (DECIMAL_FLOAT_MODE_P (mode))
876 break;
877
878 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
879 if (GET_CODE (op) == FLOAT_EXTEND
880 && GET_MODE (XEXP (op, 0)) == mode)
881 return XEXP (op, 0);
882
883 /* (float_truncate:SF (float_truncate:DF foo:XF))
884 = (float_truncate:SF foo:XF).
885 This may eliminate double rounding, so it is unsafe.
886
887 (float_truncate:SF (float_extend:XF foo:DF))
888 = (float_truncate:SF foo:DF).
889
890 (float_truncate:DF (float_extend:XF foo:SF))
891 = (float_extend:DF foo:SF). */
892 if ((GET_CODE (op) == FLOAT_TRUNCATE
893 && flag_unsafe_math_optimizations)
894 || GET_CODE (op) == FLOAT_EXTEND)
895 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
896 0)))
897 > GET_MODE_SIZE (mode)
898 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
899 mode,
900 XEXP (op, 0), mode);
901
902 /* (float_truncate (float x)) is (float x) */
903 if (GET_CODE (op) == FLOAT
904 && (flag_unsafe_math_optimizations
905 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
906 && ((unsigned)significand_size (GET_MODE (op))
907 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
908 - num_sign_bit_copies (XEXP (op, 0),
909 GET_MODE (XEXP (op, 0))))))))
910 return simplify_gen_unary (FLOAT, mode,
911 XEXP (op, 0),
912 GET_MODE (XEXP (op, 0)));
913
914 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
915 (OP:SF foo:SF) if OP is NEG or ABS. */
916 if ((GET_CODE (op) == ABS
917 || GET_CODE (op) == NEG)
918 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
919 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
920 return simplify_gen_unary (GET_CODE (op), mode,
921 XEXP (XEXP (op, 0), 0), mode);
922
923 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
924 is (float_truncate:SF x). */
925 if (GET_CODE (op) == SUBREG
926 && subreg_lowpart_p (op)
927 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
928 return SUBREG_REG (op);
929 break;
930
931 case FLOAT_EXTEND:
932 if (DECIMAL_FLOAT_MODE_P (mode))
933 break;
934
935 /* (float_extend (float_extend x)) is (float_extend x)
936
937 (float_extend (float x)) is (float x) assuming that double
938 rounding can't happen.
939 */
940 if (GET_CODE (op) == FLOAT_EXTEND
941 || (GET_CODE (op) == FLOAT
942 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
943 && ((unsigned)significand_size (GET_MODE (op))
944 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
945 - num_sign_bit_copies (XEXP (op, 0),
946 GET_MODE (XEXP (op, 0)))))))
947 return simplify_gen_unary (GET_CODE (op), mode,
948 XEXP (op, 0),
949 GET_MODE (XEXP (op, 0)));
950
951 break;
952
953 case ABS:
954 /* (abs (neg <foo>)) -> (abs <foo>) */
955 if (GET_CODE (op) == NEG)
956 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
957 GET_MODE (XEXP (op, 0)));
958
959 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
960 do nothing. */
961 if (GET_MODE (op) == VOIDmode)
962 break;
963
964 /* If operand is something known to be positive, ignore the ABS. */
965 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
966 || val_signbit_known_clear_p (GET_MODE (op),
967 nonzero_bits (op, GET_MODE (op))))
968 return op;
969
970 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
971 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
972 return gen_rtx_NEG (mode, op);
973
974 break;
975
976 case FFS:
977 /* (ffs (*_extend <X>)) = (ffs <X>) */
978 if (GET_CODE (op) == SIGN_EXTEND
979 || GET_CODE (op) == ZERO_EXTEND)
980 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
981 GET_MODE (XEXP (op, 0)));
982 break;
983
984 case POPCOUNT:
985 switch (GET_CODE (op))
986 {
987 case BSWAP:
988 case ZERO_EXTEND:
989 /* (popcount (zero_extend <X>)) = (popcount <X>) */
990 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
991 GET_MODE (XEXP (op, 0)));
992
993 case ROTATE:
994 case ROTATERT:
995 /* Rotations don't affect popcount. */
996 if (!side_effects_p (XEXP (op, 1)))
997 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
998 GET_MODE (XEXP (op, 0)));
999 break;
1000
1001 default:
1002 break;
1003 }
1004 break;
1005
1006 case PARITY:
1007 switch (GET_CODE (op))
1008 {
1009 case NOT:
1010 case BSWAP:
1011 case ZERO_EXTEND:
1012 case SIGN_EXTEND:
1013 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1014 GET_MODE (XEXP (op, 0)));
1015
1016 case ROTATE:
1017 case ROTATERT:
1018 /* Rotations don't affect parity. */
1019 if (!side_effects_p (XEXP (op, 1)))
1020 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1021 GET_MODE (XEXP (op, 0)));
1022 break;
1023
1024 default:
1025 break;
1026 }
1027 break;
1028
1029 case BSWAP:
1030 /* (bswap (bswap x)) -> x. */
1031 if (GET_CODE (op) == BSWAP)
1032 return XEXP (op, 0);
1033 break;
1034
1035 case FLOAT:
1036 /* (float (sign_extend <X>)) = (float <X>). */
1037 if (GET_CODE (op) == SIGN_EXTEND)
1038 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1039 GET_MODE (XEXP (op, 0)));
1040 break;
1041
1042 case SIGN_EXTEND:
1043 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1044 becomes just the MINUS if its mode is MODE. This allows
1045 folding switch statements on machines using casesi (such as
1046 the VAX). */
1047 if (GET_CODE (op) == TRUNCATE
1048 && GET_MODE (XEXP (op, 0)) == mode
1049 && GET_CODE (XEXP (op, 0)) == MINUS
1050 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1051 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1052 return XEXP (op, 0);
1053
1054 /* Extending a widening multiplication should be canonicalized to
1055 a wider widening multiplication. */
1056 if (GET_CODE (op) == MULT)
1057 {
1058 rtx lhs = XEXP (op, 0);
1059 rtx rhs = XEXP (op, 1);
1060 enum rtx_code lcode = GET_CODE (lhs);
1061 enum rtx_code rcode = GET_CODE (rhs);
1062
1063 /* Widening multiplies usually extend both operands, but sometimes
1064 they use a shift to extract a portion of a register. */
1065 if ((lcode == SIGN_EXTEND
1066 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1067 && (rcode == SIGN_EXTEND
1068 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1069 {
1070 enum machine_mode lmode = GET_MODE (lhs);
1071 enum machine_mode rmode = GET_MODE (rhs);
1072 int bits;
1073
1074 if (lcode == ASHIFTRT)
1075 /* Number of bits not shifted off the end. */
1076 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1077 else /* lcode == SIGN_EXTEND */
1078 /* Size of inner mode. */
1079 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1080
1081 if (rcode == ASHIFTRT)
1082 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1083 else /* rcode == SIGN_EXTEND */
1084 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1085
1086 /* We can only widen multiplies if the result is mathematically
1087 equivalent. I.e. if overflow was impossible. */
1088 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1089 return simplify_gen_binary
1090 (MULT, mode,
1091 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1092 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1093 }
1094 }
1095
1096 /* Check for a sign extension of a subreg of a promoted
1097 variable, where the promotion is sign-extended, and the
1098 target mode is the same as the variable's promotion. */
1099 if (GET_CODE (op) == SUBREG
1100 && SUBREG_PROMOTED_VAR_P (op)
1101 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1102 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1103 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1104
1105 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1106 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1107 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1108 {
1109 gcc_assert (GET_MODE_BITSIZE (mode)
1110 > GET_MODE_BITSIZE (GET_MODE (op)));
1111 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1112 GET_MODE (XEXP (op, 0)));
1113 }
1114
1115 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1116 is (sign_extend:M (subreg:O <X>)) if there is mode with
1117 GET_MODE_BITSIZE (N) - I bits.
1118 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1119 is similarly (zero_extend:M (subreg:O <X>)). */
1120 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1121 && GET_CODE (XEXP (op, 0)) == ASHIFT
1122 && CONST_INT_P (XEXP (op, 1))
1123 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1124 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1125 {
1126 enum machine_mode tmode
1127 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1128 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1129 gcc_assert (GET_MODE_BITSIZE (mode)
1130 > GET_MODE_BITSIZE (GET_MODE (op)));
1131 if (tmode != BLKmode)
1132 {
1133 rtx inner =
1134 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1135 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1136 ? SIGN_EXTEND : ZERO_EXTEND,
1137 mode, inner, tmode);
1138 }
1139 }
1140
1141 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1142 /* As we do not know which address space the pointer is referring to,
1143 we can do this only if the target does not support different pointer
1144 or address modes depending on the address space. */
1145 if (target_default_pointer_address_modes_p ()
1146 && ! POINTERS_EXTEND_UNSIGNED
1147 && mode == Pmode && GET_MODE (op) == ptr_mode
1148 && (CONSTANT_P (op)
1149 || (GET_CODE (op) == SUBREG
1150 && REG_P (SUBREG_REG (op))
1151 && REG_POINTER (SUBREG_REG (op))
1152 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1153 return convert_memory_address (Pmode, op);
1154 #endif
1155 break;
1156
1157 case ZERO_EXTEND:
1158 /* Check for a zero extension of a subreg of a promoted
1159 variable, where the promotion is zero-extended, and the
1160 target mode is the same as the variable's promotion. */
1161 if (GET_CODE (op) == SUBREG
1162 && SUBREG_PROMOTED_VAR_P (op)
1163 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1164 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1165 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1166
1167 /* Extending a widening multiplication should be canonicalized to
1168 a wider widening multiplication. */
1169 if (GET_CODE (op) == MULT)
1170 {
1171 rtx lhs = XEXP (op, 0);
1172 rtx rhs = XEXP (op, 1);
1173 enum rtx_code lcode = GET_CODE (lhs);
1174 enum rtx_code rcode = GET_CODE (rhs);
1175
1176 /* Widening multiplies usually extend both operands, but sometimes
1177 they use a shift to extract a portion of a register. */
1178 if ((lcode == ZERO_EXTEND
1179 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1180 && (rcode == ZERO_EXTEND
1181 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1182 {
1183 enum machine_mode lmode = GET_MODE (lhs);
1184 enum machine_mode rmode = GET_MODE (rhs);
1185 int bits;
1186
1187 if (lcode == LSHIFTRT)
1188 /* Number of bits not shifted off the end. */
1189 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1190 else /* lcode == ZERO_EXTEND */
1191 /* Size of inner mode. */
1192 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1193
1194 if (rcode == LSHIFTRT)
1195 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1196 else /* rcode == ZERO_EXTEND */
1197 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1198
1199 /* We can only widen multiplies if the result is mathematically
1200 equivalent. I.e. if overflow was impossible. */
1201 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1202 return simplify_gen_binary
1203 (MULT, mode,
1204 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1205 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1206 }
1207 }
1208
1209 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1210 if (GET_CODE (op) == ZERO_EXTEND)
1211 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1212 GET_MODE (XEXP (op, 0)));
1213
1214 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1215 is (zero_extend:M (subreg:O <X>)) if there is mode with
1216 GET_MODE_BITSIZE (N) - I bits. */
1217 if (GET_CODE (op) == LSHIFTRT
1218 && GET_CODE (XEXP (op, 0)) == ASHIFT
1219 && CONST_INT_P (XEXP (op, 1))
1220 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1221 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1222 {
1223 enum machine_mode tmode
1224 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1225 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1226 if (tmode != BLKmode)
1227 {
1228 rtx inner =
1229 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1230 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1231 }
1232 }
1233
1234 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1235 /* As we do not know which address space the pointer is referring to,
1236 we can do this only if the target does not support different pointer
1237 or address modes depending on the address space. */
1238 if (target_default_pointer_address_modes_p ()
1239 && POINTERS_EXTEND_UNSIGNED > 0
1240 && mode == Pmode && GET_MODE (op) == ptr_mode
1241 && (CONSTANT_P (op)
1242 || (GET_CODE (op) == SUBREG
1243 && REG_P (SUBREG_REG (op))
1244 && REG_POINTER (SUBREG_REG (op))
1245 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1246 return convert_memory_address (Pmode, op);
1247 #endif
1248 break;
1249
1250 default:
1251 break;
1252 }
1253
1254 return 0;
1255 }
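/* A few worked instances of the non-constant rules above (hypothetical
   operands; "reg" stands for some SImode register rtx):

     simplify_unary_operation (NOT, SImode, (not:SI reg), SImode)  ==> reg
     simplify_unary_operation (NEG, SImode, (neg:SI reg), SImode)  ==> reg
     simplify_unary_operation (NEG, SImode,
                               (mult:SI reg (const_int 3)), SImode)
       ==> (mult:SI reg (const_int -3))  */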
1256
1257 /* Try to compute the value of a unary operation CODE whose output mode is to
1258 be MODE with input operand OP whose mode was originally OP_MODE.
1259 Return zero if the value cannot be computed. */
1260 rtx
1261 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1262 rtx op, enum machine_mode op_mode)
1263 {
1264 unsigned int width = GET_MODE_PRECISION (mode);
1265 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1266
1267 if (code == VEC_DUPLICATE)
1268 {
1269 gcc_assert (VECTOR_MODE_P (mode));
1270 if (GET_MODE (op) != VOIDmode)
1271 {
1272 if (!VECTOR_MODE_P (GET_MODE (op)))
1273 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1274 else
1275 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1276 (GET_MODE (op)));
1277 }
1278 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1279 || GET_CODE (op) == CONST_VECTOR)
1280 {
1281 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1282 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1283 rtvec v = rtvec_alloc (n_elts);
1284 unsigned int i;
1285
1286 if (GET_CODE (op) != CONST_VECTOR)
1287 for (i = 0; i < n_elts; i++)
1288 RTVEC_ELT (v, i) = op;
1289 else
1290 {
1291 enum machine_mode inmode = GET_MODE (op);
1292 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1293 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1294
1295 gcc_assert (in_n_elts < n_elts);
1296 gcc_assert ((n_elts % in_n_elts) == 0);
1297 for (i = 0; i < n_elts; i++)
1298 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1299 }
1300 return gen_rtx_CONST_VECTOR (mode, v);
1301 }
1302 }
1303
1304 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1305 {
1306 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1307 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1308 enum machine_mode opmode = GET_MODE (op);
1309 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1310 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1311 rtvec v = rtvec_alloc (n_elts);
1312 unsigned int i;
1313
1314 gcc_assert (op_n_elts == n_elts);
1315 for (i = 0; i < n_elts; i++)
1316 {
1317 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1318 CONST_VECTOR_ELT (op, i),
1319 GET_MODE_INNER (opmode));
1320 if (!x)
1321 return 0;
1322 RTVEC_ELT (v, i) = x;
1323 }
1324 return gen_rtx_CONST_VECTOR (mode, v);
1325 }
1326
1327 /* The order of these tests is critical so that, for example, we don't
1328 check the wrong mode (input vs. output) for a conversion operation,
1329 such as FIX. At some point, this should be simplified. */
1330
1331 if (code == FLOAT && GET_MODE (op) == VOIDmode
1332 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1333 {
1334 HOST_WIDE_INT hv, lv;
1335 REAL_VALUE_TYPE d;
1336
1337 if (CONST_INT_P (op))
1338 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1339 else
1340 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1341
1342 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1343 d = real_value_truncate (mode, d);
1344 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1345 }
1346 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1347 && (GET_CODE (op) == CONST_DOUBLE
1348 || CONST_INT_P (op)))
1349 {
1350 HOST_WIDE_INT hv, lv;
1351 REAL_VALUE_TYPE d;
1352
1353 if (CONST_INT_P (op))
1354 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1355 else
1356 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1357
1358 if (op_mode == VOIDmode)
1359 {
1360 /* We don't know how to interpret negative-looking numbers in
1361 this case, so don't try to fold those. */
1362 if (hv < 0)
1363 return 0;
1364 }
1365 else if (GET_MODE_PRECISION (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1366 ;
1367 else
1368 hv = 0, lv &= GET_MODE_MASK (op_mode);
1369
1370 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1371 d = real_value_truncate (mode, d);
1372 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1373 }
1374
1375 if (CONST_INT_P (op)
1376 && width <= HOST_BITS_PER_WIDE_INT
1377 && op_width <= HOST_BITS_PER_WIDE_INT && op_width > 0)
1378 {
1379 HOST_WIDE_INT arg0 = INTVAL (op);
1380 HOST_WIDE_INT val;
1381
1382 switch (code)
1383 {
1384 case NOT:
1385 val = ~ arg0;
1386 break;
1387
1388 case NEG:
1389 val = - arg0;
1390 break;
1391
1392 case ABS:
1393 val = (arg0 >= 0 ? arg0 : - arg0);
1394 break;
1395
1396 case FFS:
1397 arg0 &= GET_MODE_MASK (op_mode);
1398 val = ffs_hwi (arg0);
1399 break;
1400
1401 case CLZ:
1402 arg0 &= GET_MODE_MASK (op_mode);
1403 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
1404 ;
1405 else
1406 val = GET_MODE_PRECISION (op_mode) - floor_log2 (arg0) - 1;
1407 break;
1408
1409 case CLRSB:
1410 arg0 &= GET_MODE_MASK (op_mode);
1411 if (arg0 == 0)
1412 val = GET_MODE_PRECISION (op_mode) - 1;
1413 else if (arg0 >= 0)
1414 val = GET_MODE_PRECISION (op_mode) - floor_log2 (arg0) - 2;
1415 else if (arg0 < 0)
1416 val = GET_MODE_PRECISION (op_mode) - floor_log2 (~arg0) - 2;
1417 break;
1418
1419 case CTZ:
1420 arg0 &= GET_MODE_MASK (op_mode);
1421 if (arg0 == 0)
1422 {
1423 /* Even if the value at zero is undefined, we have to come
1424 up with some replacement. Seems good enough. */
1425 if (! CTZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
1426 val = GET_MODE_PRECISION (op_mode);
1427 }
1428 else
1429 val = ctz_hwi (arg0);
1430 break;
1431
1432 case POPCOUNT:
1433 arg0 &= GET_MODE_MASK (op_mode);
1434 val = 0;
1435 while (arg0)
1436 val++, arg0 &= arg0 - 1;
1437 break;
1438
1439 case PARITY:
1440 arg0 &= GET_MODE_MASK (op_mode);
1441 val = 0;
1442 while (arg0)
1443 val++, arg0 &= arg0 - 1;
1444 val &= 1;
1445 break;
1446
1447 case BSWAP:
1448 {
1449 unsigned int s;
1450
1451 val = 0;
1452 for (s = 0; s < width; s += 8)
1453 {
1454 unsigned int d = width - s - 8;
1455 unsigned HOST_WIDE_INT byte;
1456 byte = (arg0 >> s) & 0xff;
1457 val |= byte << d;
1458 }
1459 }
1460 break;
1461
1462 case TRUNCATE:
1463 val = arg0;
1464 break;
1465
1466 case ZERO_EXTEND:
1467 /* When zero-extending a CONST_INT, we need to know its
1468 original mode. */
1469 gcc_assert (op_mode != VOIDmode);
1470 if (op_width == HOST_BITS_PER_WIDE_INT)
1471 {
1472 /* If we were really extending the mode,
1473 we would have to distinguish between zero-extension
1474 and sign-extension. */
1475 gcc_assert (width == op_width);
1476 val = arg0;
1477 }
1478 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1479 val = arg0 & GET_MODE_MASK (op_mode);
1480 else
1481 return 0;
1482 break;
1483
1484 case SIGN_EXTEND:
1485 if (op_mode == VOIDmode)
1486 op_mode = mode;
1487 op_width = GET_MODE_PRECISION (op_mode);
1488 if (op_width == HOST_BITS_PER_WIDE_INT)
1489 {
1490 /* If we were really extending the mode,
1491 we would have to distinguish between zero-extension
1492 and sign-extension. */
1493 gcc_assert (width == op_width);
1494 val = arg0;
1495 }
1496 else if (op_width < HOST_BITS_PER_WIDE_INT)
1497 {
1498 val = arg0 & GET_MODE_MASK (op_mode);
1499 if (val_signbit_known_set_p (op_mode, val))
1500 val |= ~GET_MODE_MASK (op_mode);
1501 }
1502 else
1503 return 0;
1504 break;
1505
1506 case SQRT:
1507 case FLOAT_EXTEND:
1508 case FLOAT_TRUNCATE:
1509 case SS_TRUNCATE:
1510 case US_TRUNCATE:
1511 case SS_NEG:
1512 case US_NEG:
1513 case SS_ABS:
1514 return 0;
1515
1516 default:
1517 gcc_unreachable ();
1518 }
1519
1520 return gen_int_mode (val, mode);
1521 }
1522
1523 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1524 for a DImode operation on a CONST_INT. */
1525 else if (GET_MODE (op) == VOIDmode
1526 && width <= HOST_BITS_PER_WIDE_INT * 2
1527 && (GET_CODE (op) == CONST_DOUBLE
1528 || CONST_INT_P (op)))
1529 {
1530 unsigned HOST_WIDE_INT l1, lv;
1531 HOST_WIDE_INT h1, hv;
1532
1533 if (GET_CODE (op) == CONST_DOUBLE)
1534 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1535 else
1536 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1537
1538 switch (code)
1539 {
1540 case NOT:
1541 lv = ~ l1;
1542 hv = ~ h1;
1543 break;
1544
1545 case NEG:
1546 neg_double (l1, h1, &lv, &hv);
1547 break;
1548
1549 case ABS:
1550 if (h1 < 0)
1551 neg_double (l1, h1, &lv, &hv);
1552 else
1553 lv = l1, hv = h1;
1554 break;
1555
1556 case FFS:
1557 hv = 0;
1558 if (l1 != 0)
1559 lv = ffs_hwi (l1);
1560 else if (h1 != 0)
1561 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1562 else
1563 lv = 0;
1564 break;
1565
1566 case CLZ:
1567 hv = 0;
1568 if (h1 != 0)
1569 lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1570 - HOST_BITS_PER_WIDE_INT;
1571 else if (l1 != 0)
1572 lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1573 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1574 lv = GET_MODE_PRECISION (mode);
1575 break;
1576
1577 case CTZ:
1578 hv = 0;
1579 if (l1 != 0)
1580 lv = ctz_hwi (l1);
1581 else if (h1 != 0)
1582 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1583 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1584 lv = GET_MODE_PRECISION (mode);
1585 break;
1586
1587 case POPCOUNT:
1588 hv = 0;
1589 lv = 0;
1590 while (l1)
1591 lv++, l1 &= l1 - 1;
1592 while (h1)
1593 lv++, h1 &= h1 - 1;
1594 break;
1595
1596 case PARITY:
1597 hv = 0;
1598 lv = 0;
1599 while (l1)
1600 lv++, l1 &= l1 - 1;
1601 while (h1)
1602 lv++, h1 &= h1 - 1;
1603 lv &= 1;
1604 break;
1605
1606 case BSWAP:
1607 {
1608 unsigned int s;
1609
1610 hv = 0;
1611 lv = 0;
1612 for (s = 0; s < width; s += 8)
1613 {
1614 unsigned int d = width - s - 8;
1615 unsigned HOST_WIDE_INT byte;
1616
1617 if (s < HOST_BITS_PER_WIDE_INT)
1618 byte = (l1 >> s) & 0xff;
1619 else
1620 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1621
1622 if (d < HOST_BITS_PER_WIDE_INT)
1623 lv |= byte << d;
1624 else
1625 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1626 }
1627 }
1628 break;
1629
1630 case TRUNCATE:
1631 /* This is just a change-of-mode, so do nothing. */
1632 lv = l1, hv = h1;
1633 break;
1634
1635 case ZERO_EXTEND:
1636 gcc_assert (op_mode != VOIDmode);
1637
1638 if (op_width > HOST_BITS_PER_WIDE_INT)
1639 return 0;
1640
1641 hv = 0;
1642 lv = l1 & GET_MODE_MASK (op_mode);
1643 break;
1644
1645 case SIGN_EXTEND:
1646 if (op_mode == VOIDmode
1647 || op_width > HOST_BITS_PER_WIDE_INT)
1648 return 0;
1649 else
1650 {
1651 lv = l1 & GET_MODE_MASK (op_mode);
1652 if (val_signbit_known_set_p (op_mode, lv))
1653 lv |= ~GET_MODE_MASK (op_mode);
1654
1655 hv = HWI_SIGN_EXTEND (lv);
1656 }
1657 break;
1658
1659 case SQRT:
1660 return 0;
1661
1662 default:
1663 return 0;
1664 }
1665
1666 return immed_double_const (lv, hv, mode);
1667 }
1668
1669 else if (GET_CODE (op) == CONST_DOUBLE
1670 && SCALAR_FLOAT_MODE_P (mode)
1671 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1672 {
1673 REAL_VALUE_TYPE d, t;
1674 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1675
1676 switch (code)
1677 {
1678 case SQRT:
1679 if (HONOR_SNANS (mode) && real_isnan (&d))
1680 return 0;
1681 real_sqrt (&t, mode, &d);
1682 d = t;
1683 break;
1684 case ABS:
1685 d = real_value_abs (&d);
1686 break;
1687 case NEG:
1688 d = real_value_negate (&d);
1689 break;
1690 case FLOAT_TRUNCATE:
1691 d = real_value_truncate (mode, d);
1692 break;
1693 case FLOAT_EXTEND:
1694 /* All this does is change the mode, unless changing
1695 mode class. */
1696 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1697 real_convert (&d, mode, &d);
1698 break;
1699 case FIX:
1700 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1701 break;
1702 case NOT:
1703 {
1704 long tmp[4];
1705 int i;
1706
1707 real_to_target (tmp, &d, GET_MODE (op));
1708 for (i = 0; i < 4; i++)
1709 tmp[i] = ~tmp[i];
1710 real_from_target (&d, tmp, mode);
1711 break;
1712 }
1713 default:
1714 gcc_unreachable ();
1715 }
1716 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1717 }
1718
1719 else if (GET_CODE (op) == CONST_DOUBLE
1720 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1721 && GET_MODE_CLASS (mode) == MODE_INT
1722 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1723 {
1724 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1725 operators are intentionally left unspecified (to ease implementation
1726 by target backends), for consistency, this routine implements the
1727 same semantics for constant folding as used by the middle-end. */
1728
1729 /* This was formerly used only for non-IEEE float.
1730 eggert@twinsun.com says it is safe for IEEE also. */
1731 HOST_WIDE_INT xh, xl, th, tl;
1732 REAL_VALUE_TYPE x, t;
1733 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1734 switch (code)
1735 {
1736 case FIX:
1737 if (REAL_VALUE_ISNAN (x))
1738 return const0_rtx;
1739
1740 /* Test against the signed upper bound. */
1741 if (width > HOST_BITS_PER_WIDE_INT)
1742 {
1743 th = ((unsigned HOST_WIDE_INT) 1
1744 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1745 tl = -1;
1746 }
1747 else
1748 {
1749 th = 0;
1750 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1751 }
1752 real_from_integer (&t, VOIDmode, tl, th, 0);
1753 if (REAL_VALUES_LESS (t, x))
1754 {
1755 xh = th;
1756 xl = tl;
1757 break;
1758 }
1759
1760 /* Test against the signed lower bound. */
1761 if (width > HOST_BITS_PER_WIDE_INT)
1762 {
1763 th = (unsigned HOST_WIDE_INT) (-1)
1764 << (width - HOST_BITS_PER_WIDE_INT - 1);
1765 tl = 0;
1766 }
1767 else
1768 {
1769 th = -1;
1770 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1771 }
1772 real_from_integer (&t, VOIDmode, tl, th, 0);
1773 if (REAL_VALUES_LESS (x, t))
1774 {
1775 xh = th;
1776 xl = tl;
1777 break;
1778 }
1779 REAL_VALUE_TO_INT (&xl, &xh, x);
1780 break;
1781
1782 case UNSIGNED_FIX:
1783 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1784 return const0_rtx;
1785
1786 /* Test against the unsigned upper bound. */
1787 if (width == 2*HOST_BITS_PER_WIDE_INT)
1788 {
1789 th = -1;
1790 tl = -1;
1791 }
1792 else if (width >= HOST_BITS_PER_WIDE_INT)
1793 {
1794 th = ((unsigned HOST_WIDE_INT) 1
1795 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1796 tl = -1;
1797 }
1798 else
1799 {
1800 th = 0;
1801 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1802 }
1803 real_from_integer (&t, VOIDmode, tl, th, 1);
1804 if (REAL_VALUES_LESS (t, x))
1805 {
1806 xh = th;
1807 xl = tl;
1808 break;
1809 }
1810
1811 REAL_VALUE_TO_INT (&xl, &xh, x);
1812 break;
1813
1814 default:
1815 gcc_unreachable ();
1816 }
1817 return immed_double_const (xl, xh, mode);
1818 }
1819
1820 return NULL_RTX;
1821 }
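/* Small constant-folding sketches for the routine above (hypothetical
   operands, 32-bit SImode):

     simplify_const_unary_operation (NEG, SImode, GEN_INT (5), SImode)
       ==> (const_int -5)
     simplify_const_unary_operation (POPCOUNT, SImode, GEN_INT (7), SImode)
       ==> (const_int 3)
     simplify_const_unary_operation (ZERO_EXTEND, SImode,
                                     gen_int_mode (-1, QImode), QImode)
       ==> (const_int 255)

   Floating-point inputs are handled through the REAL_VALUE_TYPE paths
   instead.  */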
1822 \f
1823 /* Subroutine of simplify_binary_operation to simplify a commutative,
1824 associative binary operation CODE with result mode MODE, operating
1825 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1826 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1827 canonicalization is possible. */
1828
1829 static rtx
1830 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1831 rtx op0, rtx op1)
1832 {
1833 rtx tem;
1834
1835 /* Linearize the operator to the left. */
1836 if (GET_CODE (op1) == code)
1837 {
1838 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1839 if (GET_CODE (op0) == code)
1840 {
1841 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1842 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1843 }
1844
1845 /* "a op (b op c)" becomes "(b op c) op a". */
1846 if (! swap_commutative_operands_p (op1, op0))
1847 return simplify_gen_binary (code, mode, op1, op0);
1848
1849 tem = op0;
1850 op0 = op1;
1851 op1 = tem;
1852 }
1853
1854 if (GET_CODE (op0) == code)
1855 {
1856 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1857 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1858 {
1859 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1860 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1861 }
1862
1863 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1864 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1865 if (tem != 0)
1866 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1867
1868 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1869 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1870 if (tem != 0)
1871 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1872 }
1873
1874 return 0;
1875 }
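/* A sketch of the canonical shapes the helper above produces (hypothetical
   operands a, b, c, x, y and constants C1, C2):

     (plus a (plus b c))     is re-associated so the nested PLUS ends up on
                             the left;
     (plus (plus x C1) y)    is canonicalized with the constant last, as
                             (plus (plus x y) C1);
     (plus (plus a C1) C2)   folds the two constants into one, giving
                             (plus a C3) where C3 = C1 + C2.  */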
1876
1877
1878 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1879 and OP1. Return 0 if no simplification is possible.
1880
1881 Don't use this for relational operations such as EQ or LT.
1882 Use simplify_relational_operation instead. */
1883 rtx
1884 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1885 rtx op0, rtx op1)
1886 {
1887 rtx trueop0, trueop1;
1888 rtx tem;
1889
1890 /* Relational operations don't work here. We must know the mode
1891 of the operands in order to do the comparison correctly.
1892 Assuming a full word can give incorrect results.
1893 Consider comparing 128 with -128 in QImode. */
1894 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1895 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1896
1897 /* Make sure the constant is second. */
1898 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1899 && swap_commutative_operands_p (op0, op1))
1900 {
1901 tem = op0, op0 = op1, op1 = tem;
1902 }
1903
1904 trueop0 = avoid_constant_pool_reference (op0);
1905 trueop1 = avoid_constant_pool_reference (op1);
1906
1907 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1908 if (tem)
1909 return tem;
1910 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1911 }
1912
1913 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1914 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1915 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1916 actual constants. */
1917
1918 static rtx
1919 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1920 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1921 {
1922 rtx tem, reversed, opleft, opright;
1923 HOST_WIDE_INT val;
1924 unsigned int width = GET_MODE_PRECISION (mode);
1925
1926 /* Even if we can't compute a constant result,
1927 there are some cases worth simplifying. */
1928
1929 switch (code)
1930 {
1931 case PLUS:
1932 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1933 when x is NaN, infinite, or finite and nonzero. They aren't
1934 when x is -0 and the rounding mode is not towards -infinity,
1935 since (-0) + 0 is then 0. */
1936 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1937 return op0;
1938
1939 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1940 transformations are safe even for IEEE. */
1941 if (GET_CODE (op0) == NEG)
1942 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1943 else if (GET_CODE (op1) == NEG)
1944 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1945
1946 /* (~a) + 1 -> -a */
1947 if (INTEGRAL_MODE_P (mode)
1948 && GET_CODE (op0) == NOT
1949 && trueop1 == const1_rtx)
1950 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
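/* Worked example, assuming an 8-bit QImode value: for a = 5 (00000101),
   ~a = 0xFA (11111010) and ~a + 1 = 0xFB, which is -5 in two's complement,
   so (~a) + 1 == -a.  */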
1951
1952 /* Handle both-operands-constant cases. We can only add
1953 CONST_INTs to constants since the sum of relocatable symbols
1954 can't be handled by most assemblers. Don't add CONST_INT
1955 to CONST_INT since overflow won't be computed properly if wider
1956 than HOST_BITS_PER_WIDE_INT. */
1957
1958 if ((GET_CODE (op0) == CONST
1959 || GET_CODE (op0) == SYMBOL_REF
1960 || GET_CODE (op0) == LABEL_REF)
1961 && CONST_INT_P (op1))
1962 return plus_constant (op0, INTVAL (op1));
1963 else if ((GET_CODE (op1) == CONST
1964 || GET_CODE (op1) == SYMBOL_REF
1965 || GET_CODE (op1) == LABEL_REF)
1966 && CONST_INT_P (op0))
1967 return plus_constant (op1, INTVAL (op0));
1968
1969 /* See if this is something like X * C - X or vice versa or
1970 if the multiplication is written as a shift. If so, we can
1971 distribute and make a new multiply, shift, or maybe just
1972 have X (if C is 2 in the example above). But don't make
1973 something more expensive than we had before. */
1974
1975 if (SCALAR_INT_MODE_P (mode))
1976 {
1977 double_int coeff0, coeff1;
1978 rtx lhs = op0, rhs = op1;
1979
1980 coeff0 = double_int_one;
1981 coeff1 = double_int_one;
1982
1983 if (GET_CODE (lhs) == NEG)
1984 {
1985 coeff0 = double_int_minus_one;
1986 lhs = XEXP (lhs, 0);
1987 }
1988 else if (GET_CODE (lhs) == MULT
1989 && CONST_INT_P (XEXP (lhs, 1)))
1990 {
1991 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1992 lhs = XEXP (lhs, 0);
1993 }
1994 else if (GET_CODE (lhs) == ASHIFT
1995 && CONST_INT_P (XEXP (lhs, 1))
1996 && INTVAL (XEXP (lhs, 1)) >= 0
1997 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1998 {
1999 coeff0 = double_int_setbit (double_int_zero,
2000 INTVAL (XEXP (lhs, 1)));
2001 lhs = XEXP (lhs, 0);
2002 }
2003
2004 if (GET_CODE (rhs) == NEG)
2005 {
2006 coeff1 = double_int_minus_one;
2007 rhs = XEXP (rhs, 0);
2008 }
2009 else if (GET_CODE (rhs) == MULT
2010 && CONST_INT_P (XEXP (rhs, 1)))
2011 {
2012 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
2013 rhs = XEXP (rhs, 0);
2014 }
2015 else if (GET_CODE (rhs) == ASHIFT
2016 && CONST_INT_P (XEXP (rhs, 1))
2017 && INTVAL (XEXP (rhs, 1)) >= 0
2018 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2019 {
2020 coeff1 = double_int_setbit (double_int_zero,
2021 INTVAL (XEXP (rhs, 1)));
2022 rhs = XEXP (rhs, 0);
2023 }
2024
2025 if (rtx_equal_p (lhs, rhs))
2026 {
2027 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2028 rtx coeff;
2029 double_int val;
2030 bool speed = optimize_function_for_speed_p (cfun);
2031
2032 val = double_int_add (coeff0, coeff1);
2033 coeff = immed_double_int_const (val, mode);
2034
2035 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2036 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2037 ? tem : 0;
2038 }
2039 }
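/* Worked example of the coefficient merge above, with assumed operands:
   (plus (mult x 3) x) gives coeff0 = 3, coeff1 = 1 and becomes (mult x 4);
   (plus (ashift x 2) x) gives coeff0 = 4 and becomes (mult x 5).  The
   rewrite is kept only when rtx_cost says it is no more expensive than
   the original expression.  */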
2040
2041 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2042 if ((CONST_INT_P (op1)
2043 || GET_CODE (op1) == CONST_DOUBLE)
2044 && GET_CODE (op0) == XOR
2045 && (CONST_INT_P (XEXP (op0, 1))
2046 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2047 && mode_signbit_p (mode, op1))
2048 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2049 simplify_gen_binary (XOR, mode, op1,
2050 XEXP (op0, 1)));
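/* Worked example, assuming 8-bit QImode where the sign-bit constant C2 is
   0x80: with X = 0x3C and C1 = 0x0F, (X ^ 0x0F) + 0x80 = 0xB3 and
   X ^ (0x0F ^ 0x80) = 0x3C ^ 0x8F = 0xB3.  Adding the sign bit never
   disturbs the lower bits and any carry leaves the mode, so it behaves
   exactly like XOR.  */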
2051
2052 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2053 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2054 && GET_CODE (op0) == MULT
2055 && GET_CODE (XEXP (op0, 0)) == NEG)
2056 {
2057 rtx in1, in2;
2058
2059 in1 = XEXP (XEXP (op0, 0), 0);
2060 in2 = XEXP (op0, 1);
2061 return simplify_gen_binary (MINUS, mode, op1,
2062 simplify_gen_binary (MULT, mode,
2063 in1, in2));
2064 }
2065
2066 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2067 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2068 is 1. */
2069 if (COMPARISON_P (op0)
2070 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2071 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2072 && (reversed = reversed_comparison (op0, mode)))
2073 return
2074 simplify_gen_unary (NEG, mode, reversed, mode);
2075
2076 /* If one of the operands is a PLUS or a MINUS, see if we can
2077 simplify this by the associative law.
2078 Don't use the associative law for floating point.
2079 The inaccuracy makes it nonassociative,
2080 and subtle programs can break if operations are associated. */
2081
2082 if (INTEGRAL_MODE_P (mode)
2083 && (plus_minus_operand_p (op0)
2084 || plus_minus_operand_p (op1))
2085 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2086 return tem;
2087
2088 /* Reassociate floating point addition only when the user
2089 specifies associative math operations. */
2090 if (FLOAT_MODE_P (mode)
2091 && flag_associative_math)
2092 {
2093 tem = simplify_associative_operation (code, mode, op0, op1);
2094 if (tem)
2095 return tem;
2096 }
2097 break;
2098
2099 case COMPARE:
2100 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2101 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2102 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2103 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2104 {
2105 rtx xop00 = XEXP (op0, 0);
2106 rtx xop10 = XEXP (op1, 0);
2107
2108 #ifdef HAVE_cc0
2109 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2110 #else
2111 if (REG_P (xop00) && REG_P (xop10)
2112 && GET_MODE (xop00) == GET_MODE (xop10)
2113 && REGNO (xop00) == REGNO (xop10)
2114 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2115 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2116 #endif
2117 return xop00;
2118 }
2119 break;
2120
2121 case MINUS:
2122 /* We can't assume x-x is 0 even with non-IEEE floating point,
2123 but since it is zero except in very strange circumstances, we
2124 will treat it as zero with -ffinite-math-only. */
2125 if (rtx_equal_p (trueop0, trueop1)
2126 && ! side_effects_p (op0)
2127 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2128 return CONST0_RTX (mode);
2129
2130 /* Change subtraction from zero into negation. (0 - x) is the
2131 same as -x when x is NaN, infinite, or finite and nonzero.
2132 But if the mode has signed zeros, and does not round towards
2133 -infinity, then 0 - 0 is 0, not -0. */
2134 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2135 return simplify_gen_unary (NEG, mode, op1, mode);
2136
2137 /* (-1 - a) is ~a. */
2138 if (trueop0 == constm1_rtx)
2139 return simplify_gen_unary (NOT, mode, op1, mode);
2140
2141 /* Subtracting 0 has no effect unless the mode has signed zeros
2142 and supports rounding towards -infinity. In such a case,
2143 0 - 0 is -0. */
2144 if (!(HONOR_SIGNED_ZEROS (mode)
2145 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2146 && trueop1 == CONST0_RTX (mode))
2147 return op0;
2148
2149 /* See if this is something like X * C - X or vice versa or
2150 if the multiplication is written as a shift. If so, we can
2151 distribute and make a new multiply, shift, or maybe just
2152 have X (if C is 2 in the example above). But don't make
2153 something more expensive than we had before. */
2154
2155 if (SCALAR_INT_MODE_P (mode))
2156 {
2157 double_int coeff0, negcoeff1;
2158 rtx lhs = op0, rhs = op1;
2159
2160 coeff0 = double_int_one;
2161 negcoeff1 = double_int_minus_one;
2162
2163 if (GET_CODE (lhs) == NEG)
2164 {
2165 coeff0 = double_int_minus_one;
2166 lhs = XEXP (lhs, 0);
2167 }
2168 else if (GET_CODE (lhs) == MULT
2169 && CONST_INT_P (XEXP (lhs, 1)))
2170 {
2171 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2172 lhs = XEXP (lhs, 0);
2173 }
2174 else if (GET_CODE (lhs) == ASHIFT
2175 && CONST_INT_P (XEXP (lhs, 1))
2176 && INTVAL (XEXP (lhs, 1)) >= 0
2177 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2178 {
2179 coeff0 = double_int_setbit (double_int_zero,
2180 INTVAL (XEXP (lhs, 1)));
2181 lhs = XEXP (lhs, 0);
2182 }
2183
2184 if (GET_CODE (rhs) == NEG)
2185 {
2186 negcoeff1 = double_int_one;
2187 rhs = XEXP (rhs, 0);
2188 }
2189 else if (GET_CODE (rhs) == MULT
2190 && CONST_INT_P (XEXP (rhs, 1)))
2191 {
2192 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2193 rhs = XEXP (rhs, 0);
2194 }
2195 else if (GET_CODE (rhs) == ASHIFT
2196 && CONST_INT_P (XEXP (rhs, 1))
2197 && INTVAL (XEXP (rhs, 1)) >= 0
2198 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2199 {
2200 negcoeff1 = double_int_setbit (double_int_zero,
2201 INTVAL (XEXP (rhs, 1)));
2202 negcoeff1 = double_int_neg (negcoeff1);
2203 rhs = XEXP (rhs, 0);
2204 }
2205
2206 if (rtx_equal_p (lhs, rhs))
2207 {
2208 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2209 rtx coeff;
2210 double_int val;
2211 bool speed = optimize_function_for_speed_p (cfun);
2212
2213 val = double_int_add (coeff0, negcoeff1);
2214 coeff = immed_double_int_const (val, mode);
2215
2216 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2217 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2218 ? tem : 0;
2219 }
2220 }
2221
2222 /* (a - (-b)) -> (a + b). True even for IEEE. */
2223 if (GET_CODE (op1) == NEG)
2224 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2225
2226 /* (-x - c) may be simplified as (-c - x). */
2227 if (GET_CODE (op0) == NEG
2228 && (CONST_INT_P (op1)
2229 || GET_CODE (op1) == CONST_DOUBLE))
2230 {
2231 tem = simplify_unary_operation (NEG, mode, op1, mode);
2232 if (tem)
2233 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2234 }
2235
2236 /* Don't let a relocatable value get a negative coeff. */
2237 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2238 return simplify_gen_binary (PLUS, mode,
2239 op0,
2240 neg_const_int (mode, op1));
2241
2242 /* (x - (x & y)) -> (x & ~y) */
2243 if (GET_CODE (op1) == AND)
2244 {
2245 if (rtx_equal_p (op0, XEXP (op1, 0)))
2246 {
2247 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2248 GET_MODE (XEXP (op1, 1)));
2249 return simplify_gen_binary (AND, mode, op0, tem);
2250 }
2251 if (rtx_equal_p (op0, XEXP (op1, 1)))
2252 {
2253 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2254 GET_MODE (XEXP (op1, 0)));
2255 return simplify_gen_binary (AND, mode, op0, tem);
2256 }
2257 }
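/* Worked example with assumed 8-bit values: for x = 0xF0 and y = 0x3C,
   x & y = 0x30 and x - 0x30 = 0xC0, which is x & ~y = 0xF0 & 0xC3.
   The subtraction only removes bits that are already set in x, so it is
   the same as masking with ~y.  */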
2258
2259 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2260 by reversing the comparison code if valid. */
2261 if (STORE_FLAG_VALUE == 1
2262 && trueop0 == const1_rtx
2263 && COMPARISON_P (op1)
2264 && (reversed = reversed_comparison (op1, mode)))
2265 return reversed;
2266
2267 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2268 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2269 && GET_CODE (op1) == MULT
2270 && GET_CODE (XEXP (op1, 0)) == NEG)
2271 {
2272 rtx in1, in2;
2273
2274 in1 = XEXP (XEXP (op1, 0), 0);
2275 in2 = XEXP (op1, 1);
2276 return simplify_gen_binary (PLUS, mode,
2277 simplify_gen_binary (MULT, mode,
2278 in1, in2),
2279 op0);
2280 }
2281
2282 /* Canonicalize (minus (neg A) (mult B C)) to
2283 (minus (mult (neg B) C) A). */
2284 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2285 && GET_CODE (op1) == MULT
2286 && GET_CODE (op0) == NEG)
2287 {
2288 rtx in1, in2;
2289
2290 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2291 in2 = XEXP (op1, 1);
2292 return simplify_gen_binary (MINUS, mode,
2293 simplify_gen_binary (MULT, mode,
2294 in1, in2),
2295 XEXP (op0, 0));
2296 }
2297
2298 /* If one of the operands is a PLUS or a MINUS, see if we can
2299 simplify this by the associative law. This will, for example,
2300 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2301 Don't use the associative law for floating point.
2302 The inaccuracy makes it nonassociative,
2303 and subtle programs can break if operations are associated. */
2304
2305 if (INTEGRAL_MODE_P (mode)
2306 && (plus_minus_operand_p (op0)
2307 || plus_minus_operand_p (op1))
2308 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2309 return tem;
2310 break;
2311
2312 case MULT:
2313 if (trueop1 == constm1_rtx)
2314 return simplify_gen_unary (NEG, mode, op0, mode);
2315
2316 if (GET_CODE (op0) == NEG)
2317 {
2318 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2319 /* If op1 is a MULT as well and simplify_unary_operation
2320 just moved the NEG to the second operand, simplify_gen_binary
2321 below could, through simplify_associative_operation, move
2322 the NEG around again and recurse endlessly. */
2323 if (temp
2324 && GET_CODE (op1) == MULT
2325 && GET_CODE (temp) == MULT
2326 && XEXP (op1, 0) == XEXP (temp, 0)
2327 && GET_CODE (XEXP (temp, 1)) == NEG
2328 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2329 temp = NULL_RTX;
2330 if (temp)
2331 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2332 }
2333 if (GET_CODE (op1) == NEG)
2334 {
2335 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2336 /* If op0 is a MULT as well and simplify_unary_operation
2337 just moved the NEG to the second operand, simplify_gen_binary
2338 below could, through simplify_associative_operation, move
2339 the NEG around again and recurse endlessly. */
2340 if (temp
2341 && GET_CODE (op0) == MULT
2342 && GET_CODE (temp) == MULT
2343 && XEXP (op0, 0) == XEXP (temp, 0)
2344 && GET_CODE (XEXP (temp, 1)) == NEG
2345 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2346 temp = NULL_RTX;
2347 if (temp)
2348 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2349 }
2350
2351 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2352 x is NaN, since x * 0 is then also NaN. Nor is it valid
2353 when the mode has signed zeros, since multiplying a negative
2354 number by 0 will give -0, not 0. */
2355 if (!HONOR_NANS (mode)
2356 && !HONOR_SIGNED_ZEROS (mode)
2357 && trueop1 == CONST0_RTX (mode)
2358 && ! side_effects_p (op0))
2359 return op1;
2360
2361 /* In IEEE floating point, x*1 is not equivalent to x for
2362 signalling NaNs. */
2363 if (!HONOR_SNANS (mode)
2364 && trueop1 == CONST1_RTX (mode))
2365 return op0;
2366
2367 /* Convert multiply by constant power of two into shift unless
2368 we are still generating RTL. This test is a kludge. */
2369 if (CONST_INT_P (trueop1)
2370 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2371 /* If the mode is larger than the host word size, and the
2372 uppermost bit is set, then this isn't a power of two due
2373 to implicit sign extension. */
2374 && (width <= HOST_BITS_PER_WIDE_INT
2375 || val != HOST_BITS_PER_WIDE_INT - 1))
2376 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
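/* Worked example: for trueop1 = 8, exact_log2 returns 3 and (mult x 8)
   becomes (ashift x 3).  */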
2377
2378 /* Likewise for multipliers wider than a word. */
2379 if (GET_CODE (trueop1) == CONST_DOUBLE
2380 && (GET_MODE (trueop1) == VOIDmode
2381 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2382 && GET_MODE (op0) == mode
2383 && CONST_DOUBLE_LOW (trueop1) == 0
2384 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2385 return simplify_gen_binary (ASHIFT, mode, op0,
2386 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2387
2388 /* x*2 is x+x and x*(-1) is -x */
2389 if (GET_CODE (trueop1) == CONST_DOUBLE
2390 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2391 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2392 && GET_MODE (op0) == mode)
2393 {
2394 REAL_VALUE_TYPE d;
2395 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2396
2397 if (REAL_VALUES_EQUAL (d, dconst2))
2398 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2399
2400 if (!HONOR_SNANS (mode)
2401 && REAL_VALUES_EQUAL (d, dconstm1))
2402 return simplify_gen_unary (NEG, mode, op0, mode);
2403 }
2404
2405 /* Optimize -x * -x as x * x. */
2406 if (FLOAT_MODE_P (mode)
2407 && GET_CODE (op0) == NEG
2408 && GET_CODE (op1) == NEG
2409 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2410 && !side_effects_p (XEXP (op0, 0)))
2411 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2412
2413 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2414 if (SCALAR_FLOAT_MODE_P (mode)
2415 && GET_CODE (op0) == ABS
2416 && GET_CODE (op1) == ABS
2417 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2418 && !side_effects_p (XEXP (op0, 0)))
2419 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2420
2421 /* Reassociate multiplication, but for floating point MULTs
2422 only when the user specifies unsafe math optimizations. */
2423 if (! FLOAT_MODE_P (mode)
2424 || flag_unsafe_math_optimizations)
2425 {
2426 tem = simplify_associative_operation (code, mode, op0, op1);
2427 if (tem)
2428 return tem;
2429 }
2430 break;
2431
2432 case IOR:
2433 if (trueop1 == CONST0_RTX (mode))
2434 return op0;
2435 if (CONST_INT_P (trueop1)
2436 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2437 == GET_MODE_MASK (mode)))
2438 return op1;
2439 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2440 return op0;
2441 /* A | (~A) -> -1 */
2442 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2443 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2444 && ! side_effects_p (op0)
2445 && SCALAR_INT_MODE_P (mode))
2446 return constm1_rtx;
2447
2448 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2449 if (CONST_INT_P (op1)
2450 && HWI_COMPUTABLE_MODE_P (mode)
2451 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2452 return op1;
2453
2454 /* Canonicalize (X & C1) | C2. */
2455 if (GET_CODE (op0) == AND
2456 && CONST_INT_P (trueop1)
2457 && CONST_INT_P (XEXP (op0, 1)))
2458 {
2459 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2460 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2461 HOST_WIDE_INT c2 = INTVAL (trueop1);
2462
2463 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2464 if ((c1 & c2) == c1
2465 && !side_effects_p (XEXP (op0, 0)))
2466 return trueop1;
2467
2468 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2469 if (((c1|c2) & mask) == mask)
2470 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2471
2472 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2473 if (((c1 & ~c2) & mask) != (c1 & mask))
2474 {
2475 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2476 gen_int_mode (c1 & ~c2, mode));
2477 return simplify_gen_binary (IOR, mode, tem, op1);
2478 }
2479 }
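/* Worked examples with assumed 8-bit constants: for C1 = 0x0F and
   C2 = 0x3F, (C1 & C2) == C1, so (X & 0x0F) | 0x3F is just 0x3F; for
   C1 = 0xF0 and C2 = 0x0F, (C1 | C2) covers the whole mode mask, so the
   AND is dropped and the result is X | 0x0F; otherwise C1 is shrunk to
   C1 & ~C2 so it carries no bits already provided by C2.  */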
2480
2481 /* Convert (A & B) | A to A. */
2482 if (GET_CODE (op0) == AND
2483 && (rtx_equal_p (XEXP (op0, 0), op1)
2484 || rtx_equal_p (XEXP (op0, 1), op1))
2485 && ! side_effects_p (XEXP (op0, 0))
2486 && ! side_effects_p (XEXP (op0, 1)))
2487 return op1;
2488
2489 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2490 mode size to (rotate A CX). */
2491
2492 if (GET_CODE (op1) == ASHIFT
2493 || GET_CODE (op1) == SUBREG)
2494 {
2495 opleft = op1;
2496 opright = op0;
2497 }
2498 else
2499 {
2500 opright = op1;
2501 opleft = op0;
2502 }
2503
2504 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2505 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2506 && CONST_INT_P (XEXP (opleft, 1))
2507 && CONST_INT_P (XEXP (opright, 1))
2508 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2509 == GET_MODE_PRECISION (mode)))
2510 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
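/* Worked example, assuming 32-bit SImode: (ior (ashift x 3)
   (lshiftrt x 29)) moves the top three bits of x into the low end while
   the remaining bits move up, and since 3 + 29 equals the mode precision
   this is exactly (rotate x 3).  */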
2511
2512 /* Same, but for ashift that has been "simplified" to a wider mode
2513 by simplify_shift_const. */
2514
2515 if (GET_CODE (opleft) == SUBREG
2516 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2517 && GET_CODE (opright) == LSHIFTRT
2518 && GET_CODE (XEXP (opright, 0)) == SUBREG
2519 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2520 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2521 && (GET_MODE_SIZE (GET_MODE (opleft))
2522 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2523 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2524 SUBREG_REG (XEXP (opright, 0)))
2525 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2526 && CONST_INT_P (XEXP (opright, 1))
2527 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2528 == GET_MODE_PRECISION (mode)))
2529 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2530 XEXP (SUBREG_REG (opleft), 1));
2531
2532 /* If we have (ior (and X C1) C2), simplify this by making
2533 C1 as small as possible if C1 actually changes. */
2534 if (CONST_INT_P (op1)
2535 && (HWI_COMPUTABLE_MODE_P (mode)
2536 || INTVAL (op1) > 0)
2537 && GET_CODE (op0) == AND
2538 && CONST_INT_P (XEXP (op0, 1))
2539 && CONST_INT_P (op1)
2540 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2541 return simplify_gen_binary (IOR, mode,
2542 simplify_gen_binary
2543 (AND, mode, XEXP (op0, 0),
2544 GEN_INT (UINTVAL (XEXP (op0, 1))
2545 & ~UINTVAL (op1))),
2546 op1);
2547
2548 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2549 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2550 the PLUS does not affect any of the bits in OP1: then we can do
2551 the IOR as a PLUS and we can associate. This is valid if OP1
2552 can be safely shifted left C bits. */
2553 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2554 && GET_CODE (XEXP (op0, 0)) == PLUS
2555 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2556 && CONST_INT_P (XEXP (op0, 1))
2557 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2558 {
2559 int count = INTVAL (XEXP (op0, 1));
2560 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2561
2562 if (mask >> count == INTVAL (trueop1)
2563 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2564 return simplify_gen_binary (ASHIFTRT, mode,
2565 plus_constant (XEXP (op0, 0), mask),
2566 XEXP (op0, 1));
2567 }
2568
2569 tem = simplify_associative_operation (code, mode, op0, op1);
2570 if (tem)
2571 return tem;
2572 break;
2573
2574 case XOR:
2575 if (trueop1 == CONST0_RTX (mode))
2576 return op0;
2577 if (CONST_INT_P (trueop1)
2578 && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
2579 == GET_MODE_MASK (mode)))
2580 return simplify_gen_unary (NOT, mode, op0, mode);
2581 if (rtx_equal_p (trueop0, trueop1)
2582 && ! side_effects_p (op0)
2583 && GET_MODE_CLASS (mode) != MODE_CC)
2584 return CONST0_RTX (mode);
2585
2586 /* Canonicalize XOR of the most significant bit to PLUS. */
2587 if ((CONST_INT_P (op1)
2588 || GET_CODE (op1) == CONST_DOUBLE)
2589 && mode_signbit_p (mode, op1))
2590 return simplify_gen_binary (PLUS, mode, op0, op1);
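/* Worked example, assuming 8-bit QImode: x ^ 0x80 flips only the sign
   bit, and x + 0x80 (mod 256) does the same because the carry out of
   bit 7 is discarded, so the XOR is canonicalized to PLUS.  */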
2591 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2592 if ((CONST_INT_P (op1)
2593 || GET_CODE (op1) == CONST_DOUBLE)
2594 && GET_CODE (op0) == PLUS
2595 && (CONST_INT_P (XEXP (op0, 1))
2596 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2597 && mode_signbit_p (mode, XEXP (op0, 1)))
2598 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2599 simplify_gen_binary (XOR, mode, op1,
2600 XEXP (op0, 1)));
2601
2602 /* If we are XORing two things that have no bits in common,
2603 convert them into an IOR. This helps to detect rotation encoded
2604 using those methods and possibly other simplifications. */
2605
2606 if (HWI_COMPUTABLE_MODE_P (mode)
2607 && (nonzero_bits (op0, mode)
2608 & nonzero_bits (op1, mode)) == 0)
2609 return (simplify_gen_binary (IOR, mode, op0, op1));
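/* Worked example: if nonzero_bits shows op0 can only have bits 0x0F and
   op1 only 0xF0, then op0 ^ op1 == op0 | op1 (e.g. 0x0F ^ 0xF0 ==
   0x0F | 0xF0 == 0xFF), so the XOR can be rewritten as IOR.  */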
2610
2611 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2612 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2613 (NOT y). */
2614 {
2615 int num_negated = 0;
2616
2617 if (GET_CODE (op0) == NOT)
2618 num_negated++, op0 = XEXP (op0, 0);
2619 if (GET_CODE (op1) == NOT)
2620 num_negated++, op1 = XEXP (op1, 0);
2621
2622 if (num_negated == 2)
2623 return simplify_gen_binary (XOR, mode, op0, op1);
2624 else if (num_negated == 1)
2625 return simplify_gen_unary (NOT, mode,
2626 simplify_gen_binary (XOR, mode, op0, op1),
2627 mode);
2628 }
2629
2630 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2631 correspond to a machine insn or result in further simplifications
2632 if B is a constant. */
2633
2634 if (GET_CODE (op0) == AND
2635 && rtx_equal_p (XEXP (op0, 1), op1)
2636 && ! side_effects_p (op1))
2637 return simplify_gen_binary (AND, mode,
2638 simplify_gen_unary (NOT, mode,
2639 XEXP (op0, 0), mode),
2640 op1);
2641
2642 else if (GET_CODE (op0) == AND
2643 && rtx_equal_p (XEXP (op0, 0), op1)
2644 && ! side_effects_p (op1))
2645 return simplify_gen_binary (AND, mode,
2646 simplify_gen_unary (NOT, mode,
2647 XEXP (op0, 1), mode),
2648 op1);
2649
2650 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2651 we can transform like this:
2652 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2653 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2654 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2655 Attempt a few simplifications when B and C are both constants. */
2656 if (GET_CODE (op0) == AND
2657 && CONST_INT_P (op1)
2658 && CONST_INT_P (XEXP (op0, 1)))
2659 {
2660 rtx a = XEXP (op0, 0);
2661 rtx b = XEXP (op0, 1);
2662 rtx c = op1;
2663 HOST_WIDE_INT bval = INTVAL (b);
2664 HOST_WIDE_INT cval = INTVAL (c);
2665
2666 rtx na_c
2667 = simplify_binary_operation (AND, mode,
2668 simplify_gen_unary (NOT, mode, a, mode),
2669 c);
2670 if ((~cval & bval) == 0)
2671 {
2672 /* Try to simplify ~A&C | ~B&C. */
2673 if (na_c != NULL_RTX)
2674 return simplify_gen_binary (IOR, mode, na_c,
2675 GEN_INT (~bval & cval));
2676 }
2677 else
2678 {
2679 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2680 if (na_c == const0_rtx)
2681 {
2682 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2683 GEN_INT (~cval & bval));
2684 return simplify_gen_binary (IOR, mode, a_nc_b,
2685 GEN_INT (~bval & cval));
2686 }
2687 }
2688 }
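/* Worked example of the (~C & B) == 0 branch, with assumed constants
   B = 0x0F and C = 0xFF in an 8-bit mode: (A & 0x0F) ^ 0xFF is
   ~(A & 0x0F) = ~A | 0xF0, and the rewritten (~A & C) | (~B & C) is
   (~A & 0xFF) | 0xF0, the same value.  */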
2689
2690 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2691 comparison if STORE_FLAG_VALUE is 1. */
2692 if (STORE_FLAG_VALUE == 1
2693 && trueop1 == const1_rtx
2694 && COMPARISON_P (op0)
2695 && (reversed = reversed_comparison (op0, mode)))
2696 return reversed;
2697
2698 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2699 is (lt foo (const_int 0)), so we can perform the above
2700 simplification if STORE_FLAG_VALUE is 1. */
2701
2702 if (STORE_FLAG_VALUE == 1
2703 && trueop1 == const1_rtx
2704 && GET_CODE (op0) == LSHIFTRT
2705 && CONST_INT_P (XEXP (op0, 1))
2706 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2707 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2708
2709 /* (xor (comparison foo bar) (const_int sign-bit))
2710 when STORE_FLAG_VALUE is the sign bit. */
2711 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2712 && trueop1 == const_true_rtx
2713 && COMPARISON_P (op0)
2714 && (reversed = reversed_comparison (op0, mode)))
2715 return reversed;
2716
2717 tem = simplify_associative_operation (code, mode, op0, op1);
2718 if (tem)
2719 return tem;
2720 break;
2721
2722 case AND:
2723 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2724 return trueop1;
2725 if (HWI_COMPUTABLE_MODE_P (mode))
2726 {
2727 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2728 HOST_WIDE_INT nzop1;
2729 if (CONST_INT_P (trueop1))
2730 {
2731 HOST_WIDE_INT val1 = INTVAL (trueop1);
2732 /* If we are turning off bits already known off in OP0, we need
2733 not do an AND. */
2734 if ((nzop0 & ~val1) == 0)
2735 return op0;
2736 }
2737 nzop1 = nonzero_bits (trueop1, mode);
2738 /* If we are clearing all the nonzero bits, the result is zero. */
2739 if ((nzop1 & nzop0) == 0
2740 && !side_effects_p (op0) && !side_effects_p (op1))
2741 return CONST0_RTX (mode);
2742 }
2743 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2744 && GET_MODE_CLASS (mode) != MODE_CC)
2745 return op0;
2746 /* A & (~A) -> 0 */
2747 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2748 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2749 && ! side_effects_p (op0)
2750 && GET_MODE_CLASS (mode) != MODE_CC)
2751 return CONST0_RTX (mode);
2752
2753 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2754 there are no nonzero bits of C outside of X's mode. */
2755 if ((GET_CODE (op0) == SIGN_EXTEND
2756 || GET_CODE (op0) == ZERO_EXTEND)
2757 && CONST_INT_P (trueop1)
2758 && HWI_COMPUTABLE_MODE_P (mode)
2759 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2760 & UINTVAL (trueop1)) == 0)
2761 {
2762 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2763 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2764 gen_int_mode (INTVAL (trueop1),
2765 imode));
2766 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2767 }
2768
2769 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2770 we might be able to further simplify the AND with X and potentially
2771 remove the truncation altogether. */
2772 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2773 {
2774 rtx x = XEXP (op0, 0);
2775 enum machine_mode xmode = GET_MODE (x);
2776 tem = simplify_gen_binary (AND, xmode, x,
2777 gen_int_mode (INTVAL (trueop1), xmode));
2778 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2779 }
2780
2781 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2782 if (GET_CODE (op0) == IOR
2783 && CONST_INT_P (trueop1)
2784 && CONST_INT_P (XEXP (op0, 1)))
2785 {
2786 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2787 return simplify_gen_binary (IOR, mode,
2788 simplify_gen_binary (AND, mode,
2789 XEXP (op0, 0), op1),
2790 gen_int_mode (tmp, mode));
2791 }
2792
2793 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2794 insn (and may simplify more). */
2795 if (GET_CODE (op0) == XOR
2796 && rtx_equal_p (XEXP (op0, 0), op1)
2797 && ! side_effects_p (op1))
2798 return simplify_gen_binary (AND, mode,
2799 simplify_gen_unary (NOT, mode,
2800 XEXP (op0, 1), mode),
2801 op1);
2802
2803 if (GET_CODE (op0) == XOR
2804 && rtx_equal_p (XEXP (op0, 1), op1)
2805 && ! side_effects_p (op1))
2806 return simplify_gen_binary (AND, mode,
2807 simplify_gen_unary (NOT, mode,
2808 XEXP (op0, 0), mode),
2809 op1);
2810
2811 /* Similarly for (~(A ^ B)) & A. */
2812 if (GET_CODE (op0) == NOT
2813 && GET_CODE (XEXP (op0, 0)) == XOR
2814 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2815 && ! side_effects_p (op1))
2816 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2817
2818 if (GET_CODE (op0) == NOT
2819 && GET_CODE (XEXP (op0, 0)) == XOR
2820 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2821 && ! side_effects_p (op1))
2822 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2823
2824 /* Convert (A | B) & A to A. */
2825 if (GET_CODE (op0) == IOR
2826 && (rtx_equal_p (XEXP (op0, 0), op1)
2827 || rtx_equal_p (XEXP (op0, 1), op1))
2828 && ! side_effects_p (XEXP (op0, 0))
2829 && ! side_effects_p (XEXP (op0, 1)))
2830 return op1;
2831
2832 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2833 ((A & N) + B) & M -> (A + B) & M
2834 Similarly if (N & M) == 0,
2835 ((A | N) + B) & M -> (A + B) & M
2836 and for - instead of + and/or ^ instead of |.
2837 Also, if (N & M) == 0, then
2838 (A +- N) & M -> A & M. */
2839 if (CONST_INT_P (trueop1)
2840 && HWI_COMPUTABLE_MODE_P (mode)
2841 && ~UINTVAL (trueop1)
2842 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2843 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2844 {
2845 rtx pmop[2];
2846 int which;
2847
2848 pmop[0] = XEXP (op0, 0);
2849 pmop[1] = XEXP (op0, 1);
2850
2851 if (CONST_INT_P (pmop[1])
2852 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2853 return simplify_gen_binary (AND, mode, pmop[0], op1);
2854
2855 for (which = 0; which < 2; which++)
2856 {
2857 tem = pmop[which];
2858 switch (GET_CODE (tem))
2859 {
2860 case AND:
2861 if (CONST_INT_P (XEXP (tem, 1))
2862 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2863 == UINTVAL (trueop1))
2864 pmop[which] = XEXP (tem, 0);
2865 break;
2866 case IOR:
2867 case XOR:
2868 if (CONST_INT_P (XEXP (tem, 1))
2869 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2870 pmop[which] = XEXP (tem, 0);
2871 break;
2872 default:
2873 break;
2874 }
2875 }
2876
2877 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2878 {
2879 tem = simplify_gen_binary (GET_CODE (op0), mode,
2880 pmop[0], pmop[1]);
2881 return simplify_gen_binary (code, mode, tem, op1);
2882 }
2883 }
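/* Worked example, assuming M = trueop1 = 0xFF (so M + 1 is a power of
   two): ((A & 0x1FF) + B) & 0xFF == (A + B) & 0xFF because the inner AND
   keeps every bit of A below bit 8 and carries only propagate upward;
   likewise ((A | 0x100) + B) & 0xFF == (A + B) & 0xFF and
   (A + 0x100) & 0xFF == A & 0xFF, since 0x100 & 0xFF == 0.  */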
2884
2885 /* (and X (ior (not X) Y)) -> (and X Y) */
2886 if (GET_CODE (op1) == IOR
2887 && GET_CODE (XEXP (op1, 0)) == NOT
2888 && op0 == XEXP (XEXP (op1, 0), 0))
2889 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2890
2891 /* (and (ior (not X) Y) X) -> (and X Y) */
2892 if (GET_CODE (op0) == IOR
2893 && GET_CODE (XEXP (op0, 0)) == NOT
2894 && op1 == XEXP (XEXP (op0, 0), 0))
2895 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2896
2897 tem = simplify_associative_operation (code, mode, op0, op1);
2898 if (tem)
2899 return tem;
2900 break;
2901
2902 case UDIV:
2903 /* 0/x is 0 (or x&0 if x has side-effects). */
2904 if (trueop0 == CONST0_RTX (mode))
2905 {
2906 if (side_effects_p (op1))
2907 return simplify_gen_binary (AND, mode, op1, trueop0);
2908 return trueop0;
2909 }
2910 /* x/1 is x. */
2911 if (trueop1 == CONST1_RTX (mode))
2912 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2913 /* Convert divide by power of two into shift. */
2914 if (CONST_INT_P (trueop1)
2915 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2916 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
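/* Worked example: (udiv x 8) becomes (lshiftrt x 3).  Signed division by
   a power of two is not handled this way because a plain arithmetic shift
   would round toward -infinity instead of toward zero.  */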
2917 break;
2918
2919 case DIV:
2920 /* Handle floating point and integers separately. */
2921 if (SCALAR_FLOAT_MODE_P (mode))
2922 {
2923 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2924 safe for modes with NaNs, since 0.0 / 0.0 will then be
2925 NaN rather than 0.0. Nor is it safe for modes with signed
2926 zeros, since dividing 0 by a negative number gives -0.0. */
2927 if (trueop0 == CONST0_RTX (mode)
2928 && !HONOR_NANS (mode)
2929 && !HONOR_SIGNED_ZEROS (mode)
2930 && ! side_effects_p (op1))
2931 return op0;
2932 /* x/1.0 is x. */
2933 if (trueop1 == CONST1_RTX (mode)
2934 && !HONOR_SNANS (mode))
2935 return op0;
2936
2937 if (GET_CODE (trueop1) == CONST_DOUBLE
2938 && trueop1 != CONST0_RTX (mode))
2939 {
2940 REAL_VALUE_TYPE d;
2941 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2942
2943 /* x/-1.0 is -x. */
2944 if (REAL_VALUES_EQUAL (d, dconstm1)
2945 && !HONOR_SNANS (mode))
2946 return simplify_gen_unary (NEG, mode, op0, mode);
2947
2948 /* Change FP division by a constant into multiplication.
2949 Only do this with -freciprocal-math. */
2950 if (flag_reciprocal_math
2951 && !REAL_VALUES_EQUAL (d, dconst0))
2952 {
2953 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2954 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2955 return simplify_gen_binary (MULT, mode, op0, tem);
2956 }
2957 }
2958 }
2959 else
2960 {
2961 /* 0/x is 0 (or x&0 if x has side-effects). */
2962 if (trueop0 == CONST0_RTX (mode)
2963 && !cfun->can_throw_non_call_exceptions)
2964 {
2965 if (side_effects_p (op1))
2966 return simplify_gen_binary (AND, mode, op1, trueop0);
2967 return trueop0;
2968 }
2969 /* x/1 is x. */
2970 if (trueop1 == CONST1_RTX (mode))
2971 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2972 /* x/-1 is -x. */
2973 if (trueop1 == constm1_rtx)
2974 {
2975 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2976 return simplify_gen_unary (NEG, mode, x, mode);
2977 }
2978 }
2979 break;
2980
2981 case UMOD:
2982 /* 0%x is 0 (or x&0 if x has side-effects). */
2983 if (trueop0 == CONST0_RTX (mode))
2984 {
2985 if (side_effects_p (op1))
2986 return simplify_gen_binary (AND, mode, op1, trueop0);
2987 return trueop0;
2988 }
2989 /* x%1 is 0 (or x&0 if x has side-effects). */
2990 if (trueop1 == CONST1_RTX (mode))
2991 {
2992 if (side_effects_p (op0))
2993 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2994 return CONST0_RTX (mode);
2995 }
2996 /* Implement modulus by power of two as AND. */
2997 if (CONST_INT_P (trueop1)
2998 && exact_log2 (UINTVAL (trueop1)) > 0)
2999 return simplify_gen_binary (AND, mode, op0,
3000 GEN_INT (INTVAL (op1) - 1));
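/* Worked example: (umod x 8) becomes (and x 7), since for a power of two
   the remainder is just the low-order bits.  */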
3001 break;
3002
3003 case MOD:
3004 /* 0%x is 0 (or x&0 if x has side-effects). */
3005 if (trueop0 == CONST0_RTX (mode))
3006 {
3007 if (side_effects_p (op1))
3008 return simplify_gen_binary (AND, mode, op1, trueop0);
3009 return trueop0;
3010 }
3011 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3012 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3013 {
3014 if (side_effects_p (op0))
3015 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3016 return CONST0_RTX (mode);
3017 }
3018 break;
3019
3020 case ROTATERT:
3021 case ROTATE:
3022 case ASHIFTRT:
3023 if (trueop1 == CONST0_RTX (mode))
3024 return op0;
3025 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3026 return op0;
3027 /* Rotating ~0 always results in ~0. */
3028 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3029 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3030 && ! side_effects_p (op1))
3031 return op0;
3032 canonicalize_shift:
3033 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3034 {
3035 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3036 if (val != INTVAL (op1))
3037 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3038 }
3039 break;
3040
3041 case ASHIFT:
3042 case SS_ASHIFT:
3043 case US_ASHIFT:
3044 if (trueop1 == CONST0_RTX (mode))
3045 return op0;
3046 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3047 return op0;
3048 goto canonicalize_shift;
3049
3050 case LSHIFTRT:
3051 if (trueop1 == CONST0_RTX (mode))
3052 return op0;
3053 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3054 return op0;
3055 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3056 if (GET_CODE (op0) == CLZ
3057 && CONST_INT_P (trueop1)
3058 && STORE_FLAG_VALUE == 1
3059 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3060 {
3061 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3062 unsigned HOST_WIDE_INT zero_val = 0;
3063
3064 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3065 && zero_val == GET_MODE_PRECISION (imode)
3066 && INTVAL (trueop1) == exact_log2 (zero_val))
3067 return simplify_gen_relational (EQ, mode, imode,
3068 XEXP (op0, 0), const0_rtx);
3069 }
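/* Worked example, assuming a 32-bit mode whose CLZ value at zero is
   defined to be 32: (clz x) ranges over 0..32, so (lshiftrt (clz x) 5)
   is 1 exactly when (clz x) == 32, i.e. when x == 0, and with
   STORE_FLAG_VALUE == 1 the whole expression is (eq x 0).  */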
3070 goto canonicalize_shift;
3071
3072 case SMIN:
3073 if (width <= HOST_BITS_PER_WIDE_INT
3074 && mode_signbit_p (mode, trueop1)
3075 && ! side_effects_p (op0))
3076 return op1;
3077 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3078 return op0;
3079 tem = simplify_associative_operation (code, mode, op0, op1);
3080 if (tem)
3081 return tem;
3082 break;
3083
3084 case SMAX:
3085 if (width <= HOST_BITS_PER_WIDE_INT
3086 && CONST_INT_P (trueop1)
3087 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3088 && ! side_effects_p (op0))
3089 return op1;
3090 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3091 return op0;
3092 tem = simplify_associative_operation (code, mode, op0, op1);
3093 if (tem)
3094 return tem;
3095 break;
3096
3097 case UMIN:
3098 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3099 return op1;
3100 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3101 return op0;
3102 tem = simplify_associative_operation (code, mode, op0, op1);
3103 if (tem)
3104 return tem;
3105 break;
3106
3107 case UMAX:
3108 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3109 return op1;
3110 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3111 return op0;
3112 tem = simplify_associative_operation (code, mode, op0, op1);
3113 if (tem)
3114 return tem;
3115 break;
3116
3117 case SS_PLUS:
3118 case US_PLUS:
3119 case SS_MINUS:
3120 case US_MINUS:
3121 case SS_MULT:
3122 case US_MULT:
3123 case SS_DIV:
3124 case US_DIV:
3125 /* ??? There are simplifications that can be done. */
3126 return 0;
3127
3128 case VEC_SELECT:
3129 if (!VECTOR_MODE_P (mode))
3130 {
3131 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3132 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3133 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3134 gcc_assert (XVECLEN (trueop1, 0) == 1);
3135 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3136
3137 if (GET_CODE (trueop0) == CONST_VECTOR)
3138 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3139 (trueop1, 0, 0)));
3140
3141 /* Extract a scalar element from a nested VEC_SELECT expression
3142 (with optional nested VEC_CONCAT expression). Some targets
3143 (i386) extract a scalar element from a vector using a chain of
3144 nested VEC_SELECT expressions. When the input operand is a memory
3145 operand, this operation can be simplified to a simple scalar
3146 load from an offset memory address. */
3147 if (GET_CODE (trueop0) == VEC_SELECT)
3148 {
3149 rtx op0 = XEXP (trueop0, 0);
3150 rtx op1 = XEXP (trueop0, 1);
3151
3152 enum machine_mode opmode = GET_MODE (op0);
3153 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3154 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3155
3156 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3157 int elem;
3158
3159 rtvec vec;
3160 rtx tmp_op, tmp;
3161
3162 gcc_assert (GET_CODE (op1) == PARALLEL);
3163 gcc_assert (i < n_elts);
3164
3165 /* Select the element pointed to by the nested selector. */
3166 elem = INTVAL (XVECEXP (op1, 0, i));
3167
3168 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3169 if (GET_CODE (op0) == VEC_CONCAT)
3170 {
3171 rtx op00 = XEXP (op0, 0);
3172 rtx op01 = XEXP (op0, 1);
3173
3174 enum machine_mode mode00, mode01;
3175 int n_elts00, n_elts01;
3176
3177 mode00 = GET_MODE (op00);
3178 mode01 = GET_MODE (op01);
3179
3180 /* Find out number of elements of each operand. */
3181 if (VECTOR_MODE_P (mode00))
3182 {
3183 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3184 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3185 }
3186 else
3187 n_elts00 = 1;
3188
3189 if (VECTOR_MODE_P (mode01))
3190 {
3191 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3192 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3193 }
3194 else
3195 n_elts01 = 1;
3196
3197 gcc_assert (n_elts == n_elts00 + n_elts01);
3198
3199 /* Select correct operand of VEC_CONCAT
3200 and adjust selector. */
3201 if (elem < n_elts01)
3202 tmp_op = op00;
3203 else
3204 {
3205 tmp_op = op01;
3206 elem -= n_elts00;
3207 }
3208 }
3209 else
3210 tmp_op = op0;
3211
3212 vec = rtvec_alloc (1);
3213 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3214
3215 tmp = gen_rtx_fmt_ee (code, mode,
3216 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3217 return tmp;
3218 }
3219 if (GET_CODE (trueop0) == VEC_DUPLICATE
3220 && GET_MODE (XEXP (trueop0, 0)) == mode)
3221 return XEXP (trueop0, 0);
3222 }
3223 else
3224 {
3225 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3226 gcc_assert (GET_MODE_INNER (mode)
3227 == GET_MODE_INNER (GET_MODE (trueop0)));
3228 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3229
3230 if (GET_CODE (trueop0) == CONST_VECTOR)
3231 {
3232 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3233 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3234 rtvec v = rtvec_alloc (n_elts);
3235 unsigned int i;
3236
3237 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3238 for (i = 0; i < n_elts; i++)
3239 {
3240 rtx x = XVECEXP (trueop1, 0, i);
3241
3242 gcc_assert (CONST_INT_P (x));
3243 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3244 INTVAL (x));
3245 }
3246
3247 return gen_rtx_CONST_VECTOR (mode, v);
3248 }
3249 }
3250
3251 if (XVECLEN (trueop1, 0) == 1
3252 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3253 && GET_CODE (trueop0) == VEC_CONCAT)
3254 {
3255 rtx vec = trueop0;
3256 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3257
3258 /* Try to find the element in the VEC_CONCAT. */
3259 while (GET_MODE (vec) != mode
3260 && GET_CODE (vec) == VEC_CONCAT)
3261 {
3262 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3263 if (offset < vec_size)
3264 vec = XEXP (vec, 0);
3265 else
3266 {
3267 offset -= vec_size;
3268 vec = XEXP (vec, 1);
3269 }
3270 vec = avoid_constant_pool_reference (vec);
3271 }
3272
3273 if (GET_MODE (vec) == mode)
3274 return vec;
3275 }
3276
3277 return 0;
3278 case VEC_CONCAT:
3279 {
3280 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3281 ? GET_MODE (trueop0)
3282 : GET_MODE_INNER (mode));
3283 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3284 ? GET_MODE (trueop1)
3285 : GET_MODE_INNER (mode));
3286
3287 gcc_assert (VECTOR_MODE_P (mode));
3288 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3289 == GET_MODE_SIZE (mode));
3290
3291 if (VECTOR_MODE_P (op0_mode))
3292 gcc_assert (GET_MODE_INNER (mode)
3293 == GET_MODE_INNER (op0_mode));
3294 else
3295 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3296
3297 if (VECTOR_MODE_P (op1_mode))
3298 gcc_assert (GET_MODE_INNER (mode)
3299 == GET_MODE_INNER (op1_mode));
3300 else
3301 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3302
3303 if ((GET_CODE (trueop0) == CONST_VECTOR
3304 || CONST_INT_P (trueop0)
3305 || GET_CODE (trueop0) == CONST_DOUBLE)
3306 && (GET_CODE (trueop1) == CONST_VECTOR
3307 || CONST_INT_P (trueop1)
3308 || GET_CODE (trueop1) == CONST_DOUBLE))
3309 {
3310 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3311 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3312 rtvec v = rtvec_alloc (n_elts);
3313 unsigned int i;
3314 unsigned in_n_elts = 1;
3315
3316 if (VECTOR_MODE_P (op0_mode))
3317 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3318 for (i = 0; i < n_elts; i++)
3319 {
3320 if (i < in_n_elts)
3321 {
3322 if (!VECTOR_MODE_P (op0_mode))
3323 RTVEC_ELT (v, i) = trueop0;
3324 else
3325 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3326 }
3327 else
3328 {
3329 if (!VECTOR_MODE_P (op1_mode))
3330 RTVEC_ELT (v, i) = trueop1;
3331 else
3332 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3333 i - in_n_elts);
3334 }
3335 }
3336
3337 return gen_rtx_CONST_VECTOR (mode, v);
3338 }
3339 }
3340 return 0;
3341
3342 default:
3343 gcc_unreachable ();
3344 }
3345
3346 return 0;
3347 }
3348
3349 rtx
3350 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3351 rtx op0, rtx op1)
3352 {
3353 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3354 HOST_WIDE_INT val;
3355 unsigned int width = GET_MODE_PRECISION (mode);
3356
3357 if (VECTOR_MODE_P (mode)
3358 && code != VEC_CONCAT
3359 && GET_CODE (op0) == CONST_VECTOR
3360 && GET_CODE (op1) == CONST_VECTOR)
3361 {
3362 unsigned n_elts = GET_MODE_NUNITS (mode);
3363 enum machine_mode op0mode = GET_MODE (op0);
3364 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3365 enum machine_mode op1mode = GET_MODE (op1);
3366 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3367 rtvec v = rtvec_alloc (n_elts);
3368 unsigned int i;
3369
3370 gcc_assert (op0_n_elts == n_elts);
3371 gcc_assert (op1_n_elts == n_elts);
3372 for (i = 0; i < n_elts; i++)
3373 {
3374 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3375 CONST_VECTOR_ELT (op0, i),
3376 CONST_VECTOR_ELT (op1, i));
3377 if (!x)
3378 return 0;
3379 RTVEC_ELT (v, i) = x;
3380 }
3381
3382 return gen_rtx_CONST_VECTOR (mode, v);
3383 }
3384
3385 if (VECTOR_MODE_P (mode)
3386 && code == VEC_CONCAT
3387 && (CONST_INT_P (op0)
3388 || GET_CODE (op0) == CONST_DOUBLE
3389 || GET_CODE (op0) == CONST_FIXED)
3390 && (CONST_INT_P (op1)
3391 || GET_CODE (op1) == CONST_DOUBLE
3392 || GET_CODE (op1) == CONST_FIXED))
3393 {
3394 unsigned n_elts = GET_MODE_NUNITS (mode);
3395 rtvec v = rtvec_alloc (n_elts);
3396
3397 gcc_assert (n_elts >= 2);
3398 if (n_elts == 2)
3399 {
3400 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3401 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3402
3403 RTVEC_ELT (v, 0) = op0;
3404 RTVEC_ELT (v, 1) = op1;
3405 }
3406 else
3407 {
3408 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3409 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3410 unsigned i;
3411
3412 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3413 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3414 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3415
3416 for (i = 0; i < op0_n_elts; ++i)
3417 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3418 for (i = 0; i < op1_n_elts; ++i)
3419 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3420 }
3421
3422 return gen_rtx_CONST_VECTOR (mode, v);
3423 }
3424
3425 if (SCALAR_FLOAT_MODE_P (mode)
3426 && GET_CODE (op0) == CONST_DOUBLE
3427 && GET_CODE (op1) == CONST_DOUBLE
3428 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3429 {
3430 if (code == AND
3431 || code == IOR
3432 || code == XOR)
3433 {
3434 long tmp0[4];
3435 long tmp1[4];
3436 REAL_VALUE_TYPE r;
3437 int i;
3438
3439 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3440 GET_MODE (op0));
3441 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3442 GET_MODE (op1));
3443 for (i = 0; i < 4; i++)
3444 {
3445 switch (code)
3446 {
3447 case AND:
3448 tmp0[i] &= tmp1[i];
3449 break;
3450 case IOR:
3451 tmp0[i] |= tmp1[i];
3452 break;
3453 case XOR:
3454 tmp0[i] ^= tmp1[i];
3455 break;
3456 default:
3457 gcc_unreachable ();
3458 }
3459 }
3460 real_from_target (&r, tmp0, mode);
3461 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3462 }
3463 else
3464 {
3465 REAL_VALUE_TYPE f0, f1, value, result;
3466 bool inexact;
3467
3468 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3469 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3470 real_convert (&f0, mode, &f0);
3471 real_convert (&f1, mode, &f1);
3472
3473 if (HONOR_SNANS (mode)
3474 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3475 return 0;
3476
3477 if (code == DIV
3478 && REAL_VALUES_EQUAL (f1, dconst0)
3479 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3480 return 0;
3481
3482 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3483 && flag_trapping_math
3484 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3485 {
3486 int s0 = REAL_VALUE_NEGATIVE (f0);
3487 int s1 = REAL_VALUE_NEGATIVE (f1);
3488
3489 switch (code)
3490 {
3491 case PLUS:
3492 /* Inf + -Inf = NaN plus exception. */
3493 if (s0 != s1)
3494 return 0;
3495 break;
3496 case MINUS:
3497 /* Inf - Inf = NaN plus exception. */
3498 if (s0 == s1)
3499 return 0;
3500 break;
3501 case DIV:
3502 /* Inf / Inf = NaN plus exception. */
3503 return 0;
3504 default:
3505 break;
3506 }
3507 }
3508
3509 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3510 && flag_trapping_math
3511 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3512 || (REAL_VALUE_ISINF (f1)
3513 && REAL_VALUES_EQUAL (f0, dconst0))))
3514 /* Inf * 0 = NaN plus exception. */
3515 return 0;
3516
3517 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3518 &f0, &f1);
3519 real_convert (&result, mode, &value);
3520
3521 /* Don't constant fold this floating point operation if
3522 the result has overflowed and flag_trapping_math is set. */
3523
3524 if (flag_trapping_math
3525 && MODE_HAS_INFINITIES (mode)
3526 && REAL_VALUE_ISINF (result)
3527 && !REAL_VALUE_ISINF (f0)
3528 && !REAL_VALUE_ISINF (f1))
3529 /* Overflow plus exception. */
3530 return 0;
3531
3532 /* Don't constant fold this floating point operation if the
3533 result may depend upon the run-time rounding mode and
3534 flag_rounding_math is set, or if GCC's software emulation
3535 is unable to accurately represent the result. */
3536
3537 if ((flag_rounding_math
3538 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3539 && (inexact || !real_identical (&result, &value)))
3540 return NULL_RTX;
3541
3542 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3543 }
3544 }
3545
3546 /* We can fold some multi-word operations. */
3547 if (GET_MODE_CLASS (mode) == MODE_INT
3548 && width == HOST_BITS_PER_DOUBLE_INT
3549 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3550 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3551 {
3552 double_int o0, o1, res, tmp;
3553
3554 o0 = rtx_to_double_int (op0);
3555 o1 = rtx_to_double_int (op1);
3556
3557 switch (code)
3558 {
3559 case MINUS:
3560 /* A - B == A + (-B). */
3561 o1 = double_int_neg (o1);
3562
3563 /* Fall through.... */
3564
3565 case PLUS:
3566 res = double_int_add (o0, o1);
3567 break;
3568
3569 case MULT:
3570 res = double_int_mul (o0, o1);
3571 break;
3572
3573 case DIV:
3574 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3575 o0.low, o0.high, o1.low, o1.high,
3576 &res.low, &res.high,
3577 &tmp.low, &tmp.high))
3578 return 0;
3579 break;
3580
3581 case MOD:
3582 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3583 o0.low, o0.high, o1.low, o1.high,
3584 &tmp.low, &tmp.high,
3585 &res.low, &res.high))
3586 return 0;
3587 break;
3588
3589 case UDIV:
3590 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3591 o0.low, o0.high, o1.low, o1.high,
3592 &res.low, &res.high,
3593 &tmp.low, &tmp.high))
3594 return 0;
3595 break;
3596
3597 case UMOD:
3598 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3599 o0.low, o0.high, o1.low, o1.high,
3600 &tmp.low, &tmp.high,
3601 &res.low, &res.high))
3602 return 0;
3603 break;
3604
3605 case AND:
3606 res = double_int_and (o0, o1);
3607 break;
3608
3609 case IOR:
3610 res = double_int_ior (o0, o1);
3611 break;
3612
3613 case XOR:
3614 res = double_int_xor (o0, o1);
3615 break;
3616
3617 case SMIN:
3618 res = double_int_smin (o0, o1);
3619 break;
3620
3621 case SMAX:
3622 res = double_int_smax (o0, o1);
3623 break;
3624
3625 case UMIN:
3626 res = double_int_umin (o0, o1);
3627 break;
3628
3629 case UMAX:
3630 res = double_int_umax (o0, o1);
3631 break;
3632
3633 case LSHIFTRT: case ASHIFTRT:
3634 case ASHIFT:
3635 case ROTATE: case ROTATERT:
3636 {
3637 unsigned HOST_WIDE_INT cnt;
3638
3639 if (SHIFT_COUNT_TRUNCATED)
3640 o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));
3641
3642 if (!double_int_fits_in_uhwi_p (o1)
3643 || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
3644 return 0;
3645
3646 cnt = double_int_to_uhwi (o1);
3647
3648 if (code == LSHIFTRT || code == ASHIFTRT)
3649 res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
3650 code == ASHIFTRT);
3651 else if (code == ASHIFT)
3652 res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
3653 true);
3654 else if (code == ROTATE)
3655 res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
3656 else /* code == ROTATERT */
3657 res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
3658 }
3659 break;
3660
3661 default:
3662 return 0;
3663 }
3664
3665 return immed_double_int_const (res, mode);
3666 }
3667
3668 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3669 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3670 {
3671 /* Get the integer argument values in two forms:
3672 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3673
3674 arg0 = INTVAL (op0);
3675 arg1 = INTVAL (op1);
3676
3677 if (width < HOST_BITS_PER_WIDE_INT)
3678 {
3679 arg0 &= GET_MODE_MASK (mode);
3680 arg1 &= GET_MODE_MASK (mode);
3681
3682 arg0s = arg0;
3683 if (val_signbit_known_set_p (mode, arg0s))
3684 arg0s |= ~GET_MODE_MASK (mode);
3685
3686 arg1s = arg1;
3687 if (val_signbit_known_set_p (mode, arg1s))
3688 arg1s |= ~GET_MODE_MASK (mode);
3689 }
3690 else
3691 {
3692 arg0s = arg0;
3693 arg1s = arg1;
3694 }
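/* For example, assuming QImode (width == 8) with INTVAL (op0) == -1:
   arg0 is masked down to 0xFF (the zero-extended view) while arg0s keeps
   the sign bit and stays -1 (the sign-extended view); the signed and
   unsigned cases below pick whichever form they need.  */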
3695
3696 /* Compute the value of the arithmetic. */
3697
3698 switch (code)
3699 {
3700 case PLUS:
3701 val = arg0s + arg1s;
3702 break;
3703
3704 case MINUS:
3705 val = arg0s - arg1s;
3706 break;
3707
3708 case MULT:
3709 val = arg0s * arg1s;
3710 break;
3711
3712 case DIV:
3713 if (arg1s == 0
3714 || ((unsigned HOST_WIDE_INT) arg0s
3715 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3716 && arg1s == -1))
3717 return 0;
3718 val = arg0s / arg1s;
3719 break;
3720
3721 case MOD:
3722 if (arg1s == 0
3723 || ((unsigned HOST_WIDE_INT) arg0s
3724 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3725 && arg1s == -1))
3726 return 0;
3727 val = arg0s % arg1s;
3728 break;
3729
3730 case UDIV:
3731 if (arg1 == 0
3732 || ((unsigned HOST_WIDE_INT) arg0s
3733 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3734 && arg1s == -1))
3735 return 0;
3736 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3737 break;
3738
3739 case UMOD:
3740 if (arg1 == 0
3741 || ((unsigned HOST_WIDE_INT) arg0s
3742 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3743 && arg1s == -1))
3744 return 0;
3745 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3746 break;
3747
3748 case AND:
3749 val = arg0 & arg1;
3750 break;
3751
3752 case IOR:
3753 val = arg0 | arg1;
3754 break;
3755
3756 case XOR:
3757 val = arg0 ^ arg1;
3758 break;
3759
3760 case LSHIFTRT:
3761 case ASHIFT:
3762 case ASHIFTRT:
3763 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3764 the value is in range. We can't return any old value for
3765 out-of-range arguments because either the middle-end (via
3766 shift_truncation_mask) or the back-end might be relying on
3767 target-specific knowledge. Nor can we rely on
3768 shift_truncation_mask, since the shift might not be part of an
3769 ashlM3, lshrM3 or ashrM3 instruction. */
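 /* For example (illustrative): with SHIFT_COUNT_TRUNCATED and a 32-bit
 MODE, a constant count of 33 is folded below as a shift by 1; without
 it, any count outside [0, bitsize) simply makes us give up. */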
3770 if (SHIFT_COUNT_TRUNCATED)
3771 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3772 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3773 return 0;
3774
3775 val = (code == ASHIFT
3776 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3777 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3778
3779 /* Sign-extend the result for arithmetic right shifts. */
3780 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3781 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3782 break;
3783
3784 case ROTATERT:
3785 if (arg1 < 0)
3786 return 0;
3787
3788 arg1 %= width;
3789 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3790 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3791 break;
3792
3793 case ROTATE:
3794 if (arg1 < 0)
3795 return 0;
3796
3797 arg1 %= width;
3798 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3799 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3800 break;
3801
3802 case COMPARE:
3803 /* Do nothing here. */
3804 return 0;
3805
3806 case SMIN:
3807 val = arg0s <= arg1s ? arg0s : arg1s;
3808 break;
3809
3810 case UMIN:
3811 val = ((unsigned HOST_WIDE_INT) arg0
3812 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3813 break;
3814
3815 case SMAX:
3816 val = arg0s > arg1s ? arg0s : arg1s;
3817 break;
3818
3819 case UMAX:
3820 val = ((unsigned HOST_WIDE_INT) arg0
3821 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3822 break;
3823
3824 case SS_PLUS:
3825 case US_PLUS:
3826 case SS_MINUS:
3827 case US_MINUS:
3828 case SS_MULT:
3829 case US_MULT:
3830 case SS_DIV:
3831 case US_DIV:
3832 case SS_ASHIFT:
3833 case US_ASHIFT:
3834 /* ??? There are simplifications that can be done. */
3835 return 0;
3836
3837 default:
3838 gcc_unreachable ();
3839 }
3840
3841 return gen_int_mode (val, mode);
3842 }
3843
3844 return NULL_RTX;
3845 }
3846
3847
3848 \f
3849 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3850 PLUS or MINUS.
3851
 3852 Rather than test for specific cases, we do this by a brute-force method
3853 and do all possible simplifications until no more changes occur. Then
3854 we rebuild the operation. */
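 /* As an illustrative sketch (hypothetical register A): simplifying
 (minus (plus (reg A) (const_int 3)) (plus (reg A) (const_int 1)))
 first expands the operand array to { +A, -A, +3, -1 }; the combination
 loop then cancels A against -A and folds the constants, and the result
 is rebuilt as (const_int 2). */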
3855
3856 struct simplify_plus_minus_op_data
3857 {
3858 rtx op;
3859 short neg;
3860 };
3861
3862 static bool
3863 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3864 {
3865 int result;
3866
3867 result = (commutative_operand_precedence (y)
3868 - commutative_operand_precedence (x));
3869 if (result)
3870 return result > 0;
3871
3872 /* Group together equal REGs to do more simplification. */
3873 if (REG_P (x) && REG_P (y))
3874 return REGNO (x) > REGNO (y);
3875 else
3876 return false;
3877 }
3878
3879 static rtx
3880 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3881 rtx op1)
3882 {
3883 struct simplify_plus_minus_op_data ops[8];
3884 rtx result, tem;
3885 int n_ops = 2, input_ops = 2;
3886 int changed, n_constants = 0, canonicalized = 0;
3887 int i, j;
3888
3889 memset (ops, 0, sizeof ops);
3890
3891 /* Set up the two operands and then expand them until nothing has been
3892 changed. If we run out of room in our array, give up; this should
3893 almost never happen. */
3894
3895 ops[0].op = op0;
3896 ops[0].neg = 0;
3897 ops[1].op = op1;
3898 ops[1].neg = (code == MINUS);
3899
3900 do
3901 {
3902 changed = 0;
3903
3904 for (i = 0; i < n_ops; i++)
3905 {
3906 rtx this_op = ops[i].op;
3907 int this_neg = ops[i].neg;
3908 enum rtx_code this_code = GET_CODE (this_op);
3909
3910 switch (this_code)
3911 {
3912 case PLUS:
3913 case MINUS:
3914 if (n_ops == 7)
3915 return NULL_RTX;
3916
3917 ops[n_ops].op = XEXP (this_op, 1);
3918 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3919 n_ops++;
3920
3921 ops[i].op = XEXP (this_op, 0);
3922 input_ops++;
3923 changed = 1;
3924 canonicalized |= this_neg;
3925 break;
3926
3927 case NEG:
3928 ops[i].op = XEXP (this_op, 0);
3929 ops[i].neg = ! this_neg;
3930 changed = 1;
3931 canonicalized = 1;
3932 break;
3933
3934 case CONST:
3935 if (n_ops < 7
3936 && GET_CODE (XEXP (this_op, 0)) == PLUS
3937 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3938 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3939 {
3940 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3941 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3942 ops[n_ops].neg = this_neg;
3943 n_ops++;
3944 changed = 1;
3945 canonicalized = 1;
3946 }
3947 break;
3948
3949 case NOT:
3950 /* ~a -> (-a - 1) */
3951 if (n_ops != 7)
3952 {
3953 ops[n_ops].op = constm1_rtx;
3954 ops[n_ops++].neg = this_neg;
3955 ops[i].op = XEXP (this_op, 0);
3956 ops[i].neg = !this_neg;
3957 changed = 1;
3958 canonicalized = 1;
3959 }
3960 break;
3961
3962 case CONST_INT:
3963 n_constants++;
3964 if (this_neg)
3965 {
3966 ops[i].op = neg_const_int (mode, this_op);
3967 ops[i].neg = 0;
3968 changed = 1;
3969 canonicalized = 1;
3970 }
3971 break;
3972
3973 default:
3974 break;
3975 }
3976 }
3977 }
3978 while (changed);
3979
3980 if (n_constants > 1)
3981 canonicalized = 1;
3982
3983 gcc_assert (n_ops >= 2);
3984
3985 /* If we only have two operands, we can avoid the loops. */
3986 if (n_ops == 2)
3987 {
3988 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3989 rtx lhs, rhs;
3990
3991 /* Get the two operands. Be careful with the order, especially for
3992 the cases where code == MINUS. */
3993 if (ops[0].neg && ops[1].neg)
3994 {
3995 lhs = gen_rtx_NEG (mode, ops[0].op);
3996 rhs = ops[1].op;
3997 }
3998 else if (ops[0].neg)
3999 {
4000 lhs = ops[1].op;
4001 rhs = ops[0].op;
4002 }
4003 else
4004 {
4005 lhs = ops[0].op;
4006 rhs = ops[1].op;
4007 }
4008
4009 return simplify_const_binary_operation (code, mode, lhs, rhs);
4010 }
4011
4012 /* Now simplify each pair of operands until nothing changes. */
4013 do
4014 {
4015 /* Insertion sort is good enough for an eight-element array. */
4016 for (i = 1; i < n_ops; i++)
4017 {
4018 struct simplify_plus_minus_op_data save;
4019 j = i - 1;
4020 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4021 continue;
4022
4023 canonicalized = 1;
4024 save = ops[i];
4025 do
4026 ops[j + 1] = ops[j];
4027 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4028 ops[j + 1] = save;
4029 }
4030
4031 changed = 0;
4032 for (i = n_ops - 1; i > 0; i--)
4033 for (j = i - 1; j >= 0; j--)
4034 {
4035 rtx lhs = ops[j].op, rhs = ops[i].op;
4036 int lneg = ops[j].neg, rneg = ops[i].neg;
4037
4038 if (lhs != 0 && rhs != 0)
4039 {
4040 enum rtx_code ncode = PLUS;
4041
4042 if (lneg != rneg)
4043 {
4044 ncode = MINUS;
4045 if (lneg)
4046 tem = lhs, lhs = rhs, rhs = tem;
4047 }
4048 else if (swap_commutative_operands_p (lhs, rhs))
4049 tem = lhs, lhs = rhs, rhs = tem;
4050
4051 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4052 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4053 {
4054 rtx tem_lhs, tem_rhs;
4055
4056 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4057 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4058 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4059
4060 if (tem && !CONSTANT_P (tem))
4061 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4062 }
4063 else
4064 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4065
4066 /* Reject "simplifications" that just wrap the two
4067 arguments in a CONST. Failure to do so can result
4068 in infinite recursion with simplify_binary_operation
4069 when it calls us to simplify CONST operations. */
4070 if (tem
4071 && ! (GET_CODE (tem) == CONST
4072 && GET_CODE (XEXP (tem, 0)) == ncode
4073 && XEXP (XEXP (tem, 0), 0) == lhs
4074 && XEXP (XEXP (tem, 0), 1) == rhs))
4075 {
4076 lneg &= rneg;
4077 if (GET_CODE (tem) == NEG)
4078 tem = XEXP (tem, 0), lneg = !lneg;
4079 if (CONST_INT_P (tem) && lneg)
4080 tem = neg_const_int (mode, tem), lneg = 0;
4081
4082 ops[i].op = tem;
4083 ops[i].neg = lneg;
4084 ops[j].op = NULL_RTX;
4085 changed = 1;
4086 canonicalized = 1;
4087 }
4088 }
4089 }
4090
4091 /* If nothing changed, fail. */
4092 if (!canonicalized)
4093 return NULL_RTX;
4094
4095 /* Pack all the operands to the lower-numbered entries. */
4096 for (i = 0, j = 0; j < n_ops; j++)
4097 if (ops[j].op)
4098 {
4099 ops[i] = ops[j];
4100 i++;
4101 }
4102 n_ops = i;
4103 }
4104 while (changed);
4105
4106 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4107 if (n_ops == 2
4108 && CONST_INT_P (ops[1].op)
4109 && CONSTANT_P (ops[0].op)
4110 && ops[0].neg)
4111 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4112
4113 /* We suppressed creation of trivial CONST expressions in the
4114 combination loop to avoid recursion. Create one manually now.
4115 The combination loop should have ensured that there is exactly
4116 one CONST_INT, and the sort will have ensured that it is last
4117 in the array and that any other constant will be next-to-last. */
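 /* Illustrative case: if the surviving operands are a symbol S and
 (const_int 4), both non-negated, the plus_constant call below produces
 (const (plus (symbol_ref S) (const_int 4))). */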
4118
4119 if (n_ops > 1
4120 && CONST_INT_P (ops[n_ops - 1].op)
4121 && CONSTANT_P (ops[n_ops - 2].op))
4122 {
4123 rtx value = ops[n_ops - 1].op;
4124 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4125 value = neg_const_int (mode, value);
4126 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
4127 n_ops--;
4128 }
4129
4130 /* Put a non-negated operand first, if possible. */
4131
4132 for (i = 0; i < n_ops && ops[i].neg; i++)
4133 continue;
4134 if (i == n_ops)
4135 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4136 else if (i != 0)
4137 {
4138 tem = ops[0].op;
4139 ops[0] = ops[i];
4140 ops[i].op = tem;
4141 ops[i].neg = 1;
4142 }
4143
4144 /* Now make the result by performing the requested operations. */
4145 result = ops[0].op;
4146 for (i = 1; i < n_ops; i++)
4147 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4148 mode, result, ops[i].op);
4149
4150 return result;
4151 }
4152
4153 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4154 static bool
4155 plus_minus_operand_p (const_rtx x)
4156 {
4157 return GET_CODE (x) == PLUS
4158 || GET_CODE (x) == MINUS
4159 || (GET_CODE (x) == CONST
4160 && GET_CODE (XEXP (x, 0)) == PLUS
4161 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4162 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4163 }
4164
4165 /* Like simplify_binary_operation except used for relational operators.
 4166 MODE is the mode of the result. If MODE is VOIDmode, the operands must
 4167 not both be VOIDmode as well.
4168
 4169 CMP_MODE specifies the mode in which the comparison is done, so it is
4170 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4171 the operands or, if both are VOIDmode, the operands are compared in
4172 "infinite precision". */
4173 rtx
4174 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4175 enum machine_mode cmp_mode, rtx op0, rtx op1)
4176 {
4177 rtx tem, trueop0, trueop1;
4178
4179 if (cmp_mode == VOIDmode)
4180 cmp_mode = GET_MODE (op0);
4181 if (cmp_mode == VOIDmode)
4182 cmp_mode = GET_MODE (op1);
4183
4184 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4185 if (tem)
4186 {
4187 if (SCALAR_FLOAT_MODE_P (mode))
4188 {
4189 if (tem == const0_rtx)
4190 return CONST0_RTX (mode);
4191 #ifdef FLOAT_STORE_FLAG_VALUE
4192 {
4193 REAL_VALUE_TYPE val;
4194 val = FLOAT_STORE_FLAG_VALUE (mode);
4195 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4196 }
4197 #else
4198 return NULL_RTX;
4199 #endif
4200 }
4201 if (VECTOR_MODE_P (mode))
4202 {
4203 if (tem == const0_rtx)
4204 return CONST0_RTX (mode);
4205 #ifdef VECTOR_STORE_FLAG_VALUE
4206 {
4207 int i, units;
4208 rtvec v;
4209
4210 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4211 if (val == NULL_RTX)
4212 return NULL_RTX;
4213 if (val == const1_rtx)
4214 return CONST1_RTX (mode);
4215
4216 units = GET_MODE_NUNITS (mode);
4217 v = rtvec_alloc (units);
4218 for (i = 0; i < units; i++)
4219 RTVEC_ELT (v, i) = val;
4220 return gen_rtx_raw_CONST_VECTOR (mode, v);
4221 }
4222 #else
4223 return NULL_RTX;
4224 #endif
4225 }
4226
4227 return tem;
4228 }
4229
4230 /* For the following tests, ensure const0_rtx is op1. */
4231 if (swap_commutative_operands_p (op0, op1)
4232 || (op0 == const0_rtx && op1 != const0_rtx))
4233 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4234
4235 /* If op0 is a compare, extract the comparison arguments from it. */
4236 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4237 return simplify_gen_relational (code, mode, VOIDmode,
4238 XEXP (op0, 0), XEXP (op0, 1));
4239
4240 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4241 || CC0_P (op0))
4242 return NULL_RTX;
4243
4244 trueop0 = avoid_constant_pool_reference (op0);
4245 trueop1 = avoid_constant_pool_reference (op1);
4246 return simplify_relational_operation_1 (code, mode, cmp_mode,
4247 trueop0, trueop1);
4248 }
4249
4250 /* This part of simplify_relational_operation is only used when CMP_MODE
4251 is not in class MODE_CC (i.e. it is a real comparison).
4252
 4253 MODE is the mode of the result, while CMP_MODE specifies the mode in
 4254 which the comparison is done, so it is the mode of the operands. */
4255
4256 static rtx
4257 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4258 enum machine_mode cmp_mode, rtx op0, rtx op1)
4259 {
4260 enum rtx_code op0code = GET_CODE (op0);
4261
4262 if (op1 == const0_rtx && COMPARISON_P (op0))
4263 {
4264 /* If op0 is a comparison, extract the comparison arguments
4265 from it. */
4266 if (code == NE)
4267 {
4268 if (GET_MODE (op0) == mode)
4269 return simplify_rtx (op0);
4270 else
4271 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4272 XEXP (op0, 0), XEXP (op0, 1));
4273 }
4274 else if (code == EQ)
4275 {
4276 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4277 if (new_code != UNKNOWN)
4278 return simplify_gen_relational (new_code, mode, VOIDmode,
4279 XEXP (op0, 0), XEXP (op0, 1));
4280 }
4281 }
4282
4283 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4284 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
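 /* Concretely (an illustrative 8-bit case): (ltu (plus x (const_int 4))
 (const_int 4)) asks whether the addition wrapped around, which becomes
 (geu x (const_int -4)), i.e. x is one of the four largest unsigned
 values. */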
4285 if ((code == LTU || code == GEU)
4286 && GET_CODE (op0) == PLUS
4287 && CONST_INT_P (XEXP (op0, 1))
4288 && (rtx_equal_p (op1, XEXP (op0, 0))
4289 || rtx_equal_p (op1, XEXP (op0, 1))))
4290 {
4291 rtx new_cmp
4292 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4293 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4294 cmp_mode, XEXP (op0, 0), new_cmp);
4295 }
4296
4297 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4298 if ((code == LTU || code == GEU)
4299 && GET_CODE (op0) == PLUS
4300 && rtx_equal_p (op1, XEXP (op0, 1))
4301 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4302 && !rtx_equal_p (op1, XEXP (op0, 0)))
4303 return simplify_gen_relational (code, mode, cmp_mode, op0,
4304 copy_rtx (XEXP (op0, 0)));
4305
4306 if (op1 == const0_rtx)
4307 {
4308 /* Canonicalize (GTU x 0) as (NE x 0). */
4309 if (code == GTU)
4310 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4311 /* Canonicalize (LEU x 0) as (EQ x 0). */
4312 if (code == LEU)
4313 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4314 }
4315 else if (op1 == const1_rtx)
4316 {
4317 switch (code)
4318 {
4319 case GE:
4320 /* Canonicalize (GE x 1) as (GT x 0). */
4321 return simplify_gen_relational (GT, mode, cmp_mode,
4322 op0, const0_rtx);
4323 case GEU:
4324 /* Canonicalize (GEU x 1) as (NE x 0). */
4325 return simplify_gen_relational (NE, mode, cmp_mode,
4326 op0, const0_rtx);
4327 case LT:
4328 /* Canonicalize (LT x 1) as (LE x 0). */
4329 return simplify_gen_relational (LE, mode, cmp_mode,
4330 op0, const0_rtx);
4331 case LTU:
4332 /* Canonicalize (LTU x 1) as (EQ x 0). */
4333 return simplify_gen_relational (EQ, mode, cmp_mode,
4334 op0, const0_rtx);
4335 default:
4336 break;
4337 }
4338 }
4339 else if (op1 == constm1_rtx)
4340 {
4341 /* Canonicalize (LE x -1) as (LT x 0). */
4342 if (code == LE)
4343 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4344 /* Canonicalize (GT x -1) as (GE x 0). */
4345 if (code == GT)
4346 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4347 }
4348
4349 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4350 if ((code == EQ || code == NE)
4351 && (op0code == PLUS || op0code == MINUS)
4352 && CONSTANT_P (op1)
4353 && CONSTANT_P (XEXP (op0, 1))
4354 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4355 {
4356 rtx x = XEXP (op0, 0);
4357 rtx c = XEXP (op0, 1);
4358
4359 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4360 cmp_mode, op1, c);
4361 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4362 }
4363
 4364 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4365 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4366 if (code == NE
4367 && op1 == const0_rtx
4368 && GET_MODE_CLASS (mode) == MODE_INT
4369 && cmp_mode != VOIDmode
4370 /* ??? Work-around BImode bugs in the ia64 backend. */
4371 && mode != BImode
4372 && cmp_mode != BImode
4373 && nonzero_bits (op0, cmp_mode) == 1
4374 && STORE_FLAG_VALUE == 1)
4375 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4376 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4377 : lowpart_subreg (mode, op0, cmp_mode);
4378
4379 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4380 if ((code == EQ || code == NE)
4381 && op1 == const0_rtx
4382 && op0code == XOR)
4383 return simplify_gen_relational (code, mode, cmp_mode,
4384 XEXP (op0, 0), XEXP (op0, 1));
4385
4386 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4387 if ((code == EQ || code == NE)
4388 && op0code == XOR
4389 && rtx_equal_p (XEXP (op0, 0), op1)
4390 && !side_effects_p (XEXP (op0, 0)))
4391 return simplify_gen_relational (code, mode, cmp_mode,
4392 XEXP (op0, 1), const0_rtx);
4393
4394 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4395 if ((code == EQ || code == NE)
4396 && op0code == XOR
4397 && rtx_equal_p (XEXP (op0, 1), op1)
4398 && !side_effects_p (XEXP (op0, 1)))
4399 return simplify_gen_relational (code, mode, cmp_mode,
4400 XEXP (op0, 0), const0_rtx);
4401
4402 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4403 if ((code == EQ || code == NE)
4404 && op0code == XOR
4405 && (CONST_INT_P (op1)
4406 || GET_CODE (op1) == CONST_DOUBLE)
4407 && (CONST_INT_P (XEXP (op0, 1))
4408 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4409 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4410 simplify_gen_binary (XOR, cmp_mode,
4411 XEXP (op0, 1), op1));
4412
4413 if (op0code == POPCOUNT && op1 == const0_rtx)
4414 switch (code)
4415 {
4416 case EQ:
4417 case LE:
4418 case LEU:
4419 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4420 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4421 XEXP (op0, 0), const0_rtx);
4422
4423 case NE:
4424 case GT:
4425 case GTU:
4426 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4427 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4428 XEXP (op0, 0), const0_rtx);
4429
4430 default:
4431 break;
4432 }
4433
4434 return NULL_RTX;
4435 }
4436
4437 enum
4438 {
4439 CMP_EQ = 1,
4440 CMP_LT = 2,
4441 CMP_GT = 4,
4442 CMP_LTU = 8,
4443 CMP_GTU = 16
4444 };
4445
4446
4447 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
 4448 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
 4449 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4450 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4451 For floating-point comparisons, assume that the operands were ordered. */
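 /* Illustrative sketch: for constant operands 2 and 5 the caller passes
 CMP_LT | CMP_LTU, so LT, LTU, LE, LEU and NE all yield const_true_rtx
 while GT, GTU, GE, GEU and EQ yield const0_rtx. */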
4452
4453 static rtx
4454 comparison_result (enum rtx_code code, int known_results)
4455 {
4456 switch (code)
4457 {
4458 case EQ:
4459 case UNEQ:
4460 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4461 case NE:
4462 case LTGT:
4463 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4464
4465 case LT:
4466 case UNLT:
4467 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4468 case GE:
4469 case UNGE:
4470 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4471
4472 case GT:
4473 case UNGT:
4474 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4475 case LE:
4476 case UNLE:
4477 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4478
4479 case LTU:
4480 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4481 case GEU:
4482 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4483
4484 case GTU:
4485 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4486 case LEU:
4487 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4488
4489 case ORDERED:
4490 return const_true_rtx;
4491 case UNORDERED:
4492 return const0_rtx;
4493 default:
4494 gcc_unreachable ();
4495 }
4496 }
4497
4498 /* Check if the given comparison (done in the given MODE) is actually a
4499 tautology or a contradiction.
4500 If no simplification is possible, this function returns zero.
4501 Otherwise, it returns either const_true_rtx or const0_rtx. */
4502
4503 rtx
4504 simplify_const_relational_operation (enum rtx_code code,
4505 enum machine_mode mode,
4506 rtx op0, rtx op1)
4507 {
4508 rtx tem;
4509 rtx trueop0;
4510 rtx trueop1;
4511
4512 gcc_assert (mode != VOIDmode
4513 || (GET_MODE (op0) == VOIDmode
4514 && GET_MODE (op1) == VOIDmode));
4515
4516 /* If op0 is a compare, extract the comparison arguments from it. */
4517 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4518 {
4519 op1 = XEXP (op0, 1);
4520 op0 = XEXP (op0, 0);
4521
4522 if (GET_MODE (op0) != VOIDmode)
4523 mode = GET_MODE (op0);
4524 else if (GET_MODE (op1) != VOIDmode)
4525 mode = GET_MODE (op1);
4526 else
4527 return 0;
4528 }
4529
4530 /* We can't simplify MODE_CC values since we don't know what the
4531 actual comparison is. */
4532 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4533 return 0;
4534
4535 /* Make sure the constant is second. */
4536 if (swap_commutative_operands_p (op0, op1))
4537 {
4538 tem = op0, op0 = op1, op1 = tem;
4539 code = swap_condition (code);
4540 }
4541
4542 trueop0 = avoid_constant_pool_reference (op0);
4543 trueop1 = avoid_constant_pool_reference (op1);
4544
 4545 /* For integer comparisons of A and B, maybe we can simplify A - B and
4546 then simplify a comparison of that with zero. If A and B are both either
4547 a register or a CONST_INT, this can't help; testing for these cases will
4548 prevent infinite recursion here and speed things up.
4549
4550 We can only do this for EQ and NE comparisons as otherwise we may
4551 lose or introduce overflow which we cannot disregard as undefined as
4552 we do not know the signedness of the operation on either the left or
4553 the right hand side of the comparison. */
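 /* An illustrative case: comparing (plus (reg X) (const_int 1)) with
 (plus (reg X) (const_int 2)) for EQ: the MINUS folds to (const_int -1),
 and the recursive comparison of that with zero yields const0_rtx. */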
4554
4555 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4556 && (code == EQ || code == NE)
4557 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4558 && (REG_P (op1) || CONST_INT_P (trueop1)))
4559 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4560 /* We cannot do this if tem is a nonzero address. */
4561 && ! nonzero_address_p (tem))
4562 return simplify_const_relational_operation (signed_condition (code),
4563 mode, tem, const0_rtx);
4564
4565 if (! HONOR_NANS (mode) && code == ORDERED)
4566 return const_true_rtx;
4567
4568 if (! HONOR_NANS (mode) && code == UNORDERED)
4569 return const0_rtx;
4570
4571 /* For modes without NaNs, if the two operands are equal, we know the
4572 result except if they have side-effects. Even with NaNs we know
4573 the result of unordered comparisons and, if signaling NaNs are
4574 irrelevant, also the result of LT/GT/LTGT. */
4575 if ((! HONOR_NANS (GET_MODE (trueop0))
4576 || code == UNEQ || code == UNLE || code == UNGE
4577 || ((code == LT || code == GT || code == LTGT)
4578 && ! HONOR_SNANS (GET_MODE (trueop0))))
4579 && rtx_equal_p (trueop0, trueop1)
4580 && ! side_effects_p (trueop0))
4581 return comparison_result (code, CMP_EQ);
4582
4583 /* If the operands are floating-point constants, see if we can fold
4584 the result. */
4585 if (GET_CODE (trueop0) == CONST_DOUBLE
4586 && GET_CODE (trueop1) == CONST_DOUBLE
4587 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4588 {
4589 REAL_VALUE_TYPE d0, d1;
4590
4591 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4592 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4593
4594 /* Comparisons are unordered iff at least one of the values is NaN. */
4595 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4596 switch (code)
4597 {
4598 case UNEQ:
4599 case UNLT:
4600 case UNGT:
4601 case UNLE:
4602 case UNGE:
4603 case NE:
4604 case UNORDERED:
4605 return const_true_rtx;
4606 case EQ:
4607 case LT:
4608 case GT:
4609 case LE:
4610 case GE:
4611 case LTGT:
4612 case ORDERED:
4613 return const0_rtx;
4614 default:
4615 return 0;
4616 }
4617
4618 return comparison_result (code,
4619 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4620 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4621 }
4622
4623 /* Otherwise, see if the operands are both integers. */
4624 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4625 && (GET_CODE (trueop0) == CONST_DOUBLE
4626 || CONST_INT_P (trueop0))
4627 && (GET_CODE (trueop1) == CONST_DOUBLE
4628 || CONST_INT_P (trueop1)))
4629 {
4630 int width = GET_MODE_PRECISION (mode);
4631 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4632 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4633
4634 /* Get the two words comprising each integer constant. */
4635 if (GET_CODE (trueop0) == CONST_DOUBLE)
4636 {
4637 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4638 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4639 }
4640 else
4641 {
4642 l0u = l0s = INTVAL (trueop0);
4643 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4644 }
4645
4646 if (GET_CODE (trueop1) == CONST_DOUBLE)
4647 {
4648 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4649 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4650 }
4651 else
4652 {
4653 l1u = l1s = INTVAL (trueop1);
4654 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4655 }
4656
4657 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4658 we have to sign or zero-extend the values. */
4659 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4660 {
4661 l0u &= GET_MODE_MASK (mode);
4662 l1u &= GET_MODE_MASK (mode);
4663
4664 if (val_signbit_known_set_p (mode, l0s))
4665 l0s |= ~GET_MODE_MASK (mode);
4666
4667 if (val_signbit_known_set_p (mode, l1s))
4668 l1s |= ~GET_MODE_MASK (mode);
4669 }
4670 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4671 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4672
4673 if (h0u == h1u && l0u == l1u)
4674 return comparison_result (code, CMP_EQ);
4675 else
4676 {
4677 int cr;
4678 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4679 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4680 return comparison_result (code, cr);
4681 }
4682 }
4683
4684 /* Optimize comparisons with upper and lower bounds. */
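 /* Illustrative sketch: if nonzero_bits shows TRUEOP0 fits in [0, 255]
 (say it came from a QImode zero extension), then (gtu trueop0 255)
 folds to const0_rtx and (leu trueop0 255) folds to const_true_rtx
 below. */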
4685 if (HWI_COMPUTABLE_MODE_P (mode)
4686 && CONST_INT_P (trueop1))
4687 {
4688 int sign;
4689 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4690 HOST_WIDE_INT val = INTVAL (trueop1);
4691 HOST_WIDE_INT mmin, mmax;
4692
4693 if (code == GEU
4694 || code == LEU
4695 || code == GTU
4696 || code == LTU)
4697 sign = 0;
4698 else
4699 sign = 1;
4700
4701 /* Get a reduced range if the sign bit is zero. */
4702 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4703 {
4704 mmin = 0;
4705 mmax = nonzero;
4706 }
4707 else
4708 {
4709 rtx mmin_rtx, mmax_rtx;
4710 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4711
4712 mmin = INTVAL (mmin_rtx);
4713 mmax = INTVAL (mmax_rtx);
4714 if (sign)
4715 {
4716 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4717
4718 mmin >>= (sign_copies - 1);
4719 mmax >>= (sign_copies - 1);
4720 }
4721 }
4722
4723 switch (code)
4724 {
4725 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4726 case GEU:
4727 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4728 return const_true_rtx;
4729 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4730 return const0_rtx;
4731 break;
4732 case GE:
4733 if (val <= mmin)
4734 return const_true_rtx;
4735 if (val > mmax)
4736 return const0_rtx;
4737 break;
4738
4739 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4740 case LEU:
4741 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4742 return const_true_rtx;
4743 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4744 return const0_rtx;
4745 break;
4746 case LE:
4747 if (val >= mmax)
4748 return const_true_rtx;
4749 if (val < mmin)
4750 return const0_rtx;
4751 break;
4752
4753 case EQ:
4754 /* x == y is always false for y out of range. */
4755 if (val < mmin || val > mmax)
4756 return const0_rtx;
4757 break;
4758
4759 /* x > y is always false for y >= mmax, always true for y < mmin. */
4760 case GTU:
4761 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4762 return const0_rtx;
4763 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4764 return const_true_rtx;
4765 break;
4766 case GT:
4767 if (val >= mmax)
4768 return const0_rtx;
4769 if (val < mmin)
4770 return const_true_rtx;
4771 break;
4772
4773 /* x < y is always false for y <= mmin, always true for y > mmax. */
4774 case LTU:
4775 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4776 return const0_rtx;
4777 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4778 return const_true_rtx;
4779 break;
4780 case LT:
4781 if (val <= mmin)
4782 return const0_rtx;
4783 if (val > mmax)
4784 return const_true_rtx;
4785 break;
4786
4787 case NE:
4788 /* x != y is always true for y out of range. */
4789 if (val < mmin || val > mmax)
4790 return const_true_rtx;
4791 break;
4792
4793 default:
4794 break;
4795 }
4796 }
4797
4798 /* Optimize integer comparisons with zero. */
4799 if (trueop1 == const0_rtx)
4800 {
4801 /* Some addresses are known to be nonzero. We don't know
4802 their sign, but equality comparisons are known. */
4803 if (nonzero_address_p (trueop0))
4804 {
4805 if (code == EQ || code == LEU)
4806 return const0_rtx;
4807 if (code == NE || code == GTU)
4808 return const_true_rtx;
4809 }
4810
4811 /* See if the first operand is an IOR with a constant. If so, we
4812 may be able to determine the result of this comparison. */
4813 if (GET_CODE (op0) == IOR)
4814 {
4815 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4816 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4817 {
4818 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4819 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4820 && (UINTVAL (inner_const)
4821 & ((unsigned HOST_WIDE_INT) 1
4822 << sign_bitnum)));
4823
4824 switch (code)
4825 {
4826 case EQ:
4827 case LEU:
4828 return const0_rtx;
4829 case NE:
4830 case GTU:
4831 return const_true_rtx;
4832 case LT:
4833 case LE:
4834 if (has_sign)
4835 return const_true_rtx;
4836 break;
4837 case GT:
4838 case GE:
4839 if (has_sign)
4840 return const0_rtx;
4841 break;
4842 default:
4843 break;
4844 }
4845 }
4846 }
4847 }
4848
4849 /* Optimize comparison of ABS with zero. */
4850 if (trueop1 == CONST0_RTX (mode)
4851 && (GET_CODE (trueop0) == ABS
4852 || (GET_CODE (trueop0) == FLOAT_EXTEND
4853 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4854 {
4855 switch (code)
4856 {
4857 case LT:
4858 /* Optimize abs(x) < 0.0. */
4859 if (!HONOR_SNANS (mode)
4860 && (!INTEGRAL_MODE_P (mode)
4861 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4862 {
4863 if (INTEGRAL_MODE_P (mode)
4864 && (issue_strict_overflow_warning
4865 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4866 warning (OPT_Wstrict_overflow,
4867 ("assuming signed overflow does not occur when "
4868 "assuming abs (x) < 0 is false"));
4869 return const0_rtx;
4870 }
4871 break;
4872
4873 case GE:
4874 /* Optimize abs(x) >= 0.0. */
4875 if (!HONOR_NANS (mode)
4876 && (!INTEGRAL_MODE_P (mode)
4877 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4878 {
4879 if (INTEGRAL_MODE_P (mode)
4880 && (issue_strict_overflow_warning
4881 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4882 warning (OPT_Wstrict_overflow,
4883 ("assuming signed overflow does not occur when "
4884 "assuming abs (x) >= 0 is true"));
4885 return const_true_rtx;
4886 }
4887 break;
4888
4889 case UNGE:
4890 /* Optimize ! (abs(x) < 0.0). */
4891 return const_true_rtx;
4892
4893 default:
4894 break;
4895 }
4896 }
4897
4898 return 0;
4899 }
4900 \f
4901 /* Simplify CODE, an operation with result mode MODE and three operands,
4902 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
 4903 a constant. Return 0 if no simplification is possible. */
4904
4905 rtx
4906 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4907 enum machine_mode op0_mode, rtx op0, rtx op1,
4908 rtx op2)
4909 {
4910 unsigned int width = GET_MODE_PRECISION (mode);
4911 bool any_change = false;
4912 rtx tem;
4913
4914 /* VOIDmode means "infinite" precision. */
4915 if (width == 0)
4916 width = HOST_BITS_PER_WIDE_INT;
4917
4918 switch (code)
4919 {
4920 case FMA:
4921 /* Simplify negations around the multiplication. */
4922 /* -a * -b + c => a * b + c. */
4923 if (GET_CODE (op0) == NEG)
4924 {
4925 tem = simplify_unary_operation (NEG, mode, op1, mode);
4926 if (tem)
4927 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4928 }
4929 else if (GET_CODE (op1) == NEG)
4930 {
4931 tem = simplify_unary_operation (NEG, mode, op0, mode);
4932 if (tem)
4933 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4934 }
4935
4936 /* Canonicalize the two multiplication operands. */
4937 /* a * -b + c => -b * a + c. */
4938 if (swap_commutative_operands_p (op0, op1))
4939 tem = op0, op0 = op1, op1 = tem, any_change = true;
4940
4941 if (any_change)
4942 return gen_rtx_FMA (mode, op0, op1, op2);
4943 return NULL_RTX;
4944
4945 case SIGN_EXTRACT:
4946 case ZERO_EXTRACT:
4947 if (CONST_INT_P (op0)
4948 && CONST_INT_P (op1)
4949 && CONST_INT_P (op2)
4950 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4951 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4952 {
4953 /* Extracting a bit-field from a constant */
4954 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4955 HOST_WIDE_INT op1val = INTVAL (op1);
4956 HOST_WIDE_INT op2val = INTVAL (op2);
4957 if (BITS_BIG_ENDIAN)
4958 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4959 else
4960 val >>= op2val;
4961
4962 if (HOST_BITS_PER_WIDE_INT != op1val)
4963 {
4964 /* First zero-extend. */
4965 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4966 /* If desired, propagate sign bit. */
4967 if (code == SIGN_EXTRACT
4968 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4969 != 0)
4970 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4971 }
4972
4973 return gen_int_mode (val, mode);
4974 }
4975 break;
4976
4977 case IF_THEN_ELSE:
4978 if (CONST_INT_P (op0))
4979 return op0 != const0_rtx ? op1 : op2;
4980
4981 /* Convert c ? a : a into "a". */
4982 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4983 return op1;
4984
4985 /* Convert a != b ? a : b into "a". */
4986 if (GET_CODE (op0) == NE
4987 && ! side_effects_p (op0)
4988 && ! HONOR_NANS (mode)
4989 && ! HONOR_SIGNED_ZEROS (mode)
4990 && ((rtx_equal_p (XEXP (op0, 0), op1)
4991 && rtx_equal_p (XEXP (op0, 1), op2))
4992 || (rtx_equal_p (XEXP (op0, 0), op2)
4993 && rtx_equal_p (XEXP (op0, 1), op1))))
4994 return op1;
4995
4996 /* Convert a == b ? a : b into "b". */
4997 if (GET_CODE (op0) == EQ
4998 && ! side_effects_p (op0)
4999 && ! HONOR_NANS (mode)
5000 && ! HONOR_SIGNED_ZEROS (mode)
5001 && ((rtx_equal_p (XEXP (op0, 0), op1)
5002 && rtx_equal_p (XEXP (op0, 1), op2))
5003 || (rtx_equal_p (XEXP (op0, 0), op2)
5004 && rtx_equal_p (XEXP (op0, 1), op1))))
5005 return op2;
5006
5007 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5008 {
5009 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5010 ? GET_MODE (XEXP (op0, 1))
5011 : GET_MODE (XEXP (op0, 0)));
5012 rtx temp;
5013
5014 /* Look for happy constants in op1 and op2. */
5015 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5016 {
5017 HOST_WIDE_INT t = INTVAL (op1);
5018 HOST_WIDE_INT f = INTVAL (op2);
5019
5020 if (t == STORE_FLAG_VALUE && f == 0)
5021 code = GET_CODE (op0);
5022 else if (t == 0 && f == STORE_FLAG_VALUE)
5023 {
5024 enum rtx_code tmp;
5025 tmp = reversed_comparison_code (op0, NULL_RTX);
5026 if (tmp == UNKNOWN)
5027 break;
5028 code = tmp;
5029 }
5030 else
5031 break;
5032
5033 return simplify_gen_relational (code, mode, cmp_mode,
5034 XEXP (op0, 0), XEXP (op0, 1));
5035 }
5036
5037 if (cmp_mode == VOIDmode)
5038 cmp_mode = op0_mode;
5039 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5040 cmp_mode, XEXP (op0, 0),
5041 XEXP (op0, 1));
5042
5043 /* See if any simplifications were possible. */
5044 if (temp)
5045 {
5046 if (CONST_INT_P (temp))
5047 return temp == const0_rtx ? op2 : op1;
5048 else if (temp)
5049 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5050 }
5051 }
5052 break;
5053
5054 case VEC_MERGE:
5055 gcc_assert (GET_MODE (op0) == mode);
5056 gcc_assert (GET_MODE (op1) == mode);
5057 gcc_assert (VECTOR_MODE_P (mode));
5058 op2 = avoid_constant_pool_reference (op2);
5059 if (CONST_INT_P (op2))
5060 {
5061 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5062 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5063 int mask = (1 << n_elts) - 1;
5064
5065 if (!(INTVAL (op2) & mask))
5066 return op1;
5067 if ((INTVAL (op2) & mask) == mask)
5068 return op0;
5069
5070 op0 = avoid_constant_pool_reference (op0);
5071 op1 = avoid_constant_pool_reference (op1);
5072 if (GET_CODE (op0) == CONST_VECTOR
5073 && GET_CODE (op1) == CONST_VECTOR)
5074 {
5075 rtvec v = rtvec_alloc (n_elts);
5076 unsigned int i;
5077
5078 for (i = 0; i < n_elts; i++)
5079 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5080 ? CONST_VECTOR_ELT (op0, i)
5081 : CONST_VECTOR_ELT (op1, i));
5082 return gen_rtx_CONST_VECTOR (mode, v);
5083 }
5084 }
5085 break;
5086
5087 default:
5088 gcc_unreachable ();
5089 }
5090
5091 return 0;
5092 }
5093
5094 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5095 or CONST_VECTOR,
5096 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5097
5098 Works by unpacking OP into a collection of 8-bit values
5099 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5100 and then repacking them again for OUTERMODE. */
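 /* Illustrative sketch: taking the QImode subreg at byte 0 of
 (const_int 0x12345678) in SImode unpacks the constant into the
 little-endian byte array 78 56 34 12, renumbers BYTE for the target's
 endianness, and repacks the selected byte: (const_int 0x78) on a
 little-endian target, (const_int 0x12) on a big-endian one. */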
5101
5102 static rtx
5103 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5104 enum machine_mode innermode, unsigned int byte)
5105 {
5106 /* We support up to 512-bit values (for V8DFmode). */
5107 enum {
5108 max_bitsize = 512,
5109 value_bit = 8,
5110 value_mask = (1 << value_bit) - 1
5111 };
5112 unsigned char value[max_bitsize / value_bit];
5113 int value_start;
5114 int i;
5115 int elem;
5116
5117 int num_elem;
5118 rtx * elems;
5119 int elem_bitsize;
5120 rtx result_s;
5121 rtvec result_v = NULL;
5122 enum mode_class outer_class;
5123 enum machine_mode outer_submode;
5124
5125 /* Some ports misuse CCmode. */
5126 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5127 return op;
5128
5129 /* We have no way to represent a complex constant at the rtl level. */
5130 if (COMPLEX_MODE_P (outermode))
5131 return NULL_RTX;
5132
5133 /* Unpack the value. */
5134
5135 if (GET_CODE (op) == CONST_VECTOR)
5136 {
5137 num_elem = CONST_VECTOR_NUNITS (op);
5138 elems = &CONST_VECTOR_ELT (op, 0);
5139 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5140 }
5141 else
5142 {
5143 num_elem = 1;
5144 elems = &op;
5145 elem_bitsize = max_bitsize;
5146 }
5147 /* If this asserts, it is too complicated; reducing value_bit may help. */
5148 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5149 /* I don't know how to handle endianness of sub-units. */
5150 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5151
5152 for (elem = 0; elem < num_elem; elem++)
5153 {
5154 unsigned char * vp;
5155 rtx el = elems[elem];
5156
5157 /* Vectors are kept in target memory order. (This is probably
5158 a mistake.) */
5159 {
5160 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5161 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5162 / BITS_PER_UNIT);
5163 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5164 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5165 unsigned bytele = (subword_byte % UNITS_PER_WORD
5166 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5167 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5168 }
5169
5170 switch (GET_CODE (el))
5171 {
5172 case CONST_INT:
5173 for (i = 0;
5174 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5175 i += value_bit)
5176 *vp++ = INTVAL (el) >> i;
5177 /* CONST_INTs are always logically sign-extended. */
5178 for (; i < elem_bitsize; i += value_bit)
5179 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5180 break;
5181
5182 case CONST_DOUBLE:
5183 if (GET_MODE (el) == VOIDmode)
5184 {
5185 /* If this triggers, someone should have generated a
5186 CONST_INT instead. */
5187 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5188
5189 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5190 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5191 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
5192 {
5193 *vp++
5194 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5195 i += value_bit;
5196 }
5197 /* It shouldn't matter what's done here, so fill it with
5198 zero. */
5199 for (; i < elem_bitsize; i += value_bit)
5200 *vp++ = 0;
5201 }
5202 else
5203 {
5204 long tmp[max_bitsize / 32];
5205 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5206
5207 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5208 gcc_assert (bitsize <= elem_bitsize);
5209 gcc_assert (bitsize % value_bit == 0);
5210
5211 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5212 GET_MODE (el));
5213
5214 /* real_to_target produces its result in words affected by
5215 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5216 and use WORDS_BIG_ENDIAN instead; see the documentation
5217 of SUBREG in rtl.texi. */
5218 for (i = 0; i < bitsize; i += value_bit)
5219 {
5220 int ibase;
5221 if (WORDS_BIG_ENDIAN)
5222 ibase = bitsize - 1 - i;
5223 else
5224 ibase = i;
5225 *vp++ = tmp[ibase / 32] >> i % 32;
5226 }
5227
5228 /* It shouldn't matter what's done here, so fill it with
5229 zero. */
5230 for (; i < elem_bitsize; i += value_bit)
5231 *vp++ = 0;
5232 }
5233 break;
5234
5235 case CONST_FIXED:
5236 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5237 {
5238 for (i = 0; i < elem_bitsize; i += value_bit)
5239 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5240 }
5241 else
5242 {
5243 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5244 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5245 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5246 i += value_bit)
5247 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5248 >> (i - HOST_BITS_PER_WIDE_INT);
5249 for (; i < elem_bitsize; i += value_bit)
5250 *vp++ = 0;
5251 }
5252 break;
5253
5254 default:
5255 gcc_unreachable ();
5256 }
5257 }
5258
5259 /* Now, pick the right byte to start with. */
5260 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5261 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5262 will already have offset 0. */
5263 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5264 {
5265 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5266 - byte);
5267 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5268 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5269 byte = (subword_byte % UNITS_PER_WORD
5270 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5271 }
5272
5273 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5274 so if it's become negative it will instead be very large.) */
5275 gcc_assert (byte < GET_MODE_SIZE (innermode));
5276
5277 /* Convert from bytes to chunks of size value_bit. */
5278 value_start = byte * (BITS_PER_UNIT / value_bit);
5279
5280 /* Re-pack the value. */
5281
5282 if (VECTOR_MODE_P (outermode))
5283 {
5284 num_elem = GET_MODE_NUNITS (outermode);
5285 result_v = rtvec_alloc (num_elem);
5286 elems = &RTVEC_ELT (result_v, 0);
5287 outer_submode = GET_MODE_INNER (outermode);
5288 }
5289 else
5290 {
5291 num_elem = 1;
5292 elems = &result_s;
5293 outer_submode = outermode;
5294 }
5295
5296 outer_class = GET_MODE_CLASS (outer_submode);
5297 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5298
5299 gcc_assert (elem_bitsize % value_bit == 0);
5300 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5301
5302 for (elem = 0; elem < num_elem; elem++)
5303 {
5304 unsigned char *vp;
5305
5306 /* Vectors are stored in target memory order. (This is probably
5307 a mistake.) */
5308 {
5309 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5310 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5311 / BITS_PER_UNIT);
5312 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5313 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5314 unsigned bytele = (subword_byte % UNITS_PER_WORD
5315 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5316 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5317 }
5318
5319 switch (outer_class)
5320 {
5321 case MODE_INT:
5322 case MODE_PARTIAL_INT:
5323 {
5324 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5325
5326 for (i = 0;
5327 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5328 i += value_bit)
5329 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5330 for (; i < elem_bitsize; i += value_bit)
5331 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5332 << (i - HOST_BITS_PER_WIDE_INT);
5333
5334 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5335 know why. */
5336 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5337 elems[elem] = gen_int_mode (lo, outer_submode);
5338 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5339 elems[elem] = immed_double_const (lo, hi, outer_submode);
5340 else
5341 return NULL_RTX;
5342 }
5343 break;
5344
5345 case MODE_FLOAT:
5346 case MODE_DECIMAL_FLOAT:
5347 {
5348 REAL_VALUE_TYPE r;
5349 long tmp[max_bitsize / 32];
5350
5351 /* real_from_target wants its input in words affected by
5352 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5353 and use WORDS_BIG_ENDIAN instead; see the documentation
5354 of SUBREG in rtl.texi. */
5355 for (i = 0; i < max_bitsize / 32; i++)
5356 tmp[i] = 0;
5357 for (i = 0; i < elem_bitsize; i += value_bit)
5358 {
5359 int ibase;
5360 if (WORDS_BIG_ENDIAN)
5361 ibase = elem_bitsize - 1 - i;
5362 else
5363 ibase = i;
5364 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5365 }
5366
5367 real_from_target (&r, tmp, outer_submode);
5368 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5369 }
5370 break;
5371
5372 case MODE_FRACT:
5373 case MODE_UFRACT:
5374 case MODE_ACCUM:
5375 case MODE_UACCUM:
5376 {
5377 FIXED_VALUE_TYPE f;
5378 f.data.low = 0;
5379 f.data.high = 0;
5380 f.mode = outer_submode;
5381
5382 for (i = 0;
5383 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5384 i += value_bit)
5385 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5386 for (; i < elem_bitsize; i += value_bit)
5387 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5388 << (i - HOST_BITS_PER_WIDE_INT));
5389
5390 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5391 }
5392 break;
5393
5394 default:
5395 gcc_unreachable ();
5396 }
5397 }
5398 if (VECTOR_MODE_P (outermode))
5399 return gen_rtx_CONST_VECTOR (outermode, result_v);
5400 else
5401 return result_s;
5402 }
5403
5404 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5405 Return 0 if no simplifications are possible. */
5406 rtx
5407 simplify_subreg (enum machine_mode outermode, rtx op,
5408 enum machine_mode innermode, unsigned int byte)
5409 {
5410 /* Little bit of sanity checking. */
5411 gcc_assert (innermode != VOIDmode);
5412 gcc_assert (outermode != VOIDmode);
5413 gcc_assert (innermode != BLKmode);
5414 gcc_assert (outermode != BLKmode);
5415
5416 gcc_assert (GET_MODE (op) == innermode
5417 || GET_MODE (op) == VOIDmode);
5418
5419 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5420 gcc_assert (byte < GET_MODE_SIZE (innermode));
5421
5422 if (outermode == innermode && !byte)
5423 return op;
5424
5425 if (CONST_INT_P (op)
5426 || GET_CODE (op) == CONST_DOUBLE
5427 || GET_CODE (op) == CONST_FIXED
5428 || GET_CODE (op) == CONST_VECTOR)
5429 return simplify_immed_subreg (outermode, op, innermode, byte);
5430
5431 /* Changing mode twice with SUBREG => just change it once,
 5432 or not at all if changing back to op's starting mode. */
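 /* For instance (illustrative, little-endian): (subreg:QI (subreg:HI
 (reg:SI R) 0) 0) is collapsed below to (subreg:QI (reg:SI R) 0). */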
5433 if (GET_CODE (op) == SUBREG)
5434 {
5435 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5436 int final_offset = byte + SUBREG_BYTE (op);
5437 rtx newx;
5438
5439 if (outermode == innermostmode
5440 && byte == 0 && SUBREG_BYTE (op) == 0)
5441 return SUBREG_REG (op);
5442
 5443 /* The SUBREG_BYTE represents the offset, as if the value were stored
 5444 in memory. An irritating exception is the paradoxical subreg, where
 5445 we define SUBREG_BYTE to be 0. On big-endian machines, this
5446 value should be negative. For a moment, undo this exception. */
5447 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5448 {
5449 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5450 if (WORDS_BIG_ENDIAN)
5451 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5452 if (BYTES_BIG_ENDIAN)
5453 final_offset += difference % UNITS_PER_WORD;
5454 }
5455 if (SUBREG_BYTE (op) == 0
5456 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5457 {
5458 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5459 if (WORDS_BIG_ENDIAN)
5460 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5461 if (BYTES_BIG_ENDIAN)
5462 final_offset += difference % UNITS_PER_WORD;
5463 }
5464
5465 /* See whether resulting subreg will be paradoxical. */
5466 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5467 {
5468 /* In nonparadoxical subregs we can't handle negative offsets. */
5469 if (final_offset < 0)
5470 return NULL_RTX;
5471 /* Bail out in case resulting subreg would be incorrect. */
5472 if (final_offset % GET_MODE_SIZE (outermode)
5473 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5474 return NULL_RTX;
5475 }
5476 else
5477 {
5478 int offset = 0;
5479 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5480
 5481 /* In a paradoxical subreg, see if we are still looking at the lower part.
5482 If so, our SUBREG_BYTE will be 0. */
5483 if (WORDS_BIG_ENDIAN)
5484 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5485 if (BYTES_BIG_ENDIAN)
5486 offset += difference % UNITS_PER_WORD;
5487 if (offset == final_offset)
5488 final_offset = 0;
5489 else
5490 return NULL_RTX;
5491 }
5492
5493 /* Recurse for further possible simplifications. */
5494 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5495 final_offset);
5496 if (newx)
5497 return newx;
5498 if (validate_subreg (outermode, innermostmode,
5499 SUBREG_REG (op), final_offset))
5500 {
5501 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5502 if (SUBREG_PROMOTED_VAR_P (op)
5503 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5504 && GET_MODE_CLASS (outermode) == MODE_INT
5505 && IN_RANGE (GET_MODE_SIZE (outermode),
5506 GET_MODE_SIZE (innermode),
5507 GET_MODE_SIZE (innermostmode))
5508 && subreg_lowpart_p (newx))
5509 {
5510 SUBREG_PROMOTED_VAR_P (newx) = 1;
5511 SUBREG_PROMOTED_UNSIGNED_SET
5512 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5513 }
5514 return newx;
5515 }
5516 return NULL_RTX;
5517 }
5518
5519 /* Merge implicit and explicit truncations. */
5520
5521 if (GET_CODE (op) == TRUNCATE
5522 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5523 && subreg_lowpart_offset (outermode, innermode) == byte)
5524 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5525 GET_MODE (XEXP (op, 0)));
5526
5527 /* SUBREG of a hard register => just change the register number
5528 and/or mode. If the hard register is not valid in that mode,
5529 suppress this simplification. If the hard register is the stack,
5530 frame, or argument pointer, leave this as a SUBREG. */
5531
5532 if (REG_P (op) && HARD_REGISTER_P (op))
5533 {
5534 unsigned int regno, final_regno;
5535
5536 regno = REGNO (op);
5537 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5538 if (HARD_REGISTER_NUM_P (final_regno))
5539 {
5540 rtx x;
5541 int final_offset = byte;
5542
5543 /* Adjust offset for paradoxical subregs. */
5544 if (byte == 0
5545 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5546 {
5547 int difference = (GET_MODE_SIZE (innermode)
5548 - GET_MODE_SIZE (outermode));
5549 if (WORDS_BIG_ENDIAN)
5550 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5551 if (BYTES_BIG_ENDIAN)
5552 final_offset += difference % UNITS_PER_WORD;
5553 }
5554
5555 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5556
5557 /* Propagate original regno. We don't have any way to specify
5558 the offset inside original regno, so do so only for lowpart.
 5559 The information is used only by alias analysis, which cannot
 5560 grok partial registers anyway. */
5561
5562 if (subreg_lowpart_offset (outermode, innermode) == byte)
5563 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5564 return x;
5565 }
5566 }
5567
5568 /* If we have a SUBREG of a register that we are replacing and we are
5569 replacing it with a MEM, make a new MEM and try replacing the
5570 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5571 or if we would be widening it. */
5572
5573 if (MEM_P (op)
5574 && ! mode_dependent_address_p (XEXP (op, 0))
5575 /* Allow splitting of volatile memory references in case we don't
 5576 have an instruction to move the whole thing. */
5577 && (! MEM_VOLATILE_P (op)
5578 || ! have_insn_for (SET, innermode))
5579 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5580 return adjust_address_nv (op, outermode, byte);
5581
5582 /* Handle complex values represented as CONCAT
5583 of real and imaginary part. */
5584 if (GET_CODE (op) == CONCAT)
5585 {
5586 unsigned int part_size, final_offset;
5587 rtx part, res;
5588
5589 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5590 if (byte < part_size)
5591 {
5592 part = XEXP (op, 0);
5593 final_offset = byte;
5594 }
5595 else
5596 {
5597 part = XEXP (op, 1);
5598 final_offset = byte - part_size;
5599 }
5600
5601 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5602 return NULL_RTX;
5603
5604 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5605 if (res)
5606 return res;
5607 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5608 return gen_rtx_SUBREG (outermode, part, final_offset);
5609 return NULL_RTX;
5610 }
5611
5612 /* Optimize SUBREG truncations of zero and sign extended values. */
5613 if ((GET_CODE (op) == ZERO_EXTEND
5614 || GET_CODE (op) == SIGN_EXTEND)
5615 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5616 {
5617 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5618
5619 /* If we're requesting the lowpart of a zero or sign extension,
5620 there are three possibilities. If the outermode is the same
5621 as the origmode, we can omit both the extension and the subreg.
5622 If the outermode is not larger than the origmode, we can apply
5623 the truncation without the extension. Finally, if the outermode
5624 is larger than the origmode, but both are integer modes, we
5625 can just extend to the appropriate mode. */
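 /* As an illustrative sketch, for the lowpart SUBREG of
 (zero_extend:SI (reg:HI R)): requesting HImode returns (reg:HI R)
 itself, requesting QImode becomes the lowpart subreg of (reg:HI R),
 and the lowpart SImode subreg of (zero_extend:DI (reg:HI R)) becomes
 (zero_extend:SI (reg:HI R)). */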
5626 if (bitpos == 0)
5627 {
5628 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5629 if (outermode == origmode)
5630 return XEXP (op, 0);
5631 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5632 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5633 subreg_lowpart_offset (outermode,
5634 origmode));
5635 if (SCALAR_INT_MODE_P (outermode))
5636 return simplify_gen_unary (GET_CODE (op), outermode,
5637 XEXP (op, 0), origmode);
5638 }
5639
5640 /* A SUBREG resulting from a zero extension may fold to zero if
5641 it extracts higher bits than the ZERO_EXTEND's source provides. */
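/* Editorial example (little-endian assumed): for
   (subreg:SI (zero_extend:DI (reg:SI x)) 4), bitpos is 32, which is not
   below the 32 bits supplied by the ZERO_EXTEND's source, so the subreg
   reads only zero bits and folds to (const_int 0). */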
5642 if (GET_CODE (op) == ZERO_EXTEND
5643 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5644 return CONST0_RTX (outermode);
5645 }
5646
5647 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5648 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5649 the outer subreg is effectively a truncation to the original mode. */
5650 if ((GET_CODE (op) == LSHIFTRT
5651 || GET_CODE (op) == ASHIFTRT)
5652 && SCALAR_INT_MODE_P (outermode)
5653 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5654 to avoid the possibility that an outer LSHIFTRT shifts by more
5655 than the sign extension's sign_bit_copies and introduces zeros
5656 into the high bits of the result. */
5657 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5658 && CONST_INT_P (XEXP (op, 1))
5659 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5660 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5661 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5662 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5663 return simplify_gen_binary (ASHIFTRT, outermode,
5664 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5665
5666 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5667 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5668 the outer subreg is effectively a truncation to the original mode. */
5669 if ((GET_CODE (op) == LSHIFTRT
5670 || GET_CODE (op) == ASHIFTRT)
5671 && SCALAR_INT_MODE_P (outermode)
5672 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5673 && CONST_INT_P (XEXP (op, 1))
5674 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5675 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5676 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5677 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5678 return simplify_gen_binary (LSHIFTRT, outermode,
5679 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5680
5681 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5682 (ashift:QI (x:QI) C), where C is a suitable small constant and
5683 the outer subreg is effectively a truncation to the original mode. */
5684 if (GET_CODE (op) == ASHIFT
5685 && SCALAR_INT_MODE_P (outermode)
5686 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5687 && CONST_INT_P (XEXP (op, 1))
5688 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5689 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5690 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5691 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5692 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5693 return simplify_gen_binary (ASHIFT, outermode,
5694 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5695
5696 /* Recognize a word extraction from a multi-word subreg. */
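/* Editorial example (assumes BITS_PER_WORD == 32 and little-endian order):
   (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) selects the high
   word and is rewritten as (subreg:SI (reg:DI x) 4). */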
5697 if ((GET_CODE (op) == LSHIFTRT
5698 || GET_CODE (op) == ASHIFTRT)
5699 && SCALAR_INT_MODE_P (outermode)
5700 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5701 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5702 && CONST_INT_P (XEXP (op, 1))
5703 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5704 && INTVAL (XEXP (op, 1)) >= 0
5705 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5706 && byte == subreg_lowpart_offset (outermode, innermode))
5707 {
5708 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5709 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5710 (WORDS_BIG_ENDIAN
5711 ? byte - shifted_bytes
5712 : byte + shifted_bytes));
5713 }
5714
5715 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5716 and try replacing the SUBREG and shift with it. Don't do this if
5717 the MEM has a mode-dependent address or if we would be widening it. */
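/* Editorial example (same word-size and endianness assumptions as above;
   the MEM must not be volatile and its address not mode-dependent):
   (subreg:SI (lshiftrt:DI (mem:DI A) (const_int 32)) 0) becomes
   (mem:SI (plus A (const_int 4))), reading the high word directly. */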
5718
5719 if ((GET_CODE (op) == LSHIFTRT
5720 || GET_CODE (op) == ASHIFTRT)
5721 && MEM_P (XEXP (op, 0))
5722 && CONST_INT_P (XEXP (op, 1))
5723 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5724 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5725 && INTVAL (XEXP (op, 1)) > 0
5726 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5727 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5728 && ! MEM_VOLATILE_P (XEXP (op, 0))
5729 && byte == subreg_lowpart_offset (outermode, innermode)
5730 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5731 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5732 {
5733 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5734 return adjust_address_nv (XEXP (op, 0), outermode,
5735 (WORDS_BIG_ENDIAN
5736 ? byte - shifted_bytes
5737 : byte + shifted_bytes));
5738 }
5739
5740 return NULL_RTX;
5741 }
5742
5743 /* Make a SUBREG operation or equivalent if it folds. */
5744
5745 rtx
5746 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5747 enum machine_mode innermode, unsigned int byte)
5748 {
5749 rtx newx;
5750
5751 newx = simplify_subreg (outermode, op, innermode, byte);
5752 if (newx)
5753 return newx;
5754
5755 if (GET_CODE (op) == SUBREG
5756 || GET_CODE (op) == CONCAT
5757 || GET_MODE (op) == VOIDmode)
5758 return NULL_RTX;
5759
5760 if (validate_subreg (outermode, innermode, op, byte))
5761 return gen_rtx_SUBREG (outermode, op, byte);
5762
5763 return NULL_RTX;
5764 }
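/* Editorial note: e.g. simplify_gen_subreg (SImode, x, DImode, 0) returns the
   folded lowpart when simplify_subreg succeeds, wraps x as (subreg:SI x 0)
   when validate_subreg accepts that, and returns NULL_RTX for SUBREG, CONCAT
   or VOIDmode operands that could not be folded. */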
5765
5766 /* Simplify X, an rtx expression.
5767
5768 Return the simplified expression or NULL if no simplifications
5769 were possible.
5770
5771 This is the preferred entry point into the simplification routines;
5772 however, we still allow passes to call the more specific routines.
5773
5774 Right now GCC has three (yes, three) major bodies of RTL simplification
5775 code that need to be unified.
5776
5777 1. fold_rtx in cse.c. This code uses various CSE specific
5778 information to aid in RTL simplification.
5779
5780 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5781 it uses combine specific information to aid in RTL
5782 simplification.
5783
5784 3. The routines in this file.
5785
5786
5787 Long term we want to only have one body of simplification code; to
5788 get to that state I recommend the following steps:
5789
5790 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5791 which do not depend on pass-specific state into these routines.
5792
5793 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5794 use this routine whenever possible.
5795
5796 3. Allow for pass dependent state to be provided to these
5797 routines and add simplifications based on the pass dependent
5798 state. Remove code from cse.c & combine.c that becomes
5799 redundant/dead.
5800
5801 It will take time, but ultimately the compiler will be easier to
5802 maintain and improve. It's totally silly that when we add a
5803 simplification it needs to be added to 4 places (3 for RTL
5804 simplification and 1 for tree simplification). */
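/* Editorial example: simplify_rtx dispatches on the rtx class, so for
   (plus:SI (const_int 2) (reg:SI 1)) the RTX_COMM_ARITH case sees the
   operands in non-canonical order and rebuilds the expression as
   (plus:SI (reg:SI 1) (const_int 2)). */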
5805
5806 rtx
5807 simplify_rtx (const_rtx x)
5808 {
5809 const enum rtx_code code = GET_CODE (x);
5810 const enum machine_mode mode = GET_MODE (x);
5811
5812 switch (GET_RTX_CLASS (code))
5813 {
5814 case RTX_UNARY:
5815 return simplify_unary_operation (code, mode,
5816 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5817 case RTX_COMM_ARITH:
5818 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5819 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5820
5821 /* Fall through.... */
5822
5823 case RTX_BIN_ARITH:
5824 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5825
5826 case RTX_TERNARY:
5827 case RTX_BITFIELD_OPS:
5828 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5829 XEXP (x, 0), XEXP (x, 1),
5830 XEXP (x, 2));
5831
5832 case RTX_COMPARE:
5833 case RTX_COMM_COMPARE:
5834 return simplify_relational_operation (code, mode,
5835 ((GET_MODE (XEXP (x, 0))
5836 != VOIDmode)
5837 ? GET_MODE (XEXP (x, 0))
5838 : GET_MODE (XEXP (x, 1))),
5839 XEXP (x, 0),
5840 XEXP (x, 1));
5841
5842 case RTX_EXTRA:
5843 if (code == SUBREG)
5844 return simplify_subreg (mode, SUBREG_REG (x),
5845 GET_MODE (SUBREG_REG (x)),
5846 SUBREG_BYTE (x));
5847 break;
5848
5849 case RTX_OBJ:
5850 if (code == LO_SUM)
5851 {
5852 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5853 if (GET_CODE (XEXP (x, 0)) == HIGH
5854 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5855 return XEXP (x, 1);
5856 }
5857 break;
5858
5859 default:
5860 break;
5861 }
5862 return NULL;
5863 }