gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
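   /* For example, a LOW word whose host sign bit is set extends to an
      all-ones HIGH word, while a nonnegative LOW word extends to zero. */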
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
49
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
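   /* For example, in QImode the negation of (const_int -128) wraps back
      to (const_int -128), since 128 is not representable in that mode. */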
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
68 {
69 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
70 }
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
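   /* For example, in SImode this accepts (const_int -2147483648), whose
      only bit within the mode mask is the sign bit 0x80000000. */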
74
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
94 {
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
97 }
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
101
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
106
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
110
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114 unsigned int width;
115
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
118
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
122
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
126
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
136
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
140
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
143 }
144
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150 unsigned int width;
151
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
154
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
158
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
161 }
162 \f
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
165
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
169 {
170 rtx tem;
171
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
176
177 /* Put complex operands first and constants second if commutative. */
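      /* For example, (plus (const_int 4) (reg:SI 1)) is returned as
         (plus (reg:SI 1) (const_int 4)). */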
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
181
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
184 \f
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
193
194 switch (GET_CODE (x))
195 {
196 case MEM:
197 break;
198
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
204 {
205 REAL_VALUE_TYPE d;
206
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 }
210 return x;
211
212 default:
213 return x;
214 }
215
216 if (GET_MODE (x) == BLKmode)
217 return x;
218
219 addr = XEXP (x, 0);
220
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
223
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228 {
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
231 }
232
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
235
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
240 {
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
243
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
248 {
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
252 }
253 else
254 return c;
255 }
256
257 return x;
258 }
259 \f
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
263
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
266 {
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
272 {
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
276
277 switch (TREE_CODE (decl))
278 {
279 default:
280 decl = NULL;
281 break;
282
283 case VAR_DECL:
284 break;
285
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
293 {
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
297
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
305 {
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
309 }
310 break;
311 }
312 }
313
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
321 {
322 rtx newx;
323
324 offset += MEM_OFFSET (x);
325
326 newx = DECL_RTL (decl);
327
328 if (MEM_P (newx))
329 {
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
350 }
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
354 }
355 }
356
357 return x;
358 }
359 \f
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
362
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
366 {
367 rtx tem;
368
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
372
373 return gen_rtx_fmt_e (code, mode, op);
374 }
375
376 /* Likewise for ternary operations. */
377
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 {
382 rtx tem;
383
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
388
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 }
391
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
394
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 {
399 rtx tem;
400
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
404
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
406 }
407 \f
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
 410 X; if it returns non-NULL, replace X with its return value and simplify the
411 result. */
412
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 {
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
424
425 if (__builtin_expect (fn != NULL, 0))
426 {
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
430 }
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
433
434 switch (GET_RTX_CLASS (code))
435 {
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
443
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
451
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475
476 case RTX_EXTRA:
477 if (code == SUBREG)
478 {
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
486 }
487 break;
488
489 case RTX_OBJ:
490 if (code == MEM)
491 {
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
496 }
497 else if (code == LO_SUM)
498 {
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
505
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
509 }
510 break;
511
512 default:
513 break;
514 }
515
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
520 {
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 {
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
529 {
530 if (newvec == vec)
531 {
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
536 }
537 RTVEC_ELT (newvec, j) = op;
538 }
539 }
540 break;
541
542 case 'e':
543 if (XEXP (x, i))
544 {
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
547 {
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
551 }
552 }
553 break;
554 }
555 return newx;
556 }
557
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
560
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 {
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
565 }
566 \f
567 /* Try to simplify a unary operation CODE whose output mode is to be
568 MODE with input operand OP whose mode was originally OP_MODE.
569 Return zero if no simplification can be made. */
570 rtx
571 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
572 rtx op, enum machine_mode op_mode)
573 {
574 rtx trueop, tem;
575
576 trueop = avoid_constant_pool_reference (op);
577
578 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
579 if (tem)
580 return tem;
581
582 return simplify_unary_operation_1 (code, mode, op);
583 }
584
585 /* Perform some simplifications we can do even if the operands
586 aren't constant. */
587 static rtx
588 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
589 {
590 enum rtx_code reversed;
591 rtx temp;
592
593 switch (code)
594 {
595 case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op) == NOT)
598 return XEXP (op, 0);
599
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op)
603 && (mode == BImode || STORE_FLAG_VALUE == -1)
604 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
605 return simplify_gen_relational (reversed, mode, VOIDmode,
606 XEXP (op, 0), XEXP (op, 1));
607
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op) == PLUS
610 && XEXP (op, 1) == constm1_rtx)
611 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
612
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op) == NEG)
615 return plus_constant (mode, XEXP (op, 0), -1);
616
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op) == XOR
619 && CONST_INT_P (XEXP (op, 1))
620 && (temp = simplify_unary_operation (NOT, mode,
621 XEXP (op, 1), mode)) != 0)
622 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
623
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op) == PLUS
626 && CONST_INT_P (XEXP (op, 1))
627 && mode_signbit_p (mode, XEXP (op, 1))
628 && (temp = simplify_unary_operation (NOT, mode,
629 XEXP (op, 1), mode)) != 0)
630 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
631
632
633 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
634 operands other than 1, but that is not valid. We could do a
635 similar simplification for (not (lshiftrt C X)) where C is
636 just the sign bit, but this doesn't seem common enough to
637 bother with. */
638 if (GET_CODE (op) == ASHIFT
639 && XEXP (op, 0) == const1_rtx)
640 {
641 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
642 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
643 }
644
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
648
649 if (STORE_FLAG_VALUE == -1
650 && GET_CODE (op) == ASHIFTRT
 651 && CONST_INT_P (XEXP (op, 1))
652 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
653 return simplify_gen_relational (GE, mode, VOIDmode,
654 XEXP (op, 0), const0_rtx);
655
656
657 if (GET_CODE (op) == SUBREG
658 && subreg_lowpart_p (op)
659 && (GET_MODE_SIZE (GET_MODE (op))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
661 && GET_CODE (SUBREG_REG (op)) == ASHIFT
662 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
663 {
664 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
665 rtx x;
666
667 x = gen_rtx_ROTATE (inner_mode,
668 simplify_gen_unary (NOT, inner_mode, const1_rtx,
669 inner_mode),
670 XEXP (SUBREG_REG (op), 1));
671 return rtl_hooks.gen_lowpart_no_emit (mode, x);
672 }
673
674 /* Apply De Morgan's laws to reduce number of patterns for machines
675 with negating logical insns (and-not, nand, etc.). If result has
676 only one NOT, put it first, since that is how the patterns are
677 coded. */
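      /* For example, (not (and X Y)) becomes (ior (not X) (not Y)), and
         (not (ior X Y)) becomes (and (not X) (not Y)). */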
678
679 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
680 {
681 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
682 enum machine_mode op_mode;
683
684 op_mode = GET_MODE (in1);
685 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
686
687 op_mode = GET_MODE (in2);
688 if (op_mode == VOIDmode)
689 op_mode = mode;
690 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
691
692 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
693 {
694 rtx tem = in2;
695 in2 = in1; in1 = tem;
696 }
697
698 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
699 mode, in1, in2);
700 }
701 break;
702
703 case NEG:
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op) == NEG)
706 return XEXP (op, 0);
707
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op) == PLUS
710 && XEXP (op, 1) == const1_rtx)
711 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
712
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op) == NOT)
715 return plus_constant (mode, XEXP (op, 0), 1);
716
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
725 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
726
727 if (GET_CODE (op) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
730 {
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op, 1))
733 || CONST_DOUBLE_P (XEXP (op, 1)))
734 {
735 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
736 if (temp)
737 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
738 }
739
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
742 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
743 }
744
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
749 {
750 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
751 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
752 }
753
754 /* NEG commutes with ASHIFT since it is multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
756 is a constant). */
757 if (GET_CODE (op) == ASHIFT)
758 {
759 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
760 if (temp)
761 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
762 }
763
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op) == ASHIFTRT
767 && CONST_INT_P (XEXP (op, 1))
768 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
769 return simplify_gen_binary (LSHIFTRT, mode,
770 XEXP (op, 0), XEXP (op, 1));
771
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op) == LSHIFTRT
775 && CONST_INT_P (XEXP (op, 1))
776 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
777 return simplify_gen_binary (ASHIFTRT, mode,
778 XEXP (op, 0), XEXP (op, 1));
779
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op) == XOR
782 && XEXP (op, 1) == const1_rtx
783 && nonzero_bits (XEXP (op, 0), mode) == 1)
784 return plus_constant (mode, XEXP (op, 0), -1);
785
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op) == LT
789 && XEXP (op, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
791 {
792 enum machine_mode inner = GET_MODE (XEXP (op, 0));
793 int isize = GET_MODE_PRECISION (inner);
794 if (STORE_FLAG_VALUE == 1)
795 {
796 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
797 GEN_INT (isize - 1));
798 if (mode == inner)
799 return temp;
800 if (GET_MODE_PRECISION (mode) > isize)
801 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
802 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
803 }
804 else if (STORE_FLAG_VALUE == -1)
805 {
806 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
807 GEN_INT (isize - 1));
808 if (mode == inner)
809 return temp;
810 if (GET_MODE_PRECISION (mode) > isize)
811 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
812 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
813 }
814 }
815 break;
816
817 case TRUNCATE:
818 /* We can't handle truncation to a partial integer mode here
819 because we don't know the real bitsize of the partial
820 integer mode. */
821 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
822 break;
823
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op) == SIGN_EXTEND
826 || GET_CODE (op) == ZERO_EXTEND)
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
829
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op) == ABS
833 || GET_CODE (op) == NEG)
834 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
836 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
837 return simplify_gen_unary (GET_CODE (op), mode,
838 XEXP (XEXP (op, 0), 0), mode);
839
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
841 (truncate:A X). */
842 if (GET_CODE (op) == SUBREG
843 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
844 && subreg_lowpart_p (op))
845 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
846 GET_MODE (XEXP (SUBREG_REG (op), 0)));
847
848 /* If we know that the value is already truncated, we can
849 replace the TRUNCATE with a SUBREG. Note that this is also
850 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
 851 modes; we just have to apply a different definition for
852 truncation. But don't do this for an (LSHIFTRT (MULT ...))
853 since this will cause problems with the umulXi3_highpart
854 patterns. */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
856 ? (num_sign_bit_copies (op, GET_MODE (op))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
858 - GET_MODE_PRECISION (mode)))
859 : truncated_to_mode (mode, op))
860 && ! (GET_CODE (op) == LSHIFTRT
861 && GET_CODE (XEXP (op, 0)) == MULT))
862 return rtl_hooks.gen_lowpart_no_emit (mode, op);
863
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode)
869 && COMPARISON_P (op)
870 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
871 return rtl_hooks.gen_lowpart_no_emit (mode, op);
872
873 /* A truncate of a memory is just loading the low part of the memory
874 if we are not changing the meaning of the address. */
875 if (GET_CODE (op) == MEM
876 && !MEM_VOLATILE_P (op)
877 && !mode_dependent_address_p (XEXP (op, 0)))
878 return rtl_hooks.gen_lowpart_no_emit (mode, op);
879
880 break;
881
882 case FLOAT_TRUNCATE:
883 if (DECIMAL_FLOAT_MODE_P (mode))
884 break;
885
886 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
887 if (GET_CODE (op) == FLOAT_EXTEND
888 && GET_MODE (XEXP (op, 0)) == mode)
889 return XEXP (op, 0);
890
891 /* (float_truncate:SF (float_truncate:DF foo:XF))
892 = (float_truncate:SF foo:XF).
893 This may eliminate double rounding, so it is unsafe.
894
895 (float_truncate:SF (float_extend:XF foo:DF))
896 = (float_truncate:SF foo:DF).
897
898 (float_truncate:DF (float_extend:XF foo:SF))
 899 = (float_extend:DF foo:SF). */
900 if ((GET_CODE (op) == FLOAT_TRUNCATE
901 && flag_unsafe_math_optimizations)
902 || GET_CODE (op) == FLOAT_EXTEND)
903 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
904 0)))
905 > GET_MODE_SIZE (mode)
906 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
907 mode,
908 XEXP (op, 0), mode);
909
910 /* (float_truncate (float x)) is (float x) */
911 if (GET_CODE (op) == FLOAT
912 && (flag_unsafe_math_optimizations
913 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
914 && ((unsigned)significand_size (GET_MODE (op))
915 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
916 - num_sign_bit_copies (XEXP (op, 0),
917 GET_MODE (XEXP (op, 0))))))))
918 return simplify_gen_unary (FLOAT, mode,
919 XEXP (op, 0),
920 GET_MODE (XEXP (op, 0)));
921
922 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
923 (OP:SF foo:SF) if OP is NEG or ABS. */
924 if ((GET_CODE (op) == ABS
925 || GET_CODE (op) == NEG)
926 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
927 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
928 return simplify_gen_unary (GET_CODE (op), mode,
929 XEXP (XEXP (op, 0), 0), mode);
930
931 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
932 is (float_truncate:SF x). */
933 if (GET_CODE (op) == SUBREG
934 && subreg_lowpart_p (op)
935 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
936 return SUBREG_REG (op);
937 break;
938
939 case FLOAT_EXTEND:
940 if (DECIMAL_FLOAT_MODE_P (mode))
941 break;
942
943 /* (float_extend (float_extend x)) is (float_extend x)
944
945 (float_extend (float x)) is (float x) assuming that double
946 rounding can't happen.
947 */
948 if (GET_CODE (op) == FLOAT_EXTEND
949 || (GET_CODE (op) == FLOAT
950 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
951 && ((unsigned)significand_size (GET_MODE (op))
952 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
953 - num_sign_bit_copies (XEXP (op, 0),
954 GET_MODE (XEXP (op, 0)))))))
955 return simplify_gen_unary (GET_CODE (op), mode,
956 XEXP (op, 0),
957 GET_MODE (XEXP (op, 0)));
958
959 break;
960
961 case ABS:
962 /* (abs (neg <foo>)) -> (abs <foo>) */
963 if (GET_CODE (op) == NEG)
964 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
965 GET_MODE (XEXP (op, 0)));
966
967 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
968 do nothing. */
969 if (GET_MODE (op) == VOIDmode)
970 break;
971
972 /* If operand is something known to be positive, ignore the ABS. */
973 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
974 || val_signbit_known_clear_p (GET_MODE (op),
975 nonzero_bits (op, GET_MODE (op))))
976 return op;
977
978 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
979 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
980 return gen_rtx_NEG (mode, op);
981
982 break;
983
984 case FFS:
985 /* (ffs (*_extend <X>)) = (ffs <X>) */
986 if (GET_CODE (op) == SIGN_EXTEND
987 || GET_CODE (op) == ZERO_EXTEND)
988 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
989 GET_MODE (XEXP (op, 0)));
990 break;
991
992 case POPCOUNT:
993 switch (GET_CODE (op))
994 {
995 case BSWAP:
996 case ZERO_EXTEND:
997 /* (popcount (zero_extend <X>)) = (popcount <X>) */
998 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
999 GET_MODE (XEXP (op, 0)));
1000
1001 case ROTATE:
1002 case ROTATERT:
1003 /* Rotations don't affect popcount. */
1004 if (!side_effects_p (XEXP (op, 1)))
1005 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1006 GET_MODE (XEXP (op, 0)));
1007 break;
1008
1009 default:
1010 break;
1011 }
1012 break;
1013
1014 case PARITY:
1015 switch (GET_CODE (op))
1016 {
1017 case NOT:
1018 case BSWAP:
1019 case ZERO_EXTEND:
1020 case SIGN_EXTEND:
1021 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1022 GET_MODE (XEXP (op, 0)));
1023
1024 case ROTATE:
1025 case ROTATERT:
1026 /* Rotations don't affect parity. */
1027 if (!side_effects_p (XEXP (op, 1)))
1028 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1029 GET_MODE (XEXP (op, 0)));
1030 break;
1031
1032 default:
1033 break;
1034 }
1035 break;
1036
1037 case BSWAP:
1038 /* (bswap (bswap x)) -> x. */
1039 if (GET_CODE (op) == BSWAP)
1040 return XEXP (op, 0);
1041 break;
1042
1043 case FLOAT:
1044 /* (float (sign_extend <X>)) = (float <X>). */
1045 if (GET_CODE (op) == SIGN_EXTEND)
1046 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1047 GET_MODE (XEXP (op, 0)));
1048 break;
1049
1050 case SIGN_EXTEND:
1051 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1052 becomes just the MINUS if its mode is MODE. This allows
1053 folding switch statements on machines using casesi (such as
1054 the VAX). */
1055 if (GET_CODE (op) == TRUNCATE
1056 && GET_MODE (XEXP (op, 0)) == mode
1057 && GET_CODE (XEXP (op, 0)) == MINUS
1058 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1059 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1060 return XEXP (op, 0);
1061
1062 /* Extending a widening multiplication should be canonicalized to
1063 a wider widening multiplication. */
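      /* For example, (sign_extend:TI (mult:DI (sign_extend:DI X:SI)
         (sign_extend:DI Y:SI))) becomes (mult:TI (sign_extend:TI X:SI)
         (sign_extend:TI Y:SI)), since a 32x32 product cannot overflow DImode. */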
1064 if (GET_CODE (op) == MULT)
1065 {
1066 rtx lhs = XEXP (op, 0);
1067 rtx rhs = XEXP (op, 1);
1068 enum rtx_code lcode = GET_CODE (lhs);
1069 enum rtx_code rcode = GET_CODE (rhs);
1070
1071 /* Widening multiplies usually extend both operands, but sometimes
1072 they use a shift to extract a portion of a register. */
1073 if ((lcode == SIGN_EXTEND
1074 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1075 && (rcode == SIGN_EXTEND
1076 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1077 {
1078 enum machine_mode lmode = GET_MODE (lhs);
1079 enum machine_mode rmode = GET_MODE (rhs);
1080 int bits;
1081
1082 if (lcode == ASHIFTRT)
1083 /* Number of bits not shifted off the end. */
1084 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1085 else /* lcode == SIGN_EXTEND */
1086 /* Size of inner mode. */
1087 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1088
1089 if (rcode == ASHIFTRT)
1090 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1091 else /* rcode == SIGN_EXTEND */
1092 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1093
 1094 /* We can only widen multiplies if the result is mathematically
1095 equivalent. I.e. if overflow was impossible. */
1096 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1097 return simplify_gen_binary
1098 (MULT, mode,
1099 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1100 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1101 }
1102 }
1103
1104 /* Check for a sign extension of a subreg of a promoted
1105 variable, where the promotion is sign-extended, and the
1106 target mode is the same as the variable's promotion. */
1107 if (GET_CODE (op) == SUBREG
1108 && SUBREG_PROMOTED_VAR_P (op)
1109 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1110 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1111 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1112
1113 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1114 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1115 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1116 {
1117 gcc_assert (GET_MODE_BITSIZE (mode)
1118 > GET_MODE_BITSIZE (GET_MODE (op)));
1119 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1120 GET_MODE (XEXP (op, 0)));
1121 }
1122
1123 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1124 is (sign_extend:M (subreg:O <X>)) if there is mode with
1125 GET_MODE_BITSIZE (N) - I bits.
1126 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1127 is similarly (zero_extend:M (subreg:O <X>)). */
1128 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1129 && GET_CODE (XEXP (op, 0)) == ASHIFT
1130 && CONST_INT_P (XEXP (op, 1))
1131 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1132 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1133 {
1134 enum machine_mode tmode
1135 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1136 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1137 gcc_assert (GET_MODE_BITSIZE (mode)
1138 > GET_MODE_BITSIZE (GET_MODE (op)));
1139 if (tmode != BLKmode)
1140 {
1141 rtx inner =
1142 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1143 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1144 ? SIGN_EXTEND : ZERO_EXTEND,
1145 mode, inner, tmode);
1146 }
1147 }
1148
1149 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1150 /* As we do not know which address space the pointer is referring to,
1151 we can do this only if the target does not support different pointer
1152 or address modes depending on the address space. */
1153 if (target_default_pointer_address_modes_p ()
1154 && ! POINTERS_EXTEND_UNSIGNED
1155 && mode == Pmode && GET_MODE (op) == ptr_mode
1156 && (CONSTANT_P (op)
1157 || (GET_CODE (op) == SUBREG
1158 && REG_P (SUBREG_REG (op))
1159 && REG_POINTER (SUBREG_REG (op))
1160 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1161 return convert_memory_address (Pmode, op);
1162 #endif
1163 break;
1164
1165 case ZERO_EXTEND:
1166 /* Check for a zero extension of a subreg of a promoted
1167 variable, where the promotion is zero-extended, and the
1168 target mode is the same as the variable's promotion. */
1169 if (GET_CODE (op) == SUBREG
1170 && SUBREG_PROMOTED_VAR_P (op)
1171 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1172 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1173 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1174
1175 /* Extending a widening multiplication should be canonicalized to
1176 a wider widening multiplication. */
1177 if (GET_CODE (op) == MULT)
1178 {
1179 rtx lhs = XEXP (op, 0);
1180 rtx rhs = XEXP (op, 1);
1181 enum rtx_code lcode = GET_CODE (lhs);
1182 enum rtx_code rcode = GET_CODE (rhs);
1183
1184 /* Widening multiplies usually extend both operands, but sometimes
1185 they use a shift to extract a portion of a register. */
1186 if ((lcode == ZERO_EXTEND
1187 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1188 && (rcode == ZERO_EXTEND
1189 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1190 {
1191 enum machine_mode lmode = GET_MODE (lhs);
1192 enum machine_mode rmode = GET_MODE (rhs);
1193 int bits;
1194
1195 if (lcode == LSHIFTRT)
1196 /* Number of bits not shifted off the end. */
1197 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1198 else /* lcode == ZERO_EXTEND */
1199 /* Size of inner mode. */
1200 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1201
1202 if (rcode == LSHIFTRT)
1203 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1204 else /* rcode == ZERO_EXTEND */
1205 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1206
 1207 /* We can only widen multiplies if the result is mathematically
1208 equivalent. I.e. if overflow was impossible. */
1209 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1210 return simplify_gen_binary
1211 (MULT, mode,
1212 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1213 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1214 }
1215 }
1216
1217 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1218 if (GET_CODE (op) == ZERO_EXTEND)
1219 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1220 GET_MODE (XEXP (op, 0)));
1221
1222 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1223 is (zero_extend:M (subreg:O <X>)) if there is mode with
1224 GET_MODE_BITSIZE (N) - I bits. */
1225 if (GET_CODE (op) == LSHIFTRT
1226 && GET_CODE (XEXP (op, 0)) == ASHIFT
1227 && CONST_INT_P (XEXP (op, 1))
1228 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1229 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1230 {
1231 enum machine_mode tmode
1232 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1233 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1234 if (tmode != BLKmode)
1235 {
1236 rtx inner =
1237 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1238 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1239 }
1240 }
1241
1242 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1243 /* As we do not know which address space the pointer is referring to,
1244 we can do this only if the target does not support different pointer
1245 or address modes depending on the address space. */
1246 if (target_default_pointer_address_modes_p ()
1247 && POINTERS_EXTEND_UNSIGNED > 0
1248 && mode == Pmode && GET_MODE (op) == ptr_mode
1249 && (CONSTANT_P (op)
1250 || (GET_CODE (op) == SUBREG
1251 && REG_P (SUBREG_REG (op))
1252 && REG_POINTER (SUBREG_REG (op))
1253 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1254 return convert_memory_address (Pmode, op);
1255 #endif
1256 break;
1257
1258 default:
1259 break;
1260 }
1261
1262 return 0;
1263 }
1264
1265 /* Try to compute the value of a unary operation CODE whose output mode is to
1266 be MODE with input operand OP whose mode was originally OP_MODE.
1267 Return zero if the value cannot be computed. */
1268 rtx
1269 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1270 rtx op, enum machine_mode op_mode)
1271 {
1272 unsigned int width = GET_MODE_PRECISION (mode);
1273 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1274
1275 if (code == VEC_DUPLICATE)
1276 {
1277 gcc_assert (VECTOR_MODE_P (mode));
1278 if (GET_MODE (op) != VOIDmode)
1279 {
1280 if (!VECTOR_MODE_P (GET_MODE (op)))
1281 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1282 else
1283 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1284 (GET_MODE (op)));
1285 }
1286 if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
1287 || GET_CODE (op) == CONST_VECTOR)
1288 {
1289 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1290 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1291 rtvec v = rtvec_alloc (n_elts);
1292 unsigned int i;
1293
1294 if (GET_CODE (op) != CONST_VECTOR)
1295 for (i = 0; i < n_elts; i++)
1296 RTVEC_ELT (v, i) = op;
1297 else
1298 {
1299 enum machine_mode inmode = GET_MODE (op);
1300 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1301 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1302
1303 gcc_assert (in_n_elts < n_elts);
1304 gcc_assert ((n_elts % in_n_elts) == 0);
1305 for (i = 0; i < n_elts; i++)
1306 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1307 }
1308 return gen_rtx_CONST_VECTOR (mode, v);
1309 }
1310 }
1311
1312 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1313 {
1314 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1315 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1316 enum machine_mode opmode = GET_MODE (op);
1317 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1318 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1319 rtvec v = rtvec_alloc (n_elts);
1320 unsigned int i;
1321
1322 gcc_assert (op_n_elts == n_elts);
1323 for (i = 0; i < n_elts; i++)
1324 {
1325 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1326 CONST_VECTOR_ELT (op, i),
1327 GET_MODE_INNER (opmode));
1328 if (!x)
1329 return 0;
1330 RTVEC_ELT (v, i) = x;
1331 }
1332 return gen_rtx_CONST_VECTOR (mode, v);
1333 }
1334
1335 /* The order of these tests is critical so that, for example, we don't
1336 check the wrong mode (input vs. output) for a conversion operation,
1337 such as FIX. At some point, this should be simplified. */
1338
1339 if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1340 {
1341 HOST_WIDE_INT hv, lv;
1342 REAL_VALUE_TYPE d;
1343
1344 if (CONST_INT_P (op))
1345 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1346 else
1347 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1348
1349 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1350 d = real_value_truncate (mode, d);
1351 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1352 }
1353 else if (code == UNSIGNED_FLOAT
1354 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1355 {
1356 HOST_WIDE_INT hv, lv;
1357 REAL_VALUE_TYPE d;
1358
1359 if (CONST_INT_P (op))
1360 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1361 else
1362 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1363
1364 if (op_mode == VOIDmode
1365 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1366 /* We should never get a negative number. */
1367 gcc_assert (hv >= 0);
1368 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1369 hv = 0, lv &= GET_MODE_MASK (op_mode);
1370
1371 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1372 d = real_value_truncate (mode, d);
1373 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1374 }
1375
1376 if (CONST_INT_P (op)
1377 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1378 {
1379 HOST_WIDE_INT arg0 = INTVAL (op);
1380 HOST_WIDE_INT val;
1381
1382 switch (code)
1383 {
1384 case NOT:
1385 val = ~ arg0;
1386 break;
1387
1388 case NEG:
1389 val = - arg0;
1390 break;
1391
1392 case ABS:
1393 val = (arg0 >= 0 ? arg0 : - arg0);
1394 break;
1395
1396 case FFS:
1397 arg0 &= GET_MODE_MASK (mode);
1398 val = ffs_hwi (arg0);
1399 break;
1400
1401 case CLZ:
1402 arg0 &= GET_MODE_MASK (mode);
1403 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1404 ;
1405 else
1406 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1407 break;
1408
1409 case CLRSB:
1410 arg0 &= GET_MODE_MASK (mode);
1411 if (arg0 == 0)
1412 val = GET_MODE_PRECISION (mode) - 1;
1413 else if (arg0 >= 0)
1414 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1415 else if (arg0 < 0)
1416 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1417 break;
1418
1419 case CTZ:
1420 arg0 &= GET_MODE_MASK (mode);
1421 if (arg0 == 0)
1422 {
1423 /* Even if the value at zero is undefined, we have to come
1424 up with some replacement. Seems good enough. */
1425 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1426 val = GET_MODE_PRECISION (mode);
1427 }
1428 else
1429 val = ctz_hwi (arg0);
1430 break;
1431
1432 case POPCOUNT:
1433 arg0 &= GET_MODE_MASK (mode);
1434 val = 0;
1435 while (arg0)
1436 val++, arg0 &= arg0 - 1;
1437 break;
1438
1439 case PARITY:
1440 arg0 &= GET_MODE_MASK (mode);
1441 val = 0;
1442 while (arg0)
1443 val++, arg0 &= arg0 - 1;
1444 val &= 1;
1445 break;
1446
1447 case BSWAP:
1448 {
1449 unsigned int s;
1450
1451 val = 0;
1452 for (s = 0; s < width; s += 8)
1453 {
1454 unsigned int d = width - s - 8;
1455 unsigned HOST_WIDE_INT byte;
1456 byte = (arg0 >> s) & 0xff;
1457 val |= byte << d;
1458 }
1459 }
1460 break;
1461
1462 case TRUNCATE:
1463 val = arg0;
1464 break;
1465
1466 case ZERO_EXTEND:
1467 /* When zero-extending a CONST_INT, we need to know its
1468 original mode. */
1469 gcc_assert (op_mode != VOIDmode);
1470 if (op_width == HOST_BITS_PER_WIDE_INT)
1471 {
1472 /* If we were really extending the mode,
1473 we would have to distinguish between zero-extension
1474 and sign-extension. */
1475 gcc_assert (width == op_width);
1476 val = arg0;
1477 }
1478 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1479 val = arg0 & GET_MODE_MASK (op_mode);
1480 else
1481 return 0;
1482 break;
1483
1484 case SIGN_EXTEND:
1485 if (op_mode == VOIDmode)
1486 op_mode = mode;
1487 op_width = GET_MODE_PRECISION (op_mode);
1488 if (op_width == HOST_BITS_PER_WIDE_INT)
1489 {
1490 /* If we were really extending the mode,
1491 we would have to distinguish between zero-extension
1492 and sign-extension. */
1493 gcc_assert (width == op_width);
1494 val = arg0;
1495 }
1496 else if (op_width < HOST_BITS_PER_WIDE_INT)
1497 {
1498 val = arg0 & GET_MODE_MASK (op_mode);
1499 if (val_signbit_known_set_p (op_mode, val))
1500 val |= ~GET_MODE_MASK (op_mode);
1501 }
1502 else
1503 return 0;
1504 break;
1505
1506 case SQRT:
1507 case FLOAT_EXTEND:
1508 case FLOAT_TRUNCATE:
1509 case SS_TRUNCATE:
1510 case US_TRUNCATE:
1511 case SS_NEG:
1512 case US_NEG:
1513 case SS_ABS:
1514 return 0;
1515
1516 default:
1517 gcc_unreachable ();
1518 }
1519
1520 return gen_int_mode (val, mode);
1521 }
1522
1523 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1524 for a DImode operation on a CONST_INT. */
1525 else if (width <= HOST_BITS_PER_DOUBLE_INT
1526 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1527 {
1528 unsigned HOST_WIDE_INT l1, lv;
1529 HOST_WIDE_INT h1, hv;
1530
1531 if (CONST_DOUBLE_AS_INT_P (op))
1532 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1533 else
1534 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1535
1536 switch (code)
1537 {
1538 case NOT:
1539 lv = ~ l1;
1540 hv = ~ h1;
1541 break;
1542
1543 case NEG:
1544 neg_double (l1, h1, &lv, &hv);
1545 break;
1546
1547 case ABS:
1548 if (h1 < 0)
1549 neg_double (l1, h1, &lv, &hv);
1550 else
1551 lv = l1, hv = h1;
1552 break;
1553
1554 case FFS:
1555 hv = 0;
1556 if (l1 != 0)
1557 lv = ffs_hwi (l1);
1558 else if (h1 != 0)
1559 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1560 else
1561 lv = 0;
1562 break;
1563
1564 case CLZ:
1565 hv = 0;
1566 if (h1 != 0)
1567 lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1568 - HOST_BITS_PER_WIDE_INT;
1569 else if (l1 != 0)
1570 lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1571 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1572 lv = GET_MODE_PRECISION (mode);
1573 break;
1574
1575 case CTZ:
1576 hv = 0;
1577 if (l1 != 0)
1578 lv = ctz_hwi (l1);
1579 else if (h1 != 0)
1580 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1581 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1582 lv = GET_MODE_PRECISION (mode);
1583 break;
1584
1585 case POPCOUNT:
1586 hv = 0;
1587 lv = 0;
1588 while (l1)
1589 lv++, l1 &= l1 - 1;
1590 while (h1)
1591 lv++, h1 &= h1 - 1;
1592 break;
1593
1594 case PARITY:
1595 hv = 0;
1596 lv = 0;
1597 while (l1)
1598 lv++, l1 &= l1 - 1;
1599 while (h1)
1600 lv++, h1 &= h1 - 1;
1601 lv &= 1;
1602 break;
1603
1604 case BSWAP:
1605 {
1606 unsigned int s;
1607
1608 hv = 0;
1609 lv = 0;
1610 for (s = 0; s < width; s += 8)
1611 {
1612 unsigned int d = width - s - 8;
1613 unsigned HOST_WIDE_INT byte;
1614
1615 if (s < HOST_BITS_PER_WIDE_INT)
1616 byte = (l1 >> s) & 0xff;
1617 else
1618 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1619
1620 if (d < HOST_BITS_PER_WIDE_INT)
1621 lv |= byte << d;
1622 else
1623 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1624 }
1625 }
1626 break;
1627
1628 case TRUNCATE:
1629 /* This is just a change-of-mode, so do nothing. */
1630 lv = l1, hv = h1;
1631 break;
1632
1633 case ZERO_EXTEND:
1634 gcc_assert (op_mode != VOIDmode);
1635
1636 if (op_width > HOST_BITS_PER_WIDE_INT)
1637 return 0;
1638
1639 hv = 0;
1640 lv = l1 & GET_MODE_MASK (op_mode);
1641 break;
1642
1643 case SIGN_EXTEND:
1644 if (op_mode == VOIDmode
1645 || op_width > HOST_BITS_PER_WIDE_INT)
1646 return 0;
1647 else
1648 {
1649 lv = l1 & GET_MODE_MASK (op_mode);
1650 if (val_signbit_known_set_p (op_mode, lv))
1651 lv |= ~GET_MODE_MASK (op_mode);
1652
1653 hv = HWI_SIGN_EXTEND (lv);
1654 }
1655 break;
1656
1657 case SQRT:
1658 return 0;
1659
1660 default:
1661 return 0;
1662 }
1663
1664 return immed_double_const (lv, hv, mode);
1665 }
1666
1667 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1668 && SCALAR_FLOAT_MODE_P (mode)
1669 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1670 {
1671 REAL_VALUE_TYPE d, t;
1672 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1673
1674 switch (code)
1675 {
1676 case SQRT:
1677 if (HONOR_SNANS (mode) && real_isnan (&d))
1678 return 0;
1679 real_sqrt (&t, mode, &d);
1680 d = t;
1681 break;
1682 case ABS:
1683 d = real_value_abs (&d);
1684 break;
1685 case NEG:
1686 d = real_value_negate (&d);
1687 break;
1688 case FLOAT_TRUNCATE:
1689 d = real_value_truncate (mode, d);
1690 break;
1691 case FLOAT_EXTEND:
1692 /* All this does is change the mode, unless changing
1693 mode class. */
1694 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1695 real_convert (&d, mode, &d);
1696 break;
1697 case FIX:
1698 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1699 break;
1700 case NOT:
1701 {
1702 long tmp[4];
1703 int i;
1704
1705 real_to_target (tmp, &d, GET_MODE (op));
1706 for (i = 0; i < 4; i++)
1707 tmp[i] = ~tmp[i];
1708 real_from_target (&d, tmp, mode);
1709 break;
1710 }
1711 default:
1712 gcc_unreachable ();
1713 }
1714 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1715 }
1716
1717 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1718 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1719 && GET_MODE_CLASS (mode) == MODE_INT
1720 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1721 {
1722 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1723 operators are intentionally left unspecified (to ease implementation
1724 by target backends), for consistency, this routine implements the
1725 same semantics for constant folding as used by the middle-end. */
1726
1727 /* This was formerly used only for non-IEEE float.
1728 eggert@twinsun.com says it is safe for IEEE also. */
1729 HOST_WIDE_INT xh, xl, th, tl;
1730 REAL_VALUE_TYPE x, t;
1731 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1732 switch (code)
1733 {
1734 case FIX:
1735 if (REAL_VALUE_ISNAN (x))
1736 return const0_rtx;
1737
1738 /* Test against the signed upper bound. */
1739 if (width > HOST_BITS_PER_WIDE_INT)
1740 {
1741 th = ((unsigned HOST_WIDE_INT) 1
1742 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1743 tl = -1;
1744 }
1745 else
1746 {
1747 th = 0;
1748 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1749 }
1750 real_from_integer (&t, VOIDmode, tl, th, 0);
1751 if (REAL_VALUES_LESS (t, x))
1752 {
1753 xh = th;
1754 xl = tl;
1755 break;
1756 }
1757
1758 /* Test against the signed lower bound. */
1759 if (width > HOST_BITS_PER_WIDE_INT)
1760 {
1761 th = (unsigned HOST_WIDE_INT) (-1)
1762 << (width - HOST_BITS_PER_WIDE_INT - 1);
1763 tl = 0;
1764 }
1765 else
1766 {
1767 th = -1;
1768 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1769 }
1770 real_from_integer (&t, VOIDmode, tl, th, 0);
1771 if (REAL_VALUES_LESS (x, t))
1772 {
1773 xh = th;
1774 xl = tl;
1775 break;
1776 }
1777 REAL_VALUE_TO_INT (&xl, &xh, x);
1778 break;
1779
1780 case UNSIGNED_FIX:
1781 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1782 return const0_rtx;
1783
1784 /* Test against the unsigned upper bound. */
1785 if (width == HOST_BITS_PER_DOUBLE_INT)
1786 {
1787 th = -1;
1788 tl = -1;
1789 }
1790 else if (width >= HOST_BITS_PER_WIDE_INT)
1791 {
1792 th = ((unsigned HOST_WIDE_INT) 1
1793 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1794 tl = -1;
1795 }
1796 else
1797 {
1798 th = 0;
1799 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1800 }
1801 real_from_integer (&t, VOIDmode, tl, th, 1);
1802 if (REAL_VALUES_LESS (t, x))
1803 {
1804 xh = th;
1805 xl = tl;
1806 break;
1807 }
1808
1809 REAL_VALUE_TO_INT (&xl, &xh, x);
1810 break;
1811
1812 default:
1813 gcc_unreachable ();
1814 }
1815 return immed_double_const (xl, xh, mode);
1816 }
1817
1818 return NULL_RTX;
1819 }
1820 \f
1821 /* Subroutine of simplify_binary_operation to simplify a commutative,
1822 associative binary operation CODE with result mode MODE, operating
1823 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1824 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1825 canonicalization is possible. */
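   /* For example, (ior (ior (reg:SI 1) (const_int 4)) (const_int 1))
      re-associates so the constants fold, giving
      (ior (reg:SI 1) (const_int 5)). */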
1826
1827 static rtx
1828 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1829 rtx op0, rtx op1)
1830 {
1831 rtx tem;
1832
1833 /* Linearize the operator to the left. */
1834 if (GET_CODE (op1) == code)
1835 {
1836 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1837 if (GET_CODE (op0) == code)
1838 {
1839 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1840 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1841 }
1842
1843 /* "a op (b op c)" becomes "(b op c) op a". */
1844 if (! swap_commutative_operands_p (op1, op0))
1845 return simplify_gen_binary (code, mode, op1, op0);
1846
1847 tem = op0;
1848 op0 = op1;
1849 op1 = tem;
1850 }
1851
1852 if (GET_CODE (op0) == code)
1853 {
1854 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1855 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1856 {
1857 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1858 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1859 }
1860
1861 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1862 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1863 if (tem != 0)
1864 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1865
1866 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1867 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1868 if (tem != 0)
1869 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1870 }
1871
1872 return 0;
1873 }
1874
1875
1876 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1877 and OP1. Return 0 if no simplification is possible.
1878
1879 Don't use this for relational operations such as EQ or LT.
1880 Use simplify_relational_operation instead. */
1881 rtx
1882 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1883 rtx op0, rtx op1)
1884 {
1885 rtx trueop0, trueop1;
1886 rtx tem;
1887
1888 /* Relational operations don't work here. We must know the mode
1889 of the operands in order to do the comparison correctly.
1890 Assuming a full word can give incorrect results.
1891 Consider comparing 128 with -128 in QImode. */
1892 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1893 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1894
1895 /* Make sure the constant is second. */
1896 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1897 && swap_commutative_operands_p (op0, op1))
1898 {
1899 tem = op0, op0 = op1, op1 = tem;
1900 }
1901
1902 trueop0 = avoid_constant_pool_reference (op0);
1903 trueop1 = avoid_constant_pool_reference (op1);
1904
1905 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1906 if (tem)
1907 return tem;
1908 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1909 }
1910
1911 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1912 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1913 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1914 actual constants. */
1915
1916 static rtx
1917 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1918 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1919 {
1920 rtx tem, reversed, opleft, opright;
1921 HOST_WIDE_INT val;
1922 unsigned int width = GET_MODE_PRECISION (mode);
1923
1924 /* Even if we can't compute a constant result,
1925 there are some cases worth simplifying. */
1926
1927 switch (code)
1928 {
1929 case PLUS:
1930 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1931 when x is NaN, infinite, or finite and nonzero. They aren't
1932 when x is -0 and the rounding mode is not towards -infinity,
1933 since (-0) + 0 is then 0. */
1934 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1935 return op0;
1936
1937 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1938 transformations are safe even for IEEE. */
1939 if (GET_CODE (op0) == NEG)
1940 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1941 else if (GET_CODE (op1) == NEG)
1942 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1943
1944 /* (~a) + 1 -> -a */
1945 if (INTEGRAL_MODE_P (mode)
1946 && GET_CODE (op0) == NOT
1947 && trueop1 == const1_rtx)
1948 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1949
1950 /* Handle both-operands-constant cases. We can only add
1951 CONST_INTs to constants since the sum of relocatable symbols
1952 can't be handled by most assemblers. Don't add CONST_INT
1953 to CONST_INT since overflow won't be computed properly if wider
1954 than HOST_BITS_PER_WIDE_INT. */
1955
1956 if ((GET_CODE (op0) == CONST
1957 || GET_CODE (op0) == SYMBOL_REF
1958 || GET_CODE (op0) == LABEL_REF)
1959 && CONST_INT_P (op1))
1960 return plus_constant (mode, op0, INTVAL (op1));
1961 else if ((GET_CODE (op1) == CONST
1962 || GET_CODE (op1) == SYMBOL_REF
1963 || GET_CODE (op1) == LABEL_REF)
1964 && CONST_INT_P (op0))
1965 return plus_constant (mode, op1, INTVAL (op0));
1966
1967 /* See if this is something like X * C - X or vice versa or
1968 if the multiplication is written as a shift. If so, we can
1969 distribute and make a new multiply, shift, or maybe just
1970 have X (if C is 2 in the example above). But don't make
1971 something more expensive than we had before. */
1972
1973 if (SCALAR_INT_MODE_P (mode))
1974 {
1975 double_int coeff0, coeff1;
1976 rtx lhs = op0, rhs = op1;
1977
1978 coeff0 = double_int_one;
1979 coeff1 = double_int_one;
1980
1981 if (GET_CODE (lhs) == NEG)
1982 {
1983 coeff0 = double_int_minus_one;
1984 lhs = XEXP (lhs, 0);
1985 }
1986 else if (GET_CODE (lhs) == MULT
1987 && CONST_INT_P (XEXP (lhs, 1)))
1988 {
1989 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
1990 lhs = XEXP (lhs, 0);
1991 }
1992 else if (GET_CODE (lhs) == ASHIFT
1993 && CONST_INT_P (XEXP (lhs, 1))
1994 && INTVAL (XEXP (lhs, 1)) >= 0
1995 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1996 {
1997 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
1998 lhs = XEXP (lhs, 0);
1999 }
2000
2001 if (GET_CODE (rhs) == NEG)
2002 {
2003 coeff1 = double_int_minus_one;
2004 rhs = XEXP (rhs, 0);
2005 }
2006 else if (GET_CODE (rhs) == MULT
2007 && CONST_INT_P (XEXP (rhs, 1)))
2008 {
2009 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2010 rhs = XEXP (rhs, 0);
2011 }
2012 else if (GET_CODE (rhs) == ASHIFT
2013 && CONST_INT_P (XEXP (rhs, 1))
2014 && INTVAL (XEXP (rhs, 1)) >= 0
2015 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2016 {
2017 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2018 rhs = XEXP (rhs, 0);
2019 }
2020
2021 if (rtx_equal_p (lhs, rhs))
2022 {
2023 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2024 rtx coeff;
2025 double_int val;
2026 bool speed = optimize_function_for_speed_p (cfun);
2027
2028 val = coeff0 + coeff1;
2029 coeff = immed_double_int_const (val, mode);
2030
2031 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2032 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2033 ? tem : 0;
2034 }
2035 }
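      /* Worked example (illustrative only): for
         (plus (mult x (const_int 3)) x), coeff0 is 3, coeff1 is 1 and
         lhs == rhs == x, so the candidate rewrite is
         (mult x (const_int 4)).  Likewise
         (plus (ashift x (const_int 2)) (neg x)) gives coeff0 = 4,
         coeff1 = -1 and the candidate (mult x (const_int 3)).  Either
         result is kept only if set_src_cost says it is no more expensive
         than the original expression.  */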
2036
2037 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2038 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2039 && GET_CODE (op0) == XOR
2040 && (CONST_INT_P (XEXP (op0, 1))
2041 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2042 && mode_signbit_p (mode, op1))
2043 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2044 simplify_gen_binary (XOR, mode, op1,
2045 XEXP (op0, 1)));
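      /* For illustration in QImode, where the sign bit is 0x80: adding the
         sign bit and XORing with it agree modulo 2^8 because the carry out
         of the top bit is discarded, so
         (plus (xor x (const_int 0x0f)) (const_int 0x80)) folds to
         (xor x (const_int 0x8f)).  */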
2046
2047 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2048 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2049 && GET_CODE (op0) == MULT
2050 && GET_CODE (XEXP (op0, 0)) == NEG)
2051 {
2052 rtx in1, in2;
2053
2054 in1 = XEXP (XEXP (op0, 0), 0);
2055 in2 = XEXP (op0, 1);
2056 return simplify_gen_binary (MINUS, mode, op1,
2057 simplify_gen_binary (MULT, mode,
2058 in1, in2));
2059 }
2060
2061 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2062 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2063 is 1. */
2064 if (COMPARISON_P (op0)
2065 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2066 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2067 && (reversed = reversed_comparison (op0, mode)))
2068 return
2069 simplify_gen_unary (NEG, mode, reversed, mode);
2070
2071 /* If one of the operands is a PLUS or a MINUS, see if we can
2072 simplify this by the associative law.
2073 Don't use the associative law for floating point.
2074 The inaccuracy makes it nonassociative,
2075 and subtle programs can break if operations are associated. */
2076
2077 if (INTEGRAL_MODE_P (mode)
2078 && (plus_minus_operand_p (op0)
2079 || plus_minus_operand_p (op1))
2080 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2081 return tem;
2082
2083 /* Reassociate floating point addition only when the user
2084 specifies associative math operations. */
2085 if (FLOAT_MODE_P (mode)
2086 && flag_associative_math)
2087 {
2088 tem = simplify_associative_operation (code, mode, op0, op1);
2089 if (tem)
2090 return tem;
2091 }
2092 break;
2093
2094 case COMPARE:
2095 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2096 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2097 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2098 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2099 {
2100 rtx xop00 = XEXP (op0, 0);
2101 rtx xop10 = XEXP (op1, 0);
2102
2103 #ifdef HAVE_cc0
2104 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2105 #else
2106 if (REG_P (xop00) && REG_P (xop10)
2107 && GET_MODE (xop00) == GET_MODE (xop10)
2108 && REGNO (xop00) == REGNO (xop10)
2109 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2110 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2111 #endif
2112 return xop00;
2113 }
2114 break;
2115
2116 case MINUS:
2117 /* We can't assume x-x is 0 even with non-IEEE floating point,
2118 but since it is zero except in very strange circumstances, we
2119 will treat it as zero with -ffinite-math-only. */
2120 if (rtx_equal_p (trueop0, trueop1)
2121 && ! side_effects_p (op0)
2122 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2123 return CONST0_RTX (mode);
2124
2125 /* Change subtraction from zero into negation. (0 - x) is the
2126 same as -x when x is NaN, infinite, or finite and nonzero.
2127 But if the mode has signed zeros, and does not round towards
2128 -infinity, then 0 - 0 is 0, not -0. */
2129 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2130 return simplify_gen_unary (NEG, mode, op1, mode);
2131
2132 /* (-1 - a) is ~a. */
2133 if (trueop0 == constm1_rtx)
2134 return simplify_gen_unary (NOT, mode, op1, mode);
2135
2136 /* Subtracting 0 has no effect unless the mode has signed zeros
2137 and supports rounding towards -infinity. In such a case,
2138 0 - 0 is -0. */
2139 if (!(HONOR_SIGNED_ZEROS (mode)
2140 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2141 && trueop1 == CONST0_RTX (mode))
2142 return op0;
2143
2144 /* See if this is something like X * C - X or vice versa or
2145 if the multiplication is written as a shift. If so, we can
2146 distribute and make a new multiply, shift, or maybe just
2147 have X (if C is 2 in the example above). But don't make
2148 something more expensive than we had before. */
2149
2150 if (SCALAR_INT_MODE_P (mode))
2151 {
2152 double_int coeff0, negcoeff1;
2153 rtx lhs = op0, rhs = op1;
2154
2155 coeff0 = double_int_one;
2156 negcoeff1 = double_int_minus_one;
2157
2158 if (GET_CODE (lhs) == NEG)
2159 {
2160 coeff0 = double_int_minus_one;
2161 lhs = XEXP (lhs, 0);
2162 }
2163 else if (GET_CODE (lhs) == MULT
2164 && CONST_INT_P (XEXP (lhs, 1)))
2165 {
2166 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2167 lhs = XEXP (lhs, 0);
2168 }
2169 else if (GET_CODE (lhs) == ASHIFT
2170 && CONST_INT_P (XEXP (lhs, 1))
2171 && INTVAL (XEXP (lhs, 1)) >= 0
2172 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2173 {
2174 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2175 lhs = XEXP (lhs, 0);
2176 }
2177
2178 if (GET_CODE (rhs) == NEG)
2179 {
2180 negcoeff1 = double_int_one;
2181 rhs = XEXP (rhs, 0);
2182 }
2183 else if (GET_CODE (rhs) == MULT
2184 && CONST_INT_P (XEXP (rhs, 1)))
2185 {
2186 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2187 rhs = XEXP (rhs, 0);
2188 }
2189 else if (GET_CODE (rhs) == ASHIFT
2190 && CONST_INT_P (XEXP (rhs, 1))
2191 && INTVAL (XEXP (rhs, 1)) >= 0
2192 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2193 {
2194 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2195 negcoeff1 = -negcoeff1;
2196 rhs = XEXP (rhs, 0);
2197 }
2198
2199 if (rtx_equal_p (lhs, rhs))
2200 {
2201 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2202 rtx coeff;
2203 double_int val;
2204 bool speed = optimize_function_for_speed_p (cfun);
2205
2206 val = coeff0 + negcoeff1;
2207 coeff = immed_double_int_const (val, mode);
2208
2209 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2210 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2211 ? tem : 0;
2212 }
2213 }
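      /* Illustrative only: (minus (mult x (const_int 3)) x) yields
         coeff0 = 3 and negcoeff1 = -1, so the candidate replacement is
         (mult x (const_int 2)), again kept only when it is no more costly
         than the original MINUS.  */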
2214
2215 /* (a - (-b)) -> (a + b). True even for IEEE. */
2216 if (GET_CODE (op1) == NEG)
2217 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2218
2219 /* (-x - c) may be simplified as (-c - x). */
2220 if (GET_CODE (op0) == NEG
2221 && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
2222 {
2223 tem = simplify_unary_operation (NEG, mode, op1, mode);
2224 if (tem)
2225 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2226 }
2227
2228 /* Don't let a relocatable value get a negative coeff. */
2229 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2230 return simplify_gen_binary (PLUS, mode,
2231 op0,
2232 neg_const_int (mode, op1));
2233
2234 /* (x - (x & y)) -> (x & ~y) */
2235 if (GET_CODE (op1) == AND)
2236 {
2237 if (rtx_equal_p (op0, XEXP (op1, 0)))
2238 {
2239 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2240 GET_MODE (XEXP (op1, 1)));
2241 return simplify_gen_binary (AND, mode, op0, tem);
2242 }
2243 if (rtx_equal_p (op0, XEXP (op1, 1)))
2244 {
2245 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2246 GET_MODE (XEXP (op1, 0)));
2247 return simplify_gen_binary (AND, mode, op0, tem);
2248 }
2249 }
2250
2251 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2252 by reversing the comparison code if valid. */
2253 if (STORE_FLAG_VALUE == 1
2254 && trueop0 == const1_rtx
2255 && COMPARISON_P (op1)
2256 && (reversed = reversed_comparison (op1, mode)))
2257 return reversed;
2258
2259 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2260 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2261 && GET_CODE (op1) == MULT
2262 && GET_CODE (XEXP (op1, 0)) == NEG)
2263 {
2264 rtx in1, in2;
2265
2266 in1 = XEXP (XEXP (op1, 0), 0);
2267 in2 = XEXP (op1, 1);
2268 return simplify_gen_binary (PLUS, mode,
2269 simplify_gen_binary (MULT, mode,
2270 in1, in2),
2271 op0);
2272 }
2273
2274 /* Canonicalize (minus (neg A) (mult B C)) to
2275 (minus (mult (neg B) C) A). */
2276 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2277 && GET_CODE (op1) == MULT
2278 && GET_CODE (op0) == NEG)
2279 {
2280 rtx in1, in2;
2281
2282 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2283 in2 = XEXP (op1, 1);
2284 return simplify_gen_binary (MINUS, mode,
2285 simplify_gen_binary (MULT, mode,
2286 in1, in2),
2287 XEXP (op0, 0));
2288 }
2289
2290 /* If one of the operands is a PLUS or a MINUS, see if we can
2291 simplify this by the associative law. This will, for example,
2292 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2293 Don't use the associative law for floating point.
2294 The inaccuracy makes it nonassociative,
2295 and subtle programs can break if operations are associated. */
2296
2297 if (INTEGRAL_MODE_P (mode)
2298 && (plus_minus_operand_p (op0)
2299 || plus_minus_operand_p (op1))
2300 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2301 return tem;
2302 break;
2303
2304 case MULT:
2305 if (trueop1 == constm1_rtx)
2306 return simplify_gen_unary (NEG, mode, op0, mode);
2307
2308 if (GET_CODE (op0) == NEG)
2309 {
2310 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2311 /* If op1 is a MULT as well and simplify_unary_operation
2312 just moved the NEG to the second operand, simplify_gen_binary
2313 below could, through simplify_associative_operation, move
2314 the NEG around again and recurse endlessly. */
2315 if (temp
2316 && GET_CODE (op1) == MULT
2317 && GET_CODE (temp) == MULT
2318 && XEXP (op1, 0) == XEXP (temp, 0)
2319 && GET_CODE (XEXP (temp, 1)) == NEG
2320 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2321 temp = NULL_RTX;
2322 if (temp)
2323 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2324 }
2325 if (GET_CODE (op1) == NEG)
2326 {
2327 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2328 /* If op0 is a MULT as well and simplify_unary_operation
2329 just moved the NEG to the second operand, simplify_gen_binary
2330 below could through simplify_associative_operation move
2331 the NEG around again and recurse endlessly. */
2332 if (temp
2333 && GET_CODE (op0) == MULT
2334 && GET_CODE (temp) == MULT
2335 && XEXP (op0, 0) == XEXP (temp, 0)
2336 && GET_CODE (XEXP (temp, 1)) == NEG
2337 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2338 temp = NULL_RTX;
2339 if (temp)
2340 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2341 }
2342
2343 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2344 x is NaN, since x * 0 is then also NaN. Nor is it valid
2345 when the mode has signed zeros, since multiplying a negative
2346 number by 0 will give -0, not 0. */
2347 if (!HONOR_NANS (mode)
2348 && !HONOR_SIGNED_ZEROS (mode)
2349 && trueop1 == CONST0_RTX (mode)
2350 && ! side_effects_p (op0))
2351 return op1;
2352
2353 /* In IEEE floating point, x*1 is not equivalent to x for
2354 signalling NaNs. */
2355 if (!HONOR_SNANS (mode)
2356 && trueop1 == CONST1_RTX (mode))
2357 return op0;
2358
2359 /* Convert multiply by constant power of two into shift unless
2360 we are still generating RTL. This test is a kludge. */
2361 if (CONST_INT_P (trueop1)
2362 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2363 /* If the mode is larger than the host word size, and the
2364 uppermost bit is set, then this isn't a power of two due
2365 to implicit sign extension. */
2366 && (width <= HOST_BITS_PER_WIDE_INT
2367 || val != HOST_BITS_PER_WIDE_INT - 1))
2368 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2369
2370 /* Likewise for multipliers wider than a word. */
2371 if (CONST_DOUBLE_AS_INT_P (trueop1)
2372 && GET_MODE (op0) == mode
2373 && CONST_DOUBLE_LOW (trueop1) == 0
2374 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2375 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2376 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2377 return simplify_gen_binary (ASHIFT, mode, op0,
2378 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2379
2380 /* x*2 is x+x and x*(-1) is -x */
2381 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2382 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2383 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2384 && GET_MODE (op0) == mode)
2385 {
2386 REAL_VALUE_TYPE d;
2387 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2388
2389 if (REAL_VALUES_EQUAL (d, dconst2))
2390 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2391
2392 if (!HONOR_SNANS (mode)
2393 && REAL_VALUES_EQUAL (d, dconstm1))
2394 return simplify_gen_unary (NEG, mode, op0, mode);
2395 }
2396
2397 /* Optimize -x * -x as x * x. */
2398 if (FLOAT_MODE_P (mode)
2399 && GET_CODE (op0) == NEG
2400 && GET_CODE (op1) == NEG
2401 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2402 && !side_effects_p (XEXP (op0, 0)))
2403 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2404
2405 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2406 if (SCALAR_FLOAT_MODE_P (mode)
2407 && GET_CODE (op0) == ABS
2408 && GET_CODE (op1) == ABS
2409 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2410 && !side_effects_p (XEXP (op0, 0)))
2411 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2412
2413 /* Reassociate multiplication, but for floating point MULTs
2414 only when the user specifies unsafe math optimizations. */
2415 if (! FLOAT_MODE_P (mode)
2416 || flag_unsafe_math_optimizations)
2417 {
2418 tem = simplify_associative_operation (code, mode, op0, op1);
2419 if (tem)
2420 return tem;
2421 }
2422 break;
2423
2424 case IOR:
2425 if (trueop1 == CONST0_RTX (mode))
2426 return op0;
2427 if (INTEGRAL_MODE_P (mode)
2428 && trueop1 == CONSTM1_RTX (mode)
2429 && !side_effects_p (op0))
2430 return op1;
2431 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2432 return op0;
2433 /* A | (~A) -> -1 */
2434 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2435 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2436 && ! side_effects_p (op0)
2437 && SCALAR_INT_MODE_P (mode))
2438 return constm1_rtx;
2439
2440 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2441 if (CONST_INT_P (op1)
2442 && HWI_COMPUTABLE_MODE_P (mode)
2443 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2444 && !side_effects_p (op0))
2445 return op1;
2446
2447 /* Canonicalize (X & C1) | C2. */
2448 if (GET_CODE (op0) == AND
2449 && CONST_INT_P (trueop1)
2450 && CONST_INT_P (XEXP (op0, 1)))
2451 {
2452 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2453 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2454 HOST_WIDE_INT c2 = INTVAL (trueop1);
2455
2456 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2457 if ((c1 & c2) == c1
2458 && !side_effects_p (XEXP (op0, 0)))
2459 return trueop1;
2460
2461 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2462 if (((c1|c2) & mask) == mask)
2463 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2464
2465 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2466 if (((c1 & ~c2) & mask) != (c1 & mask))
2467 {
2468 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2469 gen_int_mode (c1 & ~c2, mode));
2470 return simplify_gen_binary (IOR, mode, tem, op1);
2471 }
2472 }
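      /* Illustrative QImode instances of the three cases above:
         C1 = 0x03, C2 = 0x0f: (C1 & C2) == C1, so (X & 0x03) | 0x0f is just
         0x0f.  C1 = 0xf0, C2 = 0x0f: C1 | C2 covers the whole mode, so the
         result is X | 0x0f.  C1 = 0x3c, C2 = 0x0f: C1 shrinks to
         C1 & ~C2 = 0x30, giving (X & 0x30) | 0x0f.  */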
2473
2474 /* Convert (A & B) | A to A. */
2475 if (GET_CODE (op0) == AND
2476 && (rtx_equal_p (XEXP (op0, 0), op1)
2477 || rtx_equal_p (XEXP (op0, 1), op1))
2478 && ! side_effects_p (XEXP (op0, 0))
2479 && ! side_effects_p (XEXP (op0, 1)))
2480 return op1;
2481
2482 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2483 mode size to (rotate A CX). */
2484
2485 if (GET_CODE (op1) == ASHIFT
2486 || GET_CODE (op1) == SUBREG)
2487 {
2488 opleft = op1;
2489 opright = op0;
2490 }
2491 else
2492 {
2493 opright = op1;
2494 opleft = op0;
2495 }
2496
2497 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2498 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2499 && CONST_INT_P (XEXP (opleft, 1))
2500 && CONST_INT_P (XEXP (opright, 1))
2501 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2502 == GET_MODE_PRECISION (mode)))
2503 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
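      /* For illustration, assuming SImode (precision 32): with
         opleft = (ashift a (const_int 8)) and
         opright = (lshiftrt a (const_int 24)) the shift counts sum to the
         precision, so the IOR is recognized as (rotate a (const_int 8)).  */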
2504
2505 /* Same, but for ashift that has been "simplified" to a wider mode
2506 by simplify_shift_const. */
2507
2508 if (GET_CODE (opleft) == SUBREG
2509 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2510 && GET_CODE (opright) == LSHIFTRT
2511 && GET_CODE (XEXP (opright, 0)) == SUBREG
2512 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2513 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2514 && (GET_MODE_SIZE (GET_MODE (opleft))
2515 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2516 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2517 SUBREG_REG (XEXP (opright, 0)))
2518 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2519 && CONST_INT_P (XEXP (opright, 1))
2520 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2521 == GET_MODE_PRECISION (mode)))
2522 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2523 XEXP (SUBREG_REG (opleft), 1));
2524
2525 /* If we have (ior (and X C1) C2), simplify this by making
2526 C1 as small as possible if C1 actually changes. */
2527 if (CONST_INT_P (op1)
2528 && (HWI_COMPUTABLE_MODE_P (mode)
2529 || INTVAL (op1) > 0)
2530 && GET_CODE (op0) == AND
2531 && CONST_INT_P (XEXP (op0, 1))
2532 && CONST_INT_P (op1)
2533 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2534 return simplify_gen_binary (IOR, mode,
2535 simplify_gen_binary
2536 (AND, mode, XEXP (op0, 0),
2537 GEN_INT (UINTVAL (XEXP (op0, 1))
2538 & ~UINTVAL (op1))),
2539 op1);
2540
2541 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2542 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2543 the PLUS does not affect any of the bits in OP1: then we can do
2544 the IOR as a PLUS and we can associate. This is valid if OP1
2545 can be safely shifted left C bits. */
2546 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2547 && GET_CODE (XEXP (op0, 0)) == PLUS
2548 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2549 && CONST_INT_P (XEXP (op0, 1))
2550 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2551 {
2552 int count = INTVAL (XEXP (op0, 1));
2553 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2554
2555 if (mask >> count == INTVAL (trueop1)
2556 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2557 return simplify_gen_binary (ASHIFTRT, mode,
2558 plus_constant (mode, XEXP (op0, 0),
2559 mask),
2560 XEXP (op0, 1));
2561 }
2562
2563 tem = simplify_associative_operation (code, mode, op0, op1);
2564 if (tem)
2565 return tem;
2566 break;
2567
2568 case XOR:
2569 if (trueop1 == CONST0_RTX (mode))
2570 return op0;
2571 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2572 return simplify_gen_unary (NOT, mode, op0, mode);
2573 if (rtx_equal_p (trueop0, trueop1)
2574 && ! side_effects_p (op0)
2575 && GET_MODE_CLASS (mode) != MODE_CC)
2576 return CONST0_RTX (mode);
2577
2578 /* Canonicalize XOR of the most significant bit to PLUS. */
2579 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2580 && mode_signbit_p (mode, op1))
2581 return simplify_gen_binary (PLUS, mode, op0, op1);
2582 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2583 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2584 && GET_CODE (op0) == PLUS
2585 && (CONST_INT_P (XEXP (op0, 1))
2586 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2587 && mode_signbit_p (mode, XEXP (op0, 1)))
2588 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2589 simplify_gen_binary (XOR, mode, op1,
2590 XEXP (op0, 1)));
2591
2592 /* If we are XORing two things that have no bits in common,
2593 convert them into an IOR. This helps to detect rotation encoded
2594 using those methods and possibly other simplifications. */
2595
2596 if (HWI_COMPUTABLE_MODE_P (mode)
2597 && (nonzero_bits (op0, mode)
2598 & nonzero_bits (op1, mode)) == 0)
2599 return (simplify_gen_binary (IOR, mode, op0, op1));
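      /* For illustration: if op0 is (and x (const_int 0xf0)) and op1 is
         (and y (const_int 0x0f)), nonzero_bits reports at most 0xf0 and
         0x0f respectively; the sets do not overlap, so the XOR is rewritten
         as an IOR, which can then match the rotate patterns handled in the
         IOR case above.  */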
2600
2601 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2602 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2603 (NOT y). */
2604 {
2605 int num_negated = 0;
2606
2607 if (GET_CODE (op0) == NOT)
2608 num_negated++, op0 = XEXP (op0, 0);
2609 if (GET_CODE (op1) == NOT)
2610 num_negated++, op1 = XEXP (op1, 0);
2611
2612 if (num_negated == 2)
2613 return simplify_gen_binary (XOR, mode, op0, op1);
2614 else if (num_negated == 1)
2615 return simplify_gen_unary (NOT, mode,
2616 simplify_gen_binary (XOR, mode, op0, op1),
2617 mode);
2618 }
2619
2620 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2621 correspond to a machine insn or result in further simplifications
2622 if B is a constant. */
2623
2624 if (GET_CODE (op0) == AND
2625 && rtx_equal_p (XEXP (op0, 1), op1)
2626 && ! side_effects_p (op1))
2627 return simplify_gen_binary (AND, mode,
2628 simplify_gen_unary (NOT, mode,
2629 XEXP (op0, 0), mode),
2630 op1);
2631
2632 else if (GET_CODE (op0) == AND
2633 && rtx_equal_p (XEXP (op0, 0), op1)
2634 && ! side_effects_p (op1))
2635 return simplify_gen_binary (AND, mode,
2636 simplify_gen_unary (NOT, mode,
2637 XEXP (op0, 1), mode),
2638 op1);
2639
2640 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2641 we can transform like this:
2642 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2643 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2644 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2645 Attempt a few simplifications when B and C are both constants. */
2646 if (GET_CODE (op0) == AND
2647 && CONST_INT_P (op1)
2648 && CONST_INT_P (XEXP (op0, 1)))
2649 {
2650 rtx a = XEXP (op0, 0);
2651 rtx b = XEXP (op0, 1);
2652 rtx c = op1;
2653 HOST_WIDE_INT bval = INTVAL (b);
2654 HOST_WIDE_INT cval = INTVAL (c);
2655
2656 rtx na_c
2657 = simplify_binary_operation (AND, mode,
2658 simplify_gen_unary (NOT, mode, a, mode),
2659 c);
2660 if ((~cval & bval) == 0)
2661 {
2662 /* Try to simplify ~A&C | ~B&C. */
2663 if (na_c != NULL_RTX)
2664 return simplify_gen_binary (IOR, mode, na_c,
2665 GEN_INT (~bval & cval));
2666 }
2667 else
2668 {
2669 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2670 if (na_c == const0_rtx)
2671 {
2672 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2673 GEN_INT (~cval & bval));
2674 return simplify_gen_binary (IOR, mode, a_nc_b,
2675 GEN_INT (~bval & cval));
2676 }
2677 }
2678 }
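      /* Illustrative bit-level instance of the identity above, with
         B = 0x0f and C = 0xff in an 8-bit mode: every bit of B is set in C,
         so (~C & B) == 0 and (a & 0x0f) ^ 0xff equals (~a & 0xff) | 0xf0,
         i.e. ~A&C | ~B&C.  The rewrite is emitted only when ~A&C itself
         simplifies to an rtx (na_c is non-null).  */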
2679
2680 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2681 comparison if STORE_FLAG_VALUE is 1. */
2682 if (STORE_FLAG_VALUE == 1
2683 && trueop1 == const1_rtx
2684 && COMPARISON_P (op0)
2685 && (reversed = reversed_comparison (op0, mode)))
2686 return reversed;
2687
2688 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2689 is (lt foo (const_int 0)), so we can perform the above
2690 simplification if STORE_FLAG_VALUE is 1. */
2691
2692 if (STORE_FLAG_VALUE == 1
2693 && trueop1 == const1_rtx
2694 && GET_CODE (op0) == LSHIFTRT
2695 && CONST_INT_P (XEXP (op0, 1))
2696 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2697 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
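      /* For illustration in SImode: (lshiftrt x (const_int 31)) is 1 exactly
         when x is negative, i.e. it is (lt x (const_int 0)), so XORing it
         with (const_int 1) under STORE_FLAG_VALUE == 1 yields the reversed
         test (ge x (const_int 0)).  */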
2698
2699 /* (xor (comparison foo bar) (const_int sign-bit))
2700 when STORE_FLAG_VALUE is the sign bit. */
2701 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2702 && trueop1 == const_true_rtx
2703 && COMPARISON_P (op0)
2704 && (reversed = reversed_comparison (op0, mode)))
2705 return reversed;
2706
2707 tem = simplify_associative_operation (code, mode, op0, op1);
2708 if (tem)
2709 return tem;
2710 break;
2711
2712 case AND:
2713 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2714 return trueop1;
2715 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2716 return op0;
2717 if (HWI_COMPUTABLE_MODE_P (mode))
2718 {
2719 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2720 HOST_WIDE_INT nzop1;
2721 if (CONST_INT_P (trueop1))
2722 {
2723 HOST_WIDE_INT val1 = INTVAL (trueop1);
2724 /* If we are turning off bits already known off in OP0, we need
2725 not do an AND. */
2726 if ((nzop0 & ~val1) == 0)
2727 return op0;
2728 }
2729 nzop1 = nonzero_bits (trueop1, mode);
2730 /* If we are clearing all the nonzero bits, the result is zero. */
2731 if ((nzop1 & nzop0) == 0
2732 && !side_effects_p (op0) && !side_effects_p (op1))
2733 return CONST0_RTX (mode);
2734 }
2735 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2736 && GET_MODE_CLASS (mode) != MODE_CC)
2737 return op0;
2738 /* A & (~A) -> 0 */
2739 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2740 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2741 && ! side_effects_p (op0)
2742 && GET_MODE_CLASS (mode) != MODE_CC)
2743 return CONST0_RTX (mode);
2744
2745 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2746 there are no nonzero bits of C outside of X's mode. */
2747 if ((GET_CODE (op0) == SIGN_EXTEND
2748 || GET_CODE (op0) == ZERO_EXTEND)
2749 && CONST_INT_P (trueop1)
2750 && HWI_COMPUTABLE_MODE_P (mode)
2751 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2752 & UINTVAL (trueop1)) == 0)
2753 {
2754 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2755 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2756 gen_int_mode (INTVAL (trueop1),
2757 imode));
2758 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2759 }
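      /* For illustration: (and:SI (sign_extend:SI (reg:QI x)) (const_int 0x7f))
         becomes (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7f))); the
         constant has no bits outside QImode, so the AND clears whatever the
         extension put in the upper bits and a zero_extend of the narrow AND
         is equivalent.  */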
2760
2761 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2762 we might be able to further simplify the AND with X and potentially
2763 remove the truncation altogether. */
2764 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2765 {
2766 rtx x = XEXP (op0, 0);
2767 enum machine_mode xmode = GET_MODE (x);
2768 tem = simplify_gen_binary (AND, xmode, x,
2769 gen_int_mode (INTVAL (trueop1), xmode));
2770 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2771 }
2772
2773 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2774 if (GET_CODE (op0) == IOR
2775 && CONST_INT_P (trueop1)
2776 && CONST_INT_P (XEXP (op0, 1)))
2777 {
2778 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2779 return simplify_gen_binary (IOR, mode,
2780 simplify_gen_binary (AND, mode,
2781 XEXP (op0, 0), op1),
2782 gen_int_mode (tmp, mode));
2783 }
2784
2785 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2786 insn (and may simplify more). */
2787 if (GET_CODE (op0) == XOR
2788 && rtx_equal_p (XEXP (op0, 0), op1)
2789 && ! side_effects_p (op1))
2790 return simplify_gen_binary (AND, mode,
2791 simplify_gen_unary (NOT, mode,
2792 XEXP (op0, 1), mode),
2793 op1);
2794
2795 if (GET_CODE (op0) == XOR
2796 && rtx_equal_p (XEXP (op0, 1), op1)
2797 && ! side_effects_p (op1))
2798 return simplify_gen_binary (AND, mode,
2799 simplify_gen_unary (NOT, mode,
2800 XEXP (op0, 0), mode),
2801 op1);
2802
2803 /* Similarly for (~(A ^ B)) & A. */
2804 if (GET_CODE (op0) == NOT
2805 && GET_CODE (XEXP (op0, 0)) == XOR
2806 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2807 && ! side_effects_p (op1))
2808 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2809
2810 if (GET_CODE (op0) == NOT
2811 && GET_CODE (XEXP (op0, 0)) == XOR
2812 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2813 && ! side_effects_p (op1))
2814 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2815
2816 /* Convert (A | B) & A to A. */
2817 if (GET_CODE (op0) == IOR
2818 && (rtx_equal_p (XEXP (op0, 0), op1)
2819 || rtx_equal_p (XEXP (op0, 1), op1))
2820 && ! side_effects_p (XEXP (op0, 0))
2821 && ! side_effects_p (XEXP (op0, 1)))
2822 return op1;
2823
2824 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2825 ((A & N) + B) & M -> (A + B) & M
2826 Similarly if (N & M) == 0,
2827 ((A | N) + B) & M -> (A + B) & M
2828 and for - instead of + and/or ^ instead of |.
2829 Also, if (N & M) == 0, then
2830 (A +- N) & M -> A & M. */
2831 if (CONST_INT_P (trueop1)
2832 && HWI_COMPUTABLE_MODE_P (mode)
2833 && ~UINTVAL (trueop1)
2834 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2835 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2836 {
2837 rtx pmop[2];
2838 int which;
2839
2840 pmop[0] = XEXP (op0, 0);
2841 pmop[1] = XEXP (op0, 1);
2842
2843 if (CONST_INT_P (pmop[1])
2844 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2845 return simplify_gen_binary (AND, mode, pmop[0], op1);
2846
2847 for (which = 0; which < 2; which++)
2848 {
2849 tem = pmop[which];
2850 switch (GET_CODE (tem))
2851 {
2852 case AND:
2853 if (CONST_INT_P (XEXP (tem, 1))
2854 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2855 == UINTVAL (trueop1))
2856 pmop[which] = XEXP (tem, 0);
2857 break;
2858 case IOR:
2859 case XOR:
2860 if (CONST_INT_P (XEXP (tem, 1))
2861 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2862 pmop[which] = XEXP (tem, 0);
2863 break;
2864 default:
2865 break;
2866 }
2867 }
2868
2869 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2870 {
2871 tem = simplify_gen_binary (GET_CODE (op0), mode,
2872 pmop[0], pmop[1]);
2873 return simplify_gen_binary (code, mode, tem, op1);
2874 }
2875 }
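      /* Worked example (illustrative): with M = 0xff,
         ((a & 0x1ff) + b) & 0xff drops the inner AND because
         (0x1ff & 0xff) == 0xff leaves the low eight bits of a untouched and
         the addition cannot move higher bits back down, so the result is
         (a + b) & 0xff.  Similarly ((a | 0x100) + b) & 0xff and
         (a + 0x100) & 0xff reduce to (a + b) & 0xff and a & 0xff, since
         0x100 & 0xff == 0.  */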
2876
2877 /* (and X (ior (not X) Y)) -> (and X Y) */
2878 if (GET_CODE (op1) == IOR
2879 && GET_CODE (XEXP (op1, 0)) == NOT
2880 && op0 == XEXP (XEXP (op1, 0), 0))
2881 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2882
2883 /* (and (ior (not X) Y) X) -> (and X Y) */
2884 if (GET_CODE (op0) == IOR
2885 && GET_CODE (XEXP (op0, 0)) == NOT
2886 && op1 == XEXP (XEXP (op0, 0), 0))
2887 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2888
2889 tem = simplify_associative_operation (code, mode, op0, op1);
2890 if (tem)
2891 return tem;
2892 break;
2893
2894 case UDIV:
2895 /* 0/x is 0 (or x&0 if x has side-effects). */
2896 if (trueop0 == CONST0_RTX (mode))
2897 {
2898 if (side_effects_p (op1))
2899 return simplify_gen_binary (AND, mode, op1, trueop0);
2900 return trueop0;
2901 }
2902 /* x/1 is x. */
2903 if (trueop1 == CONST1_RTX (mode))
2904 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2905 /* Convert divide by power of two into shift. */
2906 if (CONST_INT_P (trueop1)
2907 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2908 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2909 break;
2910
2911 case DIV:
2912 /* Handle floating point and integers separately. */
2913 if (SCALAR_FLOAT_MODE_P (mode))
2914 {
2915 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2916 safe for modes with NaNs, since 0.0 / 0.0 will then be
2917 NaN rather than 0.0. Nor is it safe for modes with signed
2918 zeros, since dividing 0 by a negative number gives -0.0 */
2919 if (trueop0 == CONST0_RTX (mode)
2920 && !HONOR_NANS (mode)
2921 && !HONOR_SIGNED_ZEROS (mode)
2922 && ! side_effects_p (op1))
2923 return op0;
2924 /* x/1.0 is x. */
2925 if (trueop1 == CONST1_RTX (mode)
2926 && !HONOR_SNANS (mode))
2927 return op0;
2928
2929 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2930 && trueop1 != CONST0_RTX (mode))
2931 {
2932 REAL_VALUE_TYPE d;
2933 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2934
2935 /* x/-1.0 is -x. */
2936 if (REAL_VALUES_EQUAL (d, dconstm1)
2937 && !HONOR_SNANS (mode))
2938 return simplify_gen_unary (NEG, mode, op0, mode);
2939
2940 /* Change FP division by a constant into multiplication.
2941 Only do this with -freciprocal-math. */
2942 if (flag_reciprocal_math
2943 && !REAL_VALUES_EQUAL (d, dconst0))
2944 {
2945 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2946 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2947 return simplify_gen_binary (MULT, mode, op0, tem);
2948 }
2949 }
2950 }
2951 else if (SCALAR_INT_MODE_P (mode))
2952 {
2953 /* 0/x is 0 (or x&0 if x has side-effects). */
2954 if (trueop0 == CONST0_RTX (mode)
2955 && !cfun->can_throw_non_call_exceptions)
2956 {
2957 if (side_effects_p (op1))
2958 return simplify_gen_binary (AND, mode, op1, trueop0);
2959 return trueop0;
2960 }
2961 /* x/1 is x. */
2962 if (trueop1 == CONST1_RTX (mode))
2963 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2964 /* x/-1 is -x. */
2965 if (trueop1 == constm1_rtx)
2966 {
2967 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2968 return simplify_gen_unary (NEG, mode, x, mode);
2969 }
2970 }
2971 break;
2972
2973 case UMOD:
2974 /* 0%x is 0 (or x&0 if x has side-effects). */
2975 if (trueop0 == CONST0_RTX (mode))
2976 {
2977 if (side_effects_p (op1))
2978 return simplify_gen_binary (AND, mode, op1, trueop0);
2979 return trueop0;
2980 }
2981 /* x%1 is 0 (or x&0 if x has side-effects). */
2982 if (trueop1 == CONST1_RTX (mode))
2983 {
2984 if (side_effects_p (op0))
2985 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2986 return CONST0_RTX (mode);
2987 }
2988 /* Implement modulus by power of two as AND. */
2989 if (CONST_INT_P (trueop1)
2990 && exact_log2 (UINTVAL (trueop1)) > 0)
2991 return simplify_gen_binary (AND, mode, op0,
2992 GEN_INT (INTVAL (op1) - 1));
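      /* For illustration: (umod x (const_int 8)) becomes
         (and x (const_int 7)), since an unsigned modulus by a power of two
         keeps only the low-order bits.  */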
2993 break;
2994
2995 case MOD:
2996 /* 0%x is 0 (or x&0 if x has side-effects). */
2997 if (trueop0 == CONST0_RTX (mode))
2998 {
2999 if (side_effects_p (op1))
3000 return simplify_gen_binary (AND, mode, op1, trueop0);
3001 return trueop0;
3002 }
3003 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3004 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3005 {
3006 if (side_effects_p (op0))
3007 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3008 return CONST0_RTX (mode);
3009 }
3010 break;
3011
3012 case ROTATERT:
3013 case ROTATE:
3014 case ASHIFTRT:
3015 if (trueop1 == CONST0_RTX (mode))
3016 return op0;
3017 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3018 return op0;
3019 /* Rotating ~0 always results in ~0. */
3020 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3021 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3022 && ! side_effects_p (op1))
3023 return op0;
3024 canonicalize_shift:
3025 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3026 {
3027 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3028 if (val != INTVAL (op1))
3029 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3030 }
3031 break;
3032
3033 case ASHIFT:
3034 case SS_ASHIFT:
3035 case US_ASHIFT:
3036 if (trueop1 == CONST0_RTX (mode))
3037 return op0;
3038 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3039 return op0;
3040 goto canonicalize_shift;
3041
3042 case LSHIFTRT:
3043 if (trueop1 == CONST0_RTX (mode))
3044 return op0;
3045 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3046 return op0;
3047 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3048 if (GET_CODE (op0) == CLZ
3049 && CONST_INT_P (trueop1)
3050 && STORE_FLAG_VALUE == 1
3051 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3052 {
3053 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3054 unsigned HOST_WIDE_INT zero_val = 0;
3055
3056 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3057 && zero_val == GET_MODE_PRECISION (imode)
3058 && INTVAL (trueop1) == exact_log2 (zero_val))
3059 return simplify_gen_relational (EQ, mode, imode,
3060 XEXP (op0, 0), const0_rtx);
3061 }
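      /* For illustration, on a target where CLZ_DEFINED_VALUE_AT_ZERO gives
         the SImode precision 32: (clz x) equals 32 only for x == 0, so
         (lshiftrt (clz x) (const_int 5)) is 1 exactly when x is zero and
         the whole expression folds to (eq x (const_int 0)).  */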
3062 goto canonicalize_shift;
3063
3064 case SMIN:
3065 if (width <= HOST_BITS_PER_WIDE_INT
3066 && mode_signbit_p (mode, trueop1)
3067 && ! side_effects_p (op0))
3068 return op1;
3069 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3070 return op0;
3071 tem = simplify_associative_operation (code, mode, op0, op1);
3072 if (tem)
3073 return tem;
3074 break;
3075
3076 case SMAX:
3077 if (width <= HOST_BITS_PER_WIDE_INT
3078 && CONST_INT_P (trueop1)
3079 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3080 && ! side_effects_p (op0))
3081 return op1;
3082 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3083 return op0;
3084 tem = simplify_associative_operation (code, mode, op0, op1);
3085 if (tem)
3086 return tem;
3087 break;
3088
3089 case UMIN:
3090 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3091 return op1;
3092 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3093 return op0;
3094 tem = simplify_associative_operation (code, mode, op0, op1);
3095 if (tem)
3096 return tem;
3097 break;
3098
3099 case UMAX:
3100 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3101 return op1;
3102 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3103 return op0;
3104 tem = simplify_associative_operation (code, mode, op0, op1);
3105 if (tem)
3106 return tem;
3107 break;
3108
3109 case SS_PLUS:
3110 case US_PLUS:
3111 case SS_MINUS:
3112 case US_MINUS:
3113 case SS_MULT:
3114 case US_MULT:
3115 case SS_DIV:
3116 case US_DIV:
3117 /* ??? There are simplifications that can be done. */
3118 return 0;
3119
3120 case VEC_SELECT:
3121 if (!VECTOR_MODE_P (mode))
3122 {
3123 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3124 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3125 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3126 gcc_assert (XVECLEN (trueop1, 0) == 1);
3127 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3128
3129 if (GET_CODE (trueop0) == CONST_VECTOR)
3130 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3131 (trueop1, 0, 0)));
3132
3133 /* Extract a scalar element from a nested VEC_SELECT expression
3134 (with optional nested VEC_CONCAT expression). Some targets
3135 (i386) extract a scalar element from a vector using a chain of
3136 nested VEC_SELECT expressions. When the input operand is a memory
3137 operand, this operation can be simplified to a simple scalar
3138 load from a suitably offset memory address. */
3139 if (GET_CODE (trueop0) == VEC_SELECT)
3140 {
3141 rtx op0 = XEXP (trueop0, 0);
3142 rtx op1 = XEXP (trueop0, 1);
3143
3144 enum machine_mode opmode = GET_MODE (op0);
3145 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3146 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3147
3148 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3149 int elem;
3150
3151 rtvec vec;
3152 rtx tmp_op, tmp;
3153
3154 gcc_assert (GET_CODE (op1) == PARALLEL);
3155 gcc_assert (i < n_elts);
3156
3157 /* Select the element pointed to by the nested selector. */
3158 elem = INTVAL (XVECEXP (op1, 0, i));
3159
3160 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3161 if (GET_CODE (op0) == VEC_CONCAT)
3162 {
3163 rtx op00 = XEXP (op0, 0);
3164 rtx op01 = XEXP (op0, 1);
3165
3166 enum machine_mode mode00, mode01;
3167 int n_elts00, n_elts01;
3168
3169 mode00 = GET_MODE (op00);
3170 mode01 = GET_MODE (op01);
3171
3172 /* Find out number of elements of each operand. */
3173 if (VECTOR_MODE_P (mode00))
3174 {
3175 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3176 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3177 }
3178 else
3179 n_elts00 = 1;
3180
3181 if (VECTOR_MODE_P (mode01))
3182 {
3183 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3184 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3185 }
3186 else
3187 n_elts01 = 1;
3188
3189 gcc_assert (n_elts == n_elts00 + n_elts01);
3190
3191 /* Select correct operand of VEC_CONCAT
3192 and adjust selector. */
3193 if (elem < n_elts00)
3194 tmp_op = op00;
3195 else
3196 {
3197 tmp_op = op01;
3198 elem -= n_elts00;
3199 }
3200 }
3201 else
3202 tmp_op = op0;
3203
3204 vec = rtvec_alloc (1);
3205 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3206
3207 tmp = gen_rtx_fmt_ee (code, mode,
3208 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3209 return tmp;
3210 }
3211 if (GET_CODE (trueop0) == VEC_DUPLICATE
3212 && GET_MODE (XEXP (trueop0, 0)) == mode)
3213 return XEXP (trueop0, 0);
3214 }
3215 else
3216 {
3217 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3218 gcc_assert (GET_MODE_INNER (mode)
3219 == GET_MODE_INNER (GET_MODE (trueop0)));
3220 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3221
3222 if (GET_CODE (trueop0) == CONST_VECTOR)
3223 {
3224 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3225 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3226 rtvec v = rtvec_alloc (n_elts);
3227 unsigned int i;
3228
3229 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3230 for (i = 0; i < n_elts; i++)
3231 {
3232 rtx x = XVECEXP (trueop1, 0, i);
3233
3234 gcc_assert (CONST_INT_P (x));
3235 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3236 INTVAL (x));
3237 }
3238
3239 return gen_rtx_CONST_VECTOR (mode, v);
3240 }
3241
3242 /* If we build {a,b} then permute it, build the result directly. */
3243 if (XVECLEN (trueop1, 0) == 2
3244 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3245 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3246 && GET_CODE (trueop0) == VEC_CONCAT
3247 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3248 && GET_MODE (XEXP (trueop0, 0)) == mode
3249 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3250 && GET_MODE (XEXP (trueop0, 1)) == mode)
3251 {
3252 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3253 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3254 rtx subop0, subop1;
3255
3256 gcc_assert (i0 < 4 && i1 < 4);
3257 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3258 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3259
3260 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3261 }
3262
3263 if (XVECLEN (trueop1, 0) == 2
3264 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3265 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3266 && GET_CODE (trueop0) == VEC_CONCAT
3267 && GET_MODE (trueop0) == mode)
3268 {
3269 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3270 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3271 rtx subop0, subop1;
3272
3273 gcc_assert (i0 < 2 && i1 < 2);
3274 subop0 = XEXP (trueop0, i0);
3275 subop1 = XEXP (trueop0, i1);
3276
3277 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3278 }
3279 }
3280
3281 if (XVECLEN (trueop1, 0) == 1
3282 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3283 && GET_CODE (trueop0) == VEC_CONCAT)
3284 {
3285 rtx vec = trueop0;
3286 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3287
3288 /* Try to find the element in the VEC_CONCAT. */
3289 while (GET_MODE (vec) != mode
3290 && GET_CODE (vec) == VEC_CONCAT)
3291 {
3292 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3293 if (offset < vec_size)
3294 vec = XEXP (vec, 0);
3295 else
3296 {
3297 offset -= vec_size;
3298 vec = XEXP (vec, 1);
3299 }
3300 vec = avoid_constant_pool_reference (vec);
3301 }
3302
3303 if (GET_MODE (vec) == mode)
3304 return vec;
3305 }
3306
3307 return 0;
3308 case VEC_CONCAT:
3309 {
3310 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3311 ? GET_MODE (trueop0)
3312 : GET_MODE_INNER (mode));
3313 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3314 ? GET_MODE (trueop1)
3315 : GET_MODE_INNER (mode));
3316
3317 gcc_assert (VECTOR_MODE_P (mode));
3318 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3319 == GET_MODE_SIZE (mode));
3320
3321 if (VECTOR_MODE_P (op0_mode))
3322 gcc_assert (GET_MODE_INNER (mode)
3323 == GET_MODE_INNER (op0_mode));
3324 else
3325 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3326
3327 if (VECTOR_MODE_P (op1_mode))
3328 gcc_assert (GET_MODE_INNER (mode)
3329 == GET_MODE_INNER (op1_mode));
3330 else
3331 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3332
3333 if ((GET_CODE (trueop0) == CONST_VECTOR
3334 || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
3335 && (GET_CODE (trueop1) == CONST_VECTOR
3336 || CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
3337 {
3338 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3339 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3340 rtvec v = rtvec_alloc (n_elts);
3341 unsigned int i;
3342 unsigned in_n_elts = 1;
3343
3344 if (VECTOR_MODE_P (op0_mode))
3345 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3346 for (i = 0; i < n_elts; i++)
3347 {
3348 if (i < in_n_elts)
3349 {
3350 if (!VECTOR_MODE_P (op0_mode))
3351 RTVEC_ELT (v, i) = trueop0;
3352 else
3353 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3354 }
3355 else
3356 {
3357 if (!VECTOR_MODE_P (op1_mode))
3358 RTVEC_ELT (v, i) = trueop1;
3359 else
3360 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3361 i - in_n_elts);
3362 }
3363 }
3364
3365 return gen_rtx_CONST_VECTOR (mode, v);
3366 }
3367 }
3368 return 0;
3369
3370 default:
3371 gcc_unreachable ();
3372 }
3373
3374 return 0;
3375 }
3376
3377 rtx
3378 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3379 rtx op0, rtx op1)
3380 {
3381 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3382 HOST_WIDE_INT val;
3383 unsigned int width = GET_MODE_PRECISION (mode);
3384
3385 if (VECTOR_MODE_P (mode)
3386 && code != VEC_CONCAT
3387 && GET_CODE (op0) == CONST_VECTOR
3388 && GET_CODE (op1) == CONST_VECTOR)
3389 {
3390 unsigned n_elts = GET_MODE_NUNITS (mode);
3391 enum machine_mode op0mode = GET_MODE (op0);
3392 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3393 enum machine_mode op1mode = GET_MODE (op1);
3394 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3395 rtvec v = rtvec_alloc (n_elts);
3396 unsigned int i;
3397
3398 gcc_assert (op0_n_elts == n_elts);
3399 gcc_assert (op1_n_elts == n_elts);
3400 for (i = 0; i < n_elts; i++)
3401 {
3402 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3403 CONST_VECTOR_ELT (op0, i),
3404 CONST_VECTOR_ELT (op1, i));
3405 if (!x)
3406 return 0;
3407 RTVEC_ELT (v, i) = x;
3408 }
3409
3410 return gen_rtx_CONST_VECTOR (mode, v);
3411 }
3412
3413 if (VECTOR_MODE_P (mode)
3414 && code == VEC_CONCAT
3415 && (CONST_INT_P (op0)
3416 || GET_CODE (op0) == CONST_FIXED
3417 || CONST_DOUBLE_P (op0))
3418 && (CONST_INT_P (op1)
3419 || CONST_DOUBLE_P (op1)
3420 || GET_CODE (op1) == CONST_FIXED))
3421 {
3422 unsigned n_elts = GET_MODE_NUNITS (mode);
3423 rtvec v = rtvec_alloc (n_elts);
3424
3425 gcc_assert (n_elts >= 2);
3426 if (n_elts == 2)
3427 {
3428 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3429 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3430
3431 RTVEC_ELT (v, 0) = op0;
3432 RTVEC_ELT (v, 1) = op1;
3433 }
3434 else
3435 {
3436 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3437 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3438 unsigned i;
3439
3440 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3441 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3442 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3443
3444 for (i = 0; i < op0_n_elts; ++i)
3445 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3446 for (i = 0; i < op1_n_elts; ++i)
3447 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3448 }
3449
3450 return gen_rtx_CONST_VECTOR (mode, v);
3451 }
3452
3453 if (SCALAR_FLOAT_MODE_P (mode)
3454 && CONST_DOUBLE_AS_FLOAT_P (op0)
3455 && CONST_DOUBLE_AS_FLOAT_P (op1)
3456 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3457 {
3458 if (code == AND
3459 || code == IOR
3460 || code == XOR)
3461 {
3462 long tmp0[4];
3463 long tmp1[4];
3464 REAL_VALUE_TYPE r;
3465 int i;
3466
3467 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3468 GET_MODE (op0));
3469 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3470 GET_MODE (op1));
3471 for (i = 0; i < 4; i++)
3472 {
3473 switch (code)
3474 {
3475 case AND:
3476 tmp0[i] &= tmp1[i];
3477 break;
3478 case IOR:
3479 tmp0[i] |= tmp1[i];
3480 break;
3481 case XOR:
3482 tmp0[i] ^= tmp1[i];
3483 break;
3484 default:
3485 gcc_unreachable ();
3486 }
3487 }
3488 real_from_target (&r, tmp0, mode);
3489 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3490 }
3491 else
3492 {
3493 REAL_VALUE_TYPE f0, f1, value, result;
3494 bool inexact;
3495
3496 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3497 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3498 real_convert (&f0, mode, &f0);
3499 real_convert (&f1, mode, &f1);
3500
3501 if (HONOR_SNANS (mode)
3502 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3503 return 0;
3504
3505 if (code == DIV
3506 && REAL_VALUES_EQUAL (f1, dconst0)
3507 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3508 return 0;
3509
3510 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3511 && flag_trapping_math
3512 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3513 {
3514 int s0 = REAL_VALUE_NEGATIVE (f0);
3515 int s1 = REAL_VALUE_NEGATIVE (f1);
3516
3517 switch (code)
3518 {
3519 case PLUS:
3520 /* Inf + -Inf = NaN plus exception. */
3521 if (s0 != s1)
3522 return 0;
3523 break;
3524 case MINUS:
3525 /* Inf - Inf = NaN plus exception. */
3526 if (s0 == s1)
3527 return 0;
3528 break;
3529 case DIV:
3530 /* Inf / Inf = NaN plus exception. */
3531 return 0;
3532 default:
3533 break;
3534 }
3535 }
3536
3537 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3538 && flag_trapping_math
3539 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3540 || (REAL_VALUE_ISINF (f1)
3541 && REAL_VALUES_EQUAL (f0, dconst0))))
3542 /* Inf * 0 = NaN plus exception. */
3543 return 0;
3544
3545 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3546 &f0, &f1);
3547 real_convert (&result, mode, &value);
3548
3549 /* Don't constant fold this floating point operation if
3550 the result has overflowed and flag_trapping_math. */
3551
3552 if (flag_trapping_math
3553 && MODE_HAS_INFINITIES (mode)
3554 && REAL_VALUE_ISINF (result)
3555 && !REAL_VALUE_ISINF (f0)
3556 && !REAL_VALUE_ISINF (f1))
3557 /* Overflow plus exception. */
3558 return 0;
3559
3560 /* Don't constant fold this floating point operation if the
3561 result may depend upon the run-time rounding mode and
3562 flag_rounding_math is set, or if GCC's software emulation
3563 is unable to accurately represent the result. */
3564
3565 if ((flag_rounding_math
3566 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3567 && (inexact || !real_identical (&result, &value)))
3568 return NULL_RTX;
3569
3570 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3571 }
3572 }
3573
3574 /* We can fold some multi-word operations. */
3575 if (GET_MODE_CLASS (mode) == MODE_INT
3576 && width == HOST_BITS_PER_DOUBLE_INT
3577 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3578 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3579 {
3580 double_int o0, o1, res, tmp;
3581
3582 o0 = rtx_to_double_int (op0);
3583 o1 = rtx_to_double_int (op1);
3584
3585 switch (code)
3586 {
3587 case MINUS:
3588 /* A - B == A + (-B). */
3589 o1 = -o1;
3590
3591 /* Fall through.... */
3592
3593 case PLUS:
3594 res = o0 + o1;
3595 break;
3596
3597 case MULT:
3598 res = o0 * o1;
3599 break;
3600
3601 case DIV:
3602 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3603 o0.low, o0.high, o1.low, o1.high,
3604 &res.low, &res.high,
3605 &tmp.low, &tmp.high))
3606 return 0;
3607 break;
3608
3609 case MOD:
3610 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3611 o0.low, o0.high, o1.low, o1.high,
3612 &tmp.low, &tmp.high,
3613 &res.low, &res.high))
3614 return 0;
3615 break;
3616
3617 case UDIV:
3618 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3619 o0.low, o0.high, o1.low, o1.high,
3620 &res.low, &res.high,
3621 &tmp.low, &tmp.high))
3622 return 0;
3623 break;
3624
3625 case UMOD:
3626 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3627 o0.low, o0.high, o1.low, o1.high,
3628 &tmp.low, &tmp.high,
3629 &res.low, &res.high))
3630 return 0;
3631 break;
3632
3633 case AND:
3634 res = o0 & o1;
3635 break;
3636
3637 case IOR:
3638 res = o0 | o1;
3639 break;
3640
3641 case XOR:
3642 res = o0 ^ o1;
3643 break;
3644
3645 case SMIN:
3646 res = o0.smin (o1);
3647 break;
3648
3649 case SMAX:
3650 res = o0.smax (o1);
3651 break;
3652
3653 case UMIN:
3654 res = o0.umin (o1);
3655 break;
3656
3657 case UMAX:
3658 res = o0.umax (o1);
3659 break;
3660
3661 case LSHIFTRT: case ASHIFTRT:
3662 case ASHIFT:
3663 case ROTATE: case ROTATERT:
3664 {
3665 unsigned HOST_WIDE_INT cnt;
3666
3667 if (SHIFT_COUNT_TRUNCATED)
3668 {
3669 o1.high = 0;
3670 o1.low &= GET_MODE_PRECISION (mode) - 1;
3671 }
3672
3673 if (!o1.fits_uhwi ()
3674 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3675 return 0;
3676
3677 cnt = o1.to_uhwi ();
3678 unsigned short prec = GET_MODE_PRECISION (mode);
3679
3680 if (code == LSHIFTRT || code == ASHIFTRT)
3681 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3682 else if (code == ASHIFT)
3683 res = o0.alshift (cnt, prec);
3684 else if (code == ROTATE)
3685 res = o0.lrotate (cnt, prec);
3686 else /* code == ROTATERT */
3687 res = o0.rrotate (cnt, prec);
3688 }
3689 break;
3690
3691 default:
3692 return 0;
3693 }
3694
3695 return immed_double_int_const (res, mode);
3696 }
3697
3698 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3699 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3700 {
3701 /* Get the integer argument values in two forms:
3702 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3703
3704 arg0 = INTVAL (op0);
3705 arg1 = INTVAL (op1);
3706
3707 if (width < HOST_BITS_PER_WIDE_INT)
3708 {
3709 arg0 &= GET_MODE_MASK (mode);
3710 arg1 &= GET_MODE_MASK (mode);
3711
3712 arg0s = arg0;
3713 if (val_signbit_known_set_p (mode, arg0s))
3714 arg0s |= ~GET_MODE_MASK (mode);
3715
3716 arg1s = arg1;
3717 if (val_signbit_known_set_p (mode, arg1s))
3718 arg1s |= ~GET_MODE_MASK (mode);
3719 }
3720 else
3721 {
3722 arg0s = arg0;
3723 arg1s = arg1;
3724 }
3725
3726 /* Compute the value of the arithmetic. */
3727
3728 switch (code)
3729 {
3730 case PLUS:
3731 val = arg0s + arg1s;
3732 break;
3733
3734 case MINUS:
3735 val = arg0s - arg1s;
3736 break;
3737
3738 case MULT:
3739 val = arg0s * arg1s;
3740 break;
3741
3742 case DIV:
3743 if (arg1s == 0
3744 || ((unsigned HOST_WIDE_INT) arg0s
3745 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3746 && arg1s == -1))
3747 return 0;
3748 val = arg0s / arg1s;
3749 break;
3750
3751 case MOD:
3752 if (arg1s == 0
3753 || ((unsigned HOST_WIDE_INT) arg0s
3754 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3755 && arg1s == -1))
3756 return 0;
3757 val = arg0s % arg1s;
3758 break;
3759
3760 case UDIV:
3761 if (arg1 == 0
3762 || ((unsigned HOST_WIDE_INT) arg0s
3763 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3764 && arg1s == -1))
3765 return 0;
3766 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3767 break;
3768
3769 case UMOD:
3770 if (arg1 == 0
3771 || ((unsigned HOST_WIDE_INT) arg0s
3772 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3773 && arg1s == -1))
3774 return 0;
3775 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3776 break;
3777
3778 case AND:
3779 val = arg0 & arg1;
3780 break;
3781
3782 case IOR:
3783 val = arg0 | arg1;
3784 break;
3785
3786 case XOR:
3787 val = arg0 ^ arg1;
3788 break;
3789
3790 case LSHIFTRT:
3791 case ASHIFT:
3792 case ASHIFTRT:
3793 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3794 the value is in range. We can't return any old value for
3795 out-of-range arguments because either the middle-end (via
3796 shift_truncation_mask) or the back-end might be relying on
3797 target-specific knowledge. Nor can we rely on
3798 shift_truncation_mask, since the shift might not be part of an
3799 ashlM3, lshrM3 or ashrM3 instruction. */
3800 if (SHIFT_COUNT_TRUNCATED)
3801 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3802 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3803 return 0;
3804
3805 val = (code == ASHIFT
3806 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3807 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3808
3809 /* Sign-extend the result for arithmetic right shifts. */
3810 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3811 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3812 break;
3813
3814 case ROTATERT:
3815 if (arg1 < 0)
3816 return 0;
3817
3818 arg1 %= width;
3819 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3820 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3821 break;
3822
3823 case ROTATE:
3824 if (arg1 < 0)
3825 return 0;
3826
3827 arg1 %= width;
3828 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3829 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3830 break;
3831
3832 case COMPARE:
3833 /* Do nothing here. */
3834 return 0;
3835
3836 case SMIN:
3837 val = arg0s <= arg1s ? arg0s : arg1s;
3838 break;
3839
3840 case UMIN:
3841 val = ((unsigned HOST_WIDE_INT) arg0
3842 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3843 break;
3844
3845 case SMAX:
3846 val = arg0s > arg1s ? arg0s : arg1s;
3847 break;
3848
3849 case UMAX:
3850 val = ((unsigned HOST_WIDE_INT) arg0
3851 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3852 break;
3853
3854 case SS_PLUS:
3855 case US_PLUS:
3856 case SS_MINUS:
3857 case US_MINUS:
3858 case SS_MULT:
3859 case US_MULT:
3860 case SS_DIV:
3861 case US_DIV:
3862 case SS_ASHIFT:
3863 case US_ASHIFT:
3864 /* ??? There are simplifications that can be done. */
3865 return 0;
3866
3867 default:
3868 gcc_unreachable ();
3869 }
3870
3871 return gen_int_mode (val, mode);
3872 }
3873
3874 return NULL_RTX;
3875 }
3876
3877
3878 \f
3879 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3880 PLUS or MINUS.
3881
3882    Rather than test for specific cases, we do this by a brute-force method
3883 and do all possible simplifications until no more changes occur. Then
3884 we rebuild the operation. */
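/* For example, (minus (plus A B) (neg C)) is expanded into the entry
   array { +A, +B, +C }; the entries are then sorted, pairwise
   simplified, and rebuilt into a single PLUS/MINUS chain.  */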
3885
3886 struct simplify_plus_minus_op_data
3887 {
3888 rtx op;
3889 short neg;
3890 };
3891
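/* Ordering predicate for the insertion sort in simplify_plus_minus:
   returns true when Y should be placed before X, i.e. operands with
   higher commutative-operand precedence come first and, among REGs of
   equal precedence, those with lower REGNO come first (grouping equal
   registers together).  */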
3892 static bool
3893 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3894 {
3895 int result;
3896
3897 result = (commutative_operand_precedence (y)
3898 - commutative_operand_precedence (x));
3899 if (result)
3900 return result > 0;
3901
3902 /* Group together equal REGs to do more simplification. */
3903 if (REG_P (x) && REG_P (y))
3904 return REGNO (x) > REGNO (y);
3905 else
3906 return false;
3907 }
3908
3909 static rtx
3910 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3911 rtx op1)
3912 {
3913 struct simplify_plus_minus_op_data ops[8];
3914 rtx result, tem;
3915 int n_ops = 2, input_ops = 2;
3916 int changed, n_constants = 0, canonicalized = 0;
3917 int i, j;
3918
3919 memset (ops, 0, sizeof ops);
3920
3921 /* Set up the two operands and then expand them until nothing has been
3922 changed. If we run out of room in our array, give up; this should
3923 almost never happen. */
3924
3925 ops[0].op = op0;
3926 ops[0].neg = 0;
3927 ops[1].op = op1;
3928 ops[1].neg = (code == MINUS);
3929
3930 do
3931 {
3932 changed = 0;
3933
3934 for (i = 0; i < n_ops; i++)
3935 {
3936 rtx this_op = ops[i].op;
3937 int this_neg = ops[i].neg;
3938 enum rtx_code this_code = GET_CODE (this_op);
3939
3940 switch (this_code)
3941 {
3942 case PLUS:
3943 case MINUS:
3944 if (n_ops == 7)
3945 return NULL_RTX;
3946
3947 ops[n_ops].op = XEXP (this_op, 1);
3948 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3949 n_ops++;
3950
3951 ops[i].op = XEXP (this_op, 0);
3952 input_ops++;
3953 changed = 1;
3954 canonicalized |= this_neg;
3955 break;
3956
3957 case NEG:
3958 ops[i].op = XEXP (this_op, 0);
3959 ops[i].neg = ! this_neg;
3960 changed = 1;
3961 canonicalized = 1;
3962 break;
3963
3964 case CONST:
3965 if (n_ops < 7
3966 && GET_CODE (XEXP (this_op, 0)) == PLUS
3967 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3968 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3969 {
3970 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3971 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3972 ops[n_ops].neg = this_neg;
3973 n_ops++;
3974 changed = 1;
3975 canonicalized = 1;
3976 }
3977 break;
3978
3979 case NOT:
3980 /* ~a -> (-a - 1) */
3981 if (n_ops != 7)
3982 {
3983 ops[n_ops].op = CONSTM1_RTX (mode);
3984 ops[n_ops++].neg = this_neg;
3985 ops[i].op = XEXP (this_op, 0);
3986 ops[i].neg = !this_neg;
3987 changed = 1;
3988 canonicalized = 1;
3989 }
3990 break;
3991
3992 case CONST_INT:
3993 n_constants++;
3994 if (this_neg)
3995 {
3996 ops[i].op = neg_const_int (mode, this_op);
3997 ops[i].neg = 0;
3998 changed = 1;
3999 canonicalized = 1;
4000 }
4001 break;
4002
4003 default:
4004 break;
4005 }
4006 }
4007 }
4008 while (changed);
4009
4010 if (n_constants > 1)
4011 canonicalized = 1;
4012
4013 gcc_assert (n_ops >= 2);
4014
4015 /* If we only have two operands, we can avoid the loops. */
4016 if (n_ops == 2)
4017 {
4018 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4019 rtx lhs, rhs;
4020
4021 /* Get the two operands. Be careful with the order, especially for
4022 the cases where code == MINUS. */
4023 if (ops[0].neg && ops[1].neg)
4024 {
4025 lhs = gen_rtx_NEG (mode, ops[0].op);
4026 rhs = ops[1].op;
4027 }
4028 else if (ops[0].neg)
4029 {
4030 lhs = ops[1].op;
4031 rhs = ops[0].op;
4032 }
4033 else
4034 {
4035 lhs = ops[0].op;
4036 rhs = ops[1].op;
4037 }
4038
4039 return simplify_const_binary_operation (code, mode, lhs, rhs);
4040 }
4041
4042 /* Now simplify each pair of operands until nothing changes. */
4043 do
4044 {
4045 /* Insertion sort is good enough for an eight-element array. */
4046 for (i = 1; i < n_ops; i++)
4047 {
4048 struct simplify_plus_minus_op_data save;
4049 j = i - 1;
4050 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4051 continue;
4052
4053 canonicalized = 1;
4054 save = ops[i];
4055 do
4056 ops[j + 1] = ops[j];
4057 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4058 ops[j + 1] = save;
4059 }
4060
4061 changed = 0;
4062 for (i = n_ops - 1; i > 0; i--)
4063 for (j = i - 1; j >= 0; j--)
4064 {
4065 rtx lhs = ops[j].op, rhs = ops[i].op;
4066 int lneg = ops[j].neg, rneg = ops[i].neg;
4067
4068 if (lhs != 0 && rhs != 0)
4069 {
4070 enum rtx_code ncode = PLUS;
4071
4072 if (lneg != rneg)
4073 {
4074 ncode = MINUS;
4075 if (lneg)
4076 tem = lhs, lhs = rhs, rhs = tem;
4077 }
4078 else if (swap_commutative_operands_p (lhs, rhs))
4079 tem = lhs, lhs = rhs, rhs = tem;
4080
4081 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4082 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4083 {
4084 rtx tem_lhs, tem_rhs;
4085
4086 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4087 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4088 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4089
4090 if (tem && !CONSTANT_P (tem))
4091 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4092 }
4093 else
4094 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4095
4096 /* Reject "simplifications" that just wrap the two
4097 arguments in a CONST. Failure to do so can result
4098 in infinite recursion with simplify_binary_operation
4099 when it calls us to simplify CONST operations. */
4100 if (tem
4101 && ! (GET_CODE (tem) == CONST
4102 && GET_CODE (XEXP (tem, 0)) == ncode
4103 && XEXP (XEXP (tem, 0), 0) == lhs
4104 && XEXP (XEXP (tem, 0), 1) == rhs))
4105 {
4106 lneg &= rneg;
4107 if (GET_CODE (tem) == NEG)
4108 tem = XEXP (tem, 0), lneg = !lneg;
4109 if (CONST_INT_P (tem) && lneg)
4110 tem = neg_const_int (mode, tem), lneg = 0;
4111
4112 ops[i].op = tem;
4113 ops[i].neg = lneg;
4114 ops[j].op = NULL_RTX;
4115 changed = 1;
4116 canonicalized = 1;
4117 }
4118 }
4119 }
4120
4121 /* If nothing changed, fail. */
4122 if (!canonicalized)
4123 return NULL_RTX;
4124
4125 /* Pack all the operands to the lower-numbered entries. */
4126 for (i = 0, j = 0; j < n_ops; j++)
4127 if (ops[j].op)
4128 {
4129 ops[i] = ops[j];
4130 i++;
4131 }
4132 n_ops = i;
4133 }
4134 while (changed);
4135
4136 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4137 if (n_ops == 2
4138 && CONST_INT_P (ops[1].op)
4139 && CONSTANT_P (ops[0].op)
4140 && ops[0].neg)
4141 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4142
4143 /* We suppressed creation of trivial CONST expressions in the
4144 combination loop to avoid recursion. Create one manually now.
4145 The combination loop should have ensured that there is exactly
4146 one CONST_INT, and the sort will have ensured that it is last
4147 in the array and that any other constant will be next-to-last. */
4148
4149 if (n_ops > 1
4150 && CONST_INT_P (ops[n_ops - 1].op)
4151 && CONSTANT_P (ops[n_ops - 2].op))
4152 {
4153 rtx value = ops[n_ops - 1].op;
4154 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4155 value = neg_const_int (mode, value);
4156 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4157 INTVAL (value));
4158 n_ops--;
4159 }
4160
4161 /* Put a non-negated operand first, if possible. */
4162
4163 for (i = 0; i < n_ops && ops[i].neg; i++)
4164 continue;
4165 if (i == n_ops)
4166 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4167 else if (i != 0)
4168 {
4169 tem = ops[0].op;
4170 ops[0] = ops[i];
4171 ops[i].op = tem;
4172 ops[i].neg = 1;
4173 }
4174
4175 /* Now make the result by performing the requested operations. */
4176 result = ops[0].op;
4177 for (i = 1; i < n_ops; i++)
4178 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4179 mode, result, ops[i].op);
4180
4181 return result;
4182 }
4183
4184 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4185 static bool
4186 plus_minus_operand_p (const_rtx x)
4187 {
4188 return GET_CODE (x) == PLUS
4189 || GET_CODE (x) == MINUS
4190 || (GET_CODE (x) == CONST
4191 && GET_CODE (XEXP (x, 0)) == PLUS
4192 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4193 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4194 }
4195
4196 /* Like simplify_binary_operation except used for relational operators.
4197    MODE is the mode of the result.  If MODE is VOIDmode, the operands
4198    must not both be VOIDmode.
4199 
4200    CMP_MODE specifies the mode in which the comparison is done, so it is
4201    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4202    the operands or, if both are VOIDmode, the operands are compared in
4203    "infinite precision". */
4204 rtx
4205 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4206 enum machine_mode cmp_mode, rtx op0, rtx op1)
4207 {
4208 rtx tem, trueop0, trueop1;
4209
4210 if (cmp_mode == VOIDmode)
4211 cmp_mode = GET_MODE (op0);
4212 if (cmp_mode == VOIDmode)
4213 cmp_mode = GET_MODE (op1);
4214
4215 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4216 if (tem)
4217 {
4218 if (SCALAR_FLOAT_MODE_P (mode))
4219 {
4220 if (tem == const0_rtx)
4221 return CONST0_RTX (mode);
4222 #ifdef FLOAT_STORE_FLAG_VALUE
4223 {
4224 REAL_VALUE_TYPE val;
4225 val = FLOAT_STORE_FLAG_VALUE (mode);
4226 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4227 }
4228 #else
4229 return NULL_RTX;
4230 #endif
4231 }
4232 if (VECTOR_MODE_P (mode))
4233 {
4234 if (tem == const0_rtx)
4235 return CONST0_RTX (mode);
4236 #ifdef VECTOR_STORE_FLAG_VALUE
4237 {
4238 int i, units;
4239 rtvec v;
4240
4241 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4242 if (val == NULL_RTX)
4243 return NULL_RTX;
4244 if (val == const1_rtx)
4245 return CONST1_RTX (mode);
4246
4247 units = GET_MODE_NUNITS (mode);
4248 v = rtvec_alloc (units);
4249 for (i = 0; i < units; i++)
4250 RTVEC_ELT (v, i) = val;
4251 return gen_rtx_raw_CONST_VECTOR (mode, v);
4252 }
4253 #else
4254 return NULL_RTX;
4255 #endif
4256 }
4257
4258 return tem;
4259 }
4260
4261 /* For the following tests, ensure const0_rtx is op1. */
4262 if (swap_commutative_operands_p (op0, op1)
4263 || (op0 == const0_rtx && op1 != const0_rtx))
4264 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4265
4266 /* If op0 is a compare, extract the comparison arguments from it. */
4267 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4268 return simplify_gen_relational (code, mode, VOIDmode,
4269 XEXP (op0, 0), XEXP (op0, 1));
4270
4271 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4272 || CC0_P (op0))
4273 return NULL_RTX;
4274
4275 trueop0 = avoid_constant_pool_reference (op0);
4276 trueop1 = avoid_constant_pool_reference (op1);
4277 return simplify_relational_operation_1 (code, mode, cmp_mode,
4278 trueop0, trueop1);
4279 }
4280
4281 /* This part of simplify_relational_operation is only used when CMP_MODE
4282 is not in class MODE_CC (i.e. it is a real comparison).
4283
4284    MODE is the mode of the result, while CMP_MODE specifies the mode
4285    in which the comparison is done, so it is the mode of the operands. */
4286
4287 static rtx
4288 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4289 enum machine_mode cmp_mode, rtx op0, rtx op1)
4290 {
4291 enum rtx_code op0code = GET_CODE (op0);
4292
4293 if (op1 == const0_rtx && COMPARISON_P (op0))
4294 {
4295 /* If op0 is a comparison, extract the comparison arguments
4296 from it. */
4297 if (code == NE)
4298 {
4299 if (GET_MODE (op0) == mode)
4300 return simplify_rtx (op0);
4301 else
4302 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4303 XEXP (op0, 0), XEXP (op0, 1));
4304 }
4305 else if (code == EQ)
4306 {
4307 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4308 if (new_code != UNKNOWN)
4309 return simplify_gen_relational (new_code, mode, VOIDmode,
4310 XEXP (op0, 0), XEXP (op0, 1));
4311 }
4312 }
4313
4314 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4315 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
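  /* Intuitively: for a nonzero constant C, the unsigned sum a + C wraps
     around (and is then smaller than either addend) exactly when
     a >= -C, which is what the rewritten comparison tests.  */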
4316 if ((code == LTU || code == GEU)
4317 && GET_CODE (op0) == PLUS
4318 && CONST_INT_P (XEXP (op0, 1))
4319 && (rtx_equal_p (op1, XEXP (op0, 0))
4320 || rtx_equal_p (op1, XEXP (op0, 1))))
4321 {
4322 rtx new_cmp
4323 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4324 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4325 cmp_mode, XEXP (op0, 0), new_cmp);
4326 }
4327
4328 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4329 if ((code == LTU || code == GEU)
4330 && GET_CODE (op0) == PLUS
4331 && rtx_equal_p (op1, XEXP (op0, 1))
4332 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4333 && !rtx_equal_p (op1, XEXP (op0, 0)))
4334 return simplify_gen_relational (code, mode, cmp_mode, op0,
4335 copy_rtx (XEXP (op0, 0)));
4336
4337 if (op1 == const0_rtx)
4338 {
4339 /* Canonicalize (GTU x 0) as (NE x 0). */
4340 if (code == GTU)
4341 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4342 /* Canonicalize (LEU x 0) as (EQ x 0). */
4343 if (code == LEU)
4344 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4345 }
4346 else if (op1 == const1_rtx)
4347 {
4348 switch (code)
4349 {
4350 case GE:
4351 /* Canonicalize (GE x 1) as (GT x 0). */
4352 return simplify_gen_relational (GT, mode, cmp_mode,
4353 op0, const0_rtx);
4354 case GEU:
4355 /* Canonicalize (GEU x 1) as (NE x 0). */
4356 return simplify_gen_relational (NE, mode, cmp_mode,
4357 op0, const0_rtx);
4358 case LT:
4359 /* Canonicalize (LT x 1) as (LE x 0). */
4360 return simplify_gen_relational (LE, mode, cmp_mode,
4361 op0, const0_rtx);
4362 case LTU:
4363 /* Canonicalize (LTU x 1) as (EQ x 0). */
4364 return simplify_gen_relational (EQ, mode, cmp_mode,
4365 op0, const0_rtx);
4366 default:
4367 break;
4368 }
4369 }
4370 else if (op1 == constm1_rtx)
4371 {
4372 /* Canonicalize (LE x -1) as (LT x 0). */
4373 if (code == LE)
4374 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4375 /* Canonicalize (GT x -1) as (GE x 0). */
4376 if (code == GT)
4377 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4378 }
4379
4380 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4381 if ((code == EQ || code == NE)
4382 && (op0code == PLUS || op0code == MINUS)
4383 && CONSTANT_P (op1)
4384 && CONSTANT_P (XEXP (op0, 1))
4385 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4386 {
4387 rtx x = XEXP (op0, 0);
4388 rtx c = XEXP (op0, 1);
4389 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4390 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4391
4392       /* Detect an infinitely recursive condition, where we would oscillate
4393 	  in this simplification between:
4394 	  A + B == C  <--->  C - B == A,
4395 	  where A, B, and C are all constant, non-simplifiable expressions,
4396 	  usually SYMBOL_REFs.  */
4397 if (GET_CODE (tem) == invcode
4398 && CONSTANT_P (x)
4399 && rtx_equal_p (c, XEXP (tem, 1)))
4400 return NULL_RTX;
4401
4402 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4403 }
4404
4405   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4406      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
4407 if (code == NE
4408 && op1 == const0_rtx
4409 && GET_MODE_CLASS (mode) == MODE_INT
4410 && cmp_mode != VOIDmode
4411 /* ??? Work-around BImode bugs in the ia64 backend. */
4412 && mode != BImode
4413 && cmp_mode != BImode
4414 && nonzero_bits (op0, cmp_mode) == 1
4415 && STORE_FLAG_VALUE == 1)
4416 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4417 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4418 : lowpart_subreg (mode, op0, cmp_mode);
4419
4420 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4421 if ((code == EQ || code == NE)
4422 && op1 == const0_rtx
4423 && op0code == XOR)
4424 return simplify_gen_relational (code, mode, cmp_mode,
4425 XEXP (op0, 0), XEXP (op0, 1));
4426
4427 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4428 if ((code == EQ || code == NE)
4429 && op0code == XOR
4430 && rtx_equal_p (XEXP (op0, 0), op1)
4431 && !side_effects_p (XEXP (op0, 0)))
4432 return simplify_gen_relational (code, mode, cmp_mode,
4433 XEXP (op0, 1), const0_rtx);
4434
4435 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4436 if ((code == EQ || code == NE)
4437 && op0code == XOR
4438 && rtx_equal_p (XEXP (op0, 1), op1)
4439 && !side_effects_p (XEXP (op0, 1)))
4440 return simplify_gen_relational (code, mode, cmp_mode,
4441 XEXP (op0, 0), const0_rtx);
4442
4443 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4444 if ((code == EQ || code == NE)
4445 && op0code == XOR
4446 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
4447 && (CONST_INT_P (XEXP (op0, 1))
4448 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
4449 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4450 simplify_gen_binary (XOR, cmp_mode,
4451 XEXP (op0, 1), op1));
4452
4453 if (op0code == POPCOUNT && op1 == const0_rtx)
4454 switch (code)
4455 {
4456 case EQ:
4457 case LE:
4458 case LEU:
4459 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4460 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4461 XEXP (op0, 0), const0_rtx);
4462
4463 case NE:
4464 case GT:
4465 case GTU:
4466 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4467 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4468 XEXP (op0, 0), const0_rtx);
4469
4470 default:
4471 break;
4472 }
4473
4474 return NULL_RTX;
4475 }
4476
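/* Flags describing what is known about the outcome of a comparison of
   two values: equality, the signed ordering, and the unsigned ordering
   are tracked separately.  */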
4477 enum
4478 {
4479 CMP_EQ = 1,
4480 CMP_LT = 2,
4481 CMP_GT = 4,
4482 CMP_LTU = 8,
4483 CMP_GTU = 16
4484 };
4485
4486
4487 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4488    KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4489    For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4490    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4491    For floating-point comparisons, assume that the operands were ordered. */
4492
4493 static rtx
4494 comparison_result (enum rtx_code code, int known_results)
4495 {
4496 switch (code)
4497 {
4498 case EQ:
4499 case UNEQ:
4500 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4501 case NE:
4502 case LTGT:
4503 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4504
4505 case LT:
4506 case UNLT:
4507 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4508 case GE:
4509 case UNGE:
4510 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4511
4512 case GT:
4513 case UNGT:
4514 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4515 case LE:
4516 case UNLE:
4517 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4518
4519 case LTU:
4520 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4521 case GEU:
4522 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4523
4524 case GTU:
4525 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4526 case LEU:
4527 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4528
4529 case ORDERED:
4530 return const_true_rtx;
4531 case UNORDERED:
4532 return const0_rtx;
4533 default:
4534 gcc_unreachable ();
4535 }
4536 }
4537
4538 /* Check if the given comparison (done in the given MODE) is actually a
4539 tautology or a contradiction.
4540 If no simplification is possible, this function returns zero.
4541 Otherwise, it returns either const_true_rtx or const0_rtx. */
4542
4543 rtx
4544 simplify_const_relational_operation (enum rtx_code code,
4545 enum machine_mode mode,
4546 rtx op0, rtx op1)
4547 {
4548 rtx tem;
4549 rtx trueop0;
4550 rtx trueop1;
4551
4552 gcc_assert (mode != VOIDmode
4553 || (GET_MODE (op0) == VOIDmode
4554 && GET_MODE (op1) == VOIDmode));
4555
4556 /* If op0 is a compare, extract the comparison arguments from it. */
4557 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4558 {
4559 op1 = XEXP (op0, 1);
4560 op0 = XEXP (op0, 0);
4561
4562 if (GET_MODE (op0) != VOIDmode)
4563 mode = GET_MODE (op0);
4564 else if (GET_MODE (op1) != VOIDmode)
4565 mode = GET_MODE (op1);
4566 else
4567 return 0;
4568 }
4569
4570 /* We can't simplify MODE_CC values since we don't know what the
4571 actual comparison is. */
4572 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4573 return 0;
4574
4575 /* Make sure the constant is second. */
4576 if (swap_commutative_operands_p (op0, op1))
4577 {
4578 tem = op0, op0 = op1, op1 = tem;
4579 code = swap_condition (code);
4580 }
4581
4582 trueop0 = avoid_constant_pool_reference (op0);
4583 trueop1 = avoid_constant_pool_reference (op1);
4584
4585 /* For integer comparisons of A and B maybe we can simplify A - B and can
4586 then simplify a comparison of that with zero. If A and B are both either
4587 a register or a CONST_INT, this can't help; testing for these cases will
4588 prevent infinite recursion here and speed things up.
4589
4590      We can only do this for EQ and NE comparisons as otherwise we may
4591      lose or introduce overflow, which we cannot disregard as undefined
4592      because we do not know the signedness of the operation on either the
4593      left or the right hand side of the comparison.  */
4594
4595 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4596 && (code == EQ || code == NE)
4597 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4598 && (REG_P (op1) || CONST_INT_P (trueop1)))
4599 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4600 /* We cannot do this if tem is a nonzero address. */
4601 && ! nonzero_address_p (tem))
4602 return simplify_const_relational_operation (signed_condition (code),
4603 mode, tem, const0_rtx);
4604
4605 if (! HONOR_NANS (mode) && code == ORDERED)
4606 return const_true_rtx;
4607
4608 if (! HONOR_NANS (mode) && code == UNORDERED)
4609 return const0_rtx;
4610
4611 /* For modes without NaNs, if the two operands are equal, we know the
4612 result except if they have side-effects. Even with NaNs we know
4613 the result of unordered comparisons and, if signaling NaNs are
4614 irrelevant, also the result of LT/GT/LTGT. */
4615 if ((! HONOR_NANS (GET_MODE (trueop0))
4616 || code == UNEQ || code == UNLE || code == UNGE
4617 || ((code == LT || code == GT || code == LTGT)
4618 && ! HONOR_SNANS (GET_MODE (trueop0))))
4619 && rtx_equal_p (trueop0, trueop1)
4620 && ! side_effects_p (trueop0))
4621 return comparison_result (code, CMP_EQ);
4622
4623 /* If the operands are floating-point constants, see if we can fold
4624 the result. */
4625 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4626 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4627 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4628 {
4629 REAL_VALUE_TYPE d0, d1;
4630
4631 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4632 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4633
4634 /* Comparisons are unordered iff at least one of the values is NaN. */
4635 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4636 switch (code)
4637 {
4638 case UNEQ:
4639 case UNLT:
4640 case UNGT:
4641 case UNLE:
4642 case UNGE:
4643 case NE:
4644 case UNORDERED:
4645 return const_true_rtx;
4646 case EQ:
4647 case LT:
4648 case GT:
4649 case LE:
4650 case GE:
4651 case LTGT:
4652 case ORDERED:
4653 return const0_rtx;
4654 default:
4655 return 0;
4656 }
4657
4658 return comparison_result (code,
4659 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4660 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4661 }
4662
4663 /* Otherwise, see if the operands are both integers. */
4664 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4665 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4666 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4667 {
4668 int width = GET_MODE_PRECISION (mode);
4669 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4670 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4671
4672 /* Get the two words comprising each integer constant. */
4673 if (CONST_DOUBLE_AS_INT_P (trueop0))
4674 {
4675 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4676 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4677 }
4678 else
4679 {
4680 l0u = l0s = INTVAL (trueop0);
4681 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4682 }
4683
4684 if (CONST_DOUBLE_AS_INT_P (trueop1))
4685 {
4686 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4687 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4688 }
4689 else
4690 {
4691 l1u = l1s = INTVAL (trueop1);
4692 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4693 }
4694
4695 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4696 we have to sign or zero-extend the values. */
4697 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4698 {
4699 l0u &= GET_MODE_MASK (mode);
4700 l1u &= GET_MODE_MASK (mode);
4701
4702 if (val_signbit_known_set_p (mode, l0s))
4703 l0s |= ~GET_MODE_MASK (mode);
4704
4705 if (val_signbit_known_set_p (mode, l1s))
4706 l1s |= ~GET_MODE_MASK (mode);
4707 }
4708 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4709 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4710
4711 if (h0u == h1u && l0u == l1u)
4712 return comparison_result (code, CMP_EQ);
4713 else
4714 {
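	  /* Compare as two-word quantities: the high words decide the
	     ordering, and the unsigned low words break ties.  The
	     signed/unsigned distinction only matters for the high word.  */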
4715 int cr;
4716 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4717 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4718 return comparison_result (code, cr);
4719 }
4720 }
4721
4722 /* Optimize comparisons with upper and lower bounds. */
4723 if (HWI_COMPUTABLE_MODE_P (mode)
4724 && CONST_INT_P (trueop1))
4725 {
4726 int sign;
4727 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4728 HOST_WIDE_INT val = INTVAL (trueop1);
4729 HOST_WIDE_INT mmin, mmax;
4730
4731 if (code == GEU
4732 || code == LEU
4733 || code == GTU
4734 || code == LTU)
4735 sign = 0;
4736 else
4737 sign = 1;
4738
4739 /* Get a reduced range if the sign bit is zero. */
4740 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4741 {
4742 mmin = 0;
4743 mmax = nonzero;
4744 }
4745 else
4746 {
4747 rtx mmin_rtx, mmax_rtx;
4748 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4749
4750 mmin = INTVAL (mmin_rtx);
4751 mmax = INTVAL (mmax_rtx);
4752 if (sign)
4753 {
4754 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4755
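	      /* The top SIGN_COPIES bits of TRUEOP0 are all copies of its
		 sign bit, so its value fits in a proportionally narrower
		 signed range; shrink the bounds to match.  */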
4756 mmin >>= (sign_copies - 1);
4757 mmax >>= (sign_copies - 1);
4758 }
4759 }
4760
4761 switch (code)
4762 {
4763 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4764 case GEU:
4765 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4766 return const_true_rtx;
4767 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4768 return const0_rtx;
4769 break;
4770 case GE:
4771 if (val <= mmin)
4772 return const_true_rtx;
4773 if (val > mmax)
4774 return const0_rtx;
4775 break;
4776
4777 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4778 case LEU:
4779 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4780 return const_true_rtx;
4781 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4782 return const0_rtx;
4783 break;
4784 case LE:
4785 if (val >= mmax)
4786 return const_true_rtx;
4787 if (val < mmin)
4788 return const0_rtx;
4789 break;
4790
4791 case EQ:
4792 /* x == y is always false for y out of range. */
4793 if (val < mmin || val > mmax)
4794 return const0_rtx;
4795 break;
4796
4797 /* x > y is always false for y >= mmax, always true for y < mmin. */
4798 case GTU:
4799 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4800 return const0_rtx;
4801 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4802 return const_true_rtx;
4803 break;
4804 case GT:
4805 if (val >= mmax)
4806 return const0_rtx;
4807 if (val < mmin)
4808 return const_true_rtx;
4809 break;
4810
4811 /* x < y is always false for y <= mmin, always true for y > mmax. */
4812 case LTU:
4813 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4814 return const0_rtx;
4815 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4816 return const_true_rtx;
4817 break;
4818 case LT:
4819 if (val <= mmin)
4820 return const0_rtx;
4821 if (val > mmax)
4822 return const_true_rtx;
4823 break;
4824
4825 case NE:
4826 /* x != y is always true for y out of range. */
4827 if (val < mmin || val > mmax)
4828 return const_true_rtx;
4829 break;
4830
4831 default:
4832 break;
4833 }
4834 }
4835
4836 /* Optimize integer comparisons with zero. */
4837 if (trueop1 == const0_rtx)
4838 {
4839 /* Some addresses are known to be nonzero. We don't know
4840 their sign, but equality comparisons are known. */
4841 if (nonzero_address_p (trueop0))
4842 {
4843 if (code == EQ || code == LEU)
4844 return const0_rtx;
4845 if (code == NE || code == GTU)
4846 return const_true_rtx;
4847 }
4848
4849 /* See if the first operand is an IOR with a constant. If so, we
4850 may be able to determine the result of this comparison. */
4851 if (GET_CODE (op0) == IOR)
4852 {
4853 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4854 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4855 {
4856 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4857 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4858 && (UINTVAL (inner_const)
4859 & ((unsigned HOST_WIDE_INT) 1
4860 << sign_bitnum)));
4861
4862 switch (code)
4863 {
4864 case EQ:
4865 case LEU:
4866 return const0_rtx;
4867 case NE:
4868 case GTU:
4869 return const_true_rtx;
4870 case LT:
4871 case LE:
4872 if (has_sign)
4873 return const_true_rtx;
4874 break;
4875 case GT:
4876 case GE:
4877 if (has_sign)
4878 return const0_rtx;
4879 break;
4880 default:
4881 break;
4882 }
4883 }
4884 }
4885 }
4886
4887 /* Optimize comparison of ABS with zero. */
4888 if (trueop1 == CONST0_RTX (mode)
4889 && (GET_CODE (trueop0) == ABS
4890 || (GET_CODE (trueop0) == FLOAT_EXTEND
4891 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4892 {
4893 switch (code)
4894 {
4895 case LT:
4896 /* Optimize abs(x) < 0.0. */
4897 if (!HONOR_SNANS (mode)
4898 && (!INTEGRAL_MODE_P (mode)
4899 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4900 {
4901 if (INTEGRAL_MODE_P (mode)
4902 && (issue_strict_overflow_warning
4903 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4904 warning (OPT_Wstrict_overflow,
4905 ("assuming signed overflow does not occur when "
4906 "assuming abs (x) < 0 is false"));
4907 return const0_rtx;
4908 }
4909 break;
4910
4911 case GE:
4912 /* Optimize abs(x) >= 0.0. */
4913 if (!HONOR_NANS (mode)
4914 && (!INTEGRAL_MODE_P (mode)
4915 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4916 {
4917 if (INTEGRAL_MODE_P (mode)
4918 && (issue_strict_overflow_warning
4919 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4920 warning (OPT_Wstrict_overflow,
4921 ("assuming signed overflow does not occur when "
4922 "assuming abs (x) >= 0 is true"));
4923 return const_true_rtx;
4924 }
4925 break;
4926
4927 case UNGE:
4928 /* Optimize ! (abs(x) < 0.0). */
4929 return const_true_rtx;
4930
4931 default:
4932 break;
4933 }
4934 }
4935
4936 return 0;
4937 }
4938 \f
4939 /* Simplify CODE, an operation with result mode MODE and three operands,
4940 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4941    a constant.  Return 0 if no simplification is possible.  */
4942
4943 rtx
4944 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4945 enum machine_mode op0_mode, rtx op0, rtx op1,
4946 rtx op2)
4947 {
4948 unsigned int width = GET_MODE_PRECISION (mode);
4949 bool any_change = false;
4950 rtx tem;
4951
4952 /* VOIDmode means "infinite" precision. */
4953 if (width == 0)
4954 width = HOST_BITS_PER_WIDE_INT;
4955
4956 switch (code)
4957 {
4958 case FMA:
4959 /* Simplify negations around the multiplication. */
4960 /* -a * -b + c => a * b + c. */
4961 if (GET_CODE (op0) == NEG)
4962 {
4963 tem = simplify_unary_operation (NEG, mode, op1, mode);
4964 if (tem)
4965 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4966 }
4967 else if (GET_CODE (op1) == NEG)
4968 {
4969 tem = simplify_unary_operation (NEG, mode, op0, mode);
4970 if (tem)
4971 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4972 }
4973
4974 /* Canonicalize the two multiplication operands. */
4975 /* a * -b + c => -b * a + c. */
4976 if (swap_commutative_operands_p (op0, op1))
4977 tem = op0, op0 = op1, op1 = tem, any_change = true;
4978
4979 if (any_change)
4980 return gen_rtx_FMA (mode, op0, op1, op2);
4981 return NULL_RTX;
4982
4983 case SIGN_EXTRACT:
4984 case ZERO_EXTRACT:
4985 if (CONST_INT_P (op0)
4986 && CONST_INT_P (op1)
4987 && CONST_INT_P (op2)
4988 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4989 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4990 {
4991 	  /* Extracting a bit-field from a constant.  */
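	  /* For example, with !BITS_BIG_ENDIAN,
	     (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
	     folds to (const_int 0xa).  */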
4992 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4993 HOST_WIDE_INT op1val = INTVAL (op1);
4994 HOST_WIDE_INT op2val = INTVAL (op2);
4995 if (BITS_BIG_ENDIAN)
4996 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4997 else
4998 val >>= op2val;
4999
5000 if (HOST_BITS_PER_WIDE_INT != op1val)
5001 {
5002 /* First zero-extend. */
5003 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5004 /* If desired, propagate sign bit. */
5005 if (code == SIGN_EXTRACT
5006 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5007 != 0)
5008 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5009 }
5010
5011 return gen_int_mode (val, mode);
5012 }
5013 break;
5014
5015 case IF_THEN_ELSE:
5016 if (CONST_INT_P (op0))
5017 return op0 != const0_rtx ? op1 : op2;
5018
5019 /* Convert c ? a : a into "a". */
5020 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5021 return op1;
5022
5023 /* Convert a != b ? a : b into "a". */
5024 if (GET_CODE (op0) == NE
5025 && ! side_effects_p (op0)
5026 && ! HONOR_NANS (mode)
5027 && ! HONOR_SIGNED_ZEROS (mode)
5028 && ((rtx_equal_p (XEXP (op0, 0), op1)
5029 && rtx_equal_p (XEXP (op0, 1), op2))
5030 || (rtx_equal_p (XEXP (op0, 0), op2)
5031 && rtx_equal_p (XEXP (op0, 1), op1))))
5032 return op1;
5033
5034 /* Convert a == b ? a : b into "b". */
5035 if (GET_CODE (op0) == EQ
5036 && ! side_effects_p (op0)
5037 && ! HONOR_NANS (mode)
5038 && ! HONOR_SIGNED_ZEROS (mode)
5039 && ((rtx_equal_p (XEXP (op0, 0), op1)
5040 && rtx_equal_p (XEXP (op0, 1), op2))
5041 || (rtx_equal_p (XEXP (op0, 0), op2)
5042 && rtx_equal_p (XEXP (op0, 1), op1))))
5043 return op2;
5044
5045 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5046 {
5047 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5048 ? GET_MODE (XEXP (op0, 1))
5049 : GET_MODE (XEXP (op0, 0)));
5050 rtx temp;
5051
5052 	  /* Look for constants in OP1 and OP2 that let the IF_THEN_ELSE
	     collapse to a plain comparison.  */
5053 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5054 {
5055 HOST_WIDE_INT t = INTVAL (op1);
5056 HOST_WIDE_INT f = INTVAL (op2);
5057
5058 if (t == STORE_FLAG_VALUE && f == 0)
5059 code = GET_CODE (op0);
5060 else if (t == 0 && f == STORE_FLAG_VALUE)
5061 {
5062 enum rtx_code tmp;
5063 tmp = reversed_comparison_code (op0, NULL_RTX);
5064 if (tmp == UNKNOWN)
5065 break;
5066 code = tmp;
5067 }
5068 else
5069 break;
5070
5071 return simplify_gen_relational (code, mode, cmp_mode,
5072 XEXP (op0, 0), XEXP (op0, 1));
5073 }
5074
5075 if (cmp_mode == VOIDmode)
5076 cmp_mode = op0_mode;
5077 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5078 cmp_mode, XEXP (op0, 0),
5079 XEXP (op0, 1));
5080
5081 /* See if any simplifications were possible. */
5082 if (temp)
5083 {
5084 if (CONST_INT_P (temp))
5085 return temp == const0_rtx ? op2 : op1;
5086 else if (temp)
5087 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5088 }
5089 }
5090 break;
5091
5092 case VEC_MERGE:
5093 gcc_assert (GET_MODE (op0) == mode);
5094 gcc_assert (GET_MODE (op1) == mode);
5095 gcc_assert (VECTOR_MODE_P (mode));
5096 op2 = avoid_constant_pool_reference (op2);
5097 if (CONST_INT_P (op2))
5098 {
5099 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5100 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5101 int mask = (1 << n_elts) - 1;
5102
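	  /* Bit I of OP2 selects element I of the result from OP0; a clear
	     bit selects it from OP1.  If none (or all) of the relevant bits
	     are set, the merge degenerates to OP1 (or OP0).  */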
5103 if (!(INTVAL (op2) & mask))
5104 return op1;
5105 if ((INTVAL (op2) & mask) == mask)
5106 return op0;
5107
5108 op0 = avoid_constant_pool_reference (op0);
5109 op1 = avoid_constant_pool_reference (op1);
5110 if (GET_CODE (op0) == CONST_VECTOR
5111 && GET_CODE (op1) == CONST_VECTOR)
5112 {
5113 rtvec v = rtvec_alloc (n_elts);
5114 unsigned int i;
5115
5116 for (i = 0; i < n_elts; i++)
5117 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5118 ? CONST_VECTOR_ELT (op0, i)
5119 : CONST_VECTOR_ELT (op1, i));
5120 return gen_rtx_CONST_VECTOR (mode, v);
5121 }
5122 }
5123 break;
5124
5125 default:
5126 gcc_unreachable ();
5127 }
5128
5129 return 0;
5130 }
5131
5132 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5133 or CONST_VECTOR,
5134 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5135
5136 Works by unpacking OP into a collection of 8-bit values
5137 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5138 and then repacking them again for OUTERMODE. */
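/* For example, on a little-endian target,
   simplify_subreg (HImode, GEN_INT (0x12345678), SImode, 2)
   unpacks the constant into the bytes { 0x78, 0x56, 0x34, 0x12 },
   selects the two bytes starting at offset 2 and repacks them,
   yielding (const_int 0x1234).  */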
5139
5140 static rtx
5141 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5142 enum machine_mode innermode, unsigned int byte)
5143 {
5144 /* We support up to 512-bit values (for V8DFmode). */
5145 enum {
5146 max_bitsize = 512,
5147 value_bit = 8,
5148 value_mask = (1 << value_bit) - 1
5149 };
5150 unsigned char value[max_bitsize / value_bit];
5151 int value_start;
5152 int i;
5153 int elem;
5154
5155 int num_elem;
5156 rtx * elems;
5157 int elem_bitsize;
5158 rtx result_s;
5159 rtvec result_v = NULL;
5160 enum mode_class outer_class;
5161 enum machine_mode outer_submode;
5162
5163 /* Some ports misuse CCmode. */
5164 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5165 return op;
5166
5167 /* We have no way to represent a complex constant at the rtl level. */
5168 if (COMPLEX_MODE_P (outermode))
5169 return NULL_RTX;
5170
5171 /* Unpack the value. */
5172
5173 if (GET_CODE (op) == CONST_VECTOR)
5174 {
5175 num_elem = CONST_VECTOR_NUNITS (op);
5176 elems = &CONST_VECTOR_ELT (op, 0);
5177 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5178 }
5179 else
5180 {
5181 num_elem = 1;
5182 elems = &op;
5183 elem_bitsize = max_bitsize;
5184 }
5185 /* If this asserts, it is too complicated; reducing value_bit may help. */
5186 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5187 /* I don't know how to handle endianness of sub-units. */
5188 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5189
5190 for (elem = 0; elem < num_elem; elem++)
5191 {
5192 unsigned char * vp;
5193 rtx el = elems[elem];
5194
5195 /* Vectors are kept in target memory order. (This is probably
5196 a mistake.) */
5197 {
5198 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5199 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5200 / BITS_PER_UNIT);
5201 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5202 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5203 unsigned bytele = (subword_byte % UNITS_PER_WORD
5204 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5205 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5206 }
5207
5208 switch (GET_CODE (el))
5209 {
5210 case CONST_INT:
5211 for (i = 0;
5212 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5213 i += value_bit)
5214 *vp++ = INTVAL (el) >> i;
5215 /* CONST_INTs are always logically sign-extended. */
5216 for (; i < elem_bitsize; i += value_bit)
5217 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5218 break;
5219
5220 case CONST_DOUBLE:
5221 if (GET_MODE (el) == VOIDmode)
5222 {
5223 unsigned char extend = 0;
5224 /* If this triggers, someone should have generated a
5225 CONST_INT instead. */
5226 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5227
5228 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5229 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5230 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5231 {
5232 *vp++
5233 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5234 i += value_bit;
5235 }
5236
5237 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5238 extend = -1;
5239 for (; i < elem_bitsize; i += value_bit)
5240 *vp++ = extend;
5241 }
5242 else
5243 {
5244 long tmp[max_bitsize / 32];
5245 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5246
5247 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5248 gcc_assert (bitsize <= elem_bitsize);
5249 gcc_assert (bitsize % value_bit == 0);
5250
5251 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5252 GET_MODE (el));
5253
5254 /* real_to_target produces its result in words affected by
5255 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5256 and use WORDS_BIG_ENDIAN instead; see the documentation
5257 of SUBREG in rtl.texi. */
5258 for (i = 0; i < bitsize; i += value_bit)
5259 {
5260 int ibase;
5261 if (WORDS_BIG_ENDIAN)
5262 ibase = bitsize - 1 - i;
5263 else
5264 ibase = i;
5265 *vp++ = tmp[ibase / 32] >> i % 32;
5266 }
5267
5268 /* It shouldn't matter what's done here, so fill it with
5269 zero. */
5270 for (; i < elem_bitsize; i += value_bit)
5271 *vp++ = 0;
5272 }
5273 break;
5274
5275 case CONST_FIXED:
5276 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5277 {
5278 for (i = 0; i < elem_bitsize; i += value_bit)
5279 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5280 }
5281 else
5282 {
5283 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5284 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5285 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5286 i += value_bit)
5287 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5288 >> (i - HOST_BITS_PER_WIDE_INT);
5289 for (; i < elem_bitsize; i += value_bit)
5290 *vp++ = 0;
5291 }
5292 break;
5293
5294 default:
5295 gcc_unreachable ();
5296 }
5297 }
5298
5299 /* Now, pick the right byte to start with. */
5300 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5301 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5302 will already have offset 0. */
5303 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5304 {
5305 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5306 - byte);
5307 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5308 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5309 byte = (subword_byte % UNITS_PER_WORD
5310 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5311 }
5312
5313 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5314 so if it's become negative it will instead be very large.) */
5315 gcc_assert (byte < GET_MODE_SIZE (innermode));
5316
5317 /* Convert from bytes to chunks of size value_bit. */
5318 value_start = byte * (BITS_PER_UNIT / value_bit);
5319
5320 /* Re-pack the value. */
5321
5322 if (VECTOR_MODE_P (outermode))
5323 {
5324 num_elem = GET_MODE_NUNITS (outermode);
5325 result_v = rtvec_alloc (num_elem);
5326 elems = &RTVEC_ELT (result_v, 0);
5327 outer_submode = GET_MODE_INNER (outermode);
5328 }
5329 else
5330 {
5331 num_elem = 1;
5332 elems = &result_s;
5333 outer_submode = outermode;
5334 }
5335
5336 outer_class = GET_MODE_CLASS (outer_submode);
5337 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5338
5339 gcc_assert (elem_bitsize % value_bit == 0);
5340 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5341
5342 for (elem = 0; elem < num_elem; elem++)
5343 {
5344 unsigned char *vp;
5345
5346 /* Vectors are stored in target memory order. (This is probably
5347 a mistake.) */
5348 {
5349 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5350 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5351 / BITS_PER_UNIT);
5352 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5353 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5354 unsigned bytele = (subword_byte % UNITS_PER_WORD
5355 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5356 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5357 }
5358
5359 switch (outer_class)
5360 {
5361 case MODE_INT:
5362 case MODE_PARTIAL_INT:
5363 {
5364 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5365
5366 for (i = 0;
5367 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5368 i += value_bit)
5369 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5370 for (; i < elem_bitsize; i += value_bit)
5371 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5372 << (i - HOST_BITS_PER_WIDE_INT);
5373
5374 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5375 know why. */
5376 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5377 elems[elem] = gen_int_mode (lo, outer_submode);
5378 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5379 elems[elem] = immed_double_const (lo, hi, outer_submode);
5380 else
5381 return NULL_RTX;
5382 }
5383 break;
5384
5385 case MODE_FLOAT:
5386 case MODE_DECIMAL_FLOAT:
5387 {
5388 REAL_VALUE_TYPE r;
5389 long tmp[max_bitsize / 32];
5390
5391 /* real_from_target wants its input in words affected by
5392 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5393 and use WORDS_BIG_ENDIAN instead; see the documentation
5394 of SUBREG in rtl.texi. */
5395 for (i = 0; i < max_bitsize / 32; i++)
5396 tmp[i] = 0;
5397 for (i = 0; i < elem_bitsize; i += value_bit)
5398 {
5399 int ibase;
5400 if (WORDS_BIG_ENDIAN)
5401 ibase = elem_bitsize - 1 - i;
5402 else
5403 ibase = i;
5404 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5405 }
5406
5407 real_from_target (&r, tmp, outer_submode);
5408 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5409 }
5410 break;
5411
5412 case MODE_FRACT:
5413 case MODE_UFRACT:
5414 case MODE_ACCUM:
5415 case MODE_UACCUM:
5416 {
5417 FIXED_VALUE_TYPE f;
5418 f.data.low = 0;
5419 f.data.high = 0;
5420 f.mode = outer_submode;
5421
5422 for (i = 0;
5423 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5424 i += value_bit)
5425 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5426 for (; i < elem_bitsize; i += value_bit)
5427 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5428 << (i - HOST_BITS_PER_WIDE_INT));
5429
5430 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5431 }
5432 break;
5433
5434 default:
5435 gcc_unreachable ();
5436 }
5437 }
5438 if (VECTOR_MODE_P (outermode))
5439 return gen_rtx_CONST_VECTOR (outermode, result_v);
5440 else
5441 return result_s;
5442 }
5443
5444 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5445 Return 0 if no simplifications are possible. */
5446 rtx
5447 simplify_subreg (enum machine_mode outermode, rtx op,
5448 enum machine_mode innermode, unsigned int byte)
5449 {
5450 /* Little bit of sanity checking. */
5451 gcc_assert (innermode != VOIDmode);
5452 gcc_assert (outermode != VOIDmode);
5453 gcc_assert (innermode != BLKmode);
5454 gcc_assert (outermode != BLKmode);
5455
5456 gcc_assert (GET_MODE (op) == innermode
5457 || GET_MODE (op) == VOIDmode);
5458
5459 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5460 gcc_assert (byte < GET_MODE_SIZE (innermode));
5461
5462 if (outermode == innermode && !byte)
5463 return op;
5464
5465 if (CONST_INT_P (op)
5466 || CONST_DOUBLE_P (op)
5467 || GET_CODE (op) == CONST_FIXED
5468 || GET_CODE (op) == CONST_VECTOR)
5469 return simplify_immed_subreg (outermode, op, innermode, byte);
5470
5471   /* Changing mode twice with SUBREG => just change it once,
5472      or not at all if changing back to OP's starting mode.  */
5473 if (GET_CODE (op) == SUBREG)
5474 {
5475 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5476 int final_offset = byte + SUBREG_BYTE (op);
5477 rtx newx;
5478
5479 if (outermode == innermostmode
5480 && byte == 0 && SUBREG_BYTE (op) == 0)
5481 return SUBREG_REG (op);
5482
5483       /* The SUBREG_BYTE represents the offset, as if the value were stored
5484 	 in memory.  An irritating exception is the paradoxical subreg, where
5485 	 we define SUBREG_BYTE to be 0.  On big-endian machines, this
5486 	 value should really be negative.  For a moment, undo this exception. */
5487 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5488 {
5489 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5490 if (WORDS_BIG_ENDIAN)
5491 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5492 if (BYTES_BIG_ENDIAN)
5493 final_offset += difference % UNITS_PER_WORD;
5494 }
5495 if (SUBREG_BYTE (op) == 0
5496 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5497 {
5498 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5499 if (WORDS_BIG_ENDIAN)
5500 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5501 if (BYTES_BIG_ENDIAN)
5502 final_offset += difference % UNITS_PER_WORD;
5503 }
5504
5505 /* See whether resulting subreg will be paradoxical. */
5506 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5507 {
5508 /* In nonparadoxical subregs we can't handle negative offsets. */
5509 if (final_offset < 0)
5510 return NULL_RTX;
5511 /* Bail out in case resulting subreg would be incorrect. */
5512 if (final_offset % GET_MODE_SIZE (outermode)
5513 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5514 return NULL_RTX;
5515 }
5516 else
5517 {
5518 int offset = 0;
5519 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5520
5521 	  /* In a paradoxical subreg, see if we are still looking at the
5522 	     lower part.  If so, our SUBREG_BYTE will be 0.  */
5523 if (WORDS_BIG_ENDIAN)
5524 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5525 if (BYTES_BIG_ENDIAN)
5526 offset += difference % UNITS_PER_WORD;
5527 if (offset == final_offset)
5528 final_offset = 0;
5529 else
5530 return NULL_RTX;
5531 }
5532
5533 /* Recurse for further possible simplifications. */
5534 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5535 final_offset);
5536 if (newx)
5537 return newx;
5538 if (validate_subreg (outermode, innermostmode,
5539 SUBREG_REG (op), final_offset))
5540 {
5541 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5542 if (SUBREG_PROMOTED_VAR_P (op)
5543 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5544 && GET_MODE_CLASS (outermode) == MODE_INT
5545 && IN_RANGE (GET_MODE_SIZE (outermode),
5546 GET_MODE_SIZE (innermode),
5547 GET_MODE_SIZE (innermostmode))
5548 && subreg_lowpart_p (newx))
5549 {
5550 SUBREG_PROMOTED_VAR_P (newx) = 1;
5551 SUBREG_PROMOTED_UNSIGNED_SET
5552 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5553 }
5554 return newx;
5555 }
5556 return NULL_RTX;
5557 }
5558
5559 /* Merge implicit and explicit truncations. */
5560
5561 if (GET_CODE (op) == TRUNCATE
5562 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5563 && subreg_lowpart_offset (outermode, innermode) == byte)
5564 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5565 GET_MODE (XEXP (op, 0)));
5566
5567 /* SUBREG of a hard register => just change the register number
5568 and/or mode. If the hard register is not valid in that mode,
5569 suppress this simplification. If the hard register is the stack,
5570 frame, or argument pointer, leave this as a SUBREG. */
5571
5572 if (REG_P (op) && HARD_REGISTER_P (op))
5573 {
5574 unsigned int regno, final_regno;
5575
5576 regno = REGNO (op);
5577 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5578 if (HARD_REGISTER_NUM_P (final_regno))
5579 {
5580 rtx x;
5581 int final_offset = byte;
5582
5583 /* Adjust offset for paradoxical subregs. */
5584 if (byte == 0
5585 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5586 {
5587 int difference = (GET_MODE_SIZE (innermode)
5588 - GET_MODE_SIZE (outermode));
5589 if (WORDS_BIG_ENDIAN)
5590 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5591 if (BYTES_BIG_ENDIAN)
5592 final_offset += difference % UNITS_PER_WORD;
5593 }
5594
5595 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5596
5597 	  /* Propagate the original regno.  We don't have any way to specify
5598 	     the offset inside the original regno, so do so only for the
5599 	     lowpart.  The information is used only by alias analysis, which
5600 	     cannot grok partial registers anyway.  */
5601
5602 if (subreg_lowpart_offset (outermode, innermode) == byte)
5603 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5604 return x;
5605 }
5606 }
5607
5608 /* If we have a SUBREG of a register that we are replacing and we are
5609 replacing it with a MEM, make a new MEM and try replacing the
5610 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5611 or if we would be widening it. */
5612
5613 if (MEM_P (op)
5614 && ! mode_dependent_address_p (XEXP (op, 0))
5615       /* Allow splitting of volatile memory references in case we don't
5616          have an instruction to move the whole thing.  */
5617 && (! MEM_VOLATILE_P (op)
5618 || ! have_insn_for (SET, innermode))
5619 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5620 return adjust_address_nv (op, outermode, byte);
5621
5622 /* Handle complex values represented as CONCAT
5623 of real and imaginary part. */
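  /* Illustrative example (assuming 4-byte SFmode; RE and IM are
     hypothetical registers): (subreg:SF (concat:SC (reg:SF RE)
     (reg:SF IM)) 4) selects the imaginary part and simplifies to
     (reg:SF IM).  */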
5624 if (GET_CODE (op) == CONCAT)
5625 {
5626 unsigned int part_size, final_offset;
5627 rtx part, res;
5628
5629 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5630 if (byte < part_size)
5631 {
5632 part = XEXP (op, 0);
5633 final_offset = byte;
5634 }
5635 else
5636 {
5637 part = XEXP (op, 1);
5638 final_offset = byte - part_size;
5639 }
5640
5641 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5642 return NULL_RTX;
5643
5644 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5645 if (res)
5646 return res;
5647 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5648 return gen_rtx_SUBREG (outermode, part, final_offset);
5649 return NULL_RTX;
5650 }
5651
5652 /* Optimize SUBREG truncations of zero and sign extended values. */
5653 if ((GET_CODE (op) == ZERO_EXTEND
5654 || GET_CODE (op) == SIGN_EXTEND)
5655 && SCALAR_INT_MODE_P (innermode)
5656 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5657 {
5658 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5659
5660 /* If we're requesting the lowpart of a zero or sign extension,
5661 there are three possibilities. If the outermode is the same
5662 as the origmode, we can omit both the extension and the subreg.
5663 If the outermode is not larger than the origmode, we can apply
5664 the truncation without the extension. Finally, if the outermode
5665 is larger than the origmode, but both are integer modes, we
5666 can just extend to the appropriate mode. */
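      /* Illustrative example (R is a hypothetical register): with
         op = (zero_extend:SI (reg:QI R)) and BITPOS == 0, an outermode
         of QImode yields (reg:QI R) itself, while an outermode of
         HImode yields (zero_extend:HI (reg:QI R)).  */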
5667 if (bitpos == 0)
5668 {
5669 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5670 if (outermode == origmode)
5671 return XEXP (op, 0);
5672 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5673 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5674 subreg_lowpart_offset (outermode,
5675 origmode));
5676 if (SCALAR_INT_MODE_P (outermode))
5677 return simplify_gen_unary (GET_CODE (op), outermode,
5678 XEXP (op, 0), origmode);
5679 }
5680
5681 /* A SUBREG resulting from a zero extension may fold to zero if
5682 it extracts higher bits than the ZERO_EXTEND's source bits. */
5683 if (GET_CODE (op) == ZERO_EXTEND
5684 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5685 return CONST0_RTX (outermode);
5686 }
5687
5688 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5689 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5690 the outer subreg is effectively a truncation to the original mode. */
5691 if ((GET_CODE (op) == LSHIFTRT
5692 || GET_CODE (op) == ASHIFTRT)
5693 && SCALAR_INT_MODE_P (outermode)
5694 && SCALAR_INT_MODE_P (innermode)
5695 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5696 to avoid the possibility that an outer LSHIFTRT shifts by more
5697 than the sign extension's sign_bit_copies and introduces zeros
5698 into the high bits of the result. */
5699 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5700 && CONST_INT_P (XEXP (op, 1))
5701 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5702 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5703 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5704 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5705 return simplify_gen_binary (ASHIFTRT, outermode,
5706 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5707
5708 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5709 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5710 the outer subreg is effectively a truncation to the original mode. */
5711 if ((GET_CODE (op) == LSHIFTRT
5712 || GET_CODE (op) == ASHIFTRT)
5713 && SCALAR_INT_MODE_P (outermode)
5714 && SCALAR_INT_MODE_P (innermode)
5715 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5716 && CONST_INT_P (XEXP (op, 1))
5717 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5718 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5719 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5720 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5721 return simplify_gen_binary (LSHIFTRT, outermode,
5722 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5723
5724 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5725 (ashift:QI (x:QI) C), where C is a suitable small constant and
5726 the outer subreg is effectively a truncation to the original mode. */
5727 if (GET_CODE (op) == ASHIFT
5728 && SCALAR_INT_MODE_P (outermode)
5729 && SCALAR_INT_MODE_P (innermode)
5730 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5731 && CONST_INT_P (XEXP (op, 1))
5732 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5733 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5734 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5735 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5736 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5737 return simplify_gen_binary (ASHIFT, outermode,
5738 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5739
5740 /* Recognize a word extraction from a multi-word subreg. */
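  /* Illustrative example (assuming a little-endian target with 32-bit
     words; R is a hypothetical register):
     (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32)) 0) extracts the
     high word and becomes (subreg:SI (reg:DI R) 4).  */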
5741 if ((GET_CODE (op) == LSHIFTRT
5742 || GET_CODE (op) == ASHIFTRT)
5743 && SCALAR_INT_MODE_P (innermode)
5744 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5745 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5746 && CONST_INT_P (XEXP (op, 1))
5747 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5748 && INTVAL (XEXP (op, 1)) >= 0
5749 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5750 && byte == subreg_lowpart_offset (outermode, innermode))
5751 {
5752 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5753 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5754 (WORDS_BIG_ENDIAN
5755 ? byte - shifted_bytes
5756 : byte + shifted_bytes));
5757 }
5758
5759 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5760 and try replacing the SUBREG and shift with it. Don't do this if
5761 the MEM has a mode-dependent address or if we would be widening it. */
5762
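  /* Illustrative example (assuming a little-endian target with 32-bit
     words; A is a hypothetical address): a lowpart
     (subreg:SI (lshiftrt:DI (mem:DI A) (const_int 32)) 0) becomes an
     SImode MEM at address A plus 4 bytes via adjust_address_nv.  */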
5763 if ((GET_CODE (op) == LSHIFTRT
5764 || GET_CODE (op) == ASHIFTRT)
5765 && SCALAR_INT_MODE_P (innermode)
5766 && MEM_P (XEXP (op, 0))
5767 && CONST_INT_P (XEXP (op, 1))
5768 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5769 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5770 && INTVAL (XEXP (op, 1)) > 0
5771 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5772 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5773 && ! MEM_VOLATILE_P (XEXP (op, 0))
5774 && byte == subreg_lowpart_offset (outermode, innermode)
5775 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5776 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5777 {
5778 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5779 return adjust_address_nv (XEXP (op, 0), outermode,
5780 (WORDS_BIG_ENDIAN
5781 ? byte - shifted_bytes
5782 : byte + shifted_bytes));
5783 }
5784
5785 return NULL_RTX;
5786 }
5787
5788 /* Make a SUBREG operation or equivalent if it folds. */
5789
5790 rtx
5791 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5792 enum machine_mode innermode, unsigned int byte)
5793 {
5794 rtx newx;
5795
5796 newx = simplify_subreg (outermode, op, innermode, byte);
5797 if (newx)
5798 return newx;
5799
5800 if (GET_CODE (op) == SUBREG
5801 || GET_CODE (op) == CONCAT
5802 || GET_MODE (op) == VOIDmode)
5803 return NULL_RTX;
5804
5805 if (validate_subreg (outermode, innermode, op, byte))
5806 return gen_rtx_SUBREG (outermode, op, byte);
5807
5808 return NULL_RTX;
5809 }
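
/* Illustrative use of the routine above (not part of the original
   sources; X is a hypothetical rtx): a caller that wants the low word
   of a DImode value might write

     rtx low = simplify_gen_subreg (SImode, x, DImode,
                                    subreg_lowpart_offset (SImode, DImode));

   and receive either a simplified expression, a fresh (subreg:SI ...),
   or NULL_RTX when no valid SUBREG can be formed.  */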
5810
5811 /* Simplify X, an rtx expression.
5812
5813 Return the simplified expression or NULL if no simplifications
5814 were possible.
5815
5816 This is the preferred entry point into the simplification routines;
5817 however, we still allow passes to call the more specific routines.
5818
5819 Right now GCC has three (yes, three) major bodies of RTL simplification
5820 code that need to be unified.
5821
5822 1. fold_rtx in cse.c. This code uses various CSE specific
5823 information to aid in RTL simplification.
5824
5825 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5826 it uses combine specific information to aid in RTL
5827 simplification.
5828
5829 3. The routines in this file.
5830
5831
5832 Long term we want to only have one body of simplification code; to
5833 get to that state I recommend the following steps:
5834
5835 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5836 that do not depend on pass-specific state into these routines.
5837
5838 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5839 use this routine whenever possible.
5840
5841 3. Allow for pass dependent state to be provided to these
5842 routines and add simplifications based on the pass dependent
5843 state. Remove code from cse.c & combine.c that becomes
5844 redundant/dead.
5845
5846 It will take time, but ultimately the compiler will be easier to
5847 maintain and improve. It's totally silly that when we add a
5848 simplification it needs to be added to 4 places (3 for RTL
5849 simplification and 1 for tree simplification). */
5850
5851 rtx
5852 simplify_rtx (const_rtx x)
5853 {
5854 const enum rtx_code code = GET_CODE (x);
5855 const enum machine_mode mode = GET_MODE (x);
5856
5857 switch (GET_RTX_CLASS (code))
5858 {
5859 case RTX_UNARY:
5860 return simplify_unary_operation (code, mode,
5861 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5862 case RTX_COMM_ARITH:
5863 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5864 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5865
5866 /* Fall through.... */
5867
5868 case RTX_BIN_ARITH:
5869 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5870
5871 case RTX_TERNARY:
5872 case RTX_BITFIELD_OPS:
5873 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5874 XEXP (x, 0), XEXP (x, 1),
5875 XEXP (x, 2));
5876
5877 case RTX_COMPARE:
5878 case RTX_COMM_COMPARE:
5879 return simplify_relational_operation (code, mode,
5880 ((GET_MODE (XEXP (x, 0))
5881 != VOIDmode)
5882 ? GET_MODE (XEXP (x, 0))
5883 : GET_MODE (XEXP (x, 1))),
5884 XEXP (x, 0),
5885 XEXP (x, 1));
5886
5887 case RTX_EXTRA:
5888 if (code == SUBREG)
5889 return simplify_subreg (mode, SUBREG_REG (x),
5890 GET_MODE (SUBREG_REG (x)),
5891 SUBREG_BYTE (x));
5892 break;
5893
5894 case RTX_OBJ:
5895 if (code == LO_SUM)
5896 {
5897 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5898 if (GET_CODE (XEXP (x, 0)) == HIGH
5899 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5900 return XEXP (x, 1);
5901 }
5902 break;
5903
5904 default:
5905 break;
5906 }
5907 return NULL;
5908 }
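
/* Illustrative use (not part of the original sources; X is a
   hypothetical rtx): a pass can attempt a simplification and keep the
   original expression when nothing better is found:

     rtx tem = simplify_rtx (x);
     if (tem)
       x = tem;

   simplify_rtx returns NULL when it cannot improve X.  */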