1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
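/* For instance, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (7) is 0,
   while HWI_SIGN_EXTEND of a value with the host sign bit set (such as
   0x8000000000000000) is -1, i.e. the high word that LOW would have if it
   were sign extended to a double-width signed value.  */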
49
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
68 {
69 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
70 }
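/* As an illustration of why the truncation matters: in QImode the most
   negative value is -128, and -(-128) == 128 does not fit in eight bits,
   so gen_int_mode wraps it back to -128.  The negation of the most
   negative constant is therefore the constant itself.  */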
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
94 {
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
97 }
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
101
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
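/* For example, assuming a 64-bit HOST_WIDE_INT and 32-bit SImode,
   (const_int -2147483648) masks down to 0x80000000, exactly the SImode
   sign bit, so mode_signbit_p returns true for it, whereas
   (const_int 0x40000000) does not qualify.  */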
106
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
110
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114 unsigned int width;
115
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
118
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
122
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
126
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
136
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
140
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
143 }
144
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150 unsigned int width;
151
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
154
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
158
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
161 }
162 \f
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
165
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
169 {
170 rtx tem;
171
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
176
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
181
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
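/* For example (an illustrative sketch; the pseudo register number is
   arbitrary), simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
   folds to (const_int 5), while simplify_gen_binary (PLUS, SImode,
   GEN_INT (3), x) for a pseudo x such as (reg:SI 60) yields
   (plus:SI (reg:SI 60) (const_int 3)) -- for commutative codes the
   constant is placed second.  */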
184 \f
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
193
194 switch (GET_CODE (x))
195 {
196 case MEM:
197 break;
198
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
204 {
205 REAL_VALUE_TYPE d;
206
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 }
210 return x;
211
212 default:
213 return x;
214 }
215
216 if (GET_MODE (x) == BLKmode)
217 return x;
218
219 addr = XEXP (x, 0);
220
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
223
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228 {
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
231 }
232
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
235
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
240 {
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
243
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
248 {
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
252 }
253 else
254 return c;
255 }
256
257 return x;
258 }
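/* For instance (a sketch with made-up values), if X is
   (mem/u/c:SF (symbol_ref/u:SI ("*.LC0"))) and the pool entry .LC0 holds
   the SFmode constant 1.5, the function returns the corresponding
   CONST_DOUBLE; if X instead reads the entry in another mode or at a
   nonzero offset, simplify_subreg is given a chance to extract a
   constant, and the original MEM is returned when that fails.  */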
259 \f
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
263
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
266 {
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
272 {
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
276
277 switch (TREE_CODE (decl))
278 {
279 default:
280 decl = NULL;
281 break;
282
283 case VAR_DECL:
284 break;
285
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
293 {
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
297
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
305 {
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
309 }
310 break;
311 }
312 }
313
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
321 {
322 rtx newx;
323
324 offset += MEM_OFFSET (x);
325
326 newx = DECL_RTL (decl);
327
328 if (MEM_P (newx))
329 {
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
350 }
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
354 }
355 }
356
357 return x;
358 }
359 \f
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
362
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
366 {
367 rtx tem;
368
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
372
373 return gen_rtx_fmt_e (code, mode, op);
374 }
375
376 /* Likewise for ternary operations. */
377
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 {
382 rtx tem;
383
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
388
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 }
391
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
394
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 {
399 rtx tem;
400
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
404
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
406 }
407 \f
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
411 result. */
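/* Example usage (an illustrative sketch only; zap_reg, insn_pattern and
   the register number are hypothetical):

     static rtx
     zap_reg (rtx x, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (x) && REGNO (x) == *(unsigned int *) data)
         return CONST0_RTX (GET_MODE (x));
       return NULL_RTX;
     }

     unsigned int regno = 60;
     rtx simplified = simplify_replace_fn_rtx (insn_pattern, NULL_RTX,
                                               zap_reg, &regno);

   With FN == NULL, OLD_RTX is compared against each subexpression and
   DATA (an rtx) is substituted for every match, which is the form used
   by simplify_replace_rtx below.  */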
412
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 {
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
424
425 if (__builtin_expect (fn != NULL, 0))
426 {
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
430 }
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
433
434 switch (GET_RTX_CLASS (code))
435 {
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
443
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
451
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475
476 case RTX_EXTRA:
477 if (code == SUBREG)
478 {
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
486 }
487 break;
488
489 case RTX_OBJ:
490 if (code == MEM)
491 {
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
496 }
497 else if (code == LO_SUM)
498 {
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
505
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
509 }
510 break;
511
512 default:
513 break;
514 }
515
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
520 {
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 {
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
529 {
530 if (newvec == vec)
531 {
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
536 }
537 RTVEC_ELT (newvec, j) = op;
538 }
539 }
540 break;
541
542 case 'e':
543 if (XEXP (x, i))
544 {
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
547 {
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
551 }
552 }
553 break;
554 }
555 return newx;
556 }
557
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
560
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 {
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
565 }
566 \f
567 /* Try to simplify a unary operation CODE whose output mode is to be
568 MODE with input operand OP whose mode was originally OP_MODE.
569 Return zero if no simplification can be made. */
570 rtx
571 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
572 rtx op, enum machine_mode op_mode)
573 {
574 rtx trueop, tem;
575
576 trueop = avoid_constant_pool_reference (op);
577
578 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
579 if (tem)
580 return tem;
581
582 return simplify_unary_operation_1 (code, mode, op);
583 }
584
585 /* Perform some simplifications we can do even if the operands
586 aren't constant. */
587 static rtx
588 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
589 {
590 enum rtx_code reversed;
591 rtx temp;
592
593 switch (code)
594 {
595 case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op) == NOT)
598 return XEXP (op, 0);
599
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op)
603 && (mode == BImode || STORE_FLAG_VALUE == -1)
604 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
605 return simplify_gen_relational (reversed, mode, VOIDmode,
606 XEXP (op, 0), XEXP (op, 1));
607
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op) == PLUS
610 && XEXP (op, 1) == constm1_rtx)
611 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
612
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op) == NEG)
615 return plus_constant (mode, XEXP (op, 0), -1);
616
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op) == XOR
619 && CONST_INT_P (XEXP (op, 1))
620 && (temp = simplify_unary_operation (NOT, mode,
621 XEXP (op, 1), mode)) != 0)
622 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
623
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op) == PLUS
626 && CONST_INT_P (XEXP (op, 1))
627 && mode_signbit_p (mode, XEXP (op, 1))
628 && (temp = simplify_unary_operation (NOT, mode,
629 XEXP (op, 1), mode)) != 0)
630 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
631
632
633 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
634 operands other than 1, but that is not valid. We could do a
635 similar simplification for (not (lshiftrt C X)) where C is
636 just the sign bit, but this doesn't seem common enough to
637 bother with. */
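      /* For instance, in SImode (not (ashift:SI (const_int 1) (reg:SI n)))
         becomes (rotate:SI (const_int -2) (reg:SI n)): ~1 is -2, all ones
         except bit 0, and rotating that pattern left by n clears exactly
         bit n.  */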
638 if (GET_CODE (op) == ASHIFT
639 && XEXP (op, 0) == const1_rtx)
640 {
641 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
642 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
643 }
644
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
648
649 if (STORE_FLAG_VALUE == -1
650 && GET_CODE (op) == ASHIFTRT
 651           && CONST_INT_P (XEXP (op, 1))
652 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
653 return simplify_gen_relational (GE, mode, VOIDmode,
654 XEXP (op, 0), const0_rtx);
655
656
657 if (GET_CODE (op) == SUBREG
658 && subreg_lowpart_p (op)
659 && (GET_MODE_SIZE (GET_MODE (op))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
661 && GET_CODE (SUBREG_REG (op)) == ASHIFT
662 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
663 {
664 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
665 rtx x;
666
667 x = gen_rtx_ROTATE (inner_mode,
668 simplify_gen_unary (NOT, inner_mode, const1_rtx,
669 inner_mode),
670 XEXP (SUBREG_REG (op), 1));
671 return rtl_hooks.gen_lowpart_no_emit (mode, x);
672 }
673
674 /* Apply De Morgan's laws to reduce number of patterns for machines
675 with negating logical insns (and-not, nand, etc.). If result has
676 only one NOT, put it first, since that is how the patterns are
677 coded. */
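      /* For instance, in SImode (not (and:SI (reg:SI x) (const_int 5)))
         becomes (ior:SI (not:SI (reg:SI x)) (const_int -6)), since the NOT
         of the constant folds to (const_int -6); a (not (ior ...)) likewise
         becomes an AND of the negated operands.  */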
678
679 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
680 {
681 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
682 enum machine_mode op_mode;
683
684 op_mode = GET_MODE (in1);
685 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
686
687 op_mode = GET_MODE (in2);
688 if (op_mode == VOIDmode)
689 op_mode = mode;
690 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
691
692 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
693 {
694 rtx tem = in2;
695 in2 = in1; in1 = tem;
696 }
697
698 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
699 mode, in1, in2);
700 }
701 break;
702
703 case NEG:
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op) == NEG)
706 return XEXP (op, 0);
707
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op) == PLUS
710 && XEXP (op, 1) == const1_rtx)
711 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
712
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op) == NOT)
715 return plus_constant (mode, XEXP (op, 0), 1);
716
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
725 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
726
727 if (GET_CODE (op) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
730 {
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op, 1))
733 || CONST_DOUBLE_P (XEXP (op, 1)))
734 {
735 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
736 if (temp)
737 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
738 }
739
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
742 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
743 }
744
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
749 {
750 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
751 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
752 }
753
754 /* NEG commutes with ASHIFT since it is multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
756 is a constant). */
757 if (GET_CODE (op) == ASHIFT)
758 {
759 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
760 if (temp)
761 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
762 }
763
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op) == ASHIFTRT
767 && CONST_INT_P (XEXP (op, 1))
768 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
769 return simplify_gen_binary (LSHIFTRT, mode,
770 XEXP (op, 0), XEXP (op, 1));
771
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op) == LSHIFTRT
775 && CONST_INT_P (XEXP (op, 1))
776 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
777 return simplify_gen_binary (ASHIFTRT, mode,
778 XEXP (op, 0), XEXP (op, 1));
779
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op) == XOR
782 && XEXP (op, 1) == const1_rtx
783 && nonzero_bits (XEXP (op, 0), mode) == 1)
784 return plus_constant (mode, XEXP (op, 0), -1);
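      /* Both cases check out: for A == 0, (xor A 1) is 1 and its negation
         is -1, which is 0 - 1; for A == 1, (xor A 1) is 0 and its negation
         is 0, which is 1 - 1.  */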
785
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op) == LT
789 && XEXP (op, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
791 {
792 enum machine_mode inner = GET_MODE (XEXP (op, 0));
793 int isize = GET_MODE_PRECISION (inner);
794 if (STORE_FLAG_VALUE == 1)
795 {
796 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
797 GEN_INT (isize - 1));
798 if (mode == inner)
799 return temp;
800 if (GET_MODE_PRECISION (mode) > isize)
801 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
802 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
803 }
804 else if (STORE_FLAG_VALUE == -1)
805 {
806 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
807 GEN_INT (isize - 1));
808 if (mode == inner)
809 return temp;
810 if (GET_MODE_PRECISION (mode) > isize)
811 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
812 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
813 }
814 }
815 break;
816
817 case TRUNCATE:
818 /* We can't handle truncation to a partial integer mode here
819 because we don't know the real bitsize of the partial
820 integer mode. */
821 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
822 break;
823
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op) == SIGN_EXTEND
826 || GET_CODE (op) == ZERO_EXTEND)
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
829
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op) == ABS
833 || GET_CODE (op) == NEG)
834 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
836 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
837 return simplify_gen_unary (GET_CODE (op), mode,
838 XEXP (XEXP (op, 0), 0), mode);
839
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
841 (truncate:A X). */
842 if (GET_CODE (op) == SUBREG
843 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
844 && subreg_lowpart_p (op))
845 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
846 GET_MODE (XEXP (SUBREG_REG (op), 0)));
847
848 /* If we know that the value is already truncated, we can
849 replace the TRUNCATE with a SUBREG. Note that this is also
850 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
 851          valid if TRULY_NOOP_TRUNCATION is false for the corresponding
 852          modes; we just have to apply a different definition for
852 truncation. But don't do this for an (LSHIFTRT (MULT ...))
853 since this will cause problems with the umulXi3_highpart
854 patterns. */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
856 ? (num_sign_bit_copies (op, GET_MODE (op))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
858 - GET_MODE_PRECISION (mode)))
859 : truncated_to_mode (mode, op))
860 && ! (GET_CODE (op) == LSHIFTRT
861 && GET_CODE (XEXP (op, 0)) == MULT))
862 return rtl_hooks.gen_lowpart_no_emit (mode, op);
863
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode)
869 && COMPARISON_P (op)
870 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
871 return rtl_hooks.gen_lowpart_no_emit (mode, op);
872
873 /* A truncate of a memory is just loading the low part of the memory
874 if we are not changing the meaning of the address. */
875 if (GET_CODE (op) == MEM
876 && !MEM_VOLATILE_P (op)
877 && !mode_dependent_address_p (XEXP (op, 0)))
878 return rtl_hooks.gen_lowpart_no_emit (mode, op);
879
880 break;
881
882 case FLOAT_TRUNCATE:
883 if (DECIMAL_FLOAT_MODE_P (mode))
884 break;
885
886 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
887 if (GET_CODE (op) == FLOAT_EXTEND
888 && GET_MODE (XEXP (op, 0)) == mode)
889 return XEXP (op, 0);
890
891 /* (float_truncate:SF (float_truncate:DF foo:XF))
892 = (float_truncate:SF foo:XF).
893 This may eliminate double rounding, so it is unsafe.
894
895 (float_truncate:SF (float_extend:XF foo:DF))
896 = (float_truncate:SF foo:DF).
897
898 (float_truncate:DF (float_extend:XF foo:SF))
 899          = (float_extend:DF foo:SF). */
900 if ((GET_CODE (op) == FLOAT_TRUNCATE
901 && flag_unsafe_math_optimizations)
902 || GET_CODE (op) == FLOAT_EXTEND)
903 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
904 0)))
905 > GET_MODE_SIZE (mode)
906 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
907 mode,
908 XEXP (op, 0), mode);
909
910 /* (float_truncate (float x)) is (float x) */
911 if (GET_CODE (op) == FLOAT
912 && (flag_unsafe_math_optimizations
913 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
914 && ((unsigned)significand_size (GET_MODE (op))
915 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
916 - num_sign_bit_copies (XEXP (op, 0),
917 GET_MODE (XEXP (op, 0))))))))
918 return simplify_gen_unary (FLOAT, mode,
919 XEXP (op, 0),
920 GET_MODE (XEXP (op, 0)));
921
922 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
923 (OP:SF foo:SF) if OP is NEG or ABS. */
924 if ((GET_CODE (op) == ABS
925 || GET_CODE (op) == NEG)
926 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
927 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
928 return simplify_gen_unary (GET_CODE (op), mode,
929 XEXP (XEXP (op, 0), 0), mode);
930
931 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
932 is (float_truncate:SF x). */
933 if (GET_CODE (op) == SUBREG
934 && subreg_lowpart_p (op)
935 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
936 return SUBREG_REG (op);
937 break;
938
939 case FLOAT_EXTEND:
940 if (DECIMAL_FLOAT_MODE_P (mode))
941 break;
942
943 /* (float_extend (float_extend x)) is (float_extend x)
944
945 (float_extend (float x)) is (float x) assuming that double
946 rounding can't happen.
947 */
948 if (GET_CODE (op) == FLOAT_EXTEND
949 || (GET_CODE (op) == FLOAT
950 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
951 && ((unsigned)significand_size (GET_MODE (op))
952 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
953 - num_sign_bit_copies (XEXP (op, 0),
954 GET_MODE (XEXP (op, 0)))))))
955 return simplify_gen_unary (GET_CODE (op), mode,
956 XEXP (op, 0),
957 GET_MODE (XEXP (op, 0)));
958
959 break;
960
961 case ABS:
962 /* (abs (neg <foo>)) -> (abs <foo>) */
963 if (GET_CODE (op) == NEG)
964 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
965 GET_MODE (XEXP (op, 0)));
966
967 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
968 do nothing. */
969 if (GET_MODE (op) == VOIDmode)
970 break;
971
972 /* If operand is something known to be positive, ignore the ABS. */
973 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
974 || val_signbit_known_clear_p (GET_MODE (op),
975 nonzero_bits (op, GET_MODE (op))))
976 return op;
977
978 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
979 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
980 return gen_rtx_NEG (mode, op);
981
982 break;
983
984 case FFS:
985 /* (ffs (*_extend <X>)) = (ffs <X>) */
986 if (GET_CODE (op) == SIGN_EXTEND
987 || GET_CODE (op) == ZERO_EXTEND)
988 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
989 GET_MODE (XEXP (op, 0)));
990 break;
991
992 case POPCOUNT:
993 switch (GET_CODE (op))
994 {
995 case BSWAP:
996 case ZERO_EXTEND:
997 /* (popcount (zero_extend <X>)) = (popcount <X>) */
998 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
999 GET_MODE (XEXP (op, 0)));
1000
1001 case ROTATE:
1002 case ROTATERT:
1003 /* Rotations don't affect popcount. */
1004 if (!side_effects_p (XEXP (op, 1)))
1005 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1006 GET_MODE (XEXP (op, 0)));
1007 break;
1008
1009 default:
1010 break;
1011 }
1012 break;
1013
1014 case PARITY:
1015 switch (GET_CODE (op))
1016 {
1017 case NOT:
1018 case BSWAP:
1019 case ZERO_EXTEND:
1020 case SIGN_EXTEND:
1021 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1022 GET_MODE (XEXP (op, 0)));
1023
1024 case ROTATE:
1025 case ROTATERT:
1026 /* Rotations don't affect parity. */
1027 if (!side_effects_p (XEXP (op, 1)))
1028 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1029 GET_MODE (XEXP (op, 0)));
1030 break;
1031
1032 default:
1033 break;
1034 }
1035 break;
1036
1037 case BSWAP:
1038 /* (bswap (bswap x)) -> x. */
1039 if (GET_CODE (op) == BSWAP)
1040 return XEXP (op, 0);
1041 break;
1042
1043 case FLOAT:
1044 /* (float (sign_extend <X>)) = (float <X>). */
1045 if (GET_CODE (op) == SIGN_EXTEND)
1046 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1047 GET_MODE (XEXP (op, 0)));
1048 break;
1049
1050 case SIGN_EXTEND:
1051 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1052 becomes just the MINUS if its mode is MODE. This allows
1053 folding switch statements on machines using casesi (such as
1054 the VAX). */
1055 if (GET_CODE (op) == TRUNCATE
1056 && GET_MODE (XEXP (op, 0)) == mode
1057 && GET_CODE (XEXP (op, 0)) == MINUS
1058 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1059 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1060 return XEXP (op, 0);
1061
 1062       /* The extension of a widening multiplication should be canonicalized
 1063          to a wider widening multiplication. */
1064 if (GET_CODE (op) == MULT)
1065 {
1066 rtx lhs = XEXP (op, 0);
1067 rtx rhs = XEXP (op, 1);
1068 enum rtx_code lcode = GET_CODE (lhs);
1069 enum rtx_code rcode = GET_CODE (rhs);
1070
1071 /* Widening multiplies usually extend both operands, but sometimes
1072 they use a shift to extract a portion of a register. */
1073 if ((lcode == SIGN_EXTEND
1074 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1075 && (rcode == SIGN_EXTEND
1076 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1077 {
1078 enum machine_mode lmode = GET_MODE (lhs);
1079 enum machine_mode rmode = GET_MODE (rhs);
1080 int bits;
1081
1082 if (lcode == ASHIFTRT)
1083 /* Number of bits not shifted off the end. */
1084 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1085 else /* lcode == SIGN_EXTEND */
1086 /* Size of inner mode. */
1087 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1088
1089 if (rcode == ASHIFTRT)
1090 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1091 else /* rcode == SIGN_EXTEND */
1092 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1093
 1094              /* We can only widen multiplies if the result is mathematically
1095 equivalent. I.e. if overflow was impossible. */
1096 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1097 return simplify_gen_binary
1098 (MULT, mode,
1099 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1100 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1101 }
1102 }
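      /* For example (an illustrative case), when simplifying
         (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI a))
                                  (sign_extend:SI (reg:HI b)))),
         each factor carries at most 16 significant bits, so BITS is
         16 + 16 = 32, the SImode product cannot overflow, and the whole
         expression is rewritten as
         (mult:DI (sign_extend:DI (reg:HI a)) (sign_extend:DI (reg:HI b))).  */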
1103
1104 /* Check for a sign extension of a subreg of a promoted
1105 variable, where the promotion is sign-extended, and the
1106 target mode is the same as the variable's promotion. */
1107 if (GET_CODE (op) == SUBREG
1108 && SUBREG_PROMOTED_VAR_P (op)
1109 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1110 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1111 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1112
1113 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1114 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1115 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1116 {
1117 gcc_assert (GET_MODE_BITSIZE (mode)
1118 > GET_MODE_BITSIZE (GET_MODE (op)));
1119 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1120 GET_MODE (XEXP (op, 0)));
1121 }
1122
1123 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1124          is (sign_extend:M (subreg:O <X>)) if there is a mode with
1125 GET_MODE_BITSIZE (N) - I bits.
1126 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1127 is similarly (zero_extend:M (subreg:O <X>)). */
1128 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1129 && GET_CODE (XEXP (op, 0)) == ASHIFT
1130 && CONST_INT_P (XEXP (op, 1))
1131 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1132 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1133 {
1134 enum machine_mode tmode
1135 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1136 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1137 gcc_assert (GET_MODE_BITSIZE (mode)
1138 > GET_MODE_BITSIZE (GET_MODE (op)));
1139 if (tmode != BLKmode)
1140 {
1141 rtx inner =
1142 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1143 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1144 ? SIGN_EXTEND : ZERO_EXTEND,
1145 mode, inner, tmode);
1146 }
1147 }
1148
1149 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1150 /* As we do not know which address space the pointer is referring to,
1151 we can do this only if the target does not support different pointer
1152 or address modes depending on the address space. */
1153 if (target_default_pointer_address_modes_p ()
1154 && ! POINTERS_EXTEND_UNSIGNED
1155 && mode == Pmode && GET_MODE (op) == ptr_mode
1156 && (CONSTANT_P (op)
1157 || (GET_CODE (op) == SUBREG
1158 && REG_P (SUBREG_REG (op))
1159 && REG_POINTER (SUBREG_REG (op))
1160 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1161 return convert_memory_address (Pmode, op);
1162 #endif
1163 break;
1164
1165 case ZERO_EXTEND:
1166 /* Check for a zero extension of a subreg of a promoted
1167 variable, where the promotion is zero-extended, and the
1168 target mode is the same as the variable's promotion. */
1169 if (GET_CODE (op) == SUBREG
1170 && SUBREG_PROMOTED_VAR_P (op)
1171 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1172 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1173 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1174
 1175       /* The extension of a widening multiplication should be canonicalized
 1176          to a wider widening multiplication. */
1177 if (GET_CODE (op) == MULT)
1178 {
1179 rtx lhs = XEXP (op, 0);
1180 rtx rhs = XEXP (op, 1);
1181 enum rtx_code lcode = GET_CODE (lhs);
1182 enum rtx_code rcode = GET_CODE (rhs);
1183
1184 /* Widening multiplies usually extend both operands, but sometimes
1185 they use a shift to extract a portion of a register. */
1186 if ((lcode == ZERO_EXTEND
1187 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1188 && (rcode == ZERO_EXTEND
1189 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1190 {
1191 enum machine_mode lmode = GET_MODE (lhs);
1192 enum machine_mode rmode = GET_MODE (rhs);
1193 int bits;
1194
1195 if (lcode == LSHIFTRT)
1196 /* Number of bits not shifted off the end. */
1197 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1198 else /* lcode == ZERO_EXTEND */
1199 /* Size of inner mode. */
1200 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1201
1202 if (rcode == LSHIFTRT)
1203 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1204 else /* rcode == ZERO_EXTEND */
1205 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1206
 1207              /* We can only widen multiplies if the result is mathematically
1208 equivalent. I.e. if overflow was impossible. */
1209 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1210 return simplify_gen_binary
1211 (MULT, mode,
1212 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1213 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1214 }
1215 }
1216
1217 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1218 if (GET_CODE (op) == ZERO_EXTEND)
1219 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1220 GET_MODE (XEXP (op, 0)));
1221
1222 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1223          is (zero_extend:M (subreg:O <X>)) if there is a mode with
1224 GET_MODE_BITSIZE (N) - I bits. */
1225 if (GET_CODE (op) == LSHIFTRT
1226 && GET_CODE (XEXP (op, 0)) == ASHIFT
1227 && CONST_INT_P (XEXP (op, 1))
1228 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1229 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1230 {
1231 enum machine_mode tmode
1232 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1233 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1234 if (tmode != BLKmode)
1235 {
1236 rtx inner =
1237 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1238 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1239 }
1240 }
1241
1242 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1243 /* As we do not know which address space the pointer is referring to,
1244 we can do this only if the target does not support different pointer
1245 or address modes depending on the address space. */
1246 if (target_default_pointer_address_modes_p ()
1247 && POINTERS_EXTEND_UNSIGNED > 0
1248 && mode == Pmode && GET_MODE (op) == ptr_mode
1249 && (CONSTANT_P (op)
1250 || (GET_CODE (op) == SUBREG
1251 && REG_P (SUBREG_REG (op))
1252 && REG_POINTER (SUBREG_REG (op))
1253 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1254 return convert_memory_address (Pmode, op);
1255 #endif
1256 break;
1257
1258 default:
1259 break;
1260 }
1261
1262 return 0;
1263 }
1264
1265 /* Try to compute the value of a unary operation CODE whose output mode is to
1266 be MODE with input operand OP whose mode was originally OP_MODE.
1267 Return zero if the value cannot be computed. */
1268 rtx
1269 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1270 rtx op, enum machine_mode op_mode)
1271 {
1272 unsigned int width = GET_MODE_PRECISION (mode);
1273 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1274
1275 if (code == VEC_DUPLICATE)
1276 {
1277 gcc_assert (VECTOR_MODE_P (mode));
1278 if (GET_MODE (op) != VOIDmode)
1279 {
1280 if (!VECTOR_MODE_P (GET_MODE (op)))
1281 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1282 else
1283 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1284 (GET_MODE (op)));
1285 }
1286 if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
1287 || GET_CODE (op) == CONST_VECTOR)
1288 {
1289 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1290 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1291 rtvec v = rtvec_alloc (n_elts);
1292 unsigned int i;
1293
1294 if (GET_CODE (op) != CONST_VECTOR)
1295 for (i = 0; i < n_elts; i++)
1296 RTVEC_ELT (v, i) = op;
1297 else
1298 {
1299 enum machine_mode inmode = GET_MODE (op);
1300 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1301 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1302
1303 gcc_assert (in_n_elts < n_elts);
1304 gcc_assert ((n_elts % in_n_elts) == 0);
1305 for (i = 0; i < n_elts; i++)
1306 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1307 }
1308 return gen_rtx_CONST_VECTOR (mode, v);
1309 }
1310 }
1311
1312 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1313 {
1314 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1315 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1316 enum machine_mode opmode = GET_MODE (op);
1317 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1318 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1319 rtvec v = rtvec_alloc (n_elts);
1320 unsigned int i;
1321
1322 gcc_assert (op_n_elts == n_elts);
1323 for (i = 0; i < n_elts; i++)
1324 {
1325 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1326 CONST_VECTOR_ELT (op, i),
1327 GET_MODE_INNER (opmode));
1328 if (!x)
1329 return 0;
1330 RTVEC_ELT (v, i) = x;
1331 }
1332 return gen_rtx_CONST_VECTOR (mode, v);
1333 }
1334
1335 /* The order of these tests is critical so that, for example, we don't
1336 check the wrong mode (input vs. output) for a conversion operation,
1337 such as FIX. At some point, this should be simplified. */
1338
1339 if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1340 {
1341 HOST_WIDE_INT hv, lv;
1342 REAL_VALUE_TYPE d;
1343
1344 if (CONST_INT_P (op))
1345 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1346 else
1347 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1348
1349 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1350 d = real_value_truncate (mode, d);
1351 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1352 }
1353 else if (code == UNSIGNED_FLOAT
1354 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1355 {
1356 HOST_WIDE_INT hv, lv;
1357 REAL_VALUE_TYPE d;
1358
1359 if (CONST_INT_P (op))
1360 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1361 else
1362 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1363
1364 if (op_mode == VOIDmode
1365 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1366 /* We should never get a negative number. */
1367 gcc_assert (hv >= 0);
1368 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1369 hv = 0, lv &= GET_MODE_MASK (op_mode);
1370
1371 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1372 d = real_value_truncate (mode, d);
1373 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1374 }
1375
1376 if (CONST_INT_P (op)
1377 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1378 {
1379 HOST_WIDE_INT arg0 = INTVAL (op);
1380 HOST_WIDE_INT val;
1381
1382 switch (code)
1383 {
1384 case NOT:
1385 val = ~ arg0;
1386 break;
1387
1388 case NEG:
1389 val = - arg0;
1390 break;
1391
1392 case ABS:
1393 val = (arg0 >= 0 ? arg0 : - arg0);
1394 break;
1395
1396 case FFS:
1397 arg0 &= GET_MODE_MASK (mode);
1398 val = ffs_hwi (arg0);
1399 break;
1400
1401 case CLZ:
1402 arg0 &= GET_MODE_MASK (mode);
1403 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1404 ;
1405 else
1406 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1407 break;
1408
1409 case CLRSB:
1410 arg0 &= GET_MODE_MASK (mode);
1411 if (arg0 == 0)
1412 val = GET_MODE_PRECISION (mode) - 1;
1413 else if (arg0 >= 0)
1414 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1415 else if (arg0 < 0)
1416 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1417 break;
1418
1419 case CTZ:
1420 arg0 &= GET_MODE_MASK (mode);
1421 if (arg0 == 0)
1422 {
1423 /* Even if the value at zero is undefined, we have to come
1424 up with some replacement. Seems good enough. */
1425 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1426 val = GET_MODE_PRECISION (mode);
1427 }
1428 else
1429 val = ctz_hwi (arg0);
1430 break;
1431
1432 case POPCOUNT:
1433 arg0 &= GET_MODE_MASK (mode);
1434 val = 0;
1435 while (arg0)
1436 val++, arg0 &= arg0 - 1;
1437 break;
1438
1439 case PARITY:
1440 arg0 &= GET_MODE_MASK (mode);
1441 val = 0;
1442 while (arg0)
1443 val++, arg0 &= arg0 - 1;
1444 val &= 1;
1445 break;
1446
1447 case BSWAP:
1448 {
1449 unsigned int s;
1450
1451 val = 0;
1452 for (s = 0; s < width; s += 8)
1453 {
1454 unsigned int d = width - s - 8;
1455 unsigned HOST_WIDE_INT byte;
1456 byte = (arg0 >> s) & 0xff;
1457 val |= byte << d;
1458 }
1459 }
1460 break;
1461
1462 case TRUNCATE:
1463 val = arg0;
1464 break;
1465
1466 case ZERO_EXTEND:
1467 /* When zero-extending a CONST_INT, we need to know its
1468 original mode. */
1469 gcc_assert (op_mode != VOIDmode);
1470 if (op_width == HOST_BITS_PER_WIDE_INT)
1471 {
1472 /* If we were really extending the mode,
1473 we would have to distinguish between zero-extension
1474 and sign-extension. */
1475 gcc_assert (width == op_width);
1476 val = arg0;
1477 }
1478 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1479 val = arg0 & GET_MODE_MASK (op_mode);
1480 else
1481 return 0;
1482 break;
1483
1484 case SIGN_EXTEND:
1485 if (op_mode == VOIDmode)
1486 op_mode = mode;
1487 op_width = GET_MODE_PRECISION (op_mode);
1488 if (op_width == HOST_BITS_PER_WIDE_INT)
1489 {
1490 /* If we were really extending the mode,
1491 we would have to distinguish between zero-extension
1492 and sign-extension. */
1493 gcc_assert (width == op_width);
1494 val = arg0;
1495 }
1496 else if (op_width < HOST_BITS_PER_WIDE_INT)
1497 {
1498 val = arg0 & GET_MODE_MASK (op_mode);
1499 if (val_signbit_known_set_p (op_mode, val))
1500 val |= ~GET_MODE_MASK (op_mode);
1501 }
1502 else
1503 return 0;
1504 break;
1505
1506 case SQRT:
1507 case FLOAT_EXTEND:
1508 case FLOAT_TRUNCATE:
1509 case SS_TRUNCATE:
1510 case US_TRUNCATE:
1511 case SS_NEG:
1512 case US_NEG:
1513 case SS_ABS:
1514 return 0;
1515
1516 default:
1517 gcc_unreachable ();
1518 }
1519
1520 return gen_int_mode (val, mode);
1521 }
1522
1523 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1524 for a DImode operation on a CONST_INT. */
1525 else if (width <= HOST_BITS_PER_DOUBLE_INT
1526 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1527 {
1528 unsigned HOST_WIDE_INT l1, lv;
1529 HOST_WIDE_INT h1, hv;
1530
1531 if (CONST_DOUBLE_AS_INT_P (op))
1532 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1533 else
1534 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1535
1536 switch (code)
1537 {
1538 case NOT:
1539 lv = ~ l1;
1540 hv = ~ h1;
1541 break;
1542
1543 case NEG:
1544 neg_double (l1, h1, &lv, &hv);
1545 break;
1546
1547 case ABS:
1548 if (h1 < 0)
1549 neg_double (l1, h1, &lv, &hv);
1550 else
1551 lv = l1, hv = h1;
1552 break;
1553
1554 case FFS:
1555 hv = 0;
1556 if (l1 != 0)
1557 lv = ffs_hwi (l1);
1558 else if (h1 != 0)
1559 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1560 else
1561 lv = 0;
1562 break;
1563
1564 case CLZ:
1565 hv = 0;
1566 if (h1 != 0)
1567 lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1568 - HOST_BITS_PER_WIDE_INT;
1569 else if (l1 != 0)
1570 lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1571 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1572 lv = GET_MODE_PRECISION (mode);
1573 break;
1574
1575 case CTZ:
1576 hv = 0;
1577 if (l1 != 0)
1578 lv = ctz_hwi (l1);
1579 else if (h1 != 0)
1580 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1581 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1582 lv = GET_MODE_PRECISION (mode);
1583 break;
1584
1585 case POPCOUNT:
1586 hv = 0;
1587 lv = 0;
1588 while (l1)
1589 lv++, l1 &= l1 - 1;
1590 while (h1)
1591 lv++, h1 &= h1 - 1;
1592 break;
1593
1594 case PARITY:
1595 hv = 0;
1596 lv = 0;
1597 while (l1)
1598 lv++, l1 &= l1 - 1;
1599 while (h1)
1600 lv++, h1 &= h1 - 1;
1601 lv &= 1;
1602 break;
1603
1604 case BSWAP:
1605 {
1606 unsigned int s;
1607
1608 hv = 0;
1609 lv = 0;
1610 for (s = 0; s < width; s += 8)
1611 {
1612 unsigned int d = width - s - 8;
1613 unsigned HOST_WIDE_INT byte;
1614
1615 if (s < HOST_BITS_PER_WIDE_INT)
1616 byte = (l1 >> s) & 0xff;
1617 else
1618 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1619
1620 if (d < HOST_BITS_PER_WIDE_INT)
1621 lv |= byte << d;
1622 else
1623 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1624 }
1625 }
1626 break;
1627
1628 case TRUNCATE:
1629 /* This is just a change-of-mode, so do nothing. */
1630 lv = l1, hv = h1;
1631 break;
1632
1633 case ZERO_EXTEND:
1634 gcc_assert (op_mode != VOIDmode);
1635
1636 if (op_width > HOST_BITS_PER_WIDE_INT)
1637 return 0;
1638
1639 hv = 0;
1640 lv = l1 & GET_MODE_MASK (op_mode);
1641 break;
1642
1643 case SIGN_EXTEND:
1644 if (op_mode == VOIDmode
1645 || op_width > HOST_BITS_PER_WIDE_INT)
1646 return 0;
1647 else
1648 {
1649 lv = l1 & GET_MODE_MASK (op_mode);
1650 if (val_signbit_known_set_p (op_mode, lv))
1651 lv |= ~GET_MODE_MASK (op_mode);
1652
1653 hv = HWI_SIGN_EXTEND (lv);
1654 }
1655 break;
1656
1657 case SQRT:
1658 return 0;
1659
1660 default:
1661 return 0;
1662 }
1663
1664 return immed_double_const (lv, hv, mode);
1665 }
1666
1667 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1668 && SCALAR_FLOAT_MODE_P (mode)
1669 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1670 {
1671 REAL_VALUE_TYPE d, t;
1672 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1673
1674 switch (code)
1675 {
1676 case SQRT:
1677 if (HONOR_SNANS (mode) && real_isnan (&d))
1678 return 0;
1679 real_sqrt (&t, mode, &d);
1680 d = t;
1681 break;
1682 case ABS:
1683 d = real_value_abs (&d);
1684 break;
1685 case NEG:
1686 d = real_value_negate (&d);
1687 break;
1688 case FLOAT_TRUNCATE:
1689 d = real_value_truncate (mode, d);
1690 break;
1691 case FLOAT_EXTEND:
1692 /* All this does is change the mode, unless changing
1693 mode class. */
1694 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1695 real_convert (&d, mode, &d);
1696 break;
1697 case FIX:
1698 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1699 break;
1700 case NOT:
1701 {
1702 long tmp[4];
1703 int i;
1704
1705 real_to_target (tmp, &d, GET_MODE (op));
1706 for (i = 0; i < 4; i++)
1707 tmp[i] = ~tmp[i];
1708 real_from_target (&d, tmp, mode);
1709 break;
1710 }
1711 default:
1712 gcc_unreachable ();
1713 }
1714 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1715 }
1716
1717 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1718 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1719 && GET_MODE_CLASS (mode) == MODE_INT
1720 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1721 {
1722 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1723 operators are intentionally left unspecified (to ease implementation
1724 by target backends), for consistency, this routine implements the
1725 same semantics for constant folding as used by the middle-end. */
1726
1727 /* This was formerly used only for non-IEEE float.
1728 eggert@twinsun.com says it is safe for IEEE also. */
1729 HOST_WIDE_INT xh, xl, th, tl;
1730 REAL_VALUE_TYPE x, t;
1731 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1732 switch (code)
1733 {
1734 case FIX:
1735 if (REAL_VALUE_ISNAN (x))
1736 return const0_rtx;
1737
1738 /* Test against the signed upper bound. */
1739 if (width > HOST_BITS_PER_WIDE_INT)
1740 {
1741 th = ((unsigned HOST_WIDE_INT) 1
1742 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1743 tl = -1;
1744 }
1745 else
1746 {
1747 th = 0;
1748 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1749 }
1750 real_from_integer (&t, VOIDmode, tl, th, 0);
1751 if (REAL_VALUES_LESS (t, x))
1752 {
1753 xh = th;
1754 xl = tl;
1755 break;
1756 }
1757
1758 /* Test against the signed lower bound. */
1759 if (width > HOST_BITS_PER_WIDE_INT)
1760 {
1761 th = (unsigned HOST_WIDE_INT) (-1)
1762 << (width - HOST_BITS_PER_WIDE_INT - 1);
1763 tl = 0;
1764 }
1765 else
1766 {
1767 th = -1;
1768 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1769 }
1770 real_from_integer (&t, VOIDmode, tl, th, 0);
1771 if (REAL_VALUES_LESS (x, t))
1772 {
1773 xh = th;
1774 xl = tl;
1775 break;
1776 }
1777 REAL_VALUE_TO_INT (&xl, &xh, x);
1778 break;
1779
1780 case UNSIGNED_FIX:
1781 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1782 return const0_rtx;
1783
1784 /* Test against the unsigned upper bound. */
1785 if (width == HOST_BITS_PER_DOUBLE_INT)
1786 {
1787 th = -1;
1788 tl = -1;
1789 }
1790 else if (width >= HOST_BITS_PER_WIDE_INT)
1791 {
1792 th = ((unsigned HOST_WIDE_INT) 1
1793 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1794 tl = -1;
1795 }
1796 else
1797 {
1798 th = 0;
1799 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1800 }
1801 real_from_integer (&t, VOIDmode, tl, th, 1);
1802 if (REAL_VALUES_LESS (t, x))
1803 {
1804 xh = th;
1805 xl = tl;
1806 break;
1807 }
1808
1809 REAL_VALUE_TO_INT (&xl, &xh, x);
1810 break;
1811
1812 default:
1813 gcc_unreachable ();
1814 }
1815 return immed_double_const (xl, xh, mode);
1816 }
1817
1818 return NULL_RTX;
1819 }
1820 \f
1821 /* Subroutine of simplify_binary_operation to simplify a commutative,
1822 associative binary operation CODE with result mode MODE, operating
1823 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1824 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1825 canonicalization is possible. */
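/* For instance, (plus (plus a b) (plus c d)) is relinearized as
   (plus (plus (plus a b) c) d), and (and (and x (const_int 255)) y) is
   canonicalized as (and (and x y) (const_int 255)), keeping the constant
   outermost where later folds can reach it.  */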
1826
1827 static rtx
1828 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1829 rtx op0, rtx op1)
1830 {
1831 rtx tem;
1832
1833 /* Linearize the operator to the left. */
1834 if (GET_CODE (op1) == code)
1835 {
 1836       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
1837 if (GET_CODE (op0) == code)
1838 {
1839 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1840 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1841 }
1842
1843 /* "a op (b op c)" becomes "(b op c) op a". */
1844 if (! swap_commutative_operands_p (op1, op0))
1845 return simplify_gen_binary (code, mode, op1, op0);
1846
1847 tem = op0;
1848 op0 = op1;
1849 op1 = tem;
1850 }
1851
1852 if (GET_CODE (op0) == code)
1853 {
1854 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1855 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1856 {
1857 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1858 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1859 }
1860
1861 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1862 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1863 if (tem != 0)
1864 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1865
1866 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1867 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1868 if (tem != 0)
1869 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1870 }
1871
1872 return 0;
1873 }
1874
1875
1876 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1877 and OP1. Return 0 if no simplification is possible.
1878
1879 Don't use this for relational operations such as EQ or LT.
1880 Use simplify_relational_operation instead. */
1881 rtx
1882 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1883 rtx op0, rtx op1)
1884 {
1885 rtx trueop0, trueop1;
1886 rtx tem;
1887
1888 /* Relational operations don't work here. We must know the mode
1889 of the operands in order to do the comparison correctly.
1890 Assuming a full word can give incorrect results.
1891 Consider comparing 128 with -128 in QImode. */
1892 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1893 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1894
1895 /* Make sure the constant is second. */
1896 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1897 && swap_commutative_operands_p (op0, op1))
1898 {
1899 tem = op0, op0 = op1, op1 = tem;
1900 }
1901
1902 trueop0 = avoid_constant_pool_reference (op0);
1903 trueop1 = avoid_constant_pool_reference (op1);
1904
1905 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1906 if (tem)
1907 return tem;
1908 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1909 }
1910
1911 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1912 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1913 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1914 actual constants. */
1915
1916 static rtx
1917 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1918 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1919 {
1920 rtx tem, reversed, opleft, opright;
1921 HOST_WIDE_INT val;
1922 unsigned int width = GET_MODE_PRECISION (mode);
1923
1924 /* Even if we can't compute a constant result,
1925 there are some cases worth simplifying. */
1926
1927 switch (code)
1928 {
1929 case PLUS:
1930 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1931 when x is NaN, infinite, or finite and nonzero. They aren't
1932 when x is -0 and the rounding mode is not towards -infinity,
1933 since (-0) + 0 is then 0. */
1934 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1935 return op0;
1936
1937 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1938 transformations are safe even for IEEE. */
1939 if (GET_CODE (op0) == NEG)
1940 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1941 else if (GET_CODE (op1) == NEG)
1942 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1943
1944 /* (~a) + 1 -> -a */
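/* (Two's complement identity: ~a == -a - 1 in the mode, so ~a + 1 == -a
   for every a, including the most negative value.)  */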
1945 if (INTEGRAL_MODE_P (mode)
1946 && GET_CODE (op0) == NOT
1947 && trueop1 == const1_rtx)
1948 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1949
1950 /* Handle both-operands-constant cases. We can only add
1951 CONST_INTs to constants since the sum of relocatable symbols
1952 can't be handled by most assemblers. Don't add CONST_INT
1953 to CONST_INT since overflow won't be computed properly if wider
1954 than HOST_BITS_PER_WIDE_INT. */
1955
1956 if ((GET_CODE (op0) == CONST
1957 || GET_CODE (op0) == SYMBOL_REF
1958 || GET_CODE (op0) == LABEL_REF)
1959 && CONST_INT_P (op1))
1960 return plus_constant (mode, op0, INTVAL (op1));
1961 else if ((GET_CODE (op1) == CONST
1962 || GET_CODE (op1) == SYMBOL_REF
1963 || GET_CODE (op1) == LABEL_REF)
1964 && CONST_INT_P (op0))
1965 return plus_constant (mode, op1, INTVAL (op0));
1966
1967 /* See if this is something like X * C - X or vice versa or
1968 if the multiplication is written as a shift. If so, we can
1969 distribute and make a new multiply, shift, or maybe just
1970 have X (if C is 2 in the example above). But don't make
1971 something more expensive than we had before. */
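/* E.g. (plus (mult x 3) x) distributes to (mult x 4), and
   (plus (ashift x 2) x) to (mult x 5), provided the cost comparison
   below does not reject the new form.  */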
1972
1973 if (SCALAR_INT_MODE_P (mode))
1974 {
1975 double_int coeff0, coeff1;
1976 rtx lhs = op0, rhs = op1;
1977
1978 coeff0 = double_int_one;
1979 coeff1 = double_int_one;
1980
1981 if (GET_CODE (lhs) == NEG)
1982 {
1983 coeff0 = double_int_minus_one;
1984 lhs = XEXP (lhs, 0);
1985 }
1986 else if (GET_CODE (lhs) == MULT
1987 && CONST_INT_P (XEXP (lhs, 1)))
1988 {
1989 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1990 lhs = XEXP (lhs, 0);
1991 }
1992 else if (GET_CODE (lhs) == ASHIFT
1993 && CONST_INT_P (XEXP (lhs, 1))
1994 && INTVAL (XEXP (lhs, 1)) >= 0
1995 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1996 {
1997 coeff0 = double_int_setbit (double_int_zero,
1998 INTVAL (XEXP (lhs, 1)));
1999 lhs = XEXP (lhs, 0);
2000 }
2001
2002 if (GET_CODE (rhs) == NEG)
2003 {
2004 coeff1 = double_int_minus_one;
2005 rhs = XEXP (rhs, 0);
2006 }
2007 else if (GET_CODE (rhs) == MULT
2008 && CONST_INT_P (XEXP (rhs, 1)))
2009 {
2010 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
2011 rhs = XEXP (rhs, 0);
2012 }
2013 else if (GET_CODE (rhs) == ASHIFT
2014 && CONST_INT_P (XEXP (rhs, 1))
2015 && INTVAL (XEXP (rhs, 1)) >= 0
2016 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2017 {
2018 coeff1 = double_int_setbit (double_int_zero,
2019 INTVAL (XEXP (rhs, 1)));
2020 rhs = XEXP (rhs, 0);
2021 }
2022
2023 if (rtx_equal_p (lhs, rhs))
2024 {
2025 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2026 rtx coeff;
2027 double_int val;
2028 bool speed = optimize_function_for_speed_p (cfun);
2029
2030 val = double_int_add (coeff0, coeff1);
2031 coeff = immed_double_int_const (val, mode);
2032
2033 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2034 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2035 ? tem : 0;
2036 }
2037 }
2038
2039 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
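/* (Adding the sign bit can only flip the sign bit, since any carry out
   of the top bit is discarded, so it is equivalent to XORing it in.)  */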
2040 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2041 && GET_CODE (op0) == XOR
2042 && (CONST_INT_P (XEXP (op0, 1))
2043 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2044 && mode_signbit_p (mode, op1))
2045 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2046 simplify_gen_binary (XOR, mode, op1,
2047 XEXP (op0, 1)));
2048
2049 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2050 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2051 && GET_CODE (op0) == MULT
2052 && GET_CODE (XEXP (op0, 0)) == NEG)
2053 {
2054 rtx in1, in2;
2055
2056 in1 = XEXP (XEXP (op0, 0), 0);
2057 in2 = XEXP (op0, 1);
2058 return simplify_gen_binary (MINUS, mode, op1,
2059 simplify_gen_binary (MULT, mode,
2060 in1, in2));
2061 }
2062
2063 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2064 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2065 is 1. */
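/* E.g. with STORE_FLAG_VALUE == 1, (plus (eq A B) (const_int -1)) is
   0 when A == B and -1 otherwise, i.e. (neg (ne A B)).  */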
2066 if (COMPARISON_P (op0)
2067 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2068 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2069 && (reversed = reversed_comparison (op0, mode)))
2070 return
2071 simplify_gen_unary (NEG, mode, reversed, mode);
2072
2073 /* If one of the operands is a PLUS or a MINUS, see if we can
2074 simplify this by the associative law.
2075 Don't use the associative law for floating point.
2076 The inaccuracy makes it nonassociative,
2077 and subtle programs can break if operations are associated. */
2078
2079 if (INTEGRAL_MODE_P (mode)
2080 && (plus_minus_operand_p (op0)
2081 || plus_minus_operand_p (op1))
2082 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2083 return tem;
2084
2085 /* Reassociate floating point addition only when the user
2086 specifies associative math operations. */
2087 if (FLOAT_MODE_P (mode)
2088 && flag_associative_math)
2089 {
2090 tem = simplify_associative_operation (code, mode, op0, op1);
2091 if (tem)
2092 return tem;
2093 }
2094 break;
2095
2096 case COMPARE:
2097 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2098 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2099 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2100 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2101 {
2102 rtx xop00 = XEXP (op0, 0);
2103 rtx xop10 = XEXP (op1, 0);
2104
2105 #ifdef HAVE_cc0
2106 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2107 #else
2108 if (REG_P (xop00) && REG_P (xop10)
2109 && GET_MODE (xop00) == GET_MODE (xop10)
2110 && REGNO (xop00) == REGNO (xop10)
2111 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2112 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2113 #endif
2114 return xop00;
2115 }
2116 break;
2117
2118 case MINUS:
2119 /* We can't assume x-x is 0 even with non-IEEE floating point,
2120 but since it is zero except in very strange circumstances, we
2121 will treat it as zero with -ffinite-math-only. */
2122 if (rtx_equal_p (trueop0, trueop1)
2123 && ! side_effects_p (op0)
2124 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2125 return CONST0_RTX (mode);
2126
2127 /* Change subtraction from zero into negation. (0 - x) is the
2128 same as -x when x is NaN, infinite, or finite and nonzero.
2129 But if the mode has signed zeros, and does not round towards
2130 -infinity, then 0 - 0 is 0, not -0. */
2131 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2132 return simplify_gen_unary (NEG, mode, op1, mode);
2133
2134 /* (-1 - a) is ~a. */
2135 if (trueop0 == constm1_rtx)
2136 return simplify_gen_unary (NOT, mode, op1, mode);
2137
2138 /* Subtracting 0 has no effect unless the mode has signed zeros
2139 and supports rounding towards -infinity. In such a case,
2140 0 - 0 is -0. */
2141 if (!(HONOR_SIGNED_ZEROS (mode)
2142 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2143 && trueop1 == CONST0_RTX (mode))
2144 return op0;
2145
2146 /* See if this is something like X * C - X or vice versa or
2147 if the multiplication is written as a shift. If so, we can
2148 distribute and make a new multiply, shift, or maybe just
2149 have X (if C is 2 in the example above). But don't make
2150 something more expensive than we had before. */
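/* E.g. (minus (mult x 3) x) distributes to (mult x 2), and
   (minus x (ashift x 2)) to (mult x -3), again subject to the cost
   comparison below.  */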
2151
2152 if (SCALAR_INT_MODE_P (mode))
2153 {
2154 double_int coeff0, negcoeff1;
2155 rtx lhs = op0, rhs = op1;
2156
2157 coeff0 = double_int_one;
2158 negcoeff1 = double_int_minus_one;
2159
2160 if (GET_CODE (lhs) == NEG)
2161 {
2162 coeff0 = double_int_minus_one;
2163 lhs = XEXP (lhs, 0);
2164 }
2165 else if (GET_CODE (lhs) == MULT
2166 && CONST_INT_P (XEXP (lhs, 1)))
2167 {
2168 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2169 lhs = XEXP (lhs, 0);
2170 }
2171 else if (GET_CODE (lhs) == ASHIFT
2172 && CONST_INT_P (XEXP (lhs, 1))
2173 && INTVAL (XEXP (lhs, 1)) >= 0
2174 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2175 {
2176 coeff0 = double_int_setbit (double_int_zero,
2177 INTVAL (XEXP (lhs, 1)));
2178 lhs = XEXP (lhs, 0);
2179 }
2180
2181 if (GET_CODE (rhs) == NEG)
2182 {
2183 negcoeff1 = double_int_one;
2184 rhs = XEXP (rhs, 0);
2185 }
2186 else if (GET_CODE (rhs) == MULT
2187 && CONST_INT_P (XEXP (rhs, 1)))
2188 {
2189 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2190 rhs = XEXP (rhs, 0);
2191 }
2192 else if (GET_CODE (rhs) == ASHIFT
2193 && CONST_INT_P (XEXP (rhs, 1))
2194 && INTVAL (XEXP (rhs, 1)) >= 0
2195 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2196 {
2197 negcoeff1 = double_int_setbit (double_int_zero,
2198 INTVAL (XEXP (rhs, 1)));
2199 negcoeff1 = double_int_neg (negcoeff1);
2200 rhs = XEXP (rhs, 0);
2201 }
2202
2203 if (rtx_equal_p (lhs, rhs))
2204 {
2205 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2206 rtx coeff;
2207 double_int val;
2208 bool speed = optimize_function_for_speed_p (cfun);
2209
2210 val = double_int_add (coeff0, negcoeff1);
2211 coeff = immed_double_int_const (val, mode);
2212
2213 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2214 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2215 ? tem : 0;
2216 }
2217 }
2218
2219 /* (a - (-b)) -> (a + b). True even for IEEE. */
2220 if (GET_CODE (op1) == NEG)
2221 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2222
2223 /* (-x - c) may be simplified as (-c - x). */
2224 if (GET_CODE (op0) == NEG
2225 && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
2226 {
2227 tem = simplify_unary_operation (NEG, mode, op1, mode);
2228 if (tem)
2229 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2230 }
2231
2232 /* Don't let a relocatable value get a negative coeff. */
2233 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2234 return simplify_gen_binary (PLUS, mode,
2235 op0,
2236 neg_const_int (mode, op1));
2237
2238 /* (x - (x & y)) -> (x & ~y) */
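/* ((x & y) is a submask of x, so subtracting it simply clears those
   bits of x; no borrow can occur.)  */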
2239 if (GET_CODE (op1) == AND)
2240 {
2241 if (rtx_equal_p (op0, XEXP (op1, 0)))
2242 {
2243 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2244 GET_MODE (XEXP (op1, 1)));
2245 return simplify_gen_binary (AND, mode, op0, tem);
2246 }
2247 if (rtx_equal_p (op0, XEXP (op1, 1)))
2248 {
2249 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2250 GET_MODE (XEXP (op1, 0)));
2251 return simplify_gen_binary (AND, mode, op0, tem);
2252 }
2253 }
2254
2255 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2256 by reversing the comparison code if valid. */
2257 if (STORE_FLAG_VALUE == 1
2258 && trueop0 == const1_rtx
2259 && COMPARISON_P (op1)
2260 && (reversed = reversed_comparison (op1, mode)))
2261 return reversed;
2262
2263 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2264 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2265 && GET_CODE (op1) == MULT
2266 && GET_CODE (XEXP (op1, 0)) == NEG)
2267 {
2268 rtx in1, in2;
2269
2270 in1 = XEXP (XEXP (op1, 0), 0);
2271 in2 = XEXP (op1, 1);
2272 return simplify_gen_binary (PLUS, mode,
2273 simplify_gen_binary (MULT, mode,
2274 in1, in2),
2275 op0);
2276 }
2277
2278 /* Canonicalize (minus (neg A) (mult B C)) to
2279 (minus (mult (neg B) C) A). */
2280 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2281 && GET_CODE (op1) == MULT
2282 && GET_CODE (op0) == NEG)
2283 {
2284 rtx in1, in2;
2285
2286 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2287 in2 = XEXP (op1, 1);
2288 return simplify_gen_binary (MINUS, mode,
2289 simplify_gen_binary (MULT, mode,
2290 in1, in2),
2291 XEXP (op0, 0));
2292 }
2293
2294 /* If one of the operands is a PLUS or a MINUS, see if we can
2295 simplify this by the associative law. This will, for example,
2296 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2297 Don't use the associative law for floating point.
2298 The inaccuracy makes it nonassociative,
2299 and subtle programs can break if operations are associated. */
2300
2301 if (INTEGRAL_MODE_P (mode)
2302 && (plus_minus_operand_p (op0)
2303 || plus_minus_operand_p (op1))
2304 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2305 return tem;
2306 break;
2307
2308 case MULT:
2309 if (trueop1 == constm1_rtx)
2310 return simplify_gen_unary (NEG, mode, op0, mode);
2311
2312 if (GET_CODE (op0) == NEG)
2313 {
2314 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2315 /* If op1 is a MULT as well and simplify_unary_operation
2316 just moved the NEG to the second operand, simplify_gen_binary
2317 below could, via simplify_associative_operation, move
2318 the NEG around again and recurse endlessly. */
2319 if (temp
2320 && GET_CODE (op1) == MULT
2321 && GET_CODE (temp) == MULT
2322 && XEXP (op1, 0) == XEXP (temp, 0)
2323 && GET_CODE (XEXP (temp, 1)) == NEG
2324 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2325 temp = NULL_RTX;
2326 if (temp)
2327 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2328 }
2329 if (GET_CODE (op1) == NEG)
2330 {
2331 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2332 /* If op0 is a MULT as well and simplify_unary_operation
2333 just moved the NEG to the second operand, simplify_gen_binary
2334 below could, via simplify_associative_operation, move
2335 the NEG around again and recurse endlessly. */
2336 if (temp
2337 && GET_CODE (op0) == MULT
2338 && GET_CODE (temp) == MULT
2339 && XEXP (op0, 0) == XEXP (temp, 0)
2340 && GET_CODE (XEXP (temp, 1)) == NEG
2341 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2342 temp = NULL_RTX;
2343 if (temp)
2344 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2345 }
2346
2347 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2348 x is NaN, since x * 0 is then also NaN. Nor is it valid
2349 when the mode has signed zeros, since multiplying a negative
2350 number by 0 will give -0, not 0. */
2351 if (!HONOR_NANS (mode)
2352 && !HONOR_SIGNED_ZEROS (mode)
2353 && trueop1 == CONST0_RTX (mode)
2354 && ! side_effects_p (op0))
2355 return op1;
2356
2357 /* In IEEE floating point, x*1 is not equivalent to x for
2358 signalling NaNs. */
2359 if (!HONOR_SNANS (mode)
2360 && trueop1 == CONST1_RTX (mode))
2361 return op0;
2362
2363 /* Convert multiply by constant power of two into shift unless
2364 we are still generating RTL. This test is a kludge. */
2365 if (CONST_INT_P (trueop1)
2366 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2367 /* If the mode is larger than the host word size, and the
2368 uppermost bit is set, then this isn't a power of two due
2369 to implicit sign extension. */
2370 && (width <= HOST_BITS_PER_WIDE_INT
2371 || val != HOST_BITS_PER_WIDE_INT - 1))
2372 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2373
2374 /* Likewise for multipliers wider than a word. */
2375 if (CONST_DOUBLE_AS_INT_P (trueop1)
2376 && GET_MODE (op0) == mode
2377 && CONST_DOUBLE_LOW (trueop1) == 0
2378 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2379 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2380 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2381 return simplify_gen_binary (ASHIFT, mode, op0,
2382 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2383
2384 /* x*2 is x+x and x*(-1) is -x */
2385 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2386 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2387 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2388 && GET_MODE (op0) == mode)
2389 {
2390 REAL_VALUE_TYPE d;
2391 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2392
2393 if (REAL_VALUES_EQUAL (d, dconst2))
2394 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2395
2396 if (!HONOR_SNANS (mode)
2397 && REAL_VALUES_EQUAL (d, dconstm1))
2398 return simplify_gen_unary (NEG, mode, op0, mode);
2399 }
2400
2401 /* Optimize -x * -x as x * x. */
2402 if (FLOAT_MODE_P (mode)
2403 && GET_CODE (op0) == NEG
2404 && GET_CODE (op1) == NEG
2405 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2406 && !side_effects_p (XEXP (op0, 0)))
2407 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2408
2409 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2410 if (SCALAR_FLOAT_MODE_P (mode)
2411 && GET_CODE (op0) == ABS
2412 && GET_CODE (op1) == ABS
2413 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2414 && !side_effects_p (XEXP (op0, 0)))
2415 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2416
2417 /* Reassociate multiplication, but for floating point MULTs
2418 only when the user specifies unsafe math optimizations. */
2419 if (! FLOAT_MODE_P (mode)
2420 || flag_unsafe_math_optimizations)
2421 {
2422 tem = simplify_associative_operation (code, mode, op0, op1);
2423 if (tem)
2424 return tem;
2425 }
2426 break;
2427
2428 case IOR:
2429 if (trueop1 == CONST0_RTX (mode))
2430 return op0;
2431 if (INTEGRAL_MODE_P (mode)
2432 && trueop1 == CONSTM1_RTX (mode)
2433 && !side_effects_p (op0))
2434 return op1;
2435 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2436 return op0;
2437 /* A | (~A) -> -1 */
2438 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2439 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2440 && ! side_effects_p (op0)
2441 && SCALAR_INT_MODE_P (mode))
2442 return constm1_rtx;
2443
2444 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2445 if (CONST_INT_P (op1)
2446 && HWI_COMPUTABLE_MODE_P (mode)
2447 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2448 && !side_effects_p (op0))
2449 return op1;
2450
2451 /* Canonicalize (X & C1) | C2. */
2452 if (GET_CODE (op0) == AND
2453 && CONST_INT_P (trueop1)
2454 && CONST_INT_P (XEXP (op0, 1)))
2455 {
2456 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2457 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2458 HOST_WIDE_INT c2 = INTVAL (trueop1);
2459
2460 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2461 if ((c1 & c2) == c1
2462 && !side_effects_p (XEXP (op0, 0)))
2463 return trueop1;
2464
2465 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2466 if (((c1|c2) & mask) == mask)
2467 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2468
2469 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2470 if (((c1 & ~c2) & mask) != (c1 & mask))
2471 {
2472 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2473 gen_int_mode (c1 & ~c2, mode));
2474 return simplify_gen_binary (IOR, mode, tem, op1);
2475 }
2476 }
2477
2478 /* Convert (A & B) | A to A. */
2479 if (GET_CODE (op0) == AND
2480 && (rtx_equal_p (XEXP (op0, 0), op1)
2481 || rtx_equal_p (XEXP (op0, 1), op1))
2482 && ! side_effects_p (XEXP (op0, 0))
2483 && ! side_effects_p (XEXP (op0, 1)))
2484 return op1;
2485
2486 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2487 mode size to (rotate A CX). */
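/* E.g. in a 32-bit mode, (ior (ashift A 8) (lshiftrt A 24)) becomes
   (rotate A 8).  */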
2488
2489 if (GET_CODE (op1) == ASHIFT
2490 || GET_CODE (op1) == SUBREG)
2491 {
2492 opleft = op1;
2493 opright = op0;
2494 }
2495 else
2496 {
2497 opright = op1;
2498 opleft = op0;
2499 }
2500
2501 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2502 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2503 && CONST_INT_P (XEXP (opleft, 1))
2504 && CONST_INT_P (XEXP (opright, 1))
2505 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2506 == GET_MODE_PRECISION (mode)))
2507 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2508
2509 /* Same, but for ashift that has been "simplified" to a wider mode
2510 by simplify_shift_const. */
2511
2512 if (GET_CODE (opleft) == SUBREG
2513 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2514 && GET_CODE (opright) == LSHIFTRT
2515 && GET_CODE (XEXP (opright, 0)) == SUBREG
2516 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2517 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2518 && (GET_MODE_SIZE (GET_MODE (opleft))
2519 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2520 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2521 SUBREG_REG (XEXP (opright, 0)))
2522 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2523 && CONST_INT_P (XEXP (opright, 1))
2524 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2525 == GET_MODE_PRECISION (mode)))
2526 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2527 XEXP (SUBREG_REG (opleft), 1));
2528
2529 /* If we have (ior (and X C1) C2), simplify this by making
2530 C1 as small as possible if C1 actually changes. */
2531 if (CONST_INT_P (op1)
2532 && (HWI_COMPUTABLE_MODE_P (mode)
2533 || INTVAL (op1) > 0)
2534 && GET_CODE (op0) == AND
2535 && CONST_INT_P (XEXP (op0, 1))
2536 && CONST_INT_P (op1)
2537 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2538 return simplify_gen_binary (IOR, mode,
2539 simplify_gen_binary
2540 (AND, mode, XEXP (op0, 0),
2541 GEN_INT (UINTVAL (XEXP (op0, 1))
2542 & ~UINTVAL (op1))),
2543 op1);
2544
2545 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2546 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2547 the PLUS does not affect any of the bits in OP1: then we can do
2548 the IOR as a PLUS and we can associate. This is valid if OP1
2549 can be safely shifted left C bits. */
2550 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2551 && GET_CODE (XEXP (op0, 0)) == PLUS
2552 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2553 && CONST_INT_P (XEXP (op0, 1))
2554 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2555 {
2556 int count = INTVAL (XEXP (op0, 1));
2557 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2558
2559 if (mask >> count == INTVAL (trueop1)
2560 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2561 return simplify_gen_binary (ASHIFTRT, mode,
2562 plus_constant (mode, XEXP (op0, 0),
2563 mask),
2564 XEXP (op0, 1));
2565 }
2566
2567 tem = simplify_associative_operation (code, mode, op0, op1);
2568 if (tem)
2569 return tem;
2570 break;
2571
2572 case XOR:
2573 if (trueop1 == CONST0_RTX (mode))
2574 return op0;
2575 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2576 return simplify_gen_unary (NOT, mode, op0, mode);
2577 if (rtx_equal_p (trueop0, trueop1)
2578 && ! side_effects_p (op0)
2579 && GET_MODE_CLASS (mode) != MODE_CC)
2580 return CONST0_RTX (mode);
2581
2582 /* Canonicalize XOR of the most significant bit to PLUS. */
2583 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2584 && mode_signbit_p (mode, op1))
2585 return simplify_gen_binary (PLUS, mode, op0, op1);
2586 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2587 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2588 && GET_CODE (op0) == PLUS
2589 && (CONST_INT_P (XEXP (op0, 1))
2590 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2591 && mode_signbit_p (mode, XEXP (op0, 1)))
2592 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2593 simplify_gen_binary (XOR, mode, op1,
2594 XEXP (op0, 1)));
2595
2596 /* If we are XORing two things that have no bits in common,
2597 convert them into an IOR. This helps to detect rotation encoded
2598 using those methods and possibly other simplifications. */
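/* (When the operands have no bits in common, A ^ B, A | B and A + B
   all produce the same value, so rewriting as IOR loses nothing and
   exposes the IOR simplifications above.)  */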
2599
2600 if (HWI_COMPUTABLE_MODE_P (mode)
2601 && (nonzero_bits (op0, mode)
2602 & nonzero_bits (op1, mode)) == 0)
2603 return (simplify_gen_binary (IOR, mode, op0, op1));
2604
2605 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2606 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2607 (NOT y). */
2608 {
2609 int num_negated = 0;
2610
2611 if (GET_CODE (op0) == NOT)
2612 num_negated++, op0 = XEXP (op0, 0);
2613 if (GET_CODE (op1) == NOT)
2614 num_negated++, op1 = XEXP (op1, 0);
2615
2616 if (num_negated == 2)
2617 return simplify_gen_binary (XOR, mode, op0, op1);
2618 else if (num_negated == 1)
2619 return simplify_gen_unary (NOT, mode,
2620 simplify_gen_binary (XOR, mode, op0, op1),
2621 mode);
2622 }
2623
2624 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2625 correspond to a machine insn or result in further simplifications
2626 if B is a constant. */
2627
2628 if (GET_CODE (op0) == AND
2629 && rtx_equal_p (XEXP (op0, 1), op1)
2630 && ! side_effects_p (op1))
2631 return simplify_gen_binary (AND, mode,
2632 simplify_gen_unary (NOT, mode,
2633 XEXP (op0, 0), mode),
2634 op1);
2635
2636 else if (GET_CODE (op0) == AND
2637 && rtx_equal_p (XEXP (op0, 0), op1)
2638 && ! side_effects_p (op1))
2639 return simplify_gen_binary (AND, mode,
2640 simplify_gen_unary (NOT, mode,
2641 XEXP (op0, 1), mode),
2642 op1);
2643
2644 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2645 we can transform like this:
2646 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2647 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2648 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2649 Attempt a few simplifications when B and C are both constants. */
2650 if (GET_CODE (op0) == AND
2651 && CONST_INT_P (op1)
2652 && CONST_INT_P (XEXP (op0, 1)))
2653 {
2654 rtx a = XEXP (op0, 0);
2655 rtx b = XEXP (op0, 1);
2656 rtx c = op1;
2657 HOST_WIDE_INT bval = INTVAL (b);
2658 HOST_WIDE_INT cval = INTVAL (c);
2659
2660 rtx na_c
2661 = simplify_binary_operation (AND, mode,
2662 simplify_gen_unary (NOT, mode, a, mode),
2663 c);
2664 if ((~cval & bval) == 0)
2665 {
2666 /* Try to simplify ~A&C | ~B&C. */
2667 if (na_c != NULL_RTX)
2668 return simplify_gen_binary (IOR, mode, na_c,
2669 GEN_INT (~bval & cval));
2670 }
2671 else
2672 {
2673 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2674 if (na_c == const0_rtx)
2675 {
2676 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2677 GEN_INT (~cval & bval));
2678 return simplify_gen_binary (IOR, mode, a_nc_b,
2679 GEN_INT (~bval & cval));
2680 }
2681 }
2682 }
2683
2684 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2685 comparison if STORE_FLAG_VALUE is 1. */
2686 if (STORE_FLAG_VALUE == 1
2687 && trueop1 == const1_rtx
2688 && COMPARISON_P (op0)
2689 && (reversed = reversed_comparison (op0, mode)))
2690 return reversed;
2691
2692 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2693 is (lt foo (const_int 0)), so we can perform the above
2694 simplification if STORE_FLAG_VALUE is 1. */
2695
2696 if (STORE_FLAG_VALUE == 1
2697 && trueop1 == const1_rtx
2698 && GET_CODE (op0) == LSHIFTRT
2699 && CONST_INT_P (XEXP (op0, 1))
2700 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2701 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2702
2703 /* (xor (comparison foo bar) (const_int sign-bit)) can likewise become
2704 the reversed comparison when STORE_FLAG_VALUE is the sign bit. */
2705 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2706 && trueop1 == const_true_rtx
2707 && COMPARISON_P (op0)
2708 && (reversed = reversed_comparison (op0, mode)))
2709 return reversed;
2710
2711 tem = simplify_associative_operation (code, mode, op0, op1);
2712 if (tem)
2713 return tem;
2714 break;
2715
2716 case AND:
2717 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2718 return trueop1;
2719 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2720 return op0;
2721 if (HWI_COMPUTABLE_MODE_P (mode))
2722 {
2723 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2724 HOST_WIDE_INT nzop1;
2725 if (CONST_INT_P (trueop1))
2726 {
2727 HOST_WIDE_INT val1 = INTVAL (trueop1);
2728 /* If we are turning off bits already known off in OP0, we need
2729 not do an AND. */
2730 if ((nzop0 & ~val1) == 0)
2731 return op0;
2732 }
2733 nzop1 = nonzero_bits (trueop1, mode);
2734 /* If we are clearing all the nonzero bits, the result is zero. */
2735 if ((nzop1 & nzop0) == 0
2736 && !side_effects_p (op0) && !side_effects_p (op1))
2737 return CONST0_RTX (mode);
2738 }
2739 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2740 && GET_MODE_CLASS (mode) != MODE_CC)
2741 return op0;
2742 /* A & (~A) -> 0 */
2743 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2744 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2745 && ! side_effects_p (op0)
2746 && GET_MODE_CLASS (mode) != MODE_CC)
2747 return CONST0_RTX (mode);
2748
2749 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2750 there are no nonzero bits of C outside of X's mode. */
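/* (The AND clears every bit above X's mode, so it no longer matters
   whether the extension was sign or zero extension; the result is
   zero extended either way.)  */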
2751 if ((GET_CODE (op0) == SIGN_EXTEND
2752 || GET_CODE (op0) == ZERO_EXTEND)
2753 && CONST_INT_P (trueop1)
2754 && HWI_COMPUTABLE_MODE_P (mode)
2755 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2756 & UINTVAL (trueop1)) == 0)
2757 {
2758 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2759 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2760 gen_int_mode (INTVAL (trueop1),
2761 imode));
2762 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2763 }
2764
2765 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2766 we might be able to further simplify the AND with X and potentially
2767 remove the truncation altogether. */
2768 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2769 {
2770 rtx x = XEXP (op0, 0);
2771 enum machine_mode xmode = GET_MODE (x);
2772 tem = simplify_gen_binary (AND, xmode, x,
2773 gen_int_mode (INTVAL (trueop1), xmode));
2774 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2775 }
2776
2777 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2778 if (GET_CODE (op0) == IOR
2779 && CONST_INT_P (trueop1)
2780 && CONST_INT_P (XEXP (op0, 1)))
2781 {
2782 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2783 return simplify_gen_binary (IOR, mode,
2784 simplify_gen_binary (AND, mode,
2785 XEXP (op0, 0), op1),
2786 gen_int_mode (tmp, mode));
2787 }
2788
2789 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2790 insn (and may simplify more). */
2791 if (GET_CODE (op0) == XOR
2792 && rtx_equal_p (XEXP (op0, 0), op1)
2793 && ! side_effects_p (op1))
2794 return simplify_gen_binary (AND, mode,
2795 simplify_gen_unary (NOT, mode,
2796 XEXP (op0, 1), mode),
2797 op1);
2798
2799 if (GET_CODE (op0) == XOR
2800 && rtx_equal_p (XEXP (op0, 1), op1)
2801 && ! side_effects_p (op1))
2802 return simplify_gen_binary (AND, mode,
2803 simplify_gen_unary (NOT, mode,
2804 XEXP (op0, 0), mode),
2805 op1);
2806
2807 /* Similarly for (~(A ^ B)) & A. */
2808 if (GET_CODE (op0) == NOT
2809 && GET_CODE (XEXP (op0, 0)) == XOR
2810 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2811 && ! side_effects_p (op1))
2812 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2813
2814 if (GET_CODE (op0) == NOT
2815 && GET_CODE (XEXP (op0, 0)) == XOR
2816 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2817 && ! side_effects_p (op1))
2818 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2819
2820 /* Convert (A | B) & A to A. */
2821 if (GET_CODE (op0) == IOR
2822 && (rtx_equal_p (XEXP (op0, 0), op1)
2823 || rtx_equal_p (XEXP (op0, 1), op1))
2824 && ! side_effects_p (XEXP (op0, 0))
2825 && ! side_effects_p (XEXP (op0, 1)))
2826 return op1;
2827
2828 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2829 ((A & N) + B) & M -> (A + B) & M
2830 Similarly if (N & M) == 0,
2831 ((A | N) + B) & M -> (A + B) & M
2832 and for - instead of + and/or ^ instead of |.
2833 Also, if (N & M) == 0, then
2834 (A +- N) & M -> A & M. */
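/* E.g. with M == 0xff and N == 0xffff, ((A & 0xffff) + B) & 0xff is
   (A + B) & 0xff: the bits of A masked off by N lie above M and can
   only influence the sum through carries that propagate upward, never
   down into M.  */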
2835 if (CONST_INT_P (trueop1)
2836 && HWI_COMPUTABLE_MODE_P (mode)
2837 && ~UINTVAL (trueop1)
2838 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2839 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2840 {
2841 rtx pmop[2];
2842 int which;
2843
2844 pmop[0] = XEXP (op0, 0);
2845 pmop[1] = XEXP (op0, 1);
2846
2847 if (CONST_INT_P (pmop[1])
2848 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2849 return simplify_gen_binary (AND, mode, pmop[0], op1);
2850
2851 for (which = 0; which < 2; which++)
2852 {
2853 tem = pmop[which];
2854 switch (GET_CODE (tem))
2855 {
2856 case AND:
2857 if (CONST_INT_P (XEXP (tem, 1))
2858 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2859 == UINTVAL (trueop1))
2860 pmop[which] = XEXP (tem, 0);
2861 break;
2862 case IOR:
2863 case XOR:
2864 if (CONST_INT_P (XEXP (tem, 1))
2865 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2866 pmop[which] = XEXP (tem, 0);
2867 break;
2868 default:
2869 break;
2870 }
2871 }
2872
2873 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2874 {
2875 tem = simplify_gen_binary (GET_CODE (op0), mode,
2876 pmop[0], pmop[1]);
2877 return simplify_gen_binary (code, mode, tem, op1);
2878 }
2879 }
2880
2881 /* (and X (ior (not X) Y)) -> (and X Y) */
2882 if (GET_CODE (op1) == IOR
2883 && GET_CODE (XEXP (op1, 0)) == NOT
2884 && op0 == XEXP (XEXP (op1, 0), 0))
2885 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2886
2887 /* (and (ior (not X) Y) X) -> (and X Y) */
2888 if (GET_CODE (op0) == IOR
2889 && GET_CODE (XEXP (op0, 0)) == NOT
2890 && op1 == XEXP (XEXP (op0, 0), 0))
2891 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2892
2893 tem = simplify_associative_operation (code, mode, op0, op1);
2894 if (tem)
2895 return tem;
2896 break;
2897
2898 case UDIV:
2899 /* 0/x is 0 (or x&0 if x has side-effects). */
2900 if (trueop0 == CONST0_RTX (mode))
2901 {
2902 if (side_effects_p (op1))
2903 return simplify_gen_binary (AND, mode, op1, trueop0);
2904 return trueop0;
2905 }
2906 /* x/1 is x. */
2907 if (trueop1 == CONST1_RTX (mode))
2908 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2909 /* Convert divide by power of two into shift. */
2910 if (CONST_INT_P (trueop1)
2911 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2912 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2913 break;
2914
2915 case DIV:
2916 /* Handle floating point and integers separately. */
2917 if (SCALAR_FLOAT_MODE_P (mode))
2918 {
2919 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2920 safe for modes with NaNs, since 0.0 / 0.0 will then be
2921 NaN rather than 0.0. Nor is it safe for modes with signed
2922 zeros, since dividing 0 by a negative number gives -0.0. */
2923 if (trueop0 == CONST0_RTX (mode)
2924 && !HONOR_NANS (mode)
2925 && !HONOR_SIGNED_ZEROS (mode)
2926 && ! side_effects_p (op1))
2927 return op0;
2928 /* x/1.0 is x. */
2929 if (trueop1 == CONST1_RTX (mode)
2930 && !HONOR_SNANS (mode))
2931 return op0;
2932
2933 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2934 && trueop1 != CONST0_RTX (mode))
2935 {
2936 REAL_VALUE_TYPE d;
2937 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2938
2939 /* x/-1.0 is -x. */
2940 if (REAL_VALUES_EQUAL (d, dconstm1)
2941 && !HONOR_SNANS (mode))
2942 return simplify_gen_unary (NEG, mode, op0, mode);
2943
2944 /* Change FP division by a constant into multiplication.
2945 Only do this with -freciprocal-math. */
2946 if (flag_reciprocal_math
2947 && !REAL_VALUES_EQUAL (d, dconst0))
2948 {
2949 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2950 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2951 return simplify_gen_binary (MULT, mode, op0, tem);
2952 }
2953 }
2954 }
2955 else if (SCALAR_INT_MODE_P (mode))
2956 {
2957 /* 0/x is 0 (or x&0 if x has side-effects). */
2958 if (trueop0 == CONST0_RTX (mode)
2959 && !cfun->can_throw_non_call_exceptions)
2960 {
2961 if (side_effects_p (op1))
2962 return simplify_gen_binary (AND, mode, op1, trueop0);
2963 return trueop0;
2964 }
2965 /* x/1 is x. */
2966 if (trueop1 == CONST1_RTX (mode))
2967 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2968 /* x/-1 is -x. */
2969 if (trueop1 == constm1_rtx)
2970 {
2971 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2972 return simplify_gen_unary (NEG, mode, x, mode);
2973 }
2974 }
2975 break;
2976
2977 case UMOD:
2978 /* 0%x is 0 (or x&0 if x has side-effects). */
2979 if (trueop0 == CONST0_RTX (mode))
2980 {
2981 if (side_effects_p (op1))
2982 return simplify_gen_binary (AND, mode, op1, trueop0);
2983 return trueop0;
2984 }
2985 /* x%1 is 0 (or x&0 if x has side-effects). */
2986 if (trueop1 == CONST1_RTX (mode))
2987 {
2988 if (side_effects_p (op0))
2989 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2990 return CONST0_RTX (mode);
2991 }
2992 /* Implement modulus by power of two as AND. */
2993 if (CONST_INT_P (trueop1)
2994 && exact_log2 (UINTVAL (trueop1)) > 0)
2995 return simplify_gen_binary (AND, mode, op0,
2996 GEN_INT (INTVAL (op1) - 1));
2997 break;
2998
2999 case MOD:
3000 /* 0%x is 0 (or x&0 if x has side-effects). */
3001 if (trueop0 == CONST0_RTX (mode))
3002 {
3003 if (side_effects_p (op1))
3004 return simplify_gen_binary (AND, mode, op1, trueop0);
3005 return trueop0;
3006 }
3007 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3008 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3009 {
3010 if (side_effects_p (op0))
3011 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3012 return CONST0_RTX (mode);
3013 }
3014 break;
3015
3016 case ROTATERT:
3017 case ROTATE:
3018 case ASHIFTRT:
3019 if (trueop1 == CONST0_RTX (mode))
3020 return op0;
3021 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3022 return op0;
3023 /* Rotating ~0 always results in ~0, as does an arithmetic right shift of ~0. */
3024 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3025 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3026 && ! side_effects_p (op1))
3027 return op0;
3028 canonicalize_shift:
3029 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3030 {
3031 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3032 if (val != INTVAL (op1))
3033 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3034 }
3035 break;
3036
3037 case ASHIFT:
3038 case SS_ASHIFT:
3039 case US_ASHIFT:
3040 if (trueop1 == CONST0_RTX (mode))
3041 return op0;
3042 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3043 return op0;
3044 goto canonicalize_shift;
3045
3046 case LSHIFTRT:
3047 if (trueop1 == CONST0_RTX (mode))
3048 return op0;
3049 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3050 return op0;
3051 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
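/* (For nonzero X, CLZ is at most PRECISION - 1, so shifting it right
   by log2 (PRECISION) gives 0; only the defined-at-zero value, which
   must equal PRECISION, gives 1.)  */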
3052 if (GET_CODE (op0) == CLZ
3053 && CONST_INT_P (trueop1)
3054 && STORE_FLAG_VALUE == 1
3055 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3056 {
3057 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3058 unsigned HOST_WIDE_INT zero_val = 0;
3059
3060 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3061 && zero_val == GET_MODE_PRECISION (imode)
3062 && INTVAL (trueop1) == exact_log2 (zero_val))
3063 return simplify_gen_relational (EQ, mode, imode,
3064 XEXP (op0, 0), const0_rtx);
3065 }
3066 goto canonicalize_shift;
3067
3068 case SMIN:
3069 if (width <= HOST_BITS_PER_WIDE_INT
3070 && mode_signbit_p (mode, trueop1)
3071 && ! side_effects_p (op0))
3072 return op1;
3073 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3074 return op0;
3075 tem = simplify_associative_operation (code, mode, op0, op1);
3076 if (tem)
3077 return tem;
3078 break;
3079
3080 case SMAX:
3081 if (width <= HOST_BITS_PER_WIDE_INT
3082 && CONST_INT_P (trueop1)
3083 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3084 && ! side_effects_p (op0))
3085 return op1;
3086 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3087 return op0;
3088 tem = simplify_associative_operation (code, mode, op0, op1);
3089 if (tem)
3090 return tem;
3091 break;
3092
3093 case UMIN:
3094 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3095 return op1;
3096 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3097 return op0;
3098 tem = simplify_associative_operation (code, mode, op0, op1);
3099 if (tem)
3100 return tem;
3101 break;
3102
3103 case UMAX:
3104 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3105 return op1;
3106 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3107 return op0;
3108 tem = simplify_associative_operation (code, mode, op0, op1);
3109 if (tem)
3110 return tem;
3111 break;
3112
3113 case SS_PLUS:
3114 case US_PLUS:
3115 case SS_MINUS:
3116 case US_MINUS:
3117 case SS_MULT:
3118 case US_MULT:
3119 case SS_DIV:
3120 case US_DIV:
3121 /* ??? There are simplifications that can be done. */
3122 return 0;
3123
3124 case VEC_SELECT:
3125 if (!VECTOR_MODE_P (mode))
3126 {
3127 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3128 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3129 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3130 gcc_assert (XVECLEN (trueop1, 0) == 1);
3131 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3132
3133 if (GET_CODE (trueop0) == CONST_VECTOR)
3134 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3135 (trueop1, 0, 0)));
3136
3137 /* Extract a scalar element from a nested VEC_SELECT expression
3138 (with optional nested VEC_CONCAT expression). Some targets
3139 (i386) extract a scalar element from a vector using a chain of
3140 nested VEC_SELECT expressions. When the input operand is a memory
3141 operand, this operation can be simplified to a simple scalar
3142 load from an offset memory address. */
3143 if (GET_CODE (trueop0) == VEC_SELECT)
3144 {
3145 rtx op0 = XEXP (trueop0, 0);
3146 rtx op1 = XEXP (trueop0, 1);
3147
3148 enum machine_mode opmode = GET_MODE (op0);
3149 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3150 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3151
3152 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3153 int elem;
3154
3155 rtvec vec;
3156 rtx tmp_op, tmp;
3157
3158 gcc_assert (GET_CODE (op1) == PARALLEL);
3159 gcc_assert (i < n_elts);
3160
3161 /* Select the element pointed to by the nested selector. */
3162 elem = INTVAL (XVECEXP (op1, 0, i));
3163
3164 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3165 if (GET_CODE (op0) == VEC_CONCAT)
3166 {
3167 rtx op00 = XEXP (op0, 0);
3168 rtx op01 = XEXP (op0, 1);
3169
3170 enum machine_mode mode00, mode01;
3171 int n_elts00, n_elts01;
3172
3173 mode00 = GET_MODE (op00);
3174 mode01 = GET_MODE (op01);
3175
3176 /* Find out the number of elements of each operand. */
3177 if (VECTOR_MODE_P (mode00))
3178 {
3179 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3180 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3181 }
3182 else
3183 n_elts00 = 1;
3184
3185 if (VECTOR_MODE_P (mode01))
3186 {
3187 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3188 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3189 }
3190 else
3191 n_elts01 = 1;
3192
3193 gcc_assert (n_elts == n_elts00 + n_elts01);
3194
3195 /* Select correct operand of VEC_CONCAT
3196 and adjust selector. */
3197 if (elem < n_elts00)
3198 tmp_op = op00;
3199 else
3200 {
3201 tmp_op = op01;
3202 elem -= n_elts00;
3203 }
3204 }
3205 else
3206 tmp_op = op0;
3207
3208 vec = rtvec_alloc (1);
3209 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3210
3211 tmp = gen_rtx_fmt_ee (code, mode,
3212 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3213 return tmp;
3214 }
3215 if (GET_CODE (trueop0) == VEC_DUPLICATE
3216 && GET_MODE (XEXP (trueop0, 0)) == mode)
3217 return XEXP (trueop0, 0);
3218 }
3219 else
3220 {
3221 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3222 gcc_assert (GET_MODE_INNER (mode)
3223 == GET_MODE_INNER (GET_MODE (trueop0)));
3224 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3225
3226 if (GET_CODE (trueop0) == CONST_VECTOR)
3227 {
3228 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3229 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3230 rtvec v = rtvec_alloc (n_elts);
3231 unsigned int i;
3232
3233 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3234 for (i = 0; i < n_elts; i++)
3235 {
3236 rtx x = XVECEXP (trueop1, 0, i);
3237
3238 gcc_assert (CONST_INT_P (x));
3239 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3240 INTVAL (x));
3241 }
3242
3243 return gen_rtx_CONST_VECTOR (mode, v);
3244 }
3245
3246 /* If we build {a,b} then permute it, build the result directly. */
3247 if (XVECLEN (trueop1, 0) == 2
3248 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3249 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3250 && GET_CODE (trueop0) == VEC_CONCAT
3251 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3252 && GET_MODE (XEXP (trueop0, 0)) == mode
3253 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3254 && GET_MODE (XEXP (trueop0, 1)) == mode)
3255 {
3256 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3257 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3258 rtx subop0, subop1;
3259
3260 gcc_assert (i0 < 4 && i1 < 4);
3261 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3262 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3263
3264 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3265 }
3266
3267 if (XVECLEN (trueop1, 0) == 2
3268 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3269 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3270 && GET_CODE (trueop0) == VEC_CONCAT
3271 && GET_MODE (trueop0) == mode)
3272 {
3273 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3274 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3275 rtx subop0, subop1;
3276
3277 gcc_assert (i0 < 2 && i1 < 2);
3278 subop0 = XEXP (trueop0, i0);
3279 subop1 = XEXP (trueop0, i1);
3280
3281 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3282 }
3283 }
3284
3285 if (XVECLEN (trueop1, 0) == 1
3286 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3287 && GET_CODE (trueop0) == VEC_CONCAT)
3288 {
3289 rtx vec = trueop0;
3290 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3291
3292 /* Try to find the element in the VEC_CONCAT. */
3293 while (GET_MODE (vec) != mode
3294 && GET_CODE (vec) == VEC_CONCAT)
3295 {
3296 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3297 if (offset < vec_size)
3298 vec = XEXP (vec, 0);
3299 else
3300 {
3301 offset -= vec_size;
3302 vec = XEXP (vec, 1);
3303 }
3304 vec = avoid_constant_pool_reference (vec);
3305 }
3306
3307 if (GET_MODE (vec) == mode)
3308 return vec;
3309 }
3310
3311 return 0;
3312 case VEC_CONCAT:
3313 {
3314 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3315 ? GET_MODE (trueop0)
3316 : GET_MODE_INNER (mode));
3317 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3318 ? GET_MODE (trueop1)
3319 : GET_MODE_INNER (mode));
3320
3321 gcc_assert (VECTOR_MODE_P (mode));
3322 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3323 == GET_MODE_SIZE (mode));
3324
3325 if (VECTOR_MODE_P (op0_mode))
3326 gcc_assert (GET_MODE_INNER (mode)
3327 == GET_MODE_INNER (op0_mode));
3328 else
3329 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3330
3331 if (VECTOR_MODE_P (op1_mode))
3332 gcc_assert (GET_MODE_INNER (mode)
3333 == GET_MODE_INNER (op1_mode));
3334 else
3335 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3336
3337 if ((GET_CODE (trueop0) == CONST_VECTOR
3338 || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
3339 && (GET_CODE (trueop1) == CONST_VECTOR
3340 || CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
3341 {
3342 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3343 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3344 rtvec v = rtvec_alloc (n_elts);
3345 unsigned int i;
3346 unsigned in_n_elts = 1;
3347
3348 if (VECTOR_MODE_P (op0_mode))
3349 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3350 for (i = 0; i < n_elts; i++)
3351 {
3352 if (i < in_n_elts)
3353 {
3354 if (!VECTOR_MODE_P (op0_mode))
3355 RTVEC_ELT (v, i) = trueop0;
3356 else
3357 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3358 }
3359 else
3360 {
3361 if (!VECTOR_MODE_P (op1_mode))
3362 RTVEC_ELT (v, i) = trueop1;
3363 else
3364 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3365 i - in_n_elts);
3366 }
3367 }
3368
3369 return gen_rtx_CONST_VECTOR (mode, v);
3370 }
3371 }
3372 return 0;
3373
3374 default:
3375 gcc_unreachable ();
3376 }
3377
3378 return 0;
3379 }
3380
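/* Try to compute the result of applying CODE, with result mode MODE,
   to the constant operands OP0 and OP1.  Return the folded constant,
   or 0 if the operation cannot be evaluated at compile time.  */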
3381 rtx
3382 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3383 rtx op0, rtx op1)
3384 {
3385 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3386 HOST_WIDE_INT val;
3387 unsigned int width = GET_MODE_PRECISION (mode);
3388
3389 if (VECTOR_MODE_P (mode)
3390 && code != VEC_CONCAT
3391 && GET_CODE (op0) == CONST_VECTOR
3392 && GET_CODE (op1) == CONST_VECTOR)
3393 {
3394 unsigned n_elts = GET_MODE_NUNITS (mode);
3395 enum machine_mode op0mode = GET_MODE (op0);
3396 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3397 enum machine_mode op1mode = GET_MODE (op1);
3398 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3399 rtvec v = rtvec_alloc (n_elts);
3400 unsigned int i;
3401
3402 gcc_assert (op0_n_elts == n_elts);
3403 gcc_assert (op1_n_elts == n_elts);
3404 for (i = 0; i < n_elts; i++)
3405 {
3406 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3407 CONST_VECTOR_ELT (op0, i),
3408 CONST_VECTOR_ELT (op1, i));
3409 if (!x)
3410 return 0;
3411 RTVEC_ELT (v, i) = x;
3412 }
3413
3414 return gen_rtx_CONST_VECTOR (mode, v);
3415 }
3416
3417 if (VECTOR_MODE_P (mode)
3418 && code == VEC_CONCAT
3419 && (CONST_INT_P (op0)
3420 || GET_CODE (op0) == CONST_FIXED
3421 || CONST_DOUBLE_P (op0))
3422 && (CONST_INT_P (op1)
3423 || CONST_DOUBLE_P (op1)
3424 || GET_CODE (op1) == CONST_FIXED))
3425 {
3426 unsigned n_elts = GET_MODE_NUNITS (mode);
3427 rtvec v = rtvec_alloc (n_elts);
3428
3429 gcc_assert (n_elts >= 2);
3430 if (n_elts == 2)
3431 {
3432 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3433 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3434
3435 RTVEC_ELT (v, 0) = op0;
3436 RTVEC_ELT (v, 1) = op1;
3437 }
3438 else
3439 {
3440 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3441 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3442 unsigned i;
3443
3444 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3445 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3446 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3447
3448 for (i = 0; i < op0_n_elts; ++i)
3449 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3450 for (i = 0; i < op1_n_elts; ++i)
3451 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3452 }
3453
3454 return gen_rtx_CONST_VECTOR (mode, v);
3455 }
3456
3457 if (SCALAR_FLOAT_MODE_P (mode)
3458 && CONST_DOUBLE_AS_FLOAT_P (op0)
3459 && CONST_DOUBLE_AS_FLOAT_P (op1)
3460 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3461 {
3462 if (code == AND
3463 || code == IOR
3464 || code == XOR)
3465 {
3466 long tmp0[4];
3467 long tmp1[4];
3468 REAL_VALUE_TYPE r;
3469 int i;
3470
3471 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3472 GET_MODE (op0));
3473 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3474 GET_MODE (op1));
3475 for (i = 0; i < 4; i++)
3476 {
3477 switch (code)
3478 {
3479 case AND:
3480 tmp0[i] &= tmp1[i];
3481 break;
3482 case IOR:
3483 tmp0[i] |= tmp1[i];
3484 break;
3485 case XOR:
3486 tmp0[i] ^= tmp1[i];
3487 break;
3488 default:
3489 gcc_unreachable ();
3490 }
3491 }
3492 real_from_target (&r, tmp0, mode);
3493 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3494 }
3495 else
3496 {
3497 REAL_VALUE_TYPE f0, f1, value, result;
3498 bool inexact;
3499
3500 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3501 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3502 real_convert (&f0, mode, &f0);
3503 real_convert (&f1, mode, &f1);
3504
3505 if (HONOR_SNANS (mode)
3506 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3507 return 0;
3508
3509 if (code == DIV
3510 && REAL_VALUES_EQUAL (f1, dconst0)
3511 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3512 return 0;
3513
3514 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3515 && flag_trapping_math
3516 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3517 {
3518 int s0 = REAL_VALUE_NEGATIVE (f0);
3519 int s1 = REAL_VALUE_NEGATIVE (f1);
3520
3521 switch (code)
3522 {
3523 case PLUS:
3524 /* Inf + -Inf = NaN plus exception. */
3525 if (s0 != s1)
3526 return 0;
3527 break;
3528 case MINUS:
3529 /* Inf - Inf = NaN plus exception. */
3530 if (s0 == s1)
3531 return 0;
3532 break;
3533 case DIV:
3534 /* Inf / Inf = NaN plus exception. */
3535 return 0;
3536 default:
3537 break;
3538 }
3539 }
3540
3541 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3542 && flag_trapping_math
3543 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3544 || (REAL_VALUE_ISINF (f1)
3545 && REAL_VALUES_EQUAL (f0, dconst0))))
3546 /* Inf * 0 = NaN plus exception. */
3547 return 0;
3548
3549 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3550 &f0, &f1);
3551 real_convert (&result, mode, &value);
3552
3553 /* Don't constant fold this floating point operation if
3554 the result has overflowed and flag_trapping_math is set. */
3555
3556 if (flag_trapping_math
3557 && MODE_HAS_INFINITIES (mode)
3558 && REAL_VALUE_ISINF (result)
3559 && !REAL_VALUE_ISINF (f0)
3560 && !REAL_VALUE_ISINF (f1))
3561 /* Overflow plus exception. */
3562 return 0;
3563
3564 /* Don't constant fold this floating point operation if the
3565 result may depend upon the run-time rounding mode and
3566 flag_rounding_math is set, or if GCC's software emulation
3567 is unable to accurately represent the result. */
3568
3569 if ((flag_rounding_math
3570 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3571 && (inexact || !real_identical (&result, &value)))
3572 return NULL_RTX;
3573
3574 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3575 }
3576 }
3577
3578 /* We can fold some multi-word operations. */
3579 if (GET_MODE_CLASS (mode) == MODE_INT
3580 && width == HOST_BITS_PER_DOUBLE_INT
3581 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3582 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3583 {
3584 double_int o0, o1, res, tmp;
3585
3586 o0 = rtx_to_double_int (op0);
3587 o1 = rtx_to_double_int (op1);
3588
3589 switch (code)
3590 {
3591 case MINUS:
3592 /* A - B == A + (-B). */
3593 o1 = double_int_neg (o1);
3594
3595 /* Fall through.... */
3596
3597 case PLUS:
3598 res = double_int_add (o0, o1);
3599 break;
3600
3601 case MULT:
3602 res = double_int_mul (o0, o1);
3603 break;
3604
3605 case DIV:
3606 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3607 o0.low, o0.high, o1.low, o1.high,
3608 &res.low, &res.high,
3609 &tmp.low, &tmp.high))
3610 return 0;
3611 break;
3612
3613 case MOD:
3614 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3615 o0.low, o0.high, o1.low, o1.high,
3616 &tmp.low, &tmp.high,
3617 &res.low, &res.high))
3618 return 0;
3619 break;
3620
3621 case UDIV:
3622 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3623 o0.low, o0.high, o1.low, o1.high,
3624 &res.low, &res.high,
3625 &tmp.low, &tmp.high))
3626 return 0;
3627 break;
3628
3629 case UMOD:
3630 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3631 o0.low, o0.high, o1.low, o1.high,
3632 &tmp.low, &tmp.high,
3633 &res.low, &res.high))
3634 return 0;
3635 break;
3636
3637 case AND:
3638 res = double_int_and (o0, o1);
3639 break;
3640
3641 case IOR:
3642 res = double_int_ior (o0, o1);
3643 break;
3644
3645 case XOR:
3646 res = double_int_xor (o0, o1);
3647 break;
3648
3649 case SMIN:
3650 res = double_int_smin (o0, o1);
3651 break;
3652
3653 case SMAX:
3654 res = double_int_smax (o0, o1);
3655 break;
3656
3657 case UMIN:
3658 res = double_int_umin (o0, o1);
3659 break;
3660
3661 case UMAX:
3662 res = double_int_umax (o0, o1);
3663 break;
3664
3665 case LSHIFTRT: case ASHIFTRT:
3666 case ASHIFT:
3667 case ROTATE: case ROTATERT:
3668 {
3669 unsigned HOST_WIDE_INT cnt;
3670
3671 if (SHIFT_COUNT_TRUNCATED)
3672 {
3673 o1.high = 0;
3674 o1.low &= GET_MODE_PRECISION (mode) - 1;
3675 }
3676
3677 if (!double_int_fits_in_uhwi_p (o1)
3678 || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
3679 return 0;
3680
3681 cnt = double_int_to_uhwi (o1);
3682
3683 if (code == LSHIFTRT || code == ASHIFTRT)
3684 res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
3685 code == ASHIFTRT);
3686 else if (code == ASHIFT)
3687 res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
3688 true);
3689 else if (code == ROTATE)
3690 res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
3691 else /* code == ROTATERT */
3692 res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
3693 }
3694 break;
3695
3696 default:
3697 return 0;
3698 }
3699
3700 return immed_double_int_const (res, mode);
3701 }
3702
3703 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3704 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3705 {
3706 /* Get the integer argument values in two forms:
3707 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3708
3709 arg0 = INTVAL (op0);
3710 arg1 = INTVAL (op1);
3711
3712 if (width < HOST_BITS_PER_WIDE_INT)
3713 {
3714 arg0 &= GET_MODE_MASK (mode);
3715 arg1 &= GET_MODE_MASK (mode);
3716
3717 arg0s = arg0;
3718 if (val_signbit_known_set_p (mode, arg0s))
3719 arg0s |= ~GET_MODE_MASK (mode);
3720
3721 arg1s = arg1;
3722 if (val_signbit_known_set_p (mode, arg1s))
3723 arg1s |= ~GET_MODE_MASK (mode);
3724 }
3725 else
3726 {
3727 arg0s = arg0;
3728 arg1s = arg1;
3729 }
3730
3731 /* Compute the value of the arithmetic. */
3732
3733 switch (code)
3734 {
3735 case PLUS:
3736 val = arg0s + arg1s;
3737 break;
3738
3739 case MINUS:
3740 val = arg0s - arg1s;
3741 break;
3742
3743 case MULT:
3744 val = arg0s * arg1s;
3745 break;
3746
3747 case DIV:
3748 if (arg1s == 0
3749 || ((unsigned HOST_WIDE_INT) arg0s
3750 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3751 && arg1s == -1))
3752 return 0;
3753 val = arg0s / arg1s;
3754 break;
3755
3756 case MOD:
3757 if (arg1s == 0
3758 || ((unsigned HOST_WIDE_INT) arg0s
3759 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3760 && arg1s == -1))
3761 return 0;
3762 val = arg0s % arg1s;
3763 break;
3764
3765 case UDIV:
3766 if (arg1 == 0
3767 || ((unsigned HOST_WIDE_INT) arg0s
3768 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3769 && arg1s == -1))
3770 return 0;
3771 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3772 break;
3773
3774 case UMOD:
3775 if (arg1 == 0
3776 || ((unsigned HOST_WIDE_INT) arg0s
3777 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3778 && arg1s == -1))
3779 return 0;
3780 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3781 break;
3782
3783 case AND:
3784 val = arg0 & arg1;
3785 break;
3786
3787 case IOR:
3788 val = arg0 | arg1;
3789 break;
3790
3791 case XOR:
3792 val = arg0 ^ arg1;
3793 break;
3794
3795 case LSHIFTRT:
3796 case ASHIFT:
3797 case ASHIFTRT:
3798 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3799 the value is in range. We can't return any old value for
3800 out-of-range arguments because either the middle-end (via
3801 shift_truncation_mask) or the back-end might be relying on
3802 target-specific knowledge. Nor can we rely on
3803 shift_truncation_mask, since the shift might not be part of an
3804 ashlM3, lshrM3 or ashrM3 instruction. */
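            /* For example, with SHIFT_COUNT_TRUNCATED and a 32-bit mode, a
               shift count of 33 is reduced to 1 below; without it, such an
               out-of-range count simply blocks the fold.  */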
3805 if (SHIFT_COUNT_TRUNCATED)
3806 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3807 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3808 return 0;
3809
3810 val = (code == ASHIFT
3811 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3812 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3813
3814 /* Sign-extend the result for arithmetic right shifts. */
3815 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3816 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3817 break;
3818
3819 case ROTATERT:
3820 if (arg1 < 0)
3821 return 0;
3822
3823 arg1 %= width;
3824 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3825 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3826 break;
3827
3828 case ROTATE:
3829 if (arg1 < 0)
3830 return 0;
3831
3832 arg1 %= width;
3833 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3834 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3835 break;
3836
3837 case COMPARE:
3838 /* Do nothing here. */
3839 return 0;
3840
3841 case SMIN:
3842 val = arg0s <= arg1s ? arg0s : arg1s;
3843 break;
3844
3845 case UMIN:
3846 val = ((unsigned HOST_WIDE_INT) arg0
3847 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3848 break;
3849
3850 case SMAX:
3851 val = arg0s > arg1s ? arg0s : arg1s;
3852 break;
3853
3854 case UMAX:
3855 val = ((unsigned HOST_WIDE_INT) arg0
3856 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3857 break;
3858
3859 case SS_PLUS:
3860 case US_PLUS:
3861 case SS_MINUS:
3862 case US_MINUS:
3863 case SS_MULT:
3864 case US_MULT:
3865 case SS_DIV:
3866 case US_DIV:
3867 case SS_ASHIFT:
3868 case US_ASHIFT:
3869 /* ??? There are simplifications that can be done. */
3870 return 0;
3871
3872 default:
3873 gcc_unreachable ();
3874 }
3875
3876 return gen_int_mode (val, mode);
3877 }
3878
3879 return NULL_RTX;
3880 }
3881
3882
3883 \f
3884 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3885 PLUS or MINUS.
3886
3887    Rather than test for specific cases, we do this by a brute-force method
3888 and do all possible simplifications until no more changes occur. Then
3889 we rebuild the operation. */
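     /* For example, simplify_plus_minus (MINUS, mode, (plus A B), (neg C)) is
        first flattened into the ops[] array as the three positive terms A, B
        and C, and then recombined pairwise below.  */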
3890
3891 struct simplify_plus_minus_op_data
3892 {
3893 rtx op;
3894 short neg;
3895 };
3896
3897 static bool
3898 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3899 {
3900 int result;
3901
3902 result = (commutative_operand_precedence (y)
3903 - commutative_operand_precedence (x));
3904 if (result)
3905 return result > 0;
3906
3907 /* Group together equal REGs to do more simplification. */
3908 if (REG_P (x) && REG_P (y))
3909 return REGNO (x) > REGNO (y);
3910 else
3911 return false;
3912 }
3913
3914 static rtx
3915 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3916 rtx op1)
3917 {
3918 struct simplify_plus_minus_op_data ops[8];
3919 rtx result, tem;
3920 int n_ops = 2, input_ops = 2;
3921 int changed, n_constants = 0, canonicalized = 0;
3922 int i, j;
3923
3924 memset (ops, 0, sizeof ops);
3925
3926 /* Set up the two operands and then expand them until nothing has been
3927 changed. If we run out of room in our array, give up; this should
3928 almost never happen. */
3929
3930 ops[0].op = op0;
3931 ops[0].neg = 0;
3932 ops[1].op = op1;
3933 ops[1].neg = (code == MINUS);
3934
3935 do
3936 {
3937 changed = 0;
3938
3939 for (i = 0; i < n_ops; i++)
3940 {
3941 rtx this_op = ops[i].op;
3942 int this_neg = ops[i].neg;
3943 enum rtx_code this_code = GET_CODE (this_op);
3944
3945 switch (this_code)
3946 {
3947 case PLUS:
3948 case MINUS:
3949 if (n_ops == 7)
3950 return NULL_RTX;
3951
3952 ops[n_ops].op = XEXP (this_op, 1);
3953 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3954 n_ops++;
3955
3956 ops[i].op = XEXP (this_op, 0);
3957 input_ops++;
3958 changed = 1;
3959 canonicalized |= this_neg;
3960 break;
3961
3962 case NEG:
3963 ops[i].op = XEXP (this_op, 0);
3964 ops[i].neg = ! this_neg;
3965 changed = 1;
3966 canonicalized = 1;
3967 break;
3968
3969 case CONST:
3970 if (n_ops < 7
3971 && GET_CODE (XEXP (this_op, 0)) == PLUS
3972 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3973 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3974 {
3975 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3976 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3977 ops[n_ops].neg = this_neg;
3978 n_ops++;
3979 changed = 1;
3980 canonicalized = 1;
3981 }
3982 break;
3983
3984 case NOT:
3985 /* ~a -> (-a - 1) */
3986 if (n_ops != 7)
3987 {
3988 ops[n_ops].op = CONSTM1_RTX (mode);
3989 ops[n_ops++].neg = this_neg;
3990 ops[i].op = XEXP (this_op, 0);
3991 ops[i].neg = !this_neg;
3992 changed = 1;
3993 canonicalized = 1;
3994 }
3995 break;
3996
3997 case CONST_INT:
3998 n_constants++;
3999 if (this_neg)
4000 {
4001 ops[i].op = neg_const_int (mode, this_op);
4002 ops[i].neg = 0;
4003 changed = 1;
4004 canonicalized = 1;
4005 }
4006 break;
4007
4008 default:
4009 break;
4010 }
4011 }
4012 }
4013 while (changed);
4014
4015 if (n_constants > 1)
4016 canonicalized = 1;
4017
4018 gcc_assert (n_ops >= 2);
4019
4020 /* If we only have two operands, we can avoid the loops. */
4021 if (n_ops == 2)
4022 {
4023 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4024 rtx lhs, rhs;
4025
4026 /* Get the two operands. Be careful with the order, especially for
4027 the cases where code == MINUS. */
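          /* For example, if ops[] holds { -A, +B }, the result below must be
             built as (minus B A), not (minus A B).  */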
4028 if (ops[0].neg && ops[1].neg)
4029 {
4030 lhs = gen_rtx_NEG (mode, ops[0].op);
4031 rhs = ops[1].op;
4032 }
4033 else if (ops[0].neg)
4034 {
4035 lhs = ops[1].op;
4036 rhs = ops[0].op;
4037 }
4038 else
4039 {
4040 lhs = ops[0].op;
4041 rhs = ops[1].op;
4042 }
4043
4044 return simplify_const_binary_operation (code, mode, lhs, rhs);
4045 }
4046
4047 /* Now simplify each pair of operands until nothing changes. */
4048 do
4049 {
4050 /* Insertion sort is good enough for an eight-element array. */
4051 for (i = 1; i < n_ops; i++)
4052 {
4053 struct simplify_plus_minus_op_data save;
4054 j = i - 1;
4055 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4056 continue;
4057
4058 canonicalized = 1;
4059 save = ops[i];
4060 do
4061 ops[j + 1] = ops[j];
4062 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4063 ops[j + 1] = save;
4064 }
4065
4066 changed = 0;
4067 for (i = n_ops - 1; i > 0; i--)
4068 for (j = i - 1; j >= 0; j--)
4069 {
4070 rtx lhs = ops[j].op, rhs = ops[i].op;
4071 int lneg = ops[j].neg, rneg = ops[i].neg;
4072
4073 if (lhs != 0 && rhs != 0)
4074 {
4075 enum rtx_code ncode = PLUS;
4076
4077 if (lneg != rneg)
4078 {
4079 ncode = MINUS;
4080 if (lneg)
4081 tem = lhs, lhs = rhs, rhs = tem;
4082 }
4083 else if (swap_commutative_operands_p (lhs, rhs))
4084 tem = lhs, lhs = rhs, rhs = tem;
4085
4086 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4087 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4088 {
4089 rtx tem_lhs, tem_rhs;
4090
4091 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4092 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4093 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4094
4095 if (tem && !CONSTANT_P (tem))
4096 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4097 }
4098 else
4099 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4100
4101 /* Reject "simplifications" that just wrap the two
4102 arguments in a CONST. Failure to do so can result
4103 in infinite recursion with simplify_binary_operation
4104 when it calls us to simplify CONST operations. */
4105 if (tem
4106 && ! (GET_CODE (tem) == CONST
4107 && GET_CODE (XEXP (tem, 0)) == ncode
4108 && XEXP (XEXP (tem, 0), 0) == lhs
4109 && XEXP (XEXP (tem, 0), 1) == rhs))
4110 {
4111 lneg &= rneg;
4112 if (GET_CODE (tem) == NEG)
4113 tem = XEXP (tem, 0), lneg = !lneg;
4114 if (CONST_INT_P (tem) && lneg)
4115 tem = neg_const_int (mode, tem), lneg = 0;
4116
4117 ops[i].op = tem;
4118 ops[i].neg = lneg;
4119 ops[j].op = NULL_RTX;
4120 changed = 1;
4121 canonicalized = 1;
4122 }
4123 }
4124 }
4125
4126 /* If nothing changed, fail. */
4127 if (!canonicalized)
4128 return NULL_RTX;
4129
4130 /* Pack all the operands to the lower-numbered entries. */
4131 for (i = 0, j = 0; j < n_ops; j++)
4132 if (ops[j].op)
4133 {
4134 ops[i] = ops[j];
4135 i++;
4136 }
4137 n_ops = i;
4138 }
4139 while (changed);
4140
4141 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4142 if (n_ops == 2
4143 && CONST_INT_P (ops[1].op)
4144 && CONSTANT_P (ops[0].op)
4145 && ops[0].neg)
4146 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4147
4148 /* We suppressed creation of trivial CONST expressions in the
4149 combination loop to avoid recursion. Create one manually now.
4150 The combination loop should have ensured that there is exactly
4151 one CONST_INT, and the sort will have ensured that it is last
4152 in the array and that any other constant will be next-to-last. */
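     /* For example, a trailing pair { (symbol_ref X), (const_int 4) } is
        merged by plus_constant into (const (plus (symbol_ref X) (const_int 4))),
        negating the constant first if the two signs differ.  */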
4153
4154 if (n_ops > 1
4155 && CONST_INT_P (ops[n_ops - 1].op)
4156 && CONSTANT_P (ops[n_ops - 2].op))
4157 {
4158 rtx value = ops[n_ops - 1].op;
4159 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4160 value = neg_const_int (mode, value);
4161 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4162 INTVAL (value));
4163 n_ops--;
4164 }
4165
4166 /* Put a non-negated operand first, if possible. */
4167
4168 for (i = 0; i < n_ops && ops[i].neg; i++)
4169 continue;
4170 if (i == n_ops)
4171 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4172 else if (i != 0)
4173 {
4174 tem = ops[0].op;
4175 ops[0] = ops[i];
4176 ops[i].op = tem;
4177 ops[i].neg = 1;
4178 }
4179
4180 /* Now make the result by performing the requested operations. */
4181 result = ops[0].op;
4182 for (i = 1; i < n_ops; i++)
4183 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4184 mode, result, ops[i].op);
4185
4186 return result;
4187 }
4188
4189 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4190 static bool
4191 plus_minus_operand_p (const_rtx x)
4192 {
4193 return GET_CODE (x) == PLUS
4194 || GET_CODE (x) == MINUS
4195 || (GET_CODE (x) == CONST
4196 && GET_CODE (XEXP (x, 0)) == PLUS
4197 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4198 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4199 }
4200
4201 /* Like simplify_binary_operation except used for relational operators.
4202    MODE is the mode of the result. If MODE is VOIDmode, the operands must
4203    not both be VOIDmode as well.
4204
4205    CMP_MODE specifies the mode in which the comparison is done, so it is
4206 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4207 the operands or, if both are VOIDmode, the operands are compared in
4208 "infinite precision". */
4209 rtx
4210 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4211 enum machine_mode cmp_mode, rtx op0, rtx op1)
4212 {
4213 rtx tem, trueop0, trueop1;
4214
4215 if (cmp_mode == VOIDmode)
4216 cmp_mode = GET_MODE (op0);
4217 if (cmp_mode == VOIDmode)
4218 cmp_mode = GET_MODE (op1);
4219
4220 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4221 if (tem)
4222 {
4223 if (SCALAR_FLOAT_MODE_P (mode))
4224 {
4225 if (tem == const0_rtx)
4226 return CONST0_RTX (mode);
4227 #ifdef FLOAT_STORE_FLAG_VALUE
4228 {
4229 REAL_VALUE_TYPE val;
4230 val = FLOAT_STORE_FLAG_VALUE (mode);
4231 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4232 }
4233 #else
4234 return NULL_RTX;
4235 #endif
4236 }
4237 if (VECTOR_MODE_P (mode))
4238 {
4239 if (tem == const0_rtx)
4240 return CONST0_RTX (mode);
4241 #ifdef VECTOR_STORE_FLAG_VALUE
4242 {
4243 int i, units;
4244 rtvec v;
4245
4246 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4247 if (val == NULL_RTX)
4248 return NULL_RTX;
4249 if (val == const1_rtx)
4250 return CONST1_RTX (mode);
4251
4252 units = GET_MODE_NUNITS (mode);
4253 v = rtvec_alloc (units);
4254 for (i = 0; i < units; i++)
4255 RTVEC_ELT (v, i) = val;
4256 return gen_rtx_raw_CONST_VECTOR (mode, v);
4257 }
4258 #else
4259 return NULL_RTX;
4260 #endif
4261 }
4262
4263 return tem;
4264 }
4265
4266 /* For the following tests, ensure const0_rtx is op1. */
4267 if (swap_commutative_operands_p (op0, op1)
4268 || (op0 == const0_rtx && op1 != const0_rtx))
4269 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4270
4271 /* If op0 is a compare, extract the comparison arguments from it. */
4272 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4273 return simplify_gen_relational (code, mode, VOIDmode,
4274 XEXP (op0, 0), XEXP (op0, 1));
4275
4276 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4277 || CC0_P (op0))
4278 return NULL_RTX;
4279
4280 trueop0 = avoid_constant_pool_reference (op0);
4281 trueop1 = avoid_constant_pool_reference (op1);
4282 return simplify_relational_operation_1 (code, mode, cmp_mode,
4283 trueop0, trueop1);
4284 }
4285
4286 /* This part of simplify_relational_operation is only used when CMP_MODE
4287 is not in class MODE_CC (i.e. it is a real comparison).
4288
4289    MODE is the mode of the result, while CMP_MODE specifies the mode
4290    in which the comparison is done, so it is the mode of the operands. */
4291
4292 static rtx
4293 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4294 enum machine_mode cmp_mode, rtx op0, rtx op1)
4295 {
4296 enum rtx_code op0code = GET_CODE (op0);
4297
4298 if (op1 == const0_rtx && COMPARISON_P (op0))
4299 {
4300 /* If op0 is a comparison, extract the comparison arguments
4301 from it. */
4302 if (code == NE)
4303 {
4304 if (GET_MODE (op0) == mode)
4305 return simplify_rtx (op0);
4306 else
4307 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4308 XEXP (op0, 0), XEXP (op0, 1));
4309 }
4310 else if (code == EQ)
4311 {
4312 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4313 if (new_code != UNKNOWN)
4314 return simplify_gen_relational (new_code, mode, VOIDmode,
4315 XEXP (op0, 0), XEXP (op0, 1));
4316 }
4317 }
4318
4319 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4320 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4321 if ((code == LTU || code == GEU)
4322 && GET_CODE (op0) == PLUS
4323 && CONST_INT_P (XEXP (op0, 1))
4324 && (rtx_equal_p (op1, XEXP (op0, 0))
4325 || rtx_equal_p (op1, XEXP (op0, 1))))
4326 {
4327 rtx new_cmp
4328 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4329 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4330 cmp_mode, XEXP (op0, 0), new_cmp);
4331 }
4332
4333 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4334 if ((code == LTU || code == GEU)
4335 && GET_CODE (op0) == PLUS
4336 && rtx_equal_p (op1, XEXP (op0, 1))
4337 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4338 && !rtx_equal_p (op1, XEXP (op0, 0)))
4339 return simplify_gen_relational (code, mode, cmp_mode, op0,
4340 copy_rtx (XEXP (op0, 0)));
4341
4342 if (op1 == const0_rtx)
4343 {
4344 /* Canonicalize (GTU x 0) as (NE x 0). */
4345 if (code == GTU)
4346 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4347 /* Canonicalize (LEU x 0) as (EQ x 0). */
4348 if (code == LEU)
4349 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4350 }
4351 else if (op1 == const1_rtx)
4352 {
4353 switch (code)
4354 {
4355 case GE:
4356 /* Canonicalize (GE x 1) as (GT x 0). */
4357 return simplify_gen_relational (GT, mode, cmp_mode,
4358 op0, const0_rtx);
4359 case GEU:
4360 /* Canonicalize (GEU x 1) as (NE x 0). */
4361 return simplify_gen_relational (NE, mode, cmp_mode,
4362 op0, const0_rtx);
4363 case LT:
4364 /* Canonicalize (LT x 1) as (LE x 0). */
4365 return simplify_gen_relational (LE, mode, cmp_mode,
4366 op0, const0_rtx);
4367 case LTU:
4368 /* Canonicalize (LTU x 1) as (EQ x 0). */
4369 return simplify_gen_relational (EQ, mode, cmp_mode,
4370 op0, const0_rtx);
4371 default:
4372 break;
4373 }
4374 }
4375 else if (op1 == constm1_rtx)
4376 {
4377 /* Canonicalize (LE x -1) as (LT x 0). */
4378 if (code == LE)
4379 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4380 /* Canonicalize (GT x -1) as (GE x 0). */
4381 if (code == GT)
4382 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4383 }
4384
4385 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4386 if ((code == EQ || code == NE)
4387 && (op0code == PLUS || op0code == MINUS)
4388 && CONSTANT_P (op1)
4389 && CONSTANT_P (XEXP (op0, 1))
4390 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4391 {
4392 rtx x = XEXP (op0, 0);
4393 rtx c = XEXP (op0, 1);
4394 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4395 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4396
4397       /* Detect an infinitely recursive condition, where we oscillate at this
4398 simplification case between:
4399 A + B == C <---> C - B == A,
4400 where A, B, and C are all constants with non-simplifiable expressions,
4401 usually SYMBOL_REFs. */
4402 if (GET_CODE (tem) == invcode
4403 && CONSTANT_P (x)
4404 && rtx_equal_p (c, XEXP (tem, 1)))
4405 return NULL_RTX;
4406
4407 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4408 }
4409
4410 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4411 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4412 if (code == NE
4413 && op1 == const0_rtx
4414 && GET_MODE_CLASS (mode) == MODE_INT
4415 && cmp_mode != VOIDmode
4416 /* ??? Work-around BImode bugs in the ia64 backend. */
4417 && mode != BImode
4418 && cmp_mode != BImode
4419 && nonzero_bits (op0, cmp_mode) == 1
4420 && STORE_FLAG_VALUE == 1)
4421 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4422 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4423 : lowpart_subreg (mode, op0, cmp_mode);
4424
4425 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4426 if ((code == EQ || code == NE)
4427 && op1 == const0_rtx
4428 && op0code == XOR)
4429 return simplify_gen_relational (code, mode, cmp_mode,
4430 XEXP (op0, 0), XEXP (op0, 1));
4431
4432 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4433 if ((code == EQ || code == NE)
4434 && op0code == XOR
4435 && rtx_equal_p (XEXP (op0, 0), op1)
4436 && !side_effects_p (XEXP (op0, 0)))
4437 return simplify_gen_relational (code, mode, cmp_mode,
4438 XEXP (op0, 1), const0_rtx);
4439
4440 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4441 if ((code == EQ || code == NE)
4442 && op0code == XOR
4443 && rtx_equal_p (XEXP (op0, 1), op1)
4444 && !side_effects_p (XEXP (op0, 1)))
4445 return simplify_gen_relational (code, mode, cmp_mode,
4446 XEXP (op0, 0), const0_rtx);
4447
4448 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4449 if ((code == EQ || code == NE)
4450 && op0code == XOR
4451 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
4452 && (CONST_INT_P (XEXP (op0, 1))
4453 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
4454 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4455 simplify_gen_binary (XOR, cmp_mode,
4456 XEXP (op0, 1), op1));
4457
4458 if (op0code == POPCOUNT && op1 == const0_rtx)
4459 switch (code)
4460 {
4461 case EQ:
4462 case LE:
4463 case LEU:
4464 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4465 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4466 XEXP (op0, 0), const0_rtx);
4467
4468 case NE:
4469 case GT:
4470 case GTU:
4471 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4472 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4473 XEXP (op0, 0), const0_rtx);
4474
4475 default:
4476 break;
4477 }
4478
4479 return NULL_RTX;
4480 }
4481
4482 enum
4483 {
4484 CMP_EQ = 1,
4485 CMP_LT = 2,
4486 CMP_GT = 4,
4487 CMP_LTU = 8,
4488 CMP_GTU = 16
4489 };
4490
4491
4492 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4493    KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4494    For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4495 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4496 For floating-point comparisons, assume that the operands were ordered. */
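     /* For example, KNOWN_RESULTS of (CMP_LT | CMP_GTU) describes operands that
        compare signed-less-than but unsigned-greater-than, so a GTU request
        yields const_true_rtx while GE yields const0_rtx.  */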
4497
4498 static rtx
4499 comparison_result (enum rtx_code code, int known_results)
4500 {
4501 switch (code)
4502 {
4503 case EQ:
4504 case UNEQ:
4505 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4506 case NE:
4507 case LTGT:
4508 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4509
4510 case LT:
4511 case UNLT:
4512 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4513 case GE:
4514 case UNGE:
4515 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4516
4517 case GT:
4518 case UNGT:
4519 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4520 case LE:
4521 case UNLE:
4522 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4523
4524 case LTU:
4525 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4526 case GEU:
4527 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4528
4529 case GTU:
4530 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4531 case LEU:
4532 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4533
4534 case ORDERED:
4535 return const_true_rtx;
4536 case UNORDERED:
4537 return const0_rtx;
4538 default:
4539 gcc_unreachable ();
4540 }
4541 }
4542
4543 /* Check if the given comparison (done in the given MODE) is actually a
4544 tautology or a contradiction.
4545 If no simplification is possible, this function returns zero.
4546 Otherwise, it returns either const_true_rtx or const0_rtx. */
4547
4548 rtx
4549 simplify_const_relational_operation (enum rtx_code code,
4550 enum machine_mode mode,
4551 rtx op0, rtx op1)
4552 {
4553 rtx tem;
4554 rtx trueop0;
4555 rtx trueop1;
4556
4557 gcc_assert (mode != VOIDmode
4558 || (GET_MODE (op0) == VOIDmode
4559 && GET_MODE (op1) == VOIDmode));
4560
4561 /* If op0 is a compare, extract the comparison arguments from it. */
4562 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4563 {
4564 op1 = XEXP (op0, 1);
4565 op0 = XEXP (op0, 0);
4566
4567 if (GET_MODE (op0) != VOIDmode)
4568 mode = GET_MODE (op0);
4569 else if (GET_MODE (op1) != VOIDmode)
4570 mode = GET_MODE (op1);
4571 else
4572 return 0;
4573 }
4574
4575 /* We can't simplify MODE_CC values since we don't know what the
4576 actual comparison is. */
4577 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4578 return 0;
4579
4580 /* Make sure the constant is second. */
4581 if (swap_commutative_operands_p (op0, op1))
4582 {
4583 tem = op0, op0 = op1, op1 = tem;
4584 code = swap_condition (code);
4585 }
4586
4587 trueop0 = avoid_constant_pool_reference (op0);
4588 trueop1 = avoid_constant_pool_reference (op1);
4589
4590 /* For integer comparisons of A and B maybe we can simplify A - B and can
4591 then simplify a comparison of that with zero. If A and B are both either
4592 a register or a CONST_INT, this can't help; testing for these cases will
4593 prevent infinite recursion here and speed things up.
4594
4595 We can only do this for EQ and NE comparisons as otherwise we may
4596      lose or introduce overflow, which we cannot disregard as undefined because
4597 we do not know the signedness of the operation on either the left or
4598 the right hand side of the comparison. */
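       /* For example, comparing (plus X (const_int 4)) against (const_int 4)
          for equality reduces to comparing X against zero, which the
          zero-comparison cases below may then settle.  */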
4599
4600 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4601 && (code == EQ || code == NE)
4602 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4603 && (REG_P (op1) || CONST_INT_P (trueop1)))
4604 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4605 /* We cannot do this if tem is a nonzero address. */
4606 && ! nonzero_address_p (tem))
4607 return simplify_const_relational_operation (signed_condition (code),
4608 mode, tem, const0_rtx);
4609
4610 if (! HONOR_NANS (mode) && code == ORDERED)
4611 return const_true_rtx;
4612
4613 if (! HONOR_NANS (mode) && code == UNORDERED)
4614 return const0_rtx;
4615
4616 /* For modes without NaNs, if the two operands are equal, we know the
4617 result except if they have side-effects. Even with NaNs we know
4618 the result of unordered comparisons and, if signaling NaNs are
4619 irrelevant, also the result of LT/GT/LTGT. */
4620 if ((! HONOR_NANS (GET_MODE (trueop0))
4621 || code == UNEQ || code == UNLE || code == UNGE
4622 || ((code == LT || code == GT || code == LTGT)
4623 && ! HONOR_SNANS (GET_MODE (trueop0))))
4624 && rtx_equal_p (trueop0, trueop1)
4625 && ! side_effects_p (trueop0))
4626 return comparison_result (code, CMP_EQ);
4627
4628 /* If the operands are floating-point constants, see if we can fold
4629 the result. */
4630 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4631 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4632 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4633 {
4634 REAL_VALUE_TYPE d0, d1;
4635
4636 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4637 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4638
4639 /* Comparisons are unordered iff at least one of the values is NaN. */
4640 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4641 switch (code)
4642 {
4643 case UNEQ:
4644 case UNLT:
4645 case UNGT:
4646 case UNLE:
4647 case UNGE:
4648 case NE:
4649 case UNORDERED:
4650 return const_true_rtx;
4651 case EQ:
4652 case LT:
4653 case GT:
4654 case LE:
4655 case GE:
4656 case LTGT:
4657 case ORDERED:
4658 return const0_rtx;
4659 default:
4660 return 0;
4661 }
4662
4663 return comparison_result (code,
4664 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4665 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4666 }
4667
4668 /* Otherwise, see if the operands are both integers. */
4669 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4670 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4671 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4672 {
4673 int width = GET_MODE_PRECISION (mode);
4674 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4675 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4676
4677 /* Get the two words comprising each integer constant. */
4678 if (CONST_DOUBLE_AS_INT_P (trueop0))
4679 {
4680 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4681 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4682 }
4683 else
4684 {
4685 l0u = l0s = INTVAL (trueop0);
4686 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4687 }
4688
4689 if (CONST_DOUBLE_AS_INT_P (trueop1))
4690 {
4691 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4692 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4693 }
4694 else
4695 {
4696 l1u = l1s = INTVAL (trueop1);
4697 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4698 }
4699
4700 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4701 we have to sign or zero-extend the values. */
4702 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4703 {
4704 l0u &= GET_MODE_MASK (mode);
4705 l1u &= GET_MODE_MASK (mode);
4706
4707 if (val_signbit_known_set_p (mode, l0s))
4708 l0s |= ~GET_MODE_MASK (mode);
4709
4710 if (val_signbit_known_set_p (mode, l1s))
4711 l1s |= ~GET_MODE_MASK (mode);
4712 }
4713 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4714 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4715
4716 if (h0u == h1u && l0u == l1u)
4717 return comparison_result (code, CMP_EQ);
4718 else
4719 {
4720 int cr;
4721 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4722 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4723 return comparison_result (code, cr);
4724 }
4725 }
4726
4727 /* Optimize comparisons with upper and lower bounds. */
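       /* For example, if nonzero_bits shows that only the low four bits of
          TRUEOP0 can be set, MMIN/MMAX become 0/15, so (gtu x (const_int 15))
          folds to const0_rtx and (leu x (const_int 15)) to const_true_rtx.  */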
4728 if (HWI_COMPUTABLE_MODE_P (mode)
4729 && CONST_INT_P (trueop1))
4730 {
4731 int sign;
4732 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4733 HOST_WIDE_INT val = INTVAL (trueop1);
4734 HOST_WIDE_INT mmin, mmax;
4735
4736 if (code == GEU
4737 || code == LEU
4738 || code == GTU
4739 || code == LTU)
4740 sign = 0;
4741 else
4742 sign = 1;
4743
4744 /* Get a reduced range if the sign bit is zero. */
4745 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4746 {
4747 mmin = 0;
4748 mmax = nonzero;
4749 }
4750 else
4751 {
4752 rtx mmin_rtx, mmax_rtx;
4753 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4754
4755 mmin = INTVAL (mmin_rtx);
4756 mmax = INTVAL (mmax_rtx);
4757 if (sign)
4758 {
4759 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4760
4761 mmin >>= (sign_copies - 1);
4762 mmax >>= (sign_copies - 1);
4763 }
4764 }
4765
4766 switch (code)
4767 {
4768 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4769 case GEU:
4770 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4771 return const_true_rtx;
4772 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4773 return const0_rtx;
4774 break;
4775 case GE:
4776 if (val <= mmin)
4777 return const_true_rtx;
4778 if (val > mmax)
4779 return const0_rtx;
4780 break;
4781
4782 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4783 case LEU:
4784 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4785 return const_true_rtx;
4786 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4787 return const0_rtx;
4788 break;
4789 case LE:
4790 if (val >= mmax)
4791 return const_true_rtx;
4792 if (val < mmin)
4793 return const0_rtx;
4794 break;
4795
4796 case EQ:
4797 /* x == y is always false for y out of range. */
4798 if (val < mmin || val > mmax)
4799 return const0_rtx;
4800 break;
4801
4802 /* x > y is always false for y >= mmax, always true for y < mmin. */
4803 case GTU:
4804 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4805 return const0_rtx;
4806 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4807 return const_true_rtx;
4808 break;
4809 case GT:
4810 if (val >= mmax)
4811 return const0_rtx;
4812 if (val < mmin)
4813 return const_true_rtx;
4814 break;
4815
4816 /* x < y is always false for y <= mmin, always true for y > mmax. */
4817 case LTU:
4818 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4819 return const0_rtx;
4820 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4821 return const_true_rtx;
4822 break;
4823 case LT:
4824 if (val <= mmin)
4825 return const0_rtx;
4826 if (val > mmax)
4827 return const_true_rtx;
4828 break;
4829
4830 case NE:
4831 /* x != y is always true for y out of range. */
4832 if (val < mmin || val > mmax)
4833 return const_true_rtx;
4834 break;
4835
4836 default:
4837 break;
4838 }
4839 }
4840
4841 /* Optimize integer comparisons with zero. */
4842 if (trueop1 == const0_rtx)
4843 {
4844 /* Some addresses are known to be nonzero. We don't know
4845 their sign, but equality comparisons are known. */
4846 if (nonzero_address_p (trueop0))
4847 {
4848 if (code == EQ || code == LEU)
4849 return const0_rtx;
4850 if (code == NE || code == GTU)
4851 return const_true_rtx;
4852 }
4853
4854 /* See if the first operand is an IOR with a constant. If so, we
4855 may be able to determine the result of this comparison. */
4856 if (GET_CODE (op0) == IOR)
4857 {
4858 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4859 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4860 {
4861 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4862 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4863 && (UINTVAL (inner_const)
4864 & ((unsigned HOST_WIDE_INT) 1
4865 << sign_bitnum)));
4866
4867 switch (code)
4868 {
4869 case EQ:
4870 case LEU:
4871 return const0_rtx;
4872 case NE:
4873 case GTU:
4874 return const_true_rtx;
4875 case LT:
4876 case LE:
4877 if (has_sign)
4878 return const_true_rtx;
4879 break;
4880 case GT:
4881 case GE:
4882 if (has_sign)
4883 return const0_rtx;
4884 break;
4885 default:
4886 break;
4887 }
4888 }
4889 }
4890 }
4891
4892 /* Optimize comparison of ABS with zero. */
4893 if (trueop1 == CONST0_RTX (mode)
4894 && (GET_CODE (trueop0) == ABS
4895 || (GET_CODE (trueop0) == FLOAT_EXTEND
4896 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4897 {
4898 switch (code)
4899 {
4900 case LT:
4901 /* Optimize abs(x) < 0.0. */
4902 if (!HONOR_SNANS (mode)
4903 && (!INTEGRAL_MODE_P (mode)
4904 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4905 {
4906 if (INTEGRAL_MODE_P (mode)
4907 && (issue_strict_overflow_warning
4908 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4909 warning (OPT_Wstrict_overflow,
4910 ("assuming signed overflow does not occur when "
4911 "assuming abs (x) < 0 is false"));
4912 return const0_rtx;
4913 }
4914 break;
4915
4916 case GE:
4917 /* Optimize abs(x) >= 0.0. */
4918 if (!HONOR_NANS (mode)
4919 && (!INTEGRAL_MODE_P (mode)
4920 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4921 {
4922 if (INTEGRAL_MODE_P (mode)
4923 && (issue_strict_overflow_warning
4924 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4925 warning (OPT_Wstrict_overflow,
4926 ("assuming signed overflow does not occur when "
4927 "assuming abs (x) >= 0 is true"));
4928 return const_true_rtx;
4929 }
4930 break;
4931
4932 case UNGE:
4933 /* Optimize ! (abs(x) < 0.0). */
4934 return const_true_rtx;
4935
4936 default:
4937 break;
4938 }
4939 }
4940
4941 return 0;
4942 }
4943 \f
4944 /* Simplify CODE, an operation with result mode MODE and three operands,
4945 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4946    a constant. Return 0 if no simplification is possible. */
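     /* For example, (if_then_else (const_int 1) A B) folds to A below, and the
        double negation in (fma (neg X) (neg Y) Z) is removed.  */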
4947
4948 rtx
4949 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4950 enum machine_mode op0_mode, rtx op0, rtx op1,
4951 rtx op2)
4952 {
4953 unsigned int width = GET_MODE_PRECISION (mode);
4954 bool any_change = false;
4955 rtx tem;
4956
4957 /* VOIDmode means "infinite" precision. */
4958 if (width == 0)
4959 width = HOST_BITS_PER_WIDE_INT;
4960
4961 switch (code)
4962 {
4963 case FMA:
4964 /* Simplify negations around the multiplication. */
4965 /* -a * -b + c => a * b + c. */
4966 if (GET_CODE (op0) == NEG)
4967 {
4968 tem = simplify_unary_operation (NEG, mode, op1, mode);
4969 if (tem)
4970 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4971 }
4972 else if (GET_CODE (op1) == NEG)
4973 {
4974 tem = simplify_unary_operation (NEG, mode, op0, mode);
4975 if (tem)
4976 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4977 }
4978
4979 /* Canonicalize the two multiplication operands. */
4980 /* a * -b + c => -b * a + c. */
4981 if (swap_commutative_operands_p (op0, op1))
4982 tem = op0, op0 = op1, op1 = tem, any_change = true;
4983
4984 if (any_change)
4985 return gen_rtx_FMA (mode, op0, op1, op2);
4986 return NULL_RTX;
4987
4988 case SIGN_EXTRACT:
4989 case ZERO_EXTRACT:
4990 if (CONST_INT_P (op0)
4991 && CONST_INT_P (op1)
4992 && CONST_INT_P (op2)
4993 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4994 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4995 {
4996 /* Extracting a bit-field from a constant */
4997 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4998 HOST_WIDE_INT op1val = INTVAL (op1);
4999 HOST_WIDE_INT op2val = INTVAL (op2);
5000 if (BITS_BIG_ENDIAN)
5001 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5002 else
5003 val >>= op2val;
5004
5005 if (HOST_BITS_PER_WIDE_INT != op1val)
5006 {
5007 /* First zero-extend. */
5008 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5009 /* If desired, propagate sign bit. */
5010 if (code == SIGN_EXTRACT
5011 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5012 != 0)
5013 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5014 }
5015
5016 return gen_int_mode (val, mode);
5017 }
5018 break;
5019
5020 case IF_THEN_ELSE:
5021 if (CONST_INT_P (op0))
5022 return op0 != const0_rtx ? op1 : op2;
5023
5024 /* Convert c ? a : a into "a". */
5025 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5026 return op1;
5027
5028 /* Convert a != b ? a : b into "a". */
5029 if (GET_CODE (op0) == NE
5030 && ! side_effects_p (op0)
5031 && ! HONOR_NANS (mode)
5032 && ! HONOR_SIGNED_ZEROS (mode)
5033 && ((rtx_equal_p (XEXP (op0, 0), op1)
5034 && rtx_equal_p (XEXP (op0, 1), op2))
5035 || (rtx_equal_p (XEXP (op0, 0), op2)
5036 && rtx_equal_p (XEXP (op0, 1), op1))))
5037 return op1;
5038
5039 /* Convert a == b ? a : b into "b". */
5040 if (GET_CODE (op0) == EQ
5041 && ! side_effects_p (op0)
5042 && ! HONOR_NANS (mode)
5043 && ! HONOR_SIGNED_ZEROS (mode)
5044 && ((rtx_equal_p (XEXP (op0, 0), op1)
5045 && rtx_equal_p (XEXP (op0, 1), op2))
5046 || (rtx_equal_p (XEXP (op0, 0), op2)
5047 && rtx_equal_p (XEXP (op0, 1), op1))))
5048 return op2;
5049
5050 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5051 {
5052 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5053 ? GET_MODE (XEXP (op0, 1))
5054 : GET_MODE (XEXP (op0, 0)));
5055 rtx temp;
5056
5057 /* Look for happy constants in op1 and op2. */
5058 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5059 {
5060 HOST_WIDE_INT t = INTVAL (op1);
5061 HOST_WIDE_INT f = INTVAL (op2);
5062
5063 if (t == STORE_FLAG_VALUE && f == 0)
5064 code = GET_CODE (op0);
5065 else if (t == 0 && f == STORE_FLAG_VALUE)
5066 {
5067 enum rtx_code tmp;
5068 tmp = reversed_comparison_code (op0, NULL_RTX);
5069 if (tmp == UNKNOWN)
5070 break;
5071 code = tmp;
5072 }
5073 else
5074 break;
5075
5076 return simplify_gen_relational (code, mode, cmp_mode,
5077 XEXP (op0, 0), XEXP (op0, 1));
5078 }
5079
5080 if (cmp_mode == VOIDmode)
5081 cmp_mode = op0_mode;
5082 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5083 cmp_mode, XEXP (op0, 0),
5084 XEXP (op0, 1));
5085
5086 /* See if any simplifications were possible. */
5087 if (temp)
5088 {
5089 if (CONST_INT_P (temp))
5090 return temp == const0_rtx ? op2 : op1;
5091 else if (temp)
5092 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5093 }
5094 }
5095 break;
5096
5097 case VEC_MERGE:
5098 gcc_assert (GET_MODE (op0) == mode);
5099 gcc_assert (GET_MODE (op1) == mode);
5100 gcc_assert (VECTOR_MODE_P (mode));
5101 op2 = avoid_constant_pool_reference (op2);
5102 if (CONST_INT_P (op2))
5103 {
5104 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5105 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5106 int mask = (1 << n_elts) - 1;
5107
5108 if (!(INTVAL (op2) & mask))
5109 return op1;
5110 if ((INTVAL (op2) & mask) == mask)
5111 return op0;
5112
5113 op0 = avoid_constant_pool_reference (op0);
5114 op1 = avoid_constant_pool_reference (op1);
5115 if (GET_CODE (op0) == CONST_VECTOR
5116 && GET_CODE (op1) == CONST_VECTOR)
5117 {
5118 rtvec v = rtvec_alloc (n_elts);
5119 unsigned int i;
5120
5121 for (i = 0; i < n_elts; i++)
5122 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5123 ? CONST_VECTOR_ELT (op0, i)
5124 : CONST_VECTOR_ELT (op1, i));
5125 return gen_rtx_CONST_VECTOR (mode, v);
5126 }
5127 }
5128 break;
5129
5130 default:
5131 gcc_unreachable ();
5132 }
5133
5134 return 0;
5135 }
5136
5137 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5138 or CONST_VECTOR,
5139 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5140
5141 Works by unpacking OP into a collection of 8-bit values
5142 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5143 and then repacking them again for OUTERMODE. */
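     /* For example, taking the low SImode part of a 64-bit integer CONST_DOUBLE
        unpacks the value into eight byte-sized chunks and repacks the selected
        four of them as an SImode CONST_INT.  */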
5144
5145 static rtx
5146 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5147 enum machine_mode innermode, unsigned int byte)
5148 {
5149 /* We support up to 512-bit values (for V8DFmode). */
5150 enum {
5151 max_bitsize = 512,
5152 value_bit = 8,
5153 value_mask = (1 << value_bit) - 1
5154 };
5155 unsigned char value[max_bitsize / value_bit];
5156 int value_start;
5157 int i;
5158 int elem;
5159
5160 int num_elem;
5161 rtx * elems;
5162 int elem_bitsize;
5163 rtx result_s;
5164 rtvec result_v = NULL;
5165 enum mode_class outer_class;
5166 enum machine_mode outer_submode;
5167
5168 /* Some ports misuse CCmode. */
5169 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5170 return op;
5171
5172 /* We have no way to represent a complex constant at the rtl level. */
5173 if (COMPLEX_MODE_P (outermode))
5174 return NULL_RTX;
5175
5176 /* Unpack the value. */
5177
5178 if (GET_CODE (op) == CONST_VECTOR)
5179 {
5180 num_elem = CONST_VECTOR_NUNITS (op);
5181 elems = &CONST_VECTOR_ELT (op, 0);
5182 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5183 }
5184 else
5185 {
5186 num_elem = 1;
5187 elems = &op;
5188 elem_bitsize = max_bitsize;
5189 }
5190 /* If this asserts, it is too complicated; reducing value_bit may help. */
5191 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5192 /* I don't know how to handle endianness of sub-units. */
5193 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5194
5195 for (elem = 0; elem < num_elem; elem++)
5196 {
5197 unsigned char * vp;
5198 rtx el = elems[elem];
5199
5200 /* Vectors are kept in target memory order. (This is probably
5201 a mistake.) */
5202 {
5203 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5204 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5205 / BITS_PER_UNIT);
5206 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5207 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5208 unsigned bytele = (subword_byte % UNITS_PER_WORD
5209 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5210 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5211 }
5212
5213 switch (GET_CODE (el))
5214 {
5215 case CONST_INT:
5216 for (i = 0;
5217 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5218 i += value_bit)
5219 *vp++ = INTVAL (el) >> i;
5220 /* CONST_INTs are always logically sign-extended. */
5221 for (; i < elem_bitsize; i += value_bit)
5222 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5223 break;
5224
5225 case CONST_DOUBLE:
5226 if (GET_MODE (el) == VOIDmode)
5227 {
5228 unsigned char extend = 0;
5229 /* If this triggers, someone should have generated a
5230 CONST_INT instead. */
5231 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5232
5233 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5234 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5235 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5236 {
5237 *vp++
5238 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5239 i += value_bit;
5240 }
5241
5242 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5243 extend = -1;
5244 for (; i < elem_bitsize; i += value_bit)
5245 *vp++ = extend;
5246 }
5247 else
5248 {
5249 long tmp[max_bitsize / 32];
5250 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5251
5252 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5253 gcc_assert (bitsize <= elem_bitsize);
5254 gcc_assert (bitsize % value_bit == 0);
5255
5256 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5257 GET_MODE (el));
5258
5259 /* real_to_target produces its result in words affected by
5260 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5261 and use WORDS_BIG_ENDIAN instead; see the documentation
5262 of SUBREG in rtl.texi. */
5263 for (i = 0; i < bitsize; i += value_bit)
5264 {
5265 int ibase;
5266 if (WORDS_BIG_ENDIAN)
5267 ibase = bitsize - 1 - i;
5268 else
5269 ibase = i;
5270 *vp++ = tmp[ibase / 32] >> i % 32;
5271 }
5272
5273 /* It shouldn't matter what's done here, so fill it with
5274 zero. */
5275 for (; i < elem_bitsize; i += value_bit)
5276 *vp++ = 0;
5277 }
5278 break;
5279
5280 case CONST_FIXED:
5281 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5282 {
5283 for (i = 0; i < elem_bitsize; i += value_bit)
5284 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5285 }
5286 else
5287 {
5288 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5289 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5290 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5291 i += value_bit)
5292 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5293 >> (i - HOST_BITS_PER_WIDE_INT);
5294 for (; i < elem_bitsize; i += value_bit)
5295 *vp++ = 0;
5296 }
5297 break;
5298
5299 default:
5300 gcc_unreachable ();
5301 }
5302 }
5303
5304 /* Now, pick the right byte to start with. */
5305 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5306 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5307 will already have offset 0. */
5308 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5309 {
5310 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5311 - byte);
5312 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5313 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5314 byte = (subword_byte % UNITS_PER_WORD
5315 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5316 }
5317
5318 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5319 so if it's become negative it will instead be very large.) */
5320 gcc_assert (byte < GET_MODE_SIZE (innermode));
5321
5322 /* Convert from bytes to chunks of size value_bit. */
5323 value_start = byte * (BITS_PER_UNIT / value_bit);
5324
5325 /* Re-pack the value. */
5326
5327 if (VECTOR_MODE_P (outermode))
5328 {
5329 num_elem = GET_MODE_NUNITS (outermode);
5330 result_v = rtvec_alloc (num_elem);
5331 elems = &RTVEC_ELT (result_v, 0);
5332 outer_submode = GET_MODE_INNER (outermode);
5333 }
5334 else
5335 {
5336 num_elem = 1;
5337 elems = &result_s;
5338 outer_submode = outermode;
5339 }
5340
5341 outer_class = GET_MODE_CLASS (outer_submode);
5342 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5343
5344 gcc_assert (elem_bitsize % value_bit == 0);
5345 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5346
5347 for (elem = 0; elem < num_elem; elem++)
5348 {
5349 unsigned char *vp;
5350
5351 /* Vectors are stored in target memory order. (This is probably
5352 a mistake.) */
5353 {
5354 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5355 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5356 / BITS_PER_UNIT);
5357 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5358 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5359 unsigned bytele = (subword_byte % UNITS_PER_WORD
5360 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5361 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5362 }
5363
5364 switch (outer_class)
5365 {
5366 case MODE_INT:
5367 case MODE_PARTIAL_INT:
5368 {
5369 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5370
5371 for (i = 0;
5372 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5373 i += value_bit)
5374 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5375 for (; i < elem_bitsize; i += value_bit)
5376 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5377 << (i - HOST_BITS_PER_WIDE_INT);
5378
5379 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5380 know why. */
5381 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5382 elems[elem] = gen_int_mode (lo, outer_submode);
5383 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5384 elems[elem] = immed_double_const (lo, hi, outer_submode);
5385 else
5386 return NULL_RTX;
5387 }
5388 break;
5389
5390 case MODE_FLOAT:
5391 case MODE_DECIMAL_FLOAT:
5392 {
5393 REAL_VALUE_TYPE r;
5394 long tmp[max_bitsize / 32];
5395
5396 /* real_from_target wants its input in words affected by
5397 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5398 and use WORDS_BIG_ENDIAN instead; see the documentation
5399 of SUBREG in rtl.texi. */
5400 for (i = 0; i < max_bitsize / 32; i++)
5401 tmp[i] = 0;
5402 for (i = 0; i < elem_bitsize; i += value_bit)
5403 {
5404 int ibase;
5405 if (WORDS_BIG_ENDIAN)
5406 ibase = elem_bitsize - 1 - i;
5407 else
5408 ibase = i;
5409 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5410 }
5411
5412 real_from_target (&r, tmp, outer_submode);
5413 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5414 }
5415 break;
5416
5417 case MODE_FRACT:
5418 case MODE_UFRACT:
5419 case MODE_ACCUM:
5420 case MODE_UACCUM:
5421 {
5422 FIXED_VALUE_TYPE f;
5423 f.data.low = 0;
5424 f.data.high = 0;
5425 f.mode = outer_submode;
5426
5427 for (i = 0;
5428 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5429 i += value_bit)
5430 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5431 for (; i < elem_bitsize; i += value_bit)
5432 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5433 << (i - HOST_BITS_PER_WIDE_INT));
5434
5435 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5436 }
5437 break;
5438
5439 default:
5440 gcc_unreachable ();
5441 }
5442 }
5443 if (VECTOR_MODE_P (outermode))
5444 return gen_rtx_CONST_VECTOR (outermode, result_v);
5445 else
5446 return result_s;
5447 }
5448
5449 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5450 Return 0 if no simplifications are possible. */
5451 rtx
5452 simplify_subreg (enum machine_mode outermode, rtx op,
5453 enum machine_mode innermode, unsigned int byte)
5454 {
5455 /* Little bit of sanity checking. */
5456 gcc_assert (innermode != VOIDmode);
5457 gcc_assert (outermode != VOIDmode);
5458 gcc_assert (innermode != BLKmode);
5459 gcc_assert (outermode != BLKmode);
5460
5461 gcc_assert (GET_MODE (op) == innermode
5462 || GET_MODE (op) == VOIDmode);
5463
5464 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5465 gcc_assert (byte < GET_MODE_SIZE (innermode));
5466
5467 if (outermode == innermode && !byte)
5468 return op;
5469
5470 if (CONST_INT_P (op)
5471 || CONST_DOUBLE_P (op)
5472 || GET_CODE (op) == CONST_FIXED
5473 || GET_CODE (op) == CONST_VECTOR)
5474 return simplify_immed_subreg (outermode, op, innermode, byte);
5475
5476 /* Changing mode twice with SUBREG => just change it once,
5477      or not at all if changing back to op's starting mode. */
5478 if (GET_CODE (op) == SUBREG)
5479 {
5480 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5481 int final_offset = byte + SUBREG_BYTE (op);
5482 rtx newx;
5483
5484 if (outermode == innermostmode
5485 && byte == 0 && SUBREG_BYTE (op) == 0)
5486 return SUBREG_REG (op);
5487
5488       /* The SUBREG_BYTE represents the offset, as if the value were stored
5489          in memory. The irritating exception is a paradoxical subreg, where
5490          we define SUBREG_BYTE to be 0; on big-endian machines this value
5491          would otherwise be negative. For a moment, undo this exception. */
5492 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5493 {
5494 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5495 if (WORDS_BIG_ENDIAN)
5496 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5497 if (BYTES_BIG_ENDIAN)
5498 final_offset += difference % UNITS_PER_WORD;
5499 }
5500 if (SUBREG_BYTE (op) == 0
5501 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5502 {
5503 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5504 if (WORDS_BIG_ENDIAN)
5505 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5506 if (BYTES_BIG_ENDIAN)
5507 final_offset += difference % UNITS_PER_WORD;
5508 }
5509
5510 /* See whether resulting subreg will be paradoxical. */
5511 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5512 {
5513 /* In nonparadoxical subregs we can't handle negative offsets. */
5514 if (final_offset < 0)
5515 return NULL_RTX;
5516 /* Bail out in case resulting subreg would be incorrect. */
5517 if (final_offset % GET_MODE_SIZE (outermode)
5518 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5519 return NULL_RTX;
5520 }
5521 else
5522 {
5523 int offset = 0;
5524 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5525
5526           /* In a paradoxical subreg, see if we are still looking at the lower part.
5527 If so, our SUBREG_BYTE will be 0. */
5528 if (WORDS_BIG_ENDIAN)
5529 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5530 if (BYTES_BIG_ENDIAN)
5531 offset += difference % UNITS_PER_WORD;
5532 if (offset == final_offset)
5533 final_offset = 0;
5534 else
5535 return NULL_RTX;
5536 }
5537
5538 /* Recurse for further possible simplifications. */
5539 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5540 final_offset);
5541 if (newx)
5542 return newx;
5543 if (validate_subreg (outermode, innermostmode,
5544 SUBREG_REG (op), final_offset))
5545 {
5546 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5547 if (SUBREG_PROMOTED_VAR_P (op)
5548 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5549 && GET_MODE_CLASS (outermode) == MODE_INT
5550 && IN_RANGE (GET_MODE_SIZE (outermode),
5551 GET_MODE_SIZE (innermode),
5552 GET_MODE_SIZE (innermostmode))
5553 && subreg_lowpart_p (newx))
5554 {
5555 SUBREG_PROMOTED_VAR_P (newx) = 1;
5556 SUBREG_PROMOTED_UNSIGNED_SET
5557 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5558 }
5559 return newx;
5560 }
5561 return NULL_RTX;
5562 }
5563
5564 /* Merge implicit and explicit truncations. */
5565
5566 if (GET_CODE (op) == TRUNCATE
5567 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5568 && subreg_lowpart_offset (outermode, innermode) == byte)
5569 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5570 GET_MODE (XEXP (op, 0)));
5571
5572 /* SUBREG of a hard register => just change the register number
5573 and/or mode. If the hard register is not valid in that mode,
5574 suppress this simplification. If the hard register is the stack,
5575 frame, or argument pointer, leave this as a SUBREG. */
5576
5577 if (REG_P (op) && HARD_REGISTER_P (op))
5578 {
5579 unsigned int regno, final_regno;
5580
5581 regno = REGNO (op);
5582 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5583 if (HARD_REGISTER_NUM_P (final_regno))
5584 {
5585 rtx x;
5586 int final_offset = byte;
5587
5588 /* Adjust offset for paradoxical subregs. */
5589 if (byte == 0
5590 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5591 {
5592 int difference = (GET_MODE_SIZE (innermode)
5593 - GET_MODE_SIZE (outermode));
5594 if (WORDS_BIG_ENDIAN)
5595 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5596 if (BYTES_BIG_ENDIAN)
5597 final_offset += difference % UNITS_PER_WORD;
5598 }
5599
5600 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5601
5602 /* Propagate original regno. We don't have any way to specify
5603              the offset inside the original regno, so do so only for the lowpart.
5604              The information is used only by alias analysis, which cannot
5605              grok a partial register anyway. */
5606
5607 if (subreg_lowpart_offset (outermode, innermode) == byte)
5608 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5609 return x;
5610 }
5611 }
5612
5613 /* If we have a SUBREG of a register that we are replacing and we are
5614 replacing it with a MEM, make a new MEM and try replacing the
5615 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5616 or if we would be widening it. */
5617
5618 if (MEM_P (op)
5619 && ! mode_dependent_address_p (XEXP (op, 0))
5620 /* Allow splitting of volatile memory references in case we don't
5621 have an instruction to move the whole thing. */
5622 && (! MEM_VOLATILE_P (op)
5623 || ! have_insn_for (SET, innermode))
5624 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5625 return adjust_address_nv (op, outermode, byte);
5626
5627 /* Handle complex values represented as CONCAT
5628 of real and imaginary part. */
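/* For example (illustrative, assuming 4-byte SFmode), taking the
   imaginary half of a complex value:
   (subreg:SF (concat:SC (reg:SF re) (reg:SF im)) 4) becomes (reg:SF im). */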
5629 if (GET_CODE (op) == CONCAT)
5630 {
5631 unsigned int part_size, final_offset;
5632 rtx part, res;
5633
5634 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5635 if (byte < part_size)
5636 {
5637 part = XEXP (op, 0);
5638 final_offset = byte;
5639 }
5640 else
5641 {
5642 part = XEXP (op, 1);
5643 final_offset = byte - part_size;
5644 }
5645
5646 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5647 return NULL_RTX;
5648
5649 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5650 if (res)
5651 return res;
5652 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5653 return gen_rtx_SUBREG (outermode, part, final_offset);
5654 return NULL_RTX;
5655 }
5656
5657 /* Optimize SUBREG truncations of zero and sign extended values. */
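/* For example (illustrative, assuming a little-endian target),
   (subreg:QI (zero_extend:SI (reg:QI x)) 0) is just (reg:QI x),
   since the requested lowpart is the extension's original value. */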
5658 if ((GET_CODE (op) == ZERO_EXTEND
5659 || GET_CODE (op) == SIGN_EXTEND)
5660 && SCALAR_INT_MODE_P (innermode)
5661 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5662 {
5663 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5664
5665 /* If we're requesting the lowpart of a zero or sign extension,
5666 there are three possibilities. If the outermode is the same
5667 as the origmode, we can omit both the extension and the subreg.
5668 If the outermode is not larger than the origmode, we can apply
5669 the truncation without the extension. Finally, if the outermode
5670 is larger than the origmode, but both are integer modes, we
5671 can just extend to the appropriate mode. */
5672 if (bitpos == 0)
5673 {
5674 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5675 if (outermode == origmode)
5676 return XEXP (op, 0);
5677 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5678 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5679 subreg_lowpart_offset (outermode,
5680 origmode));
5681 if (SCALAR_INT_MODE_P (outermode))
5682 return simplify_gen_unary (GET_CODE (op), outermode,
5683 XEXP (op, 0), origmode);
5684 }
5685
5686 /* A SUBREG resulting from a zero extension may fold to zero if
5687 it extracts bits higher than the ZERO_EXTEND's source bits. */
5688 if (GET_CODE (op) == ZERO_EXTEND
5689 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5690 return CONST0_RTX (outermode);
5691 }
5692
5693 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5694 into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5695 the outer subreg is effectively a truncation to the original mode. */
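/* For example (illustrative, little-endian),
   (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 2)) 0)
   becomes (ashiftrt:QI (reg:QI x) (const_int 2)); the logical shift of
   the sign-extended value only moves sign-bit copies into the low
   QImode bits, so an arithmetic shift in QImode is equivalent. */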
5696 if ((GET_CODE (op) == LSHIFTRT
5697 || GET_CODE (op) == ASHIFTRT)
5698 && SCALAR_INT_MODE_P (outermode)
5699 && SCALAR_INT_MODE_P (innermode)
5700 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
5701 to avoid the possibility that an outer LSHIFTRT shifts by more
5702 than the sign extension's sign_bit_copies and introduces zeros
5703 into the high bits of the result. */
5704 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5705 && CONST_INT_P (XEXP (op, 1))
5706 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5707 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5708 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5709 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5710 return simplify_gen_binary (ASHIFTRT, outermode,
5711 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5712
5713 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5714 into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5715 the outer subreg is effectively a truncation to the original mode. */
5716 if ((GET_CODE (op) == LSHIFTRT
5717 || GET_CODE (op) == ASHIFTRT)
5718 && SCALAR_INT_MODE_P (outermode)
5719 && SCALAR_INT_MODE_P (innermode)
5720 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5721 && CONST_INT_P (XEXP (op, 1))
5722 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5724 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5725 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5726 return simplify_gen_binary (LSHIFTRT, outermode,
5727 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5728
5729 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5730 into (ashift:QI (x:QI) C), where C is a suitable small constant and
5731 the outer subreg is effectively a truncation to the original mode. */
5732 if (GET_CODE (op) == ASHIFT
5733 && SCALAR_INT_MODE_P (outermode)
5734 && SCALAR_INT_MODE_P (innermode)
5735 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5736 && CONST_INT_P (XEXP (op, 1))
5737 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5738 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5739 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5740 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5741 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5742 return simplify_gen_binary (ASHIFT, outermode,
5743 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5744
5745 /* Recognize a word extraction from a multi-word subreg. */
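/* For example (illustrative, assuming a 32-bit little-endian target),
   (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) is the high
   word of X and becomes the word subreg (subreg:SI (reg:DI x) 4). */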
5746 if ((GET_CODE (op) == LSHIFTRT
5747 || GET_CODE (op) == ASHIFTRT)
5748 && SCALAR_INT_MODE_P (innermode)
5749 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5750 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5751 && CONST_INT_P (XEXP (op, 1))
5752 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5753 && INTVAL (XEXP (op, 1)) >= 0
5754 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5755 && byte == subreg_lowpart_offset (outermode, innermode))
5756 {
5757 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5758 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5759 (WORDS_BIG_ENDIAN
5760 ? byte - shifted_bytes
5761 : byte + shifted_bytes));
5762 }
5763
5764 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5765 and try replacing the SUBREG and shift with it. Don't do this if
5766 the MEM has a mode-dependent address or if we would be widening it. */
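/* For example (illustrative, assuming a 32-bit little-endian target
   and a non-volatile, mode-independent address ADDR),
   (subreg:SI (lshiftrt:DI (mem:DI ADDR) (const_int 32)) 0) becomes an
   SImode MEM referencing ADDR plus 4 bytes. */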
5767
5768 if ((GET_CODE (op) == LSHIFTRT
5769 || GET_CODE (op) == ASHIFTRT)
5770 && SCALAR_INT_MODE_P (innermode)
5771 && MEM_P (XEXP (op, 0))
5772 && CONST_INT_P (XEXP (op, 1))
5773 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5774 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5775 && INTVAL (XEXP (op, 1)) > 0
5776 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5777 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5778 && ! MEM_VOLATILE_P (XEXP (op, 0))
5779 && byte == subreg_lowpart_offset (outermode, innermode)
5780 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5781 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5782 {
5783 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5784 return adjust_address_nv (XEXP (op, 0), outermode,
5785 (WORDS_BIG_ENDIAN
5786 ? byte - shifted_bytes
5787 : byte + shifted_bytes));
5788 }
5789
5790 return NULL_RTX;
5791 }
5792
5793 /* Make a SUBREG operation or equivalent if it folds. */
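/* For example (illustrative), simplify_gen_subreg (SImode, x, DImode, 0)
   returns the folded lowpart of X when simplify_subreg succeeds, a fresh
   (subreg:SI x 0) when that subreg is merely valid, and NULL_RTX
   otherwise. */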
5794
5795 rtx
5796 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5797 enum machine_mode innermode, unsigned int byte)
5798 {
5799 rtx newx;
5800
5801 newx = simplify_subreg (outermode, op, innermode, byte);
5802 if (newx)
5803 return newx;
5804
5805 if (GET_CODE (op) == SUBREG
5806 || GET_CODE (op) == CONCAT
5807 || GET_MODE (op) == VOIDmode)
5808 return NULL_RTX;
5809
5810 if (validate_subreg (outermode, innermode, op, byte))
5811 return gen_rtx_SUBREG (outermode, op, byte);
5812
5813 return NULL_RTX;
5814 }
5815
5816 /* Simplify X, an rtx expression.
5817
5818 Return the simplified expression or NULL if no simplifications
5819 were possible.
5820
5821 This is the preferred entry point into the simplification routines;
5822 however, we still allow passes to call the more specific routines.
5823
5824 Right now GCC has three (yes, three) major bodies of RTL simplification
5825 code that need to be unified.
5826
5827 1. fold_rtx in cse.c. This code uses various CSE-specific
5828 information to aid in RTL simplification.
5829
5830 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5831 it uses combine-specific information to aid in RTL
5832 simplification.
5833
5834 3. The routines in this file.
5835
5836
5837 Long term we want to only have one body of simplification code; to
5838 get to that state I recommend the following steps:
5839
5840 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5841 which do not depend on pass-specific state into these routines.
5842
5843 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5844 use this routine whenever possible.
5845
5846 3. Allow for pass-dependent state to be provided to these
5847 routines and add simplifications based on that pass-dependent
5848 state. Remove code from cse.c & combine.c that becomes
5849 redundant/dead.
5850
5851 It will take time, but ultimately the compiler will be easier to
5852 maintain and improve. It's totally silly that when we add a
5853 simplification it needs to be added to 4 places (3 for RTL
5854 simplification and 1 for tree simplification). */
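/* For example (illustrative), simplify_rtx applied to
   (plus:SI (reg:SI x) (const_int 0)) dispatches to
   simplify_binary_operation and yields (reg:SI x), while an rtx with
   no applicable simplification yields NULL. */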
5855
5856 rtx
5857 simplify_rtx (const_rtx x)
5858 {
5859 const enum rtx_code code = GET_CODE (x);
5860 const enum machine_mode mode = GET_MODE (x);
5861
5862 switch (GET_RTX_CLASS (code))
5863 {
5864 case RTX_UNARY:
5865 return simplify_unary_operation (code, mode,
5866 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5867 case RTX_COMM_ARITH:
5868 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5869 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5870
5871 /* Fall through.... */
5872
5873 case RTX_BIN_ARITH:
5874 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5875
5876 case RTX_TERNARY:
5877 case RTX_BITFIELD_OPS:
5878 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5879 XEXP (x, 0), XEXP (x, 1),
5880 XEXP (x, 2));
5881
5882 case RTX_COMPARE:
5883 case RTX_COMM_COMPARE:
5884 return simplify_relational_operation (code, mode,
5885 ((GET_MODE (XEXP (x, 0))
5886 != VOIDmode)
5887 ? GET_MODE (XEXP (x, 0))
5888 : GET_MODE (XEXP (x, 1))),
5889 XEXP (x, 0),
5890 XEXP (x, 1));
5891
5892 case RTX_EXTRA:
5893 if (code == SUBREG)
5894 return simplify_subreg (mode, SUBREG_REG (x),
5895 GET_MODE (SUBREG_REG (x)),
5896 SUBREG_BYTE (x));
5897 break;
5898
5899 case RTX_OBJ:
5900 if (code == LO_SUM)
5901 {
5902 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5903 if (GET_CODE (XEXP (x, 0)) == HIGH
5904 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5905 return XEXP (x, 1);
5906 }
5907 break;
5908
5909 default:
5910 break;
5911 }
5912 return NULL;
5913 }