1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
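/* Illustration of the (low, high) convention above: with a 64-bit
   HOST_WIDE_INT, the value -5 is carried as the pair
   (low = -5, high = HWI_SIGN_EXTEND (-5) = -1), while 7 is carried as
   (low = 7, high = 0).  */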
49
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
63 \f
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
68 {
69 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
70 }
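/* Illustration of the truncation above: in QImode, negating
   (const_int -128) gives the host value 128, which gen_int_mode
   truncates and sign-extends back to (const_int -128).  */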
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
74
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
94 {
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
97 }
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
101
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
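/* For example, in SImode (32-bit precision) this returns true only for
   the constant whose value is 0x80000000, i.e. (const_int -2147483648)
   on a host with a 64-bit HOST_WIDE_INT.  */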
106
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
110
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114 unsigned int width;
115
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
118
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
122
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
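/* E.g. val_signbit_p (QImode, 0x80) is true, and so is
   val_signbit_p (QImode, 0xf80), since VAL is first masked with
   GET_MODE_MASK (QImode) == 0xff.  */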
126
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132 unsigned int width;
133
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
136
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
140
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
143 }
144
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150 unsigned int width;
151
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
154
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
158
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
161 }
162 \f
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
165
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
169 {
170 rtx tem;
171
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
176
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
181
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
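/* As an illustration (with a hypothetical pseudo REG rtx),
   simplify_gen_binary (PLUS, SImode, const1_rtx, reg) first tries the
   simplifier and, failing that, returns the canonically ordered
   (plus:SI reg (const_int 1)) with the constant second.  */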
184 \f
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
193
194 switch (GET_CODE (x))
195 {
196 case MEM:
197 break;
198
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
204 {
205 REAL_VALUE_TYPE d;
206
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 }
210 return x;
211
212 default:
213 return x;
214 }
215
216 if (GET_MODE (x) == BLKmode)
217 return x;
218
219 addr = XEXP (x, 0);
220
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
223
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228 {
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
231 }
232
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
235
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
240 {
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
243
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
248 {
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
252 }
253 else
254 return c;
255 }
256
257 return x;
258 }
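/* For example, a (mem:DF (symbol_ref)) whose address points into the
   constant pool is replaced by the pooled CONST_DOUBLE itself; if the
   access uses a different mode or a nonzero offset, simplify_subreg is
   used to extract the corresponding piece when possible.  */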
259 \f
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
263
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
266 {
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
272 {
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
276
277 switch (TREE_CODE (decl))
278 {
279 default:
280 decl = NULL;
281 break;
282
283 case VAR_DECL:
284 break;
285
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
293 {
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
297
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
305 {
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
309 }
310 break;
311 }
312 }
313
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
321 {
322 rtx newx;
323
324 offset += MEM_OFFSET (x);
325
326 newx = DECL_RTL (decl);
327
328 if (MEM_P (newx))
329 {
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
350 }
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
354 }
355 }
356
357 return x;
358 }
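/* For instance, a MEM whose MEM_EXPR is a static VAR_DECL "v" and whose
   known offset is zero is rewritten in terms of v's DECL_RTL, which is
   typically a (mem (symbol_ref "v")).  The variable name here is only
   illustrative.  */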
359 \f
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
362
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
366 {
367 rtx tem;
368
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
372
373 return gen_rtx_fmt_e (code, mode, op);
374 }
375
376 /* Likewise for ternary operations. */
377
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 {
382 rtx tem;
383
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
388
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 }
391
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
394
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 {
399 rtx tem;
400
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
404
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
406 }
407 \f
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
411 result. */
412
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 {
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
424
425 if (__builtin_expect (fn != NULL, 0))
426 {
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
430 }
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
433
434 switch (GET_RTX_CLASS (code))
435 {
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
443
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
451
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
462
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475
476 case RTX_EXTRA:
477 if (code == SUBREG)
478 {
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
486 }
487 break;
488
489 case RTX_OBJ:
490 if (code == MEM)
491 {
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
496 }
497 else if (code == LO_SUM)
498 {
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
505
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
509 }
510 break;
511
512 default:
513 break;
514 }
515
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
520 {
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 {
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
529 {
530 if (newvec == vec)
531 {
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
536 }
537 RTVEC_ELT (newvec, j) = op;
538 }
539 }
540 break;
541
542 case 'e':
543 if (XEXP (x, i))
544 {
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
547 {
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
551 }
552 }
553 break;
554 }
555 return newx;
556 }
557
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
560
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 {
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
565 }
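/* For example, replacing (reg:SI 100) with (const_int 0) in
   (plus:SI (reg:SI 100) (const_int 4)) folds the whole expression to
   (const_int 4); the register number here is only illustrative.  */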
566 \f
567 /* Try to simplify a unary operation CODE whose output mode is to be
568 MODE with input operand OP whose mode was originally OP_MODE.
569 Return zero if no simplification can be made. */
570 rtx
571 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
572 rtx op, enum machine_mode op_mode)
573 {
574 rtx trueop, tem;
575
576 trueop = avoid_constant_pool_reference (op);
577
578 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
579 if (tem)
580 return tem;
581
582 return simplify_unary_operation_1 (code, mode, op);
583 }
584
585 /* Perform some simplifications we can do even if the operands
586 aren't constant. */
587 static rtx
588 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
589 {
590 enum rtx_code reversed;
591 rtx temp;
592
593 switch (code)
594 {
595 case NOT:
596 /* (not (not X)) == X. */
597 if (GET_CODE (op) == NOT)
598 return XEXP (op, 0);
599
600 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
601 comparison is all ones. */
602 if (COMPARISON_P (op)
603 && (mode == BImode || STORE_FLAG_VALUE == -1)
604 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
605 return simplify_gen_relational (reversed, mode, VOIDmode,
606 XEXP (op, 0), XEXP (op, 1));
607
608 /* (not (plus X -1)) can become (neg X). */
609 if (GET_CODE (op) == PLUS
610 && XEXP (op, 1) == constm1_rtx)
611 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
612
613 /* Similarly, (not (neg X)) is (plus X -1). */
614 if (GET_CODE (op) == NEG)
615 return plus_constant (mode, XEXP (op, 0), -1);
616
617 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
618 if (GET_CODE (op) == XOR
619 && CONST_INT_P (XEXP (op, 1))
620 && (temp = simplify_unary_operation (NOT, mode,
621 XEXP (op, 1), mode)) != 0)
622 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
623
624 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
625 if (GET_CODE (op) == PLUS
626 && CONST_INT_P (XEXP (op, 1))
627 && mode_signbit_p (mode, XEXP (op, 1))
628 && (temp = simplify_unary_operation (NOT, mode,
629 XEXP (op, 1), mode)) != 0)
630 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
631
632
633 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
634 operands other than 1, but that is not valid. We could do a
635 similar simplification for (not (lshiftrt C X)) where C is
636 just the sign bit, but this doesn't seem common enough to
637 bother with. */
638 if (GET_CODE (op) == ASHIFT
639 && XEXP (op, 0) == const1_rtx)
640 {
641 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
642 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
643 }
644
645 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
646 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
647 so we can perform the above simplification. */
648
649 if (STORE_FLAG_VALUE == -1
650 && GET_CODE (op) == ASHIFTRT
651 && CONST_INT_P (XEXP (op, 1))
652 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
653 return simplify_gen_relational (GE, mode, VOIDmode,
654 XEXP (op, 0), const0_rtx);
655
656
657 if (GET_CODE (op) == SUBREG
658 && subreg_lowpart_p (op)
659 && (GET_MODE_SIZE (GET_MODE (op))
660 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
661 && GET_CODE (SUBREG_REG (op)) == ASHIFT
662 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
663 {
664 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
665 rtx x;
666
667 x = gen_rtx_ROTATE (inner_mode,
668 simplify_gen_unary (NOT, inner_mode, const1_rtx,
669 inner_mode),
670 XEXP (SUBREG_REG (op), 1));
671 return rtl_hooks.gen_lowpart_no_emit (mode, x);
672 }
673
674 /* Apply De Morgan's laws to reduce number of patterns for machines
675 with negating logical insns (and-not, nand, etc.). If result has
676 only one NOT, put it first, since that is how the patterns are
677 coded. */
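      /* For example, (not (and X Y)) becomes (ior (not X) (not Y)) and
	 (not (ior X Y)) becomes (and (not X) (not Y)).  */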
678
679 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
680 {
681 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
682 enum machine_mode op_mode;
683
684 op_mode = GET_MODE (in1);
685 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
686
687 op_mode = GET_MODE (in2);
688 if (op_mode == VOIDmode)
689 op_mode = mode;
690 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
691
692 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
693 {
694 rtx tem = in2;
695 in2 = in1; in1 = tem;
696 }
697
698 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
699 mode, in1, in2);
700 }
701 break;
702
703 case NEG:
704 /* (neg (neg X)) == X. */
705 if (GET_CODE (op) == NEG)
706 return XEXP (op, 0);
707
708 /* (neg (plus X 1)) can become (not X). */
709 if (GET_CODE (op) == PLUS
710 && XEXP (op, 1) == const1_rtx)
711 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
712
713 /* Similarly, (neg (not X)) is (plus X 1). */
714 if (GET_CODE (op) == NOT)
715 return plus_constant (mode, XEXP (op, 0), 1);
716
717 /* (neg (minus X Y)) can become (minus Y X). This transformation
718 isn't safe for modes with signed zeros, since if X and Y are
719 both +0, (minus Y X) is the same as (minus X Y). If the
720 rounding mode is towards +infinity (or -infinity) then the two
721 expressions will be rounded differently. */
722 if (GET_CODE (op) == MINUS
723 && !HONOR_SIGNED_ZEROS (mode)
724 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
725 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
726
727 if (GET_CODE (op) == PLUS
728 && !HONOR_SIGNED_ZEROS (mode)
729 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
730 {
731 /* (neg (plus A C)) is simplified to (minus -C A). */
732 if (CONST_INT_P (XEXP (op, 1))
733 || CONST_DOUBLE_P (XEXP (op, 1)))
734 {
735 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
736 if (temp)
737 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
738 }
739
740 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
741 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
742 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
743 }
744
745 /* (neg (mult A B)) becomes (mult A (neg B)).
746 This works even for floating-point values. */
747 if (GET_CODE (op) == MULT
748 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
749 {
750 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
751 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
752 }
753
754 /* NEG commutes with ASHIFT since it is multiplication. Only do
755 this if we can then eliminate the NEG (e.g., if the operand
756 is a constant). */
757 if (GET_CODE (op) == ASHIFT)
758 {
759 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
760 if (temp)
761 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
762 }
763
764 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
765 C is equal to the width of MODE minus 1. */
766 if (GET_CODE (op) == ASHIFTRT
767 && CONST_INT_P (XEXP (op, 1))
768 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
769 return simplify_gen_binary (LSHIFTRT, mode,
770 XEXP (op, 0), XEXP (op, 1));
771
772 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
773 C is equal to the width of MODE minus 1. */
774 if (GET_CODE (op) == LSHIFTRT
775 && CONST_INT_P (XEXP (op, 1))
776 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
777 return simplify_gen_binary (ASHIFTRT, mode,
778 XEXP (op, 0), XEXP (op, 1));
779
780 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
781 if (GET_CODE (op) == XOR
782 && XEXP (op, 1) == const1_rtx
783 && nonzero_bits (XEXP (op, 0), mode) == 1)
784 return plus_constant (mode, XEXP (op, 0), -1);
785
786 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
787 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
788 if (GET_CODE (op) == LT
789 && XEXP (op, 1) == const0_rtx
790 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
791 {
792 enum machine_mode inner = GET_MODE (XEXP (op, 0));
793 int isize = GET_MODE_PRECISION (inner);
794 if (STORE_FLAG_VALUE == 1)
795 {
796 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
797 GEN_INT (isize - 1));
798 if (mode == inner)
799 return temp;
800 if (GET_MODE_PRECISION (mode) > isize)
801 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
802 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
803 }
804 else if (STORE_FLAG_VALUE == -1)
805 {
806 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
807 GEN_INT (isize - 1));
808 if (mode == inner)
809 return temp;
810 if (GET_MODE_PRECISION (mode) > isize)
811 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
812 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
813 }
814 }
815 break;
816
817 case TRUNCATE:
818 /* We can't handle truncation to a partial integer mode here
819 because we don't know the real bitsize of the partial
820 integer mode. */
821 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
822 break;
823
824 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
825 if ((GET_CODE (op) == SIGN_EXTEND
826 || GET_CODE (op) == ZERO_EXTEND)
827 && GET_MODE (XEXP (op, 0)) == mode)
828 return XEXP (op, 0);
829
830 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
831 (OP:SI foo:SI) if OP is NEG or ABS. */
832 if ((GET_CODE (op) == ABS
833 || GET_CODE (op) == NEG)
834 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
835 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
836 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
837 return simplify_gen_unary (GET_CODE (op), mode,
838 XEXP (XEXP (op, 0), 0), mode);
839
840 /* (truncate:A (subreg:B (truncate:C X) 0)) is
841 (truncate:A X). */
842 if (GET_CODE (op) == SUBREG
843 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
844 && subreg_lowpart_p (op))
845 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
846 GET_MODE (XEXP (SUBREG_REG (op), 0)));
847
848 /* If we know that the value is already truncated, we can
849 replace the TRUNCATE with a SUBREG. Note that this is also
850 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
851 modes we just have to apply a different definition for
852 truncation. But don't do this for an (LSHIFTRT (MULT ...))
853 since this will cause problems with the umulXi3_highpart
854 patterns. */
855 if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
856 ? (num_sign_bit_copies (op, GET_MODE (op))
857 > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
858 - GET_MODE_PRECISION (mode)))
859 : truncated_to_mode (mode, op))
860 && ! (GET_CODE (op) == LSHIFTRT
861 && GET_CODE (XEXP (op, 0)) == MULT))
862 return rtl_hooks.gen_lowpart_no_emit (mode, op);
863
864 /* A truncate of a comparison can be replaced with a subreg if
865 STORE_FLAG_VALUE permits. This is like the previous test,
866 but it works even if the comparison is done in a mode larger
867 than HOST_BITS_PER_WIDE_INT. */
868 if (HWI_COMPUTABLE_MODE_P (mode)
869 && COMPARISON_P (op)
870 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
871 return rtl_hooks.gen_lowpart_no_emit (mode, op);
872 break;
873
874 case FLOAT_TRUNCATE:
875 if (DECIMAL_FLOAT_MODE_P (mode))
876 break;
877
878 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
879 if (GET_CODE (op) == FLOAT_EXTEND
880 && GET_MODE (XEXP (op, 0)) == mode)
881 return XEXP (op, 0);
882
883 /* (float_truncate:SF (float_truncate:DF foo:XF))
884 = (float_truncate:SF foo:XF).
885 This may eliminate double rounding, so it is unsafe.
886
887 (float_truncate:SF (float_extend:XF foo:DF))
888 = (float_truncate:SF foo:DF).
889
890 (float_truncate:DF (float_extend:XF foo:SF))
891 = (float_extend:DF foo:SF). */
892 if ((GET_CODE (op) == FLOAT_TRUNCATE
893 && flag_unsafe_math_optimizations)
894 || GET_CODE (op) == FLOAT_EXTEND)
895 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
896 0)))
897 > GET_MODE_SIZE (mode)
898 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
899 mode,
900 XEXP (op, 0), mode);
901
902 /* (float_truncate (float x)) is (float x) */
903 if (GET_CODE (op) == FLOAT
904 && (flag_unsafe_math_optimizations
905 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
906 && ((unsigned)significand_size (GET_MODE (op))
907 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
908 - num_sign_bit_copies (XEXP (op, 0),
909 GET_MODE (XEXP (op, 0))))))))
910 return simplify_gen_unary (FLOAT, mode,
911 XEXP (op, 0),
912 GET_MODE (XEXP (op, 0)));
913
914 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
915 (OP:SF foo:SF) if OP is NEG or ABS. */
916 if ((GET_CODE (op) == ABS
917 || GET_CODE (op) == NEG)
918 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
919 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
920 return simplify_gen_unary (GET_CODE (op), mode,
921 XEXP (XEXP (op, 0), 0), mode);
922
923 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
924 is (float_truncate:SF x). */
925 if (GET_CODE (op) == SUBREG
926 && subreg_lowpart_p (op)
927 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
928 return SUBREG_REG (op);
929 break;
930
931 case FLOAT_EXTEND:
932 if (DECIMAL_FLOAT_MODE_P (mode))
933 break;
934
935 /* (float_extend (float_extend x)) is (float_extend x)
936
937 (float_extend (float x)) is (float x) assuming that double
938 rounding can't happen.
939 */
940 if (GET_CODE (op) == FLOAT_EXTEND
941 || (GET_CODE (op) == FLOAT
942 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
943 && ((unsigned)significand_size (GET_MODE (op))
944 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
945 - num_sign_bit_copies (XEXP (op, 0),
946 GET_MODE (XEXP (op, 0)))))))
947 return simplify_gen_unary (GET_CODE (op), mode,
948 XEXP (op, 0),
949 GET_MODE (XEXP (op, 0)));
950
951 break;
952
953 case ABS:
954 /* (abs (neg <foo>)) -> (abs <foo>) */
955 if (GET_CODE (op) == NEG)
956 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
957 GET_MODE (XEXP (op, 0)));
958
959 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
960 do nothing. */
961 if (GET_MODE (op) == VOIDmode)
962 break;
963
964 /* If operand is something known to be positive, ignore the ABS. */
965 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
966 || val_signbit_known_clear_p (GET_MODE (op),
967 nonzero_bits (op, GET_MODE (op))))
968 return op;
969
970 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
971 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
972 return gen_rtx_NEG (mode, op);
973
974 break;
975
976 case FFS:
977 /* (ffs (*_extend <X>)) = (ffs <X>) */
978 if (GET_CODE (op) == SIGN_EXTEND
979 || GET_CODE (op) == ZERO_EXTEND)
980 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
981 GET_MODE (XEXP (op, 0)));
982 break;
983
984 case POPCOUNT:
985 switch (GET_CODE (op))
986 {
987 case BSWAP:
988 case ZERO_EXTEND:
989 /* (popcount (zero_extend <X>)) = (popcount <X>) */
990 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
991 GET_MODE (XEXP (op, 0)));
992
993 case ROTATE:
994 case ROTATERT:
995 /* Rotations don't affect popcount. */
996 if (!side_effects_p (XEXP (op, 1)))
997 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
998 GET_MODE (XEXP (op, 0)));
999 break;
1000
1001 default:
1002 break;
1003 }
1004 break;
1005
1006 case PARITY:
1007 switch (GET_CODE (op))
1008 {
1009 case NOT:
1010 case BSWAP:
1011 case ZERO_EXTEND:
1012 case SIGN_EXTEND:
1013 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1014 GET_MODE (XEXP (op, 0)));
1015
1016 case ROTATE:
1017 case ROTATERT:
1018 /* Rotations don't affect parity. */
1019 if (!side_effects_p (XEXP (op, 1)))
1020 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1021 GET_MODE (XEXP (op, 0)));
1022 break;
1023
1024 default:
1025 break;
1026 }
1027 break;
1028
1029 case BSWAP:
1030 /* (bswap (bswap x)) -> x. */
1031 if (GET_CODE (op) == BSWAP)
1032 return XEXP (op, 0);
1033 break;
1034
1035 case FLOAT:
1036 /* (float (sign_extend <X>)) = (float <X>). */
1037 if (GET_CODE (op) == SIGN_EXTEND)
1038 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1039 GET_MODE (XEXP (op, 0)));
1040 break;
1041
1042 case SIGN_EXTEND:
1043 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1044 becomes just the MINUS if its mode is MODE. This allows
1045 folding switch statements on machines using casesi (such as
1046 the VAX). */
1047 if (GET_CODE (op) == TRUNCATE
1048 && GET_MODE (XEXP (op, 0)) == mode
1049 && GET_CODE (XEXP (op, 0)) == MINUS
1050 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1051 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1052 return XEXP (op, 0);
1053
1054 /* Extending a widening multiplication should be canonicalized to
1055 a wider widening multiplication. */
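	 /* For example, (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
	    (sign_extend:SI y:HI))) is rewritten below as
	    (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI))
	    whenever the SImode product provably cannot overflow.  */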
1056 if (GET_CODE (op) == MULT)
1057 {
1058 rtx lhs = XEXP (op, 0);
1059 rtx rhs = XEXP (op, 1);
1060 enum rtx_code lcode = GET_CODE (lhs);
1061 enum rtx_code rcode = GET_CODE (rhs);
1062
1063 /* Widening multiplies usually extend both operands, but sometimes
1064 they use a shift to extract a portion of a register. */
1065 if ((lcode == SIGN_EXTEND
1066 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1067 && (rcode == SIGN_EXTEND
1068 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1069 {
1070 enum machine_mode lmode = GET_MODE (lhs);
1071 enum machine_mode rmode = GET_MODE (rhs);
1072 int bits;
1073
1074 if (lcode == ASHIFTRT)
1075 /* Number of bits not shifted off the end. */
1076 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1077 else /* lcode == SIGN_EXTEND */
1078 /* Size of inner mode. */
1079 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1080
1081 if (rcode == ASHIFTRT)
1082 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1083 else /* rcode == SIGN_EXTEND */
1084 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1085
1086 /* We can only widen multiplies if the result is mathematically
1087 equivalent. I.e. if overflow was impossible. */
1088 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1089 return simplify_gen_binary
1090 (MULT, mode,
1091 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1092 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1093 }
1094 }
1095
1096 /* Check for a sign extension of a subreg of a promoted
1097 variable, where the promotion is sign-extended, and the
1098 target mode is the same as the variable's promotion. */
1099 if (GET_CODE (op) == SUBREG
1100 && SUBREG_PROMOTED_VAR_P (op)
1101 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1102 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1103 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1104
1105 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1106 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1107 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1108 {
1109 gcc_assert (GET_MODE_BITSIZE (mode)
1110 > GET_MODE_BITSIZE (GET_MODE (op)));
1111 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1112 GET_MODE (XEXP (op, 0)));
1113 }
1114
1115 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1116 is (sign_extend:M (subreg:O <X>)) if there is mode with
1117 GET_MODE_BITSIZE (N) - I bits.
1118 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1119 is similarly (zero_extend:M (subreg:O <X>)). */
1120 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1121 && GET_CODE (XEXP (op, 0)) == ASHIFT
1122 && CONST_INT_P (XEXP (op, 1))
1123 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1124 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1125 {
1126 enum machine_mode tmode
1127 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1128 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1129 gcc_assert (GET_MODE_BITSIZE (mode)
1130 > GET_MODE_BITSIZE (GET_MODE (op)));
1131 if (tmode != BLKmode)
1132 {
1133 rtx inner =
1134 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1135 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1136 ? SIGN_EXTEND : ZERO_EXTEND,
1137 mode, inner, tmode);
1138 }
1139 }
1140
1141 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1142 /* As we do not know which address space the pointer is referring to,
1143 we can do this only if the target does not support different pointer
1144 or address modes depending on the address space. */
1145 if (target_default_pointer_address_modes_p ()
1146 && ! POINTERS_EXTEND_UNSIGNED
1147 && mode == Pmode && GET_MODE (op) == ptr_mode
1148 && (CONSTANT_P (op)
1149 || (GET_CODE (op) == SUBREG
1150 && REG_P (SUBREG_REG (op))
1151 && REG_POINTER (SUBREG_REG (op))
1152 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1153 return convert_memory_address (Pmode, op);
1154 #endif
1155 break;
1156
1157 case ZERO_EXTEND:
1158 /* Check for a zero extension of a subreg of a promoted
1159 variable, where the promotion is zero-extended, and the
1160 target mode is the same as the variable's promotion. */
1161 if (GET_CODE (op) == SUBREG
1162 && SUBREG_PROMOTED_VAR_P (op)
1163 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1164 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1165 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1166
1167 /* Extending a widening multiplication should be canonicalized to
1168 a wider widening multiplication. */
1169 if (GET_CODE (op) == MULT)
1170 {
1171 rtx lhs = XEXP (op, 0);
1172 rtx rhs = XEXP (op, 1);
1173 enum rtx_code lcode = GET_CODE (lhs);
1174 enum rtx_code rcode = GET_CODE (rhs);
1175
1176 /* Widening multiplies usually extend both operands, but sometimes
1177 they use a shift to extract a portion of a register. */
1178 if ((lcode == ZERO_EXTEND
1179 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1180 && (rcode == ZERO_EXTEND
1181 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1182 {
1183 enum machine_mode lmode = GET_MODE (lhs);
1184 enum machine_mode rmode = GET_MODE (rhs);
1185 int bits;
1186
1187 if (lcode == LSHIFTRT)
1188 /* Number of bits not shifted off the end. */
1189 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1190 else /* lcode == ZERO_EXTEND */
1191 /* Size of inner mode. */
1192 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1193
1194 if (rcode == LSHIFTRT)
1195 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1196 else /* rcode == ZERO_EXTEND */
1197 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1198
1199 /* We can only widen multiplies if the result is mathematically
1200 equivalent. I.e. if overflow was impossible. */
1201 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1202 return simplify_gen_binary
1203 (MULT, mode,
1204 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1205 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1206 }
1207 }
1208
1209 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1210 if (GET_CODE (op) == ZERO_EXTEND)
1211 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1212 GET_MODE (XEXP (op, 0)));
1213
1214 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1215 is (zero_extend:M (subreg:O <X>)) if there is mode with
1216 GET_MODE_BITSIZE (N) - I bits. */
1217 if (GET_CODE (op) == LSHIFTRT
1218 && GET_CODE (XEXP (op, 0)) == ASHIFT
1219 && CONST_INT_P (XEXP (op, 1))
1220 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1221 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1222 {
1223 enum machine_mode tmode
1224 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1225 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1226 if (tmode != BLKmode)
1227 {
1228 rtx inner =
1229 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1230 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1231 }
1232 }
1233
1234 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1235 /* As we do not know which address space the pointer is referring to,
1236 we can do this only if the target does not support different pointer
1237 or address modes depending on the address space. */
1238 if (target_default_pointer_address_modes_p ()
1239 && POINTERS_EXTEND_UNSIGNED > 0
1240 && mode == Pmode && GET_MODE (op) == ptr_mode
1241 && (CONSTANT_P (op)
1242 || (GET_CODE (op) == SUBREG
1243 && REG_P (SUBREG_REG (op))
1244 && REG_POINTER (SUBREG_REG (op))
1245 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1246 return convert_memory_address (Pmode, op);
1247 #endif
1248 break;
1249
1250 default:
1251 break;
1252 }
1253
1254 return 0;
1255 }
1256
1257 /* Try to compute the value of a unary operation CODE whose output mode is to
1258 be MODE with input operand OP whose mode was originally OP_MODE.
1259 Return zero if the value cannot be computed. */
1260 rtx
1261 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1262 rtx op, enum machine_mode op_mode)
1263 {
1264 unsigned int width = GET_MODE_PRECISION (mode);
1265 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1266
1267 if (code == VEC_DUPLICATE)
1268 {
1269 gcc_assert (VECTOR_MODE_P (mode));
1270 if (GET_MODE (op) != VOIDmode)
1271 {
1272 if (!VECTOR_MODE_P (GET_MODE (op)))
1273 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1274 else
1275 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1276 (GET_MODE (op)));
1277 }
1278 if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
1279 || GET_CODE (op) == CONST_VECTOR)
1280 {
1281 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1282 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1283 rtvec v = rtvec_alloc (n_elts);
1284 unsigned int i;
1285
1286 if (GET_CODE (op) != CONST_VECTOR)
1287 for (i = 0; i < n_elts; i++)
1288 RTVEC_ELT (v, i) = op;
1289 else
1290 {
1291 enum machine_mode inmode = GET_MODE (op);
1292 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1293 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1294
1295 gcc_assert (in_n_elts < n_elts);
1296 gcc_assert ((n_elts % in_n_elts) == 0);
1297 for (i = 0; i < n_elts; i++)
1298 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1299 }
1300 return gen_rtx_CONST_VECTOR (mode, v);
1301 }
1302 }
1303
1304 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1305 {
1306 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1307 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1308 enum machine_mode opmode = GET_MODE (op);
1309 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1310 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1311 rtvec v = rtvec_alloc (n_elts);
1312 unsigned int i;
1313
1314 gcc_assert (op_n_elts == n_elts);
1315 for (i = 0; i < n_elts; i++)
1316 {
1317 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1318 CONST_VECTOR_ELT (op, i),
1319 GET_MODE_INNER (opmode));
1320 if (!x)
1321 return 0;
1322 RTVEC_ELT (v, i) = x;
1323 }
1324 return gen_rtx_CONST_VECTOR (mode, v);
1325 }
1326
1327 /* The order of these tests is critical so that, for example, we don't
1328 check the wrong mode (input vs. output) for a conversion operation,
1329 such as FIX. At some point, this should be simplified. */
1330
1331 if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1332 {
1333 HOST_WIDE_INT hv, lv;
1334 REAL_VALUE_TYPE d;
1335
1336 if (CONST_INT_P (op))
1337 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1338 else
1339 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1340
1341 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1342 d = real_value_truncate (mode, d);
1343 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1344 }
1345 else if (code == UNSIGNED_FLOAT
1346 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1347 {
1348 HOST_WIDE_INT hv, lv;
1349 REAL_VALUE_TYPE d;
1350
1351 if (CONST_INT_P (op))
1352 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1353 else
1354 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1355
1356 if (op_mode == VOIDmode
1357 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1358 /* We should never get a negative number. */
1359 gcc_assert (hv >= 0);
1360 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1361 hv = 0, lv &= GET_MODE_MASK (op_mode);
1362
1363 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1364 d = real_value_truncate (mode, d);
1365 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1366 }
1367
1368 if (CONST_INT_P (op)
1369 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1370 {
1371 HOST_WIDE_INT arg0 = INTVAL (op);
1372 HOST_WIDE_INT val;
1373
1374 switch (code)
1375 {
1376 case NOT:
1377 val = ~ arg0;
1378 break;
1379
1380 case NEG:
1381 val = - arg0;
1382 break;
1383
1384 case ABS:
1385 val = (arg0 >= 0 ? arg0 : - arg0);
1386 break;
1387
1388 case FFS:
1389 arg0 &= GET_MODE_MASK (mode);
1390 val = ffs_hwi (arg0);
1391 break;
1392
1393 case CLZ:
1394 arg0 &= GET_MODE_MASK (mode);
1395 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1396 ;
1397 else
1398 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1399 break;
1400
1401 case CLRSB:
1402 arg0 &= GET_MODE_MASK (mode);
1403 if (arg0 == 0)
1404 val = GET_MODE_PRECISION (mode) - 1;
1405 else if (arg0 >= 0)
1406 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1407 else if (arg0 < 0)
1408 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1409 break;
1410
1411 case CTZ:
1412 arg0 &= GET_MODE_MASK (mode);
1413 if (arg0 == 0)
1414 {
1415 /* Even if the value at zero is undefined, we have to come
1416 up with some replacement. Seems good enough. */
1417 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1418 val = GET_MODE_PRECISION (mode);
1419 }
1420 else
1421 val = ctz_hwi (arg0);
1422 break;
1423
1424 case POPCOUNT:
1425 arg0 &= GET_MODE_MASK (mode);
1426 val = 0;
1427 while (arg0)
1428 val++, arg0 &= arg0 - 1;
1429 break;
1430
1431 case PARITY:
1432 arg0 &= GET_MODE_MASK (mode);
1433 val = 0;
1434 while (arg0)
1435 val++, arg0 &= arg0 - 1;
1436 val &= 1;
1437 break;
1438
1439 case BSWAP:
1440 {
1441 unsigned int s;
1442
1443 val = 0;
1444 for (s = 0; s < width; s += 8)
1445 {
1446 unsigned int d = width - s - 8;
1447 unsigned HOST_WIDE_INT byte;
1448 byte = (arg0 >> s) & 0xff;
1449 val |= byte << d;
1450 }
1451 }
1452 break;
1453
1454 case TRUNCATE:
1455 val = arg0;
1456 break;
1457
1458 case ZERO_EXTEND:
1459 /* When zero-extending a CONST_INT, we need to know its
1460 original mode. */
1461 gcc_assert (op_mode != VOIDmode);
1462 if (op_width == HOST_BITS_PER_WIDE_INT)
1463 {
1464 /* If we were really extending the mode,
1465 we would have to distinguish between zero-extension
1466 and sign-extension. */
1467 gcc_assert (width == op_width);
1468 val = arg0;
1469 }
1470 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1471 val = arg0 & GET_MODE_MASK (op_mode);
1472 else
1473 return 0;
1474 break;
1475
1476 case SIGN_EXTEND:
1477 if (op_mode == VOIDmode)
1478 op_mode = mode;
1479 op_width = GET_MODE_PRECISION (op_mode);
1480 if (op_width == HOST_BITS_PER_WIDE_INT)
1481 {
1482 /* If we were really extending the mode,
1483 we would have to distinguish between zero-extension
1484 and sign-extension. */
1485 gcc_assert (width == op_width);
1486 val = arg0;
1487 }
1488 else if (op_width < HOST_BITS_PER_WIDE_INT)
1489 {
1490 val = arg0 & GET_MODE_MASK (op_mode);
1491 if (val_signbit_known_set_p (op_mode, val))
1492 val |= ~GET_MODE_MASK (op_mode);
1493 }
1494 else
1495 return 0;
1496 break;
1497
1498 case SQRT:
1499 case FLOAT_EXTEND:
1500 case FLOAT_TRUNCATE:
1501 case SS_TRUNCATE:
1502 case US_TRUNCATE:
1503 case SS_NEG:
1504 case US_NEG:
1505 case SS_ABS:
1506 return 0;
1507
1508 default:
1509 gcc_unreachable ();
1510 }
1511
1512 return gen_int_mode (val, mode);
1513 }
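      /* For instance, the POPCOUNT loop above folds
	 (popcount:SI (const_int 7)) to (const_int 3), and the BSWAP case
	 folds (bswap:SI (const_int 0x12345678)) to (const_int 0x78563412).  */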
1514
1515 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1516 for a DImode operation on a CONST_INT. */
1517 else if (width <= HOST_BITS_PER_DOUBLE_INT
1518 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1519 {
1520 unsigned HOST_WIDE_INT l1, lv;
1521 HOST_WIDE_INT h1, hv;
1522
1523 if (CONST_DOUBLE_AS_INT_P (op))
1524 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1525 else
1526 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1527
1528 switch (code)
1529 {
1530 case NOT:
1531 lv = ~ l1;
1532 hv = ~ h1;
1533 break;
1534
1535 case NEG:
1536 neg_double (l1, h1, &lv, &hv);
1537 break;
1538
1539 case ABS:
1540 if (h1 < 0)
1541 neg_double (l1, h1, &lv, &hv);
1542 else
1543 lv = l1, hv = h1;
1544 break;
1545
1546 case FFS:
1547 hv = 0;
1548 if (l1 != 0)
1549 lv = ffs_hwi (l1);
1550 else if (h1 != 0)
1551 lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1552 else
1553 lv = 0;
1554 break;
1555
1556 case CLZ:
1557 hv = 0;
1558 if (h1 != 0)
1559 lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1560 - HOST_BITS_PER_WIDE_INT;
1561 else if (l1 != 0)
1562 lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1563 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1564 lv = GET_MODE_PRECISION (mode);
1565 break;
1566
1567 case CTZ:
1568 hv = 0;
1569 if (l1 != 0)
1570 lv = ctz_hwi (l1);
1571 else if (h1 != 0)
1572 lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1573 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1574 lv = GET_MODE_PRECISION (mode);
1575 break;
1576
1577 case POPCOUNT:
1578 hv = 0;
1579 lv = 0;
1580 while (l1)
1581 lv++, l1 &= l1 - 1;
1582 while (h1)
1583 lv++, h1 &= h1 - 1;
1584 break;
1585
1586 case PARITY:
1587 hv = 0;
1588 lv = 0;
1589 while (l1)
1590 lv++, l1 &= l1 - 1;
1591 while (h1)
1592 lv++, h1 &= h1 - 1;
1593 lv &= 1;
1594 break;
1595
1596 case BSWAP:
1597 {
1598 unsigned int s;
1599
1600 hv = 0;
1601 lv = 0;
1602 for (s = 0; s < width; s += 8)
1603 {
1604 unsigned int d = width - s - 8;
1605 unsigned HOST_WIDE_INT byte;
1606
1607 if (s < HOST_BITS_PER_WIDE_INT)
1608 byte = (l1 >> s) & 0xff;
1609 else
1610 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1611
1612 if (d < HOST_BITS_PER_WIDE_INT)
1613 lv |= byte << d;
1614 else
1615 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1616 }
1617 }
1618 break;
1619
1620 case TRUNCATE:
1621 /* This is just a change-of-mode, so do nothing. */
1622 lv = l1, hv = h1;
1623 break;
1624
1625 case ZERO_EXTEND:
1626 gcc_assert (op_mode != VOIDmode);
1627
1628 if (op_width > HOST_BITS_PER_WIDE_INT)
1629 return 0;
1630
1631 hv = 0;
1632 lv = l1 & GET_MODE_MASK (op_mode);
1633 break;
1634
1635 case SIGN_EXTEND:
1636 if (op_mode == VOIDmode
1637 || op_width > HOST_BITS_PER_WIDE_INT)
1638 return 0;
1639 else
1640 {
1641 lv = l1 & GET_MODE_MASK (op_mode);
1642 if (val_signbit_known_set_p (op_mode, lv))
1643 lv |= ~GET_MODE_MASK (op_mode);
1644
1645 hv = HWI_SIGN_EXTEND (lv);
1646 }
1647 break;
1648
1649 case SQRT:
1650 return 0;
1651
1652 default:
1653 return 0;
1654 }
1655
1656 return immed_double_const (lv, hv, mode);
1657 }
1658
1659 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1660 && SCALAR_FLOAT_MODE_P (mode)
1661 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1662 {
1663 REAL_VALUE_TYPE d, t;
1664 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1665
1666 switch (code)
1667 {
1668 case SQRT:
1669 if (HONOR_SNANS (mode) && real_isnan (&d))
1670 return 0;
1671 real_sqrt (&t, mode, &d);
1672 d = t;
1673 break;
1674 case ABS:
1675 d = real_value_abs (&d);
1676 break;
1677 case NEG:
1678 d = real_value_negate (&d);
1679 break;
1680 case FLOAT_TRUNCATE:
1681 d = real_value_truncate (mode, d);
1682 break;
1683 case FLOAT_EXTEND:
1684 /* All this does is change the mode, unless changing
1685 mode class. */
1686 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1687 real_convert (&d, mode, &d);
1688 break;
1689 case FIX:
1690 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1691 break;
1692 case NOT:
1693 {
1694 long tmp[4];
1695 int i;
1696
1697 real_to_target (tmp, &d, GET_MODE (op));
1698 for (i = 0; i < 4; i++)
1699 tmp[i] = ~tmp[i];
1700 real_from_target (&d, tmp, mode);
1701 break;
1702 }
1703 default:
1704 gcc_unreachable ();
1705 }
1706 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1707 }
1708
1709 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1710 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1711 && GET_MODE_CLASS (mode) == MODE_INT
1712 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1713 {
1714 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1715 operators are intentionally left unspecified (to ease implementation
1716 by target backends), for consistency, this routine implements the
1717 same semantics for constant folding as used by the middle-end. */
1718
1719 /* This was formerly used only for non-IEEE float.
1720 eggert@twinsun.com says it is safe for IEEE also. */
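      /* For example, (fix:SI (const_double:DF 3.0e10)) folds to the
	 SImode upper bound 0x7fffffff, a value below the lower bound
	 folds to 0x80000000, and a NaN operand folds to const0_rtx.  */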
1721 HOST_WIDE_INT xh, xl, th, tl;
1722 REAL_VALUE_TYPE x, t;
1723 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1724 switch (code)
1725 {
1726 case FIX:
1727 if (REAL_VALUE_ISNAN (x))
1728 return const0_rtx;
1729
1730 /* Test against the signed upper bound. */
1731 if (width > HOST_BITS_PER_WIDE_INT)
1732 {
1733 th = ((unsigned HOST_WIDE_INT) 1
1734 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1735 tl = -1;
1736 }
1737 else
1738 {
1739 th = 0;
1740 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1741 }
1742 real_from_integer (&t, VOIDmode, tl, th, 0);
1743 if (REAL_VALUES_LESS (t, x))
1744 {
1745 xh = th;
1746 xl = tl;
1747 break;
1748 }
1749
1750 /* Test against the signed lower bound. */
1751 if (width > HOST_BITS_PER_WIDE_INT)
1752 {
1753 th = (unsigned HOST_WIDE_INT) (-1)
1754 << (width - HOST_BITS_PER_WIDE_INT - 1);
1755 tl = 0;
1756 }
1757 else
1758 {
1759 th = -1;
1760 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1761 }
1762 real_from_integer (&t, VOIDmode, tl, th, 0);
1763 if (REAL_VALUES_LESS (x, t))
1764 {
1765 xh = th;
1766 xl = tl;
1767 break;
1768 }
1769 REAL_VALUE_TO_INT (&xl, &xh, x);
1770 break;
1771
1772 case UNSIGNED_FIX:
1773 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1774 return const0_rtx;
1775
1776 /* Test against the unsigned upper bound. */
1777 if (width == HOST_BITS_PER_DOUBLE_INT)
1778 {
1779 th = -1;
1780 tl = -1;
1781 }
1782 else if (width >= HOST_BITS_PER_WIDE_INT)
1783 {
1784 th = ((unsigned HOST_WIDE_INT) 1
1785 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1786 tl = -1;
1787 }
1788 else
1789 {
1790 th = 0;
1791 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1792 }
1793 real_from_integer (&t, VOIDmode, tl, th, 1);
1794 if (REAL_VALUES_LESS (t, x))
1795 {
1796 xh = th;
1797 xl = tl;
1798 break;
1799 }
1800
1801 REAL_VALUE_TO_INT (&xl, &xh, x);
1802 break;
1803
1804 default:
1805 gcc_unreachable ();
1806 }
1807 return immed_double_const (xl, xh, mode);
1808 }
1809
1810 return NULL_RTX;
1811 }
1812 \f
1813 /* Subroutine of simplify_binary_operation to simplify a commutative,
1814 associative binary operation CODE with result mode MODE, operating
1815 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1816 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1817 canonicalization is possible. */
1818
1819 static rtx
1820 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1821 rtx op0, rtx op1)
1822 {
1823 rtx tem;
1824
1825 /* Linearize the operator to the left. */
1826 if (GET_CODE (op1) == code)
1827 {
1828 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1829 if (GET_CODE (op0) == code)
1830 {
1831 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1832 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1833 }
1834
1835 /* "a op (b op c)" becomes "(b op c) op a". */
1836 if (! swap_commutative_operands_p (op1, op0))
1837 return simplify_gen_binary (code, mode, op1, op0);
1838
1839 tem = op0;
1840 op0 = op1;
1841 op1 = tem;
1842 }
1843
1844 if (GET_CODE (op0) == code)
1845 {
1846 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1847 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1848 {
1849 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1850 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1851 }
1852
1853 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1854 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1855 if (tem != 0)
1856 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1857
1858 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1859 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1860 if (tem != 0)
1861 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1862 }
1863
1864 return 0;
1865 }
1866
1867
1868 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1869 and OP1. Return 0 if no simplification is possible.
1870
1871 Don't use this for relational operations such as EQ or LT.
1872 Use simplify_relational_operation instead. */
1873 rtx
1874 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1875 rtx op0, rtx op1)
1876 {
1877 rtx trueop0, trueop1;
1878 rtx tem;
1879
1880 /* Relational operations don't work here. We must know the mode
1881 of the operands in order to do the comparison correctly.
1882 Assuming a full word can give incorrect results.
1883 Consider comparing 128 with -128 in QImode. */
1884 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1885 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1886
1887 /* Make sure the constant is second. */
1888 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1889 && swap_commutative_operands_p (op0, op1))
1890 {
1891 tem = op0, op0 = op1, op1 = tem;
1892 }
1893
1894 trueop0 = avoid_constant_pool_reference (op0);
1895 trueop1 = avoid_constant_pool_reference (op1);
1896
1897 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1898 if (tem)
1899 return tem;
1900 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1901 }
1902
1903 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1904 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1905 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1906 actual constants. */
1907
1908 static rtx
1909 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1910 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1911 {
1912 rtx tem, reversed, opleft, opright;
1913 HOST_WIDE_INT val;
1914 unsigned int width = GET_MODE_PRECISION (mode);
1915
1916 /* Even if we can't compute a constant result,
1917 there are some cases worth simplifying. */
1918
1919 switch (code)
1920 {
1921 case PLUS:
1922 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1923 when x is NaN, infinite, or finite and nonzero. They aren't
1924 when x is -0 and the rounding mode is not towards -infinity,
1925 since (-0) + 0 is then 0. */
1926 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1927 return op0;
1928
1929 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1930 transformations are safe even for IEEE. */
1931 if (GET_CODE (op0) == NEG)
1932 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1933 else if (GET_CODE (op1) == NEG)
1934 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1935
1936 /* (~a) + 1 -> -a */
1937 if (INTEGRAL_MODE_P (mode)
1938 && GET_CODE (op0) == NOT
1939 && trueop1 == const1_rtx)
1940 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1941
1942 /* Handle both-operands-constant cases. We can only add
1943 CONST_INTs to constants since the sum of relocatable symbols
1944 can't be handled by most assemblers. Don't add CONST_INT
1945 to CONST_INT since overflow won't be computed properly if wider
1946 than HOST_BITS_PER_WIDE_INT. */
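      /* For example, (plus (symbol_ref "foo") (const_int 4)) is folded by
	 plus_constant into the canonical symbol-plus-offset form
	 (const (plus (symbol_ref "foo") (const_int 4))).  */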
1947
1948 if ((GET_CODE (op0) == CONST
1949 || GET_CODE (op0) == SYMBOL_REF
1950 || GET_CODE (op0) == LABEL_REF)
1951 && CONST_INT_P (op1))
1952 return plus_constant (mode, op0, INTVAL (op1));
1953 else if ((GET_CODE (op1) == CONST
1954 || GET_CODE (op1) == SYMBOL_REF
1955 || GET_CODE (op1) == LABEL_REF)
1956 && CONST_INT_P (op0))
1957 return plus_constant (mode, op1, INTVAL (op0));
1958
1959 /* See if this is something like X * C - X or vice versa or
1960 if the multiplication is written as a shift. If so, we can
1961 distribute and make a new multiply, shift, or maybe just
1962 have X (if C is 2 in the example above). But don't make
1963 something more expensive than we had before. */
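      /* For instance, (plus (mult x (const_int 2)) x) collects into
	 (mult x (const_int 3)); a NEG counts as a coefficient of -1 and
	 an ASHIFT by C as a coefficient of 2**C.  */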
1964
1965 if (SCALAR_INT_MODE_P (mode))
1966 {
1967 double_int coeff0, coeff1;
1968 rtx lhs = op0, rhs = op1;
1969
1970 coeff0 = double_int_one;
1971 coeff1 = double_int_one;
1972
1973 if (GET_CODE (lhs) == NEG)
1974 {
1975 coeff0 = double_int_minus_one;
1976 lhs = XEXP (lhs, 0);
1977 }
1978 else if (GET_CODE (lhs) == MULT
1979 && CONST_INT_P (XEXP (lhs, 1)))
1980 {
1981 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1982 lhs = XEXP (lhs, 0);
1983 }
1984 else if (GET_CODE (lhs) == ASHIFT
1985 && CONST_INT_P (XEXP (lhs, 1))
1986 && INTVAL (XEXP (lhs, 1)) >= 0
1987 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1988 {
1989 coeff0 = double_int_setbit (double_int_zero,
1990 INTVAL (XEXP (lhs, 1)));
1991 lhs = XEXP (lhs, 0);
1992 }
1993
1994 if (GET_CODE (rhs) == NEG)
1995 {
1996 coeff1 = double_int_minus_one;
1997 rhs = XEXP (rhs, 0);
1998 }
1999 else if (GET_CODE (rhs) == MULT
2000 && CONST_INT_P (XEXP (rhs, 1)))
2001 {
2002 coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
2003 rhs = XEXP (rhs, 0);
2004 }
2005 else if (GET_CODE (rhs) == ASHIFT
2006 && CONST_INT_P (XEXP (rhs, 1))
2007 && INTVAL (XEXP (rhs, 1)) >= 0
2008 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2009 {
2010 coeff1 = double_int_setbit (double_int_zero,
2011 INTVAL (XEXP (rhs, 1)));
2012 rhs = XEXP (rhs, 0);
2013 }
2014
2015 if (rtx_equal_p (lhs, rhs))
2016 {
2017 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2018 rtx coeff;
2019 double_int val;
2020 bool speed = optimize_function_for_speed_p (cfun);
2021
2022 val = double_int_add (coeff0, coeff1);
2023 coeff = immed_double_int_const (val, mode);
2024
2025 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2026 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2027 ? tem : 0;
2028 }
2029 }
2030
2031 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2032 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2033 && GET_CODE (op0) == XOR
2034 && (CONST_INT_P (XEXP (op0, 1))
2035 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2036 && mode_signbit_p (mode, op1))
2037 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2038 simplify_gen_binary (XOR, mode, op1,
2039 XEXP (op0, 1)));
2040
2041 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2042 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2043 && GET_CODE (op0) == MULT
2044 && GET_CODE (XEXP (op0, 0)) == NEG)
2045 {
2046 rtx in1, in2;
2047
2048 in1 = XEXP (XEXP (op0, 0), 0);
2049 in2 = XEXP (op0, 1);
2050 return simplify_gen_binary (MINUS, mode, op1,
2051 simplify_gen_binary (MULT, mode,
2052 in1, in2));
2053 }
2054
2055 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2056 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2057 is 1. */
2058 if (COMPARISON_P (op0)
2059 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2060 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2061 && (reversed = reversed_comparison (op0, mode)))
2062 return
2063 simplify_gen_unary (NEG, mode, reversed, mode);
2064
2065 /* If one of the operands is a PLUS or a MINUS, see if we can
2066 simplify this by the associative law.
2067 Don't use the associative law for floating point.
2068 The inaccuracy makes it nonassociative,
2069 and subtle programs can break if operations are associated. */
2070
2071 if (INTEGRAL_MODE_P (mode)
2072 && (plus_minus_operand_p (op0)
2073 || plus_minus_operand_p (op1))
2074 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2075 return tem;
2076
2077 /* Reassociate floating point addition only when the user
2078 specifies associative math operations. */
2079 if (FLOAT_MODE_P (mode)
2080 && flag_associative_math)
2081 {
2082 tem = simplify_associative_operation (code, mode, op0, op1);
2083 if (tem)
2084 return tem;
2085 }
2086 break;
2087
2088 case COMPARE:
2089 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2090 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2091 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2092 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2093 {
2094 rtx xop00 = XEXP (op0, 0);
2095 rtx xop10 = XEXP (op1, 0);
2096
2097 #ifdef HAVE_cc0
2098 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2099 #else
2100 if (REG_P (xop00) && REG_P (xop10)
2101 && GET_MODE (xop00) == GET_MODE (xop10)
2102 && REGNO (xop00) == REGNO (xop10)
2103 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2104 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2105 #endif
2106 return xop00;
2107 }
2108 break;
2109
2110 case MINUS:
2111 /* We can't assume x-x is 0 even with non-IEEE floating point,
2112 but since it is zero except in very strange circumstances, we
2113 will treat it as zero with -ffinite-math-only. */
2114 if (rtx_equal_p (trueop0, trueop1)
2115 && ! side_effects_p (op0)
2116 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2117 return CONST0_RTX (mode);
2118
2119 /* Change subtraction from zero into negation. (0 - x) is the
2120 same as -x when x is NaN, infinite, or finite and nonzero.
2121 But if the mode has signed zeros, and does not round towards
2122 -infinity, then 0 - 0 is 0, not -0. */
2123 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2124 return simplify_gen_unary (NEG, mode, op1, mode);
2125
2126 /* (-1 - a) is ~a. */
2127 if (trueop0 == constm1_rtx)
2128 return simplify_gen_unary (NOT, mode, op1, mode);
2129
2130 /* Subtracting 0 has no effect unless the mode has signed zeros
2131 and supports rounding towards -infinity. In such a case,
2132 0 - 0 is -0. */
2133 if (!(HONOR_SIGNED_ZEROS (mode)
2134 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2135 && trueop1 == CONST0_RTX (mode))
2136 return op0;
2137
2138 /* See if this is something like X * C - X or vice versa or
2139 if the multiplication is written as a shift. If so, we can
2140 distribute and make a new multiply, shift, or maybe just
2141 have X (if C is 2 in the example above). But don't make
2142 something more expensive than we had before. */
2143
2144 if (SCALAR_INT_MODE_P (mode))
2145 {
2146 double_int coeff0, negcoeff1;
2147 rtx lhs = op0, rhs = op1;
2148
2149 coeff0 = double_int_one;
2150 negcoeff1 = double_int_minus_one;
2151
2152 if (GET_CODE (lhs) == NEG)
2153 {
2154 coeff0 = double_int_minus_one;
2155 lhs = XEXP (lhs, 0);
2156 }
2157 else if (GET_CODE (lhs) == MULT
2158 && CONST_INT_P (XEXP (lhs, 1)))
2159 {
2160 coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2161 lhs = XEXP (lhs, 0);
2162 }
2163 else if (GET_CODE (lhs) == ASHIFT
2164 && CONST_INT_P (XEXP (lhs, 1))
2165 && INTVAL (XEXP (lhs, 1)) >= 0
2166 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2167 {
2168 coeff0 = double_int_setbit (double_int_zero,
2169 INTVAL (XEXP (lhs, 1)));
2170 lhs = XEXP (lhs, 0);
2171 }
2172
2173 if (GET_CODE (rhs) == NEG)
2174 {
2175 negcoeff1 = double_int_one;
2176 rhs = XEXP (rhs, 0);
2177 }
2178 else if (GET_CODE (rhs) == MULT
2179 && CONST_INT_P (XEXP (rhs, 1)))
2180 {
2181 negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2182 rhs = XEXP (rhs, 0);
2183 }
2184 else if (GET_CODE (rhs) == ASHIFT
2185 && CONST_INT_P (XEXP (rhs, 1))
2186 && INTVAL (XEXP (rhs, 1)) >= 0
2187 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2188 {
2189 negcoeff1 = double_int_setbit (double_int_zero,
2190 INTVAL (XEXP (rhs, 1)));
2191 negcoeff1 = double_int_neg (negcoeff1);
2192 rhs = XEXP (rhs, 0);
2193 }
2194
2195 if (rtx_equal_p (lhs, rhs))
2196 {
2197 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2198 rtx coeff;
2199 double_int val;
2200 bool speed = optimize_function_for_speed_p (cfun);
2201
2202 val = double_int_add (coeff0, negcoeff1);
2203 coeff = immed_double_int_const (val, mode);
2204
2205 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2206 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2207 ? tem : 0;
2208 }
2209 }
2210
2211 /* (a - (-b)) -> (a + b). True even for IEEE. */
2212 if (GET_CODE (op1) == NEG)
2213 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2214
2215 /* (-x - c) may be simplified as (-c - x). */
2216 if (GET_CODE (op0) == NEG
2217 && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
2218 {
2219 tem = simplify_unary_operation (NEG, mode, op1, mode);
2220 if (tem)
2221 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2222 }
2223
2224 /* Don't let a relocatable value get a negative coeff. */
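      /* For example, (minus x (const_int 12)) is rewritten as
	 (plus x (const_int -12)); the canonical form adds the (possibly
	 negative) constant rather than subtracting it.  */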
2225 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2226 return simplify_gen_binary (PLUS, mode,
2227 op0,
2228 neg_const_int (mode, op1));
2229
2230 /* (x - (x & y)) -> (x & ~y) */
2231 if (GET_CODE (op1) == AND)
2232 {
2233 if (rtx_equal_p (op0, XEXP (op1, 0)))
2234 {
2235 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2236 GET_MODE (XEXP (op1, 1)));
2237 return simplify_gen_binary (AND, mode, op0, tem);
2238 }
2239 if (rtx_equal_p (op0, XEXP (op1, 1)))
2240 {
2241 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2242 GET_MODE (XEXP (op1, 0)));
2243 return simplify_gen_binary (AND, mode, op0, tem);
2244 }
2245 }
2246
2247 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2248 by reversing the comparison code if valid. */
2249 if (STORE_FLAG_VALUE == 1
2250 && trueop0 == const1_rtx
2251 && COMPARISON_P (op1)
2252 && (reversed = reversed_comparison (op1, mode)))
2253 return reversed;
2254
2255 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2256 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2257 && GET_CODE (op1) == MULT
2258 && GET_CODE (XEXP (op1, 0)) == NEG)
2259 {
2260 rtx in1, in2;
2261
2262 in1 = XEXP (XEXP (op1, 0), 0);
2263 in2 = XEXP (op1, 1);
2264 return simplify_gen_binary (PLUS, mode,
2265 simplify_gen_binary (MULT, mode,
2266 in1, in2),
2267 op0);
2268 }
2269
2270 /* Canonicalize (minus (neg A) (mult B C)) to
2271 (minus (mult (neg B) C) A). */
2272 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2273 && GET_CODE (op1) == MULT
2274 && GET_CODE (op0) == NEG)
2275 {
2276 rtx in1, in2;
2277
2278 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2279 in2 = XEXP (op1, 1);
2280 return simplify_gen_binary (MINUS, mode,
2281 simplify_gen_binary (MULT, mode,
2282 in1, in2),
2283 XEXP (op0, 0));
2284 }
2285
2286 /* If one of the operands is a PLUS or a MINUS, see if we can
2287 simplify this by the associative law. This will, for example,
2288 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2289 Don't use the associative law for floating point.
2290 The inaccuracy makes it nonassociative,
2291 and subtle programs can break if operations are associated. */
2292
2293 if (INTEGRAL_MODE_P (mode)
2294 && (plus_minus_operand_p (op0)
2295 || plus_minus_operand_p (op1))
2296 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2297 return tem;
2298 break;
2299
2300 case MULT:
2301 if (trueop1 == constm1_rtx)
2302 return simplify_gen_unary (NEG, mode, op0, mode);
2303
2304 if (GET_CODE (op0) == NEG)
2305 {
2306 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2307 /* If op1 is a MULT as well and simplify_unary_operation
2308 just moved the NEG to the second operand, simplify_gen_binary
2309 below could, through simplify_associative_operation, move
2310 the NEG around again and recurse endlessly. */
2311 if (temp
2312 && GET_CODE (op1) == MULT
2313 && GET_CODE (temp) == MULT
2314 && XEXP (op1, 0) == XEXP (temp, 0)
2315 && GET_CODE (XEXP (temp, 1)) == NEG
2316 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2317 temp = NULL_RTX;
2318 if (temp)
2319 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2320 }
2321 if (GET_CODE (op1) == NEG)
2322 {
2323 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2324 /* If op0 is a MULT as well and simplify_unary_operation
2325 just moved the NEG to the second operand, simplify_gen_binary
2326 below could, through simplify_associative_operation, move
2327 the NEG around again and recurse endlessly. */
2328 if (temp
2329 && GET_CODE (op0) == MULT
2330 && GET_CODE (temp) == MULT
2331 && XEXP (op0, 0) == XEXP (temp, 0)
2332 && GET_CODE (XEXP (temp, 1)) == NEG
2333 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2334 temp = NULL_RTX;
2335 if (temp)
2336 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2337 }
2338
2339 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2340 x is NaN, since x * 0 is then also NaN. Nor is it valid
2341 when the mode has signed zeros, since multiplying a negative
2342 number by 0 will give -0, not 0. */
2343 if (!HONOR_NANS (mode)
2344 && !HONOR_SIGNED_ZEROS (mode)
2345 && trueop1 == CONST0_RTX (mode)
2346 && ! side_effects_p (op0))
2347 return op1;
2348
2349 /* In IEEE floating point, x*1 is not equivalent to x for
2350 signalling NaNs. */
2351 if (!HONOR_SNANS (mode)
2352 && trueop1 == CONST1_RTX (mode))
2353 return op0;
2354
2355 /* Convert multiply by constant power of two into shift unless
2356 we are still generating RTL. This test is a kludge. */
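      /* For example, (mult x (const_int 8)) becomes
	 (ashift x (const_int 3)).  */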
2357 if (CONST_INT_P (trueop1)
2358 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2359 /* If the mode is larger than the host word size, and the
2360 uppermost bit is set, then this isn't a power of two due
2361 to implicit sign extension. */
2362 && (width <= HOST_BITS_PER_WIDE_INT
2363 || val != HOST_BITS_PER_WIDE_INT - 1))
2364 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2365
2366 /* Likewise for multipliers wider than a word. */
2367 if (CONST_DOUBLE_AS_INT_P (trueop1)
2368 && GET_MODE (op0) == mode
2369 && CONST_DOUBLE_LOW (trueop1) == 0
2370 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2371 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2372 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2373 return simplify_gen_binary (ASHIFT, mode, op0,
2374 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2375
2376 /* x*2 is x+x and x*(-1) is -x */
2377 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2378 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2379 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2380 && GET_MODE (op0) == mode)
2381 {
2382 REAL_VALUE_TYPE d;
2383 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2384
2385 if (REAL_VALUES_EQUAL (d, dconst2))
2386 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2387
2388 if (!HONOR_SNANS (mode)
2389 && REAL_VALUES_EQUAL (d, dconstm1))
2390 return simplify_gen_unary (NEG, mode, op0, mode);
2391 }
2392
2393 /* Optimize -x * -x as x * x. */
2394 if (FLOAT_MODE_P (mode)
2395 && GET_CODE (op0) == NEG
2396 && GET_CODE (op1) == NEG
2397 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2398 && !side_effects_p (XEXP (op0, 0)))
2399 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2400
2401 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2402 if (SCALAR_FLOAT_MODE_P (mode)
2403 && GET_CODE (op0) == ABS
2404 && GET_CODE (op1) == ABS
2405 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2406 && !side_effects_p (XEXP (op0, 0)))
2407 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2408
2409 /* Reassociate multiplication, but for floating point MULTs
2410 only when the user specifies unsafe math optimizations. */
2411 if (! FLOAT_MODE_P (mode)
2412 || flag_unsafe_math_optimizations)
2413 {
2414 tem = simplify_associative_operation (code, mode, op0, op1);
2415 if (tem)
2416 return tem;
2417 }
2418 break;
2419
2420 case IOR:
2421 if (trueop1 == CONST0_RTX (mode))
2422 return op0;
2423 if (INTEGRAL_MODE_P (mode)
2424 && trueop1 == CONSTM1_RTX (mode)
2425 && !side_effects_p (op0))
2426 return op1;
2427 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2428 return op0;
2429 /* A | (~A) -> -1 */
2430 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2431 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2432 && ! side_effects_p (op0)
2433 && SCALAR_INT_MODE_P (mode))
2434 return constm1_rtx;
2435
2436 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2437 if (CONST_INT_P (op1)
2438 && HWI_COMPUTABLE_MODE_P (mode)
2439 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2440 && !side_effects_p (op0))
2441 return op1;
2442
2443 /* Canonicalize (X & C1) | C2. */
2444 if (GET_CODE (op0) == AND
2445 && CONST_INT_P (trueop1)
2446 && CONST_INT_P (XEXP (op0, 1)))
2447 {
2448 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2449 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2450 HOST_WIDE_INT c2 = INTVAL (trueop1);
2451
2452 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2453 if ((c1 & c2) == c1
2454 && !side_effects_p (XEXP (op0, 0)))
2455 return trueop1;
2456
2457 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2458 if (((c1|c2) & mask) == mask)
2459 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2460
2461 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2462 if (((c1 & ~c2) & mask) != (c1 & mask))
2463 {
2464 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2465 gen_int_mode (c1 & ~c2, mode));
2466 return simplify_gen_binary (IOR, mode, tem, op1);
2467 }
2468 }
2469
2470 /* Convert (A & B) | A to A. */
2471 if (GET_CODE (op0) == AND
2472 && (rtx_equal_p (XEXP (op0, 0), op1)
2473 || rtx_equal_p (XEXP (op0, 1), op1))
2474 && ! side_effects_p (XEXP (op0, 0))
2475 && ! side_effects_p (XEXP (op0, 1)))
2476 return op1;
2477
2478 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2479 mode size to (rotate A CX). */
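      /* In SImode, for example, (ior (ashift x (const_int 24))
	 (lshiftrt x (const_int 8))) is recognized as
	 (rotate x (const_int 24)).  */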
2480
2481 if (GET_CODE (op1) == ASHIFT
2482 || GET_CODE (op1) == SUBREG)
2483 {
2484 opleft = op1;
2485 opright = op0;
2486 }
2487 else
2488 {
2489 opright = op1;
2490 opleft = op0;
2491 }
2492
2493 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2494 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2495 && CONST_INT_P (XEXP (opleft, 1))
2496 && CONST_INT_P (XEXP (opright, 1))
2497 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2498 == GET_MODE_PRECISION (mode)))
2499 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2500
2501 /* Same, but for ashift that has been "simplified" to a wider mode
2502 by simplify_shift_const. */
2503
2504 if (GET_CODE (opleft) == SUBREG
2505 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2506 && GET_CODE (opright) == LSHIFTRT
2507 && GET_CODE (XEXP (opright, 0)) == SUBREG
2508 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2509 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2510 && (GET_MODE_SIZE (GET_MODE (opleft))
2511 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2512 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2513 SUBREG_REG (XEXP (opright, 0)))
2514 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2515 && CONST_INT_P (XEXP (opright, 1))
2516 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2517 == GET_MODE_PRECISION (mode)))
2518 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2519 XEXP (SUBREG_REG (opleft), 1));
2520
2521 /* If we have (ior (and X C1) C2), simplify this by making
2522 C1 as small as possible if C1 actually changes. */
2523 if (CONST_INT_P (op1)
2524 && (HWI_COMPUTABLE_MODE_P (mode)
2525 || INTVAL (op1) > 0)
2526 && GET_CODE (op0) == AND
2527 && CONST_INT_P (XEXP (op0, 1))
2528 && CONST_INT_P (op1)
2529 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2530 return simplify_gen_binary (IOR, mode,
2531 simplify_gen_binary
2532 (AND, mode, XEXP (op0, 0),
2533 GEN_INT (UINTVAL (XEXP (op0, 1))
2534 & ~UINTVAL (op1))),
2535 op1);
2536
2537 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2538 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2539 the PLUS does not affect any of the bits in OP1: then we can do
2540 the IOR as a PLUS and we can associate. This is valid if OP1
2541 can be safely shifted left C bits. */
2542 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2543 && GET_CODE (XEXP (op0, 0)) == PLUS
2544 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2545 && CONST_INT_P (XEXP (op0, 1))
2546 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2547 {
2548 int count = INTVAL (XEXP (op0, 1));
2549 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2550
2551 if (mask >> count == INTVAL (trueop1)
2552 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2553 return simplify_gen_binary (ASHIFTRT, mode,
2554 plus_constant (mode, XEXP (op0, 0),
2555 mask),
2556 XEXP (op0, 1));
2557 }
2558
2559 tem = simplify_associative_operation (code, mode, op0, op1);
2560 if (tem)
2561 return tem;
2562 break;
2563
2564 case XOR:
2565 if (trueop1 == CONST0_RTX (mode))
2566 return op0;
2567 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2568 return simplify_gen_unary (NOT, mode, op0, mode);
2569 if (rtx_equal_p (trueop0, trueop1)
2570 && ! side_effects_p (op0)
2571 && GET_MODE_CLASS (mode) != MODE_CC)
2572 return CONST0_RTX (mode);
2573
2574 /* Canonicalize XOR of the most significant bit to PLUS. */
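      /* Adding the sign bit and XORing it both just flip that bit, so
	 e.g. (xor:QI x (const_int -128)) becomes
	 (plus:QI x (const_int -128)).  */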
2575 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2576 && mode_signbit_p (mode, op1))
2577 return simplify_gen_binary (PLUS, mode, op0, op1);
2578 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2579 if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
2580 && GET_CODE (op0) == PLUS
2581 && (CONST_INT_P (XEXP (op0, 1))
2582 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
2583 && mode_signbit_p (mode, XEXP (op0, 1)))
2584 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2585 simplify_gen_binary (XOR, mode, op1,
2586 XEXP (op0, 1)));
2587
2588 /* If we are XORing two things that have no bits in common,
2589 convert them into an IOR. This helps to detect rotation encoded
2590 using those methods and possibly other simplifications. */
2591
2592 if (HWI_COMPUTABLE_MODE_P (mode)
2593 && (nonzero_bits (op0, mode)
2594 & nonzero_bits (op1, mode)) == 0)
2595 return (simplify_gen_binary (IOR, mode, op0, op1));
2596
2597 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2598 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2599 (NOT y). */
2600 {
2601 int num_negated = 0;
2602
2603 if (GET_CODE (op0) == NOT)
2604 num_negated++, op0 = XEXP (op0, 0);
2605 if (GET_CODE (op1) == NOT)
2606 num_negated++, op1 = XEXP (op1, 0);
2607
2608 if (num_negated == 2)
2609 return simplify_gen_binary (XOR, mode, op0, op1);
2610 else if (num_negated == 1)
2611 return simplify_gen_unary (NOT, mode,
2612 simplify_gen_binary (XOR, mode, op0, op1),
2613 mode);
2614 }
2615
2616 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2617 correspond to a machine insn or result in further simplifications
2618 if B is a constant. */
2619
2620 if (GET_CODE (op0) == AND
2621 && rtx_equal_p (XEXP (op0, 1), op1)
2622 && ! side_effects_p (op1))
2623 return simplify_gen_binary (AND, mode,
2624 simplify_gen_unary (NOT, mode,
2625 XEXP (op0, 0), mode),
2626 op1);
2627
2628 else if (GET_CODE (op0) == AND
2629 && rtx_equal_p (XEXP (op0, 0), op1)
2630 && ! side_effects_p (op1))
2631 return simplify_gen_binary (AND, mode,
2632 simplify_gen_unary (NOT, mode,
2633 XEXP (op0, 1), mode),
2634 op1);
2635
2636 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2637 we can transform like this:
2638 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2639 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2640 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2641 Attempt a few simplifications when B and C are both constants. */
2642 if (GET_CODE (op0) == AND
2643 && CONST_INT_P (op1)
2644 && CONST_INT_P (XEXP (op0, 1)))
2645 {
2646 rtx a = XEXP (op0, 0);
2647 rtx b = XEXP (op0, 1);
2648 rtx c = op1;
2649 HOST_WIDE_INT bval = INTVAL (b);
2650 HOST_WIDE_INT cval = INTVAL (c);
2651
2652 rtx na_c
2653 = simplify_binary_operation (AND, mode,
2654 simplify_gen_unary (NOT, mode, a, mode),
2655 c);
2656 if ((~cval & bval) == 0)
2657 {
2658 /* Try to simplify ~A&C | ~B&C. */
2659 if (na_c != NULL_RTX)
2660 return simplify_gen_binary (IOR, mode, na_c,
2661 GEN_INT (~bval & cval));
2662 }
2663 else
2664 {
2665 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2666 if (na_c == const0_rtx)
2667 {
2668 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2669 GEN_INT (~cval & bval));
2670 return simplify_gen_binary (IOR, mode, a_nc_b,
2671 GEN_INT (~bval & cval));
2672 }
2673 }
2674 }
2675
2676 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2677 comparison if STORE_FLAG_VALUE is 1. */
2678 if (STORE_FLAG_VALUE == 1
2679 && trueop1 == const1_rtx
2680 && COMPARISON_P (op0)
2681 && (reversed = reversed_comparison (op0, mode)))
2682 return reversed;
2683
2684 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2685 is (lt foo (const_int 0)), so we can perform the above
2686 simplification if STORE_FLAG_VALUE is 1. */
2687
2688 if (STORE_FLAG_VALUE == 1
2689 && trueop1 == const1_rtx
2690 && GET_CODE (op0) == LSHIFTRT
2691 && CONST_INT_P (XEXP (op0, 1))
2692 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2693 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2694
2695 /* (xor (comparison foo bar) (const_int sign-bit)) is likewise the
2696 reversed comparison when STORE_FLAG_VALUE is the sign bit. */
2697 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2698 && trueop1 == const_true_rtx
2699 && COMPARISON_P (op0)
2700 && (reversed = reversed_comparison (op0, mode)))
2701 return reversed;
2702
2703 tem = simplify_associative_operation (code, mode, op0, op1);
2704 if (tem)
2705 return tem;
2706 break;
2707
2708 case AND:
2709 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2710 return trueop1;
2711 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2712 return op0;
2713 if (HWI_COMPUTABLE_MODE_P (mode))
2714 {
2715 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2716 HOST_WIDE_INT nzop1;
2717 if (CONST_INT_P (trueop1))
2718 {
2719 HOST_WIDE_INT val1 = INTVAL (trueop1);
2720 /* If we are turning off bits already known off in OP0, we need
2721 not do an AND. */
2722 if ((nzop0 & ~val1) == 0)
2723 return op0;
2724 }
2725 nzop1 = nonzero_bits (trueop1, mode);
2726 /* If we are clearing all the nonzero bits, the result is zero. */
2727 if ((nzop1 & nzop0) == 0
2728 && !side_effects_p (op0) && !side_effects_p (op1))
2729 return CONST0_RTX (mode);
2730 }
2731 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2732 && GET_MODE_CLASS (mode) != MODE_CC)
2733 return op0;
2734 /* A & (~A) -> 0 */
2735 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2736 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2737 && ! side_effects_p (op0)
2738 && GET_MODE_CLASS (mode) != MODE_CC)
2739 return CONST0_RTX (mode);
2740
2741 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2742 there are no nonzero bits of C outside of X's mode. */
2743 if ((GET_CODE (op0) == SIGN_EXTEND
2744 || GET_CODE (op0) == ZERO_EXTEND)
2745 && CONST_INT_P (trueop1)
2746 && HWI_COMPUTABLE_MODE_P (mode)
2747 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2748 & UINTVAL (trueop1)) == 0)
2749 {
2750 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2751 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2752 gen_int_mode (INTVAL (trueop1),
2753 imode));
2754 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2755 }
2756
2757 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2758 we might be able to further simplify the AND with X and potentially
2759 remove the truncation altogether. */
2760 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2761 {
2762 rtx x = XEXP (op0, 0);
2763 enum machine_mode xmode = GET_MODE (x);
2764 tem = simplify_gen_binary (AND, xmode, x,
2765 gen_int_mode (INTVAL (trueop1), xmode));
2766 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2767 }
2768
2769 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2770 if (GET_CODE (op0) == IOR
2771 && CONST_INT_P (trueop1)
2772 && CONST_INT_P (XEXP (op0, 1)))
2773 {
2774 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2775 return simplify_gen_binary (IOR, mode,
2776 simplify_gen_binary (AND, mode,
2777 XEXP (op0, 0), op1),
2778 gen_int_mode (tmp, mode));
2779 }
2780
2781 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2782 insn (and may simplify more). */
2783 if (GET_CODE (op0) == XOR
2784 && rtx_equal_p (XEXP (op0, 0), op1)
2785 && ! side_effects_p (op1))
2786 return simplify_gen_binary (AND, mode,
2787 simplify_gen_unary (NOT, mode,
2788 XEXP (op0, 1), mode),
2789 op1);
2790
2791 if (GET_CODE (op0) == XOR
2792 && rtx_equal_p (XEXP (op0, 1), op1)
2793 && ! side_effects_p (op1))
2794 return simplify_gen_binary (AND, mode,
2795 simplify_gen_unary (NOT, mode,
2796 XEXP (op0, 0), mode),
2797 op1);
2798
2799 /* Similarly for (~(A ^ B)) & A. */
2800 if (GET_CODE (op0) == NOT
2801 && GET_CODE (XEXP (op0, 0)) == XOR
2802 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2803 && ! side_effects_p (op1))
2804 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2805
2806 if (GET_CODE (op0) == NOT
2807 && GET_CODE (XEXP (op0, 0)) == XOR
2808 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2809 && ! side_effects_p (op1))
2810 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2811
2812 /* Convert (A | B) & A to A. */
2813 if (GET_CODE (op0) == IOR
2814 && (rtx_equal_p (XEXP (op0, 0), op1)
2815 || rtx_equal_p (XEXP (op0, 1), op1))
2816 && ! side_effects_p (XEXP (op0, 0))
2817 && ! side_effects_p (XEXP (op0, 1)))
2818 return op1;
2819
2820 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2821 ((A & N) + B) & M -> (A + B) & M
2822 Similarly if (N & M) == 0,
2823 ((A | N) + B) & M -> (A + B) & M
2824 and for - instead of + and/or ^ instead of |.
2825 Also, if (N & M) == 0, then
2826 (A +- N) & M -> A & M. */
2827 if (CONST_INT_P (trueop1)
2828 && HWI_COMPUTABLE_MODE_P (mode)
2829 && ~UINTVAL (trueop1)
2830 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2831 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2832 {
2833 rtx pmop[2];
2834 int which;
2835
2836 pmop[0] = XEXP (op0, 0);
2837 pmop[1] = XEXP (op0, 1);
2838
2839 if (CONST_INT_P (pmop[1])
2840 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2841 return simplify_gen_binary (AND, mode, pmop[0], op1);
2842
2843 for (which = 0; which < 2; which++)
2844 {
2845 tem = pmop[which];
2846 switch (GET_CODE (tem))
2847 {
2848 case AND:
2849 if (CONST_INT_P (XEXP (tem, 1))
2850 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2851 == UINTVAL (trueop1))
2852 pmop[which] = XEXP (tem, 0);
2853 break;
2854 case IOR:
2855 case XOR:
2856 if (CONST_INT_P (XEXP (tem, 1))
2857 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2858 pmop[which] = XEXP (tem, 0);
2859 break;
2860 default:
2861 break;
2862 }
2863 }
2864
2865 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2866 {
2867 tem = simplify_gen_binary (GET_CODE (op0), mode,
2868 pmop[0], pmop[1]);
2869 return simplify_gen_binary (code, mode, tem, op1);
2870 }
2871 }
2872
2873 /* (and X (ior (not X) Y)) -> (and X Y) */
2874 if (GET_CODE (op1) == IOR
2875 && GET_CODE (XEXP (op1, 0)) == NOT
2876 && op0 == XEXP (XEXP (op1, 0), 0))
2877 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2878
2879 /* (and (ior (not X) Y) X) -> (and X Y) */
2880 if (GET_CODE (op0) == IOR
2881 && GET_CODE (XEXP (op0, 0)) == NOT
2882 && op1 == XEXP (XEXP (op0, 0), 0))
2883 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2884
2885 tem = simplify_associative_operation (code, mode, op0, op1);
2886 if (tem)
2887 return tem;
2888 break;
2889
2890 case UDIV:
2891 /* 0/x is 0 (or x&0 if x has side-effects). */
2892 if (trueop0 == CONST0_RTX (mode))
2893 {
2894 if (side_effects_p (op1))
2895 return simplify_gen_binary (AND, mode, op1, trueop0);
2896 return trueop0;
2897 }
2898 /* x/1 is x. */
2899 if (trueop1 == CONST1_RTX (mode))
2900 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2901 /* Convert divide by power of two into shift. */
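      /* For example, (udiv x (const_int 16)) becomes
	 (lshiftrt x (const_int 4)).  */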
2902 if (CONST_INT_P (trueop1)
2903 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2904 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2905 break;
2906
2907 case DIV:
2908 /* Handle floating point and integers separately. */
2909 if (SCALAR_FLOAT_MODE_P (mode))
2910 {
2911 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2912 safe for modes with NaNs, since 0.0 / 0.0 will then be
2913 NaN rather than 0.0. Nor is it safe for modes with signed
2914 zeros, since dividing 0 by a negative number gives -0.0 */
2915 if (trueop0 == CONST0_RTX (mode)
2916 && !HONOR_NANS (mode)
2917 && !HONOR_SIGNED_ZEROS (mode)
2918 && ! side_effects_p (op1))
2919 return op0;
2920 /* x/1.0 is x. */
2921 if (trueop1 == CONST1_RTX (mode)
2922 && !HONOR_SNANS (mode))
2923 return op0;
2924
2925 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2926 && trueop1 != CONST0_RTX (mode))
2927 {
2928 REAL_VALUE_TYPE d;
2929 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2930
2931 /* x/-1.0 is -x. */
2932 if (REAL_VALUES_EQUAL (d, dconstm1)
2933 && !HONOR_SNANS (mode))
2934 return simplify_gen_unary (NEG, mode, op0, mode);
2935
2936 /* Change FP division by a constant into multiplication.
2937 Only do this with -freciprocal-math. */
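	      /* For example, x / 4.0 becomes x * 0.25; for divisors whose
		 reciprocal is not exact, -freciprocal-math accepts the
		 rounding difference.  */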
2938 if (flag_reciprocal_math
2939 && !REAL_VALUES_EQUAL (d, dconst0))
2940 {
2941 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2942 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2943 return simplify_gen_binary (MULT, mode, op0, tem);
2944 }
2945 }
2946 }
2947 else if (SCALAR_INT_MODE_P (mode))
2948 {
2949 /* 0/x is 0 (or x&0 if x has side-effects). */
2950 if (trueop0 == CONST0_RTX (mode)
2951 && !cfun->can_throw_non_call_exceptions)
2952 {
2953 if (side_effects_p (op1))
2954 return simplify_gen_binary (AND, mode, op1, trueop0);
2955 return trueop0;
2956 }
2957 /* x/1 is x. */
2958 if (trueop1 == CONST1_RTX (mode))
2959 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2960 /* x/-1 is -x. */
2961 if (trueop1 == constm1_rtx)
2962 {
2963 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2964 return simplify_gen_unary (NEG, mode, x, mode);
2965 }
2966 }
2967 break;
2968
2969 case UMOD:
2970 /* 0%x is 0 (or x&0 if x has side-effects). */
2971 if (trueop0 == CONST0_RTX (mode))
2972 {
2973 if (side_effects_p (op1))
2974 return simplify_gen_binary (AND, mode, op1, trueop0);
2975 return trueop0;
2976 }
2977 /* x%1 is 0 (or x&0 if x has side-effects). */
2978 if (trueop1 == CONST1_RTX (mode))
2979 {
2980 if (side_effects_p (op0))
2981 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2982 return CONST0_RTX (mode);
2983 }
2984 /* Implement modulus by power of two as AND. */
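      /* For example, (umod x (const_int 8)) becomes
	 (and x (const_int 7)).  */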
2985 if (CONST_INT_P (trueop1)
2986 && exact_log2 (UINTVAL (trueop1)) > 0)
2987 return simplify_gen_binary (AND, mode, op0,
2988 GEN_INT (INTVAL (op1) - 1));
2989 break;
2990
2991 case MOD:
2992 /* 0%x is 0 (or x&0 if x has side-effects). */
2993 if (trueop0 == CONST0_RTX (mode))
2994 {
2995 if (side_effects_p (op1))
2996 return simplify_gen_binary (AND, mode, op1, trueop0);
2997 return trueop0;
2998 }
2999 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3000 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3001 {
3002 if (side_effects_p (op0))
3003 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3004 return CONST0_RTX (mode);
3005 }
3006 break;
3007
3008 case ROTATERT:
3009 case ROTATE:
3010 case ASHIFTRT:
3011 if (trueop1 == CONST0_RTX (mode))
3012 return op0;
3013 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3014 return op0;
3015 /* Rotating ~0 always results in ~0. */
3016 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3017 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3018 && ! side_effects_p (op1))
3019 return op0;
3020 canonicalize_shift:
3021 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3022 {
3023 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3024 if (val != INTVAL (op1))
3025 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3026 }
3027 break;
3028
3029 case ASHIFT:
3030 case SS_ASHIFT:
3031 case US_ASHIFT:
3032 if (trueop1 == CONST0_RTX (mode))
3033 return op0;
3034 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3035 return op0;
3036 goto canonicalize_shift;
3037
3038 case LSHIFTRT:
3039 if (trueop1 == CONST0_RTX (mode))
3040 return op0;
3041 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3042 return op0;
3043 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3044 if (GET_CODE (op0) == CLZ
3045 && CONST_INT_P (trueop1)
3046 && STORE_FLAG_VALUE == 1
3047 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3048 {
3049 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3050 unsigned HOST_WIDE_INT zero_val = 0;
3051
3052 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3053 && zero_val == GET_MODE_PRECISION (imode)
3054 && INTVAL (trueop1) == exact_log2 (zero_val))
3055 return simplify_gen_relational (EQ, mode, imode,
3056 XEXP (op0, 0), const0_rtx);
3057 }
3058 goto canonicalize_shift;
3059
3060 case SMIN:
3061 if (width <= HOST_BITS_PER_WIDE_INT
3062 && mode_signbit_p (mode, trueop1)
3063 && ! side_effects_p (op0))
3064 return op1;
3065 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3066 return op0;
3067 tem = simplify_associative_operation (code, mode, op0, op1);
3068 if (tem)
3069 return tem;
3070 break;
3071
3072 case SMAX:
3073 if (width <= HOST_BITS_PER_WIDE_INT
3074 && CONST_INT_P (trueop1)
3075 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3076 && ! side_effects_p (op0))
3077 return op1;
3078 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3079 return op0;
3080 tem = simplify_associative_operation (code, mode, op0, op1);
3081 if (tem)
3082 return tem;
3083 break;
3084
3085 case UMIN:
3086 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3087 return op1;
3088 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3089 return op0;
3090 tem = simplify_associative_operation (code, mode, op0, op1);
3091 if (tem)
3092 return tem;
3093 break;
3094
3095 case UMAX:
3096 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3097 return op1;
3098 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3099 return op0;
3100 tem = simplify_associative_operation (code, mode, op0, op1);
3101 if (tem)
3102 return tem;
3103 break;
3104
3105 case SS_PLUS:
3106 case US_PLUS:
3107 case SS_MINUS:
3108 case US_MINUS:
3109 case SS_MULT:
3110 case US_MULT:
3111 case SS_DIV:
3112 case US_DIV:
3113 /* ??? There are simplifications that can be done. */
3114 return 0;
3115
3116 case VEC_SELECT:
3117 if (!VECTOR_MODE_P (mode))
3118 {
3119 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3120 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3121 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3122 gcc_assert (XVECLEN (trueop1, 0) == 1);
3123 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3124
3125 if (GET_CODE (trueop0) == CONST_VECTOR)
3126 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3127 (trueop1, 0, 0)));
3128
3129 /* Extract a scalar element from a nested VEC_SELECT expression
3130 (with optional nested VEC_CONCAT expression). Some targets
3131 (i386) extract scalar element from a vector using chain of
3132 nested VEC_SELECT expressions. When the input operand is a
3133 memory operand, this operation can be simplified to a simple
3134 scalar load from an offset memory address. */
3135 if (GET_CODE (trueop0) == VEC_SELECT)
3136 {
3137 rtx op0 = XEXP (trueop0, 0);
3138 rtx op1 = XEXP (trueop0, 1);
3139
3140 enum machine_mode opmode = GET_MODE (op0);
3141 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3142 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3143
3144 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3145 int elem;
3146
3147 rtvec vec;
3148 rtx tmp_op, tmp;
3149
3150 gcc_assert (GET_CODE (op1) == PARALLEL);
3151 gcc_assert (i < n_elts);
3152
3153 /* Select the element pointed to by the nested selector. */
3154 elem = INTVAL (XVECEXP (op1, 0, i));
3155
3156 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3157 if (GET_CODE (op0) == VEC_CONCAT)
3158 {
3159 rtx op00 = XEXP (op0, 0);
3160 rtx op01 = XEXP (op0, 1);
3161
3162 enum machine_mode mode00, mode01;
3163 int n_elts00, n_elts01;
3164
3165 mode00 = GET_MODE (op00);
3166 mode01 = GET_MODE (op01);
3167
3168 /* Find out number of elements of each operand. */
3169 if (VECTOR_MODE_P (mode00))
3170 {
3171 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3172 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3173 }
3174 else
3175 n_elts00 = 1;
3176
3177 if (VECTOR_MODE_P (mode01))
3178 {
3179 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3180 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3181 }
3182 else
3183 n_elts01 = 1;
3184
3185 gcc_assert (n_elts == n_elts00 + n_elts01);
3186
3187 /* Select correct operand of VEC_CONCAT
3188 and adjust selector. */
3189 if (elem < n_elts01)
3190 tmp_op = op00;
3191 else
3192 {
3193 tmp_op = op01;
3194 elem -= n_elts00;
3195 }
3196 }
3197 else
3198 tmp_op = op0;
3199
3200 vec = rtvec_alloc (1);
3201 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3202
3203 tmp = gen_rtx_fmt_ee (code, mode,
3204 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3205 return tmp;
3206 }
3207 if (GET_CODE (trueop0) == VEC_DUPLICATE
3208 && GET_MODE (XEXP (trueop0, 0)) == mode)
3209 return XEXP (trueop0, 0);
3210 }
3211 else
3212 {
3213 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3214 gcc_assert (GET_MODE_INNER (mode)
3215 == GET_MODE_INNER (GET_MODE (trueop0)));
3216 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3217
3218 if (GET_CODE (trueop0) == CONST_VECTOR)
3219 {
3220 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3221 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3222 rtvec v = rtvec_alloc (n_elts);
3223 unsigned int i;
3224
3225 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3226 for (i = 0; i < n_elts; i++)
3227 {
3228 rtx x = XVECEXP (trueop1, 0, i);
3229
3230 gcc_assert (CONST_INT_P (x));
3231 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3232 INTVAL (x));
3233 }
3234
3235 return gen_rtx_CONST_VECTOR (mode, v);
3236 }
3237
3238 /* If we build {a,b} then permute it, build the result directly. */
3239 if (XVECLEN (trueop1, 0) == 2
3240 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3241 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3242 && GET_CODE (trueop0) == VEC_CONCAT
3243 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3244 && GET_MODE (XEXP (trueop0, 0)) == mode
3245 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3246 && GET_MODE (XEXP (trueop0, 1)) == mode)
3247 {
3248 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3249 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3250 rtx subop0, subop1;
3251
3252 gcc_assert (i0 < 4 && i1 < 4);
3253 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3254 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3255
3256 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3257 }
3258
3259 if (XVECLEN (trueop1, 0) == 2
3260 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3261 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3262 && GET_CODE (trueop0) == VEC_CONCAT
3263 && GET_MODE (trueop0) == mode)
3264 {
3265 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3266 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3267 rtx subop0, subop1;
3268
3269 gcc_assert (i0 < 2 && i1 < 2);
3270 subop0 = XEXP (trueop0, i0);
3271 subop1 = XEXP (trueop0, i1);
3272
3273 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3274 }
3275 }
3276
3277 if (XVECLEN (trueop1, 0) == 1
3278 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3279 && GET_CODE (trueop0) == VEC_CONCAT)
3280 {
3281 rtx vec = trueop0;
3282 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3283
3284 /* Try to find the element in the VEC_CONCAT. */
3285 while (GET_MODE (vec) != mode
3286 && GET_CODE (vec) == VEC_CONCAT)
3287 {
3288 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3289 if (offset < vec_size)
3290 vec = XEXP (vec, 0);
3291 else
3292 {
3293 offset -= vec_size;
3294 vec = XEXP (vec, 1);
3295 }
3296 vec = avoid_constant_pool_reference (vec);
3297 }
3298
3299 if (GET_MODE (vec) == mode)
3300 return vec;
3301 }
3302
3303 return 0;
3304 case VEC_CONCAT:
3305 {
3306 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3307 ? GET_MODE (trueop0)
3308 : GET_MODE_INNER (mode));
3309 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3310 ? GET_MODE (trueop1)
3311 : GET_MODE_INNER (mode));
3312
3313 gcc_assert (VECTOR_MODE_P (mode));
3314 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3315 == GET_MODE_SIZE (mode));
3316
3317 if (VECTOR_MODE_P (op0_mode))
3318 gcc_assert (GET_MODE_INNER (mode)
3319 == GET_MODE_INNER (op0_mode));
3320 else
3321 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3322
3323 if (VECTOR_MODE_P (op1_mode))
3324 gcc_assert (GET_MODE_INNER (mode)
3325 == GET_MODE_INNER (op1_mode));
3326 else
3327 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3328
3329 if ((GET_CODE (trueop0) == CONST_VECTOR
3330 || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
3331 && (GET_CODE (trueop1) == CONST_VECTOR
3332 || CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
3333 {
3334 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3335 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3336 rtvec v = rtvec_alloc (n_elts);
3337 unsigned int i;
3338 unsigned in_n_elts = 1;
3339
3340 if (VECTOR_MODE_P (op0_mode))
3341 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3342 for (i = 0; i < n_elts; i++)
3343 {
3344 if (i < in_n_elts)
3345 {
3346 if (!VECTOR_MODE_P (op0_mode))
3347 RTVEC_ELT (v, i) = trueop0;
3348 else
3349 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3350 }
3351 else
3352 {
3353 if (!VECTOR_MODE_P (op1_mode))
3354 RTVEC_ELT (v, i) = trueop1;
3355 else
3356 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3357 i - in_n_elts);
3358 }
3359 }
3360
3361 return gen_rtx_CONST_VECTOR (mode, v);
3362 }
3363 }
3364 return 0;
3365
3366 default:
3367 gcc_unreachable ();
3368 }
3369
3370 return 0;
3371 }
3372
3373 rtx
3374 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3375 rtx op0, rtx op1)
3376 {
3377 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3378 HOST_WIDE_INT val;
3379 unsigned int width = GET_MODE_PRECISION (mode);
3380
3381 if (VECTOR_MODE_P (mode)
3382 && code != VEC_CONCAT
3383 && GET_CODE (op0) == CONST_VECTOR
3384 && GET_CODE (op1) == CONST_VECTOR)
3385 {
3386 unsigned n_elts = GET_MODE_NUNITS (mode);
3387 enum machine_mode op0mode = GET_MODE (op0);
3388 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3389 enum machine_mode op1mode = GET_MODE (op1);
3390 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3391 rtvec v = rtvec_alloc (n_elts);
3392 unsigned int i;
3393
3394 gcc_assert (op0_n_elts == n_elts);
3395 gcc_assert (op1_n_elts == n_elts);
3396 for (i = 0; i < n_elts; i++)
3397 {
3398 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3399 CONST_VECTOR_ELT (op0, i),
3400 CONST_VECTOR_ELT (op1, i));
3401 if (!x)
3402 return 0;
3403 RTVEC_ELT (v, i) = x;
3404 }
3405
3406 return gen_rtx_CONST_VECTOR (mode, v);
3407 }
3408
3409 if (VECTOR_MODE_P (mode)
3410 && code == VEC_CONCAT
3411 && (CONST_INT_P (op0)
3412 || GET_CODE (op0) == CONST_FIXED
3413 || CONST_DOUBLE_P (op0))
3414 && (CONST_INT_P (op1)
3415 || CONST_DOUBLE_P (op1)
3416 || GET_CODE (op1) == CONST_FIXED))
3417 {
3418 unsigned n_elts = GET_MODE_NUNITS (mode);
3419 rtvec v = rtvec_alloc (n_elts);
3420
3421 gcc_assert (n_elts >= 2);
3422 if (n_elts == 2)
3423 {
3424 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3425 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3426
3427 RTVEC_ELT (v, 0) = op0;
3428 RTVEC_ELT (v, 1) = op1;
3429 }
3430 else
3431 {
3432 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3433 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3434 unsigned i;
3435
3436 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3437 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3438 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3439
3440 for (i = 0; i < op0_n_elts; ++i)
3441 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3442 for (i = 0; i < op1_n_elts; ++i)
3443 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3444 }
3445
3446 return gen_rtx_CONST_VECTOR (mode, v);
3447 }
3448
3449 if (SCALAR_FLOAT_MODE_P (mode)
3450 && CONST_DOUBLE_AS_FLOAT_P (op0)
3451 && CONST_DOUBLE_AS_FLOAT_P (op1)
3452 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3453 {
3454 if (code == AND
3455 || code == IOR
3456 || code == XOR)
3457 {
3458 long tmp0[4];
3459 long tmp1[4];
3460 REAL_VALUE_TYPE r;
3461 int i;
3462
3463 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3464 GET_MODE (op0));
3465 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3466 GET_MODE (op1));
3467 for (i = 0; i < 4; i++)
3468 {
3469 switch (code)
3470 {
3471 case AND:
3472 tmp0[i] &= tmp1[i];
3473 break;
3474 case IOR:
3475 tmp0[i] |= tmp1[i];
3476 break;
3477 case XOR:
3478 tmp0[i] ^= tmp1[i];
3479 break;
3480 default:
3481 gcc_unreachable ();
3482 }
3483 }
3484 real_from_target (&r, tmp0, mode);
3485 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3486 }
3487 else
3488 {
3489 REAL_VALUE_TYPE f0, f1, value, result;
3490 bool inexact;
3491
3492 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3493 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3494 real_convert (&f0, mode, &f0);
3495 real_convert (&f1, mode, &f1);
3496
3497 if (HONOR_SNANS (mode)
3498 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3499 return 0;
3500
3501 if (code == DIV
3502 && REAL_VALUES_EQUAL (f1, dconst0)
3503 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3504 return 0;
3505
3506 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3507 && flag_trapping_math
3508 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3509 {
3510 int s0 = REAL_VALUE_NEGATIVE (f0);
3511 int s1 = REAL_VALUE_NEGATIVE (f1);
3512
3513 switch (code)
3514 {
3515 case PLUS:
3516 /* Inf + -Inf = NaN plus exception. */
3517 if (s0 != s1)
3518 return 0;
3519 break;
3520 case MINUS:
3521 /* Inf - Inf = NaN plus exception. */
3522 if (s0 == s1)
3523 return 0;
3524 break;
3525 case DIV:
3526 /* Inf / Inf = NaN plus exception. */
3527 return 0;
3528 default:
3529 break;
3530 }
3531 }
3532
3533 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3534 && flag_trapping_math
3535 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3536 || (REAL_VALUE_ISINF (f1)
3537 && REAL_VALUES_EQUAL (f0, dconst0))))
3538 /* Inf * 0 = NaN plus exception. */
3539 return 0;
3540
3541 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3542 &f0, &f1);
3543 real_convert (&result, mode, &value);
3544
3545 /* Don't constant fold this floating point operation if
3546 the result has overflowed and flag_trapping_math is set. */
3547
3548 if (flag_trapping_math
3549 && MODE_HAS_INFINITIES (mode)
3550 && REAL_VALUE_ISINF (result)
3551 && !REAL_VALUE_ISINF (f0)
3552 && !REAL_VALUE_ISINF (f1))
3553 /* Overflow plus exception. */
3554 return 0;
3555
3556 /* Don't constant fold this floating point operation if the
3557 result may depend upon the run-time rounding mode and
3558 flag_rounding_math is set, or if GCC's software emulation
3559 is unable to accurately represent the result. */
3560
3561 if ((flag_rounding_math
3562 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3563 && (inexact || !real_identical (&result, &value)))
3564 return NULL_RTX;
3565
3566 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3567 }
3568 }
3569
3570 /* We can fold some multi-word operations. */
3571 if (GET_MODE_CLASS (mode) == MODE_INT
3572 && width == HOST_BITS_PER_DOUBLE_INT
3573 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3574 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3575 {
3576 double_int o0, o1, res, tmp;
3577
3578 o0 = rtx_to_double_int (op0);
3579 o1 = rtx_to_double_int (op1);
3580
3581 switch (code)
3582 {
3583 case MINUS:
3584 /* A - B == A + (-B). */
3585 o1 = double_int_neg (o1);
3586
3587 /* Fall through.... */
3588
3589 case PLUS:
3590 res = double_int_add (o0, o1);
3591 break;
3592
3593 case MULT:
3594 res = double_int_mul (o0, o1);
3595 break;
3596
3597 case DIV:
3598 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3599 o0.low, o0.high, o1.low, o1.high,
3600 &res.low, &res.high,
3601 &tmp.low, &tmp.high))
3602 return 0;
3603 break;
3604
3605 case MOD:
3606 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3607 o0.low, o0.high, o1.low, o1.high,
3608 &tmp.low, &tmp.high,
3609 &res.low, &res.high))
3610 return 0;
3611 break;
3612
3613 case UDIV:
3614 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3615 o0.low, o0.high, o1.low, o1.high,
3616 &res.low, &res.high,
3617 &tmp.low, &tmp.high))
3618 return 0;
3619 break;
3620
3621 case UMOD:
3622 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3623 o0.low, o0.high, o1.low, o1.high,
3624 &tmp.low, &tmp.high,
3625 &res.low, &res.high))
3626 return 0;
3627 break;
3628
3629 case AND:
3630 res = double_int_and (o0, o1);
3631 break;
3632
3633 case IOR:
3634 res = double_int_ior (o0, o1);
3635 break;
3636
3637 case XOR:
3638 res = double_int_xor (o0, o1);
3639 break;
3640
3641 case SMIN:
3642 res = double_int_smin (o0, o1);
3643 break;
3644
3645 case SMAX:
3646 res = double_int_smax (o0, o1);
3647 break;
3648
3649 case UMIN:
3650 res = double_int_umin (o0, o1);
3651 break;
3652
3653 case UMAX:
3654 res = double_int_umax (o0, o1);
3655 break;
3656
3657 case LSHIFTRT: case ASHIFTRT:
3658 case ASHIFT:
3659 case ROTATE: case ROTATERT:
3660 {
3661 unsigned HOST_WIDE_INT cnt;
3662
3663 if (SHIFT_COUNT_TRUNCATED)
3664 {
3665 o1.high = 0;
3666 o1.low &= GET_MODE_PRECISION (mode) - 1;
3667 }
3668
3669 if (!double_int_fits_in_uhwi_p (o1)
3670 || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
3671 return 0;
3672
3673 cnt = double_int_to_uhwi (o1);
3674
3675 if (code == LSHIFTRT || code == ASHIFTRT)
3676 res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
3677 code == ASHIFTRT);
3678 else if (code == ASHIFT)
3679 res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
3680 true);
3681 else if (code == ROTATE)
3682 res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
3683 else /* code == ROTATERT */
3684 res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
3685 }
3686 break;
3687
3688 default:
3689 return 0;
3690 }
3691
3692 return immed_double_int_const (res, mode);
3693 }
3694
3695 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3696 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3697 {
3698 /* Get the integer argument values in two forms:
3699 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3700
3701 arg0 = INTVAL (op0);
3702 arg1 = INTVAL (op1);
3703
3704 if (width < HOST_BITS_PER_WIDE_INT)
3705 {
3706 arg0 &= GET_MODE_MASK (mode);
3707 arg1 &= GET_MODE_MASK (mode);
3708
3709 arg0s = arg0;
3710 if (val_signbit_known_set_p (mode, arg0s))
3711 arg0s |= ~GET_MODE_MASK (mode);
3712
3713 arg1s = arg1;
3714 if (val_signbit_known_set_p (mode, arg1s))
3715 arg1s |= ~GET_MODE_MASK (mode);
3716 }
3717 else
3718 {
3719 arg0s = arg0;
3720 arg1s = arg1;
3721 }
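/* As an illustration, in QImode (width 8) an operand of 0xff yields
arg0 == 255 in the zero-extended form and arg0s == -1 in the
sign-extended form; the signed cases below operate on the latter.  */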
3722
3723 /* Compute the value of the arithmetic. */
3724
3725 switch (code)
3726 {
3727 case PLUS:
3728 val = arg0s + arg1s;
3729 break;
3730
3731 case MINUS:
3732 val = arg0s - arg1s;
3733 break;
3734
3735 case MULT:
3736 val = arg0s * arg1s;
3737 break;
3738
3739 case DIV:
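/* The check below rejects division by zero as well as the case where
ARG0S is the most negative HOST_WIDE_INT and ARG1S is -1, whose
quotient would overflow the host arithmetic used for the fold.  */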
3740 if (arg1s == 0
3741 || ((unsigned HOST_WIDE_INT) arg0s
3742 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3743 && arg1s == -1))
3744 return 0;
3745 val = arg0s / arg1s;
3746 break;
3747
3748 case MOD:
3749 if (arg1s == 0
3750 || ((unsigned HOST_WIDE_INT) arg0s
3751 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3752 && arg1s == -1))
3753 return 0;
3754 val = arg0s % arg1s;
3755 break;
3756
3757 case UDIV:
3758 if (arg1 == 0
3759 || ((unsigned HOST_WIDE_INT) arg0s
3760 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3761 && arg1s == -1))
3762 return 0;
3763 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3764 break;
3765
3766 case UMOD:
3767 if (arg1 == 0
3768 || ((unsigned HOST_WIDE_INT) arg0s
3769 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3770 && arg1s == -1))
3771 return 0;
3772 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3773 break;
3774
3775 case AND:
3776 val = arg0 & arg1;
3777 break;
3778
3779 case IOR:
3780 val = arg0 | arg1;
3781 break;
3782
3783 case XOR:
3784 val = arg0 ^ arg1;
3785 break;
3786
3787 case LSHIFTRT:
3788 case ASHIFT:
3789 case ASHIFTRT:
3790 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3791 the value is in range. We can't return any old value for
3792 out-of-range arguments because either the middle-end (via
3793 shift_truncation_mask) or the back-end might be relying on
3794 target-specific knowledge. Nor can we rely on
3795 shift_truncation_mask, since the shift might not be part of an
3796 ashlM3, lshrM3 or ashrM3 instruction. */
3797 if (SHIFT_COUNT_TRUNCATED)
3798 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3799 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3800 return 0;
3801
3802 val = (code == ASHIFT
3803 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3804 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3805
3806 /* Sign-extend the result for arithmetic right shifts. */
3807 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3808 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3809 break;
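/* Worked example of the ASHIFTRT fixup above: in QImode, arg0 = 0x80
(arg0s = -128) shifted right by 2 gives 0x20 from the logical shift;
OR-ing in the high bits yields 0xe0, i.e. -32, the arithmetic result.  */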
3810
3811 case ROTATERT:
3812 if (arg1 < 0)
3813 return 0;
3814
3815 arg1 %= width;
3816 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3817 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3818 break;
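/* For instance, rotating arg0 = 0x96 right by 4 in QImode computes
(0x96 << 4) | (0x96 >> 4) = 0x969, which gen_int_mode then truncates
to the expected 8-bit rotation 0x69.  */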
3819
3820 case ROTATE:
3821 if (arg1 < 0)
3822 return 0;
3823
3824 arg1 %= width;
3825 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3826 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3827 break;
3828
3829 case COMPARE:
3830 /* Do nothing here. */
3831 return 0;
3832
3833 case SMIN:
3834 val = arg0s <= arg1s ? arg0s : arg1s;
3835 break;
3836
3837 case UMIN:
3838 val = ((unsigned HOST_WIDE_INT) arg0
3839 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3840 break;
3841
3842 case SMAX:
3843 val = arg0s > arg1s ? arg0s : arg1s;
3844 break;
3845
3846 case UMAX:
3847 val = ((unsigned HOST_WIDE_INT) arg0
3848 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3849 break;
3850
3851 case SS_PLUS:
3852 case US_PLUS:
3853 case SS_MINUS:
3854 case US_MINUS:
3855 case SS_MULT:
3856 case US_MULT:
3857 case SS_DIV:
3858 case US_DIV:
3859 case SS_ASHIFT:
3860 case US_ASHIFT:
3861 /* ??? There are simplifications that can be done. */
3862 return 0;
3863
3864 default:
3865 gcc_unreachable ();
3866 }
3867
3868 return gen_int_mode (val, mode);
3869 }
3870
3871 return NULL_RTX;
3872 }
3873
3874
3875 \f
3876 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3877 PLUS or MINUS.
3878
3879 Rather than test for specific cases, we do this by a brute-force method
3880 and do all possible simplifications until no more changes occur. Then
3881 we rebuild the operation. */
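/* For example, (minus (plus A B) (neg C)) is first expanded into the
operand list {A, +} {B, +} {C, +}; the entries are then simplified and
sorted pairwise, and the surviving operands are reassembled into a
chain of PLUS/MINUS expressions.  */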
3882
3883 struct simplify_plus_minus_op_data
3884 {
3885 rtx op;
3886 short neg;
3887 };
3888
3889 static bool
3890 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3891 {
3892 int result;
3893
3894 result = (commutative_operand_precedence (y)
3895 - commutative_operand_precedence (x));
3896 if (result)
3897 return result > 0;
3898
3899 /* Group together equal REGs to do more simplification. */
3900 if (REG_P (x) && REG_P (y))
3901 return REGNO (x) > REGNO (y);
3902 else
3903 return false;
3904 }
3905
3906 static rtx
3907 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3908 rtx op1)
3909 {
3910 struct simplify_plus_minus_op_data ops[8];
3911 rtx result, tem;
3912 int n_ops = 2, input_ops = 2;
3913 int changed, n_constants = 0, canonicalized = 0;
3914 int i, j;
3915
3916 memset (ops, 0, sizeof ops);
3917
3918 /* Set up the two operands and then expand them until nothing has been
3919 changed. If we run out of room in our array, give up; this should
3920 almost never happen. */
3921
3922 ops[0].op = op0;
3923 ops[0].neg = 0;
3924 ops[1].op = op1;
3925 ops[1].neg = (code == MINUS);
3926
3927 do
3928 {
3929 changed = 0;
3930
3931 for (i = 0; i < n_ops; i++)
3932 {
3933 rtx this_op = ops[i].op;
3934 int this_neg = ops[i].neg;
3935 enum rtx_code this_code = GET_CODE (this_op);
3936
3937 switch (this_code)
3938 {
3939 case PLUS:
3940 case MINUS:
3941 if (n_ops == 7)
3942 return NULL_RTX;
3943
3944 ops[n_ops].op = XEXP (this_op, 1);
3945 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3946 n_ops++;
3947
3948 ops[i].op = XEXP (this_op, 0);
3949 input_ops++;
3950 changed = 1;
3951 canonicalized |= this_neg;
3952 break;
3953
3954 case NEG:
3955 ops[i].op = XEXP (this_op, 0);
3956 ops[i].neg = ! this_neg;
3957 changed = 1;
3958 canonicalized = 1;
3959 break;
3960
3961 case CONST:
3962 if (n_ops < 7
3963 && GET_CODE (XEXP (this_op, 0)) == PLUS
3964 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3965 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3966 {
3967 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3968 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3969 ops[n_ops].neg = this_neg;
3970 n_ops++;
3971 changed = 1;
3972 canonicalized = 1;
3973 }
3974 break;
3975
3976 case NOT:
3977 /* ~a -> (-a - 1) */
3978 if (n_ops != 7)
3979 {
3980 ops[n_ops].op = CONSTM1_RTX (mode);
3981 ops[n_ops++].neg = this_neg;
3982 ops[i].op = XEXP (this_op, 0);
3983 ops[i].neg = !this_neg;
3984 changed = 1;
3985 canonicalized = 1;
3986 }
3987 break;
3988
3989 case CONST_INT:
3990 n_constants++;
3991 if (this_neg)
3992 {
3993 ops[i].op = neg_const_int (mode, this_op);
3994 ops[i].neg = 0;
3995 changed = 1;
3996 canonicalized = 1;
3997 }
3998 break;
3999
4000 default:
4001 break;
4002 }
4003 }
4004 }
4005 while (changed);
4006
4007 if (n_constants > 1)
4008 canonicalized = 1;
4009
4010 gcc_assert (n_ops >= 2);
4011
4012 /* If we only have two operands, we can avoid the loops. */
4013 if (n_ops == 2)
4014 {
4015 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4016 rtx lhs, rhs;
4017
4018 /* Get the two operands. Be careful with the order, especially for
4019 the cases where code == MINUS. */
4020 if (ops[0].neg && ops[1].neg)
4021 {
4022 lhs = gen_rtx_NEG (mode, ops[0].op);
4023 rhs = ops[1].op;
4024 }
4025 else if (ops[0].neg)
4026 {
4027 lhs = ops[1].op;
4028 rhs = ops[0].op;
4029 }
4030 else
4031 {
4032 lhs = ops[0].op;
4033 rhs = ops[1].op;
4034 }
4035
4036 return simplify_const_binary_operation (code, mode, lhs, rhs);
4037 }
4038
4039 /* Now simplify each pair of operands until nothing changes. */
4040 do
4041 {
4042 /* Insertion sort is good enough for an eight-element array. */
4043 for (i = 1; i < n_ops; i++)
4044 {
4045 struct simplify_plus_minus_op_data save;
4046 j = i - 1;
4047 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4048 continue;
4049
4050 canonicalized = 1;
4051 save = ops[i];
4052 do
4053 ops[j + 1] = ops[j];
4054 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4055 ops[j + 1] = save;
4056 }
4057
4058 changed = 0;
4059 for (i = n_ops - 1; i > 0; i--)
4060 for (j = i - 1; j >= 0; j--)
4061 {
4062 rtx lhs = ops[j].op, rhs = ops[i].op;
4063 int lneg = ops[j].neg, rneg = ops[i].neg;
4064
4065 if (lhs != 0 && rhs != 0)
4066 {
4067 enum rtx_code ncode = PLUS;
4068
4069 if (lneg != rneg)
4070 {
4071 ncode = MINUS;
4072 if (lneg)
4073 tem = lhs, lhs = rhs, rhs = tem;
4074 }
4075 else if (swap_commutative_operands_p (lhs, rhs))
4076 tem = lhs, lhs = rhs, rhs = tem;
4077
4078 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4079 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4080 {
4081 rtx tem_lhs, tem_rhs;
4082
4083 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4084 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4085 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4086
4087 if (tem && !CONSTANT_P (tem))
4088 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4089 }
4090 else
4091 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4092
4093 /* Reject "simplifications" that just wrap the two
4094 arguments in a CONST. Failure to do so can result
4095 in infinite recursion with simplify_binary_operation
4096 when it calls us to simplify CONST operations. */
4097 if (tem
4098 && ! (GET_CODE (tem) == CONST
4099 && GET_CODE (XEXP (tem, 0)) == ncode
4100 && XEXP (XEXP (tem, 0), 0) == lhs
4101 && XEXP (XEXP (tem, 0), 1) == rhs))
4102 {
4103 lneg &= rneg;
4104 if (GET_CODE (tem) == NEG)
4105 tem = XEXP (tem, 0), lneg = !lneg;
4106 if (CONST_INT_P (tem) && lneg)
4107 tem = neg_const_int (mode, tem), lneg = 0;
4108
4109 ops[i].op = tem;
4110 ops[i].neg = lneg;
4111 ops[j].op = NULL_RTX;
4112 changed = 1;
4113 canonicalized = 1;
4114 }
4115 }
4116 }
4117
4118 /* If nothing changed, fail. */
4119 if (!canonicalized)
4120 return NULL_RTX;
4121
4122 /* Pack all the operands to the lower-numbered entries. */
4123 for (i = 0, j = 0; j < n_ops; j++)
4124 if (ops[j].op)
4125 {
4126 ops[i] = ops[j];
4127 i++;
4128 }
4129 n_ops = i;
4130 }
4131 while (changed);
4132
4133 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4134 if (n_ops == 2
4135 && CONST_INT_P (ops[1].op)
4136 && CONSTANT_P (ops[0].op)
4137 && ops[0].neg)
4138 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4139
4140 /* We suppressed creation of trivial CONST expressions in the
4141 combination loop to avoid recursion. Create one manually now.
4142 The combination loop should have ensured that there is exactly
4143 one CONST_INT, and the sort will have ensured that it is last
4144 in the array and that any other constant will be next-to-last. */
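/* For instance, if the two trailing operands are a SYMBOL_REF S and
(const_int 4), plus_constant folds them here into the single constant
term (const (plus S 4)).  */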
4145
4146 if (n_ops > 1
4147 && CONST_INT_P (ops[n_ops - 1].op)
4148 && CONSTANT_P (ops[n_ops - 2].op))
4149 {
4150 rtx value = ops[n_ops - 1].op;
4151 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4152 value = neg_const_int (mode, value);
4153 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4154 INTVAL (value));
4155 n_ops--;
4156 }
4157
4158 /* Put a non-negated operand first, if possible. */
4159
4160 for (i = 0; i < n_ops && ops[i].neg; i++)
4161 continue;
4162 if (i == n_ops)
4163 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4164 else if (i != 0)
4165 {
4166 tem = ops[0].op;
4167 ops[0] = ops[i];
4168 ops[i].op = tem;
4169 ops[i].neg = 1;
4170 }
4171
4172 /* Now make the result by performing the requested operations. */
4173 result = ops[0].op;
4174 for (i = 1; i < n_ops; i++)
4175 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4176 mode, result, ops[i].op);
4177
4178 return result;
4179 }
4180
4181 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4182 static bool
4183 plus_minus_operand_p (const_rtx x)
4184 {
4185 return GET_CODE (x) == PLUS
4186 || GET_CODE (x) == MINUS
4187 || (GET_CODE (x) == CONST
4188 && GET_CODE (XEXP (x, 0)) == PLUS
4189 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4190 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4191 }
4192
4193 /* Like simplify_binary_operation except used for relational operators.
4194 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4195 not also be VOIDmode.
4196
4197 CMP_MODE specifies the mode in which the comparison is done, so it is
4198 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4199 the operands or, if both are VOIDmode, the operands are compared in
4200 "infinite precision". */
4201 rtx
4202 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4203 enum machine_mode cmp_mode, rtx op0, rtx op1)
4204 {
4205 rtx tem, trueop0, trueop1;
4206
4207 if (cmp_mode == VOIDmode)
4208 cmp_mode = GET_MODE (op0);
4209 if (cmp_mode == VOIDmode)
4210 cmp_mode = GET_MODE (op1);
4211
4212 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4213 if (tem)
4214 {
4215 if (SCALAR_FLOAT_MODE_P (mode))
4216 {
4217 if (tem == const0_rtx)
4218 return CONST0_RTX (mode);
4219 #ifdef FLOAT_STORE_FLAG_VALUE
4220 {
4221 REAL_VALUE_TYPE val;
4222 val = FLOAT_STORE_FLAG_VALUE (mode);
4223 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4224 }
4225 #else
4226 return NULL_RTX;
4227 #endif
4228 }
4229 if (VECTOR_MODE_P (mode))
4230 {
4231 if (tem == const0_rtx)
4232 return CONST0_RTX (mode);
4233 #ifdef VECTOR_STORE_FLAG_VALUE
4234 {
4235 int i, units;
4236 rtvec v;
4237
4238 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4239 if (val == NULL_RTX)
4240 return NULL_RTX;
4241 if (val == const1_rtx)
4242 return CONST1_RTX (mode);
4243
4244 units = GET_MODE_NUNITS (mode);
4245 v = rtvec_alloc (units);
4246 for (i = 0; i < units; i++)
4247 RTVEC_ELT (v, i) = val;
4248 return gen_rtx_raw_CONST_VECTOR (mode, v);
4249 }
4250 #else
4251 return NULL_RTX;
4252 #endif
4253 }
4254
4255 return tem;
4256 }
4257
4258 /* For the following tests, ensure const0_rtx is op1. */
4259 if (swap_commutative_operands_p (op0, op1)
4260 || (op0 == const0_rtx && op1 != const0_rtx))
4261 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4262
4263 /* If op0 is a compare, extract the comparison arguments from it. */
4264 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4265 return simplify_gen_relational (code, mode, VOIDmode,
4266 XEXP (op0, 0), XEXP (op0, 1));
4267
4268 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4269 || CC0_P (op0))
4270 return NULL_RTX;
4271
4272 trueop0 = avoid_constant_pool_reference (op0);
4273 trueop1 = avoid_constant_pool_reference (op1);
4274 return simplify_relational_operation_1 (code, mode, cmp_mode,
4275 trueop0, trueop1);
4276 }
4277
4278 /* This part of simplify_relational_operation is only used when CMP_MODE
4279 is not in class MODE_CC (i.e. it is a real comparison).
4280
4281 MODE is the mode of the result, while CMP_MODE specifies the mode
4282 in which the comparison is done, so it is the mode of the operands. */
4283
4284 static rtx
4285 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4286 enum machine_mode cmp_mode, rtx op0, rtx op1)
4287 {
4288 enum rtx_code op0code = GET_CODE (op0);
4289
4290 if (op1 == const0_rtx && COMPARISON_P (op0))
4291 {
4292 /* If op0 is a comparison, extract the comparison arguments
4293 from it. */
4294 if (code == NE)
4295 {
4296 if (GET_MODE (op0) == mode)
4297 return simplify_rtx (op0);
4298 else
4299 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4300 XEXP (op0, 0), XEXP (op0, 1));
4301 }
4302 else if (code == EQ)
4303 {
4304 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4305 if (new_code != UNKNOWN)
4306 return simplify_gen_relational (new_code, mode, VOIDmode,
4307 XEXP (op0, 0), XEXP (op0, 1));
4308 }
4309 }
4310
4311 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4312 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
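/* E.g. (ltu (plus a 4) 4), the usual unsigned overflow test for
"a + 4", becomes (geu a -4), an unsigned comparison of a against the
wrap-around threshold.  */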
4313 if ((code == LTU || code == GEU)
4314 && GET_CODE (op0) == PLUS
4315 && CONST_INT_P (XEXP (op0, 1))
4316 && (rtx_equal_p (op1, XEXP (op0, 0))
4317 || rtx_equal_p (op1, XEXP (op0, 1))))
4318 {
4319 rtx new_cmp
4320 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4321 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4322 cmp_mode, XEXP (op0, 0), new_cmp);
4323 }
4324
4325 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4326 if ((code == LTU || code == GEU)
4327 && GET_CODE (op0) == PLUS
4328 && rtx_equal_p (op1, XEXP (op0, 1))
4329 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4330 && !rtx_equal_p (op1, XEXP (op0, 0)))
4331 return simplify_gen_relational (code, mode, cmp_mode, op0,
4332 copy_rtx (XEXP (op0, 0)));
4333
4334 if (op1 == const0_rtx)
4335 {
4336 /* Canonicalize (GTU x 0) as (NE x 0). */
4337 if (code == GTU)
4338 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4339 /* Canonicalize (LEU x 0) as (EQ x 0). */
4340 if (code == LEU)
4341 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4342 }
4343 else if (op1 == const1_rtx)
4344 {
4345 switch (code)
4346 {
4347 case GE:
4348 /* Canonicalize (GE x 1) as (GT x 0). */
4349 return simplify_gen_relational (GT, mode, cmp_mode,
4350 op0, const0_rtx);
4351 case GEU:
4352 /* Canonicalize (GEU x 1) as (NE x 0). */
4353 return simplify_gen_relational (NE, mode, cmp_mode,
4354 op0, const0_rtx);
4355 case LT:
4356 /* Canonicalize (LT x 1) as (LE x 0). */
4357 return simplify_gen_relational (LE, mode, cmp_mode,
4358 op0, const0_rtx);
4359 case LTU:
4360 /* Canonicalize (LTU x 1) as (EQ x 0). */
4361 return simplify_gen_relational (EQ, mode, cmp_mode,
4362 op0, const0_rtx);
4363 default:
4364 break;
4365 }
4366 }
4367 else if (op1 == constm1_rtx)
4368 {
4369 /* Canonicalize (LE x -1) as (LT x 0). */
4370 if (code == LE)
4371 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4372 /* Canonicalize (GT x -1) as (GE x 0). */
4373 if (code == GT)
4374 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4375 }
4376
4377 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4378 if ((code == EQ || code == NE)
4379 && (op0code == PLUS || op0code == MINUS)
4380 && CONSTANT_P (op1)
4381 && CONSTANT_P (XEXP (op0, 1))
4382 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4383 {
4384 rtx x = XEXP (op0, 0);
4385 rtx c = XEXP (op0, 1);
4386 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4387 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4388
4389 /* Detect an infinite recursive condition, where we oscillate at this
4390 simplification case between:
4391 A + B == C <---> C - B == A,
4392 where A, B, and C are all constants with non-simplifiable expressions,
4393 usually SYMBOL_REFs. */
4394 if (GET_CODE (tem) == invcode
4395 && CONSTANT_P (x)
4396 && rtx_equal_p (c, XEXP (tem, 1)))
4397 return NULL_RTX;
4398
4399 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4400 }
4401
4402 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4403 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4404 if (code == NE
4405 && op1 == const0_rtx
4406 && GET_MODE_CLASS (mode) == MODE_INT
4407 && cmp_mode != VOIDmode
4408 /* ??? Work-around BImode bugs in the ia64 backend. */
4409 && mode != BImode
4410 && cmp_mode != BImode
4411 && nonzero_bits (op0, cmp_mode) == 1
4412 && STORE_FLAG_VALUE == 1)
4413 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4414 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4415 : lowpart_subreg (mode, op0, cmp_mode);
4416
4417 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4418 if ((code == EQ || code == NE)
4419 && op1 == const0_rtx
4420 && op0code == XOR)
4421 return simplify_gen_relational (code, mode, cmp_mode,
4422 XEXP (op0, 0), XEXP (op0, 1));
4423
4424 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4425 if ((code == EQ || code == NE)
4426 && op0code == XOR
4427 && rtx_equal_p (XEXP (op0, 0), op1)
4428 && !side_effects_p (XEXP (op0, 0)))
4429 return simplify_gen_relational (code, mode, cmp_mode,
4430 XEXP (op0, 1), const0_rtx);
4431
4432 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4433 if ((code == EQ || code == NE)
4434 && op0code == XOR
4435 && rtx_equal_p (XEXP (op0, 1), op1)
4436 && !side_effects_p (XEXP (op0, 1)))
4437 return simplify_gen_relational (code, mode, cmp_mode,
4438 XEXP (op0, 0), const0_rtx);
4439
4440 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4441 if ((code == EQ || code == NE)
4442 && op0code == XOR
4443 && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
4444 && (CONST_INT_P (XEXP (op0, 1))
4445 || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
4446 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4447 simplify_gen_binary (XOR, cmp_mode,
4448 XEXP (op0, 1), op1));
4449
4450 if (op0code == POPCOUNT && op1 == const0_rtx)
4451 switch (code)
4452 {
4453 case EQ:
4454 case LE:
4455 case LEU:
4456 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4457 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4458 XEXP (op0, 0), const0_rtx);
4459
4460 case NE:
4461 case GT:
4462 case GTU:
4463 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4464 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4465 XEXP (op0, 0), const0_rtx);
4466
4467 default:
4468 break;
4469 }
4470
4471 return NULL_RTX;
4472 }
4473
4474 enum
4475 {
4476 CMP_EQ = 1,
4477 CMP_LT = 2,
4478 CMP_GT = 4,
4479 CMP_LTU = 8,
4480 CMP_GTU = 16
4481 };
4482
4483
4484 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4485 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4486 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4487 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4488 For floating-point comparisons, assume that the operands were ordered. */
4489
4490 static rtx
4491 comparison_result (enum rtx_code code, int known_results)
4492 {
4493 switch (code)
4494 {
4495 case EQ:
4496 case UNEQ:
4497 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4498 case NE:
4499 case LTGT:
4500 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4501
4502 case LT:
4503 case UNLT:
4504 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4505 case GE:
4506 case UNGE:
4507 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4508
4509 case GT:
4510 case UNGT:
4511 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4512 case LE:
4513 case UNLE:
4514 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4515
4516 case LTU:
4517 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4518 case GEU:
4519 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4520
4521 case GTU:
4522 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4523 case LEU:
4524 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4525
4526 case ORDERED:
4527 return const_true_rtx;
4528 case UNORDERED:
4529 return const0_rtx;
4530 default:
4531 gcc_unreachable ();
4532 }
4533 }
4534
4535 /* Check if the given comparison (done in the given MODE) is actually a
4536 tautology or a contradiction.
4537 If no simplification is possible, this function returns zero.
4538 Otherwise, it returns either const_true_rtx or const0_rtx. */
4539
4540 rtx
4541 simplify_const_relational_operation (enum rtx_code code,
4542 enum machine_mode mode,
4543 rtx op0, rtx op1)
4544 {
4545 rtx tem;
4546 rtx trueop0;
4547 rtx trueop1;
4548
4549 gcc_assert (mode != VOIDmode
4550 || (GET_MODE (op0) == VOIDmode
4551 && GET_MODE (op1) == VOIDmode));
4552
4553 /* If op0 is a compare, extract the comparison arguments from it. */
4554 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4555 {
4556 op1 = XEXP (op0, 1);
4557 op0 = XEXP (op0, 0);
4558
4559 if (GET_MODE (op0) != VOIDmode)
4560 mode = GET_MODE (op0);
4561 else if (GET_MODE (op1) != VOIDmode)
4562 mode = GET_MODE (op1);
4563 else
4564 return 0;
4565 }
4566
4567 /* We can't simplify MODE_CC values since we don't know what the
4568 actual comparison is. */
4569 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4570 return 0;
4571
4572 /* Make sure the constant is second. */
4573 if (swap_commutative_operands_p (op0, op1))
4574 {
4575 tem = op0, op0 = op1, op1 = tem;
4576 code = swap_condition (code);
4577 }
4578
4579 trueop0 = avoid_constant_pool_reference (op0);
4580 trueop1 = avoid_constant_pool_reference (op1);
4581
4582 /* For integer comparisons of A and B maybe we can simplify A - B and can
4583 then simplify a comparison of that with zero. If A and B are both either
4584 a register or a CONST_INT, this can't help; testing for these cases will
4585 prevent infinite recursion here and speed things up.
4586
4587 We can only do this for EQ and NE comparisons as otherwise we may
4588 lose or introduce overflow, which we cannot disregard as undefined
4589 because we do not know the signedness of the operation on either the
4590 left or the right hand side of the comparison. */
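/* As an example, comparing (plus x 3) with (plus x 5) for EQ: their
difference folds to (const_int -2), and the recursive comparison of
-2 against zero then yields const0_rtx.  */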
4591
4592 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4593 && (code == EQ || code == NE)
4594 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4595 && (REG_P (op1) || CONST_INT_P (trueop1)))
4596 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4597 /* We cannot do this if tem is a nonzero address. */
4598 && ! nonzero_address_p (tem))
4599 return simplify_const_relational_operation (signed_condition (code),
4600 mode, tem, const0_rtx);
4601
4602 if (! HONOR_NANS (mode) && code == ORDERED)
4603 return const_true_rtx;
4604
4605 if (! HONOR_NANS (mode) && code == UNORDERED)
4606 return const0_rtx;
4607
4608 /* For modes without NaNs, if the two operands are equal, we know the
4609 result except if they have side-effects. Even with NaNs we know
4610 the result of unordered comparisons and, if signaling NaNs are
4611 irrelevant, also the result of LT/GT/LTGT. */
4612 if ((! HONOR_NANS (GET_MODE (trueop0))
4613 || code == UNEQ || code == UNLE || code == UNGE
4614 || ((code == LT || code == GT || code == LTGT)
4615 && ! HONOR_SNANS (GET_MODE (trueop0))))
4616 && rtx_equal_p (trueop0, trueop1)
4617 && ! side_effects_p (trueop0))
4618 return comparison_result (code, CMP_EQ);
4619
4620 /* If the operands are floating-point constants, see if we can fold
4621 the result. */
4622 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4623 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4624 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4625 {
4626 REAL_VALUE_TYPE d0, d1;
4627
4628 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4629 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4630
4631 /* Comparisons are unordered iff at least one of the values is NaN. */
4632 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4633 switch (code)
4634 {
4635 case UNEQ:
4636 case UNLT:
4637 case UNGT:
4638 case UNLE:
4639 case UNGE:
4640 case NE:
4641 case UNORDERED:
4642 return const_true_rtx;
4643 case EQ:
4644 case LT:
4645 case GT:
4646 case LE:
4647 case GE:
4648 case LTGT:
4649 case ORDERED:
4650 return const0_rtx;
4651 default:
4652 return 0;
4653 }
4654
4655 return comparison_result (code,
4656 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4657 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4658 }
4659
4660 /* Otherwise, see if the operands are both integers. */
4661 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4662 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4663 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4664 {
4665 int width = GET_MODE_PRECISION (mode);
4666 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4667 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4668
4669 /* Get the two words comprising each integer constant. */
4670 if (CONST_DOUBLE_AS_INT_P (trueop0))
4671 {
4672 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4673 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4674 }
4675 else
4676 {
4677 l0u = l0s = INTVAL (trueop0);
4678 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4679 }
4680
4681 if (CONST_DOUBLE_AS_INT_P (trueop1))
4682 {
4683 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4684 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4685 }
4686 else
4687 {
4688 l1u = l1s = INTVAL (trueop1);
4689 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4690 }
4691
4692 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4693 we have to sign or zero-extend the values. */
4694 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4695 {
4696 l0u &= GET_MODE_MASK (mode);
4697 l1u &= GET_MODE_MASK (mode);
4698
4699 if (val_signbit_known_set_p (mode, l0s))
4700 l0s |= ~GET_MODE_MASK (mode);
4701
4702 if (val_signbit_known_set_p (mode, l1s))
4703 l1s |= ~GET_MODE_MASK (mode);
4704 }
4705 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4706 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4707
4708 if (h0u == h1u && l0u == l1u)
4709 return comparison_result (code, CMP_EQ);
4710 else
4711 {
4712 int cr;
4713 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4714 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4715 return comparison_result (code, cr);
4716 }
4717 }
4718
4719 /* Optimize comparisons with upper and lower bounds. */
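/* For example, if nonzero_bits shows that only the low four bits of
trueop0 can be set, then mmax is 15, so (gtu x 15) folds to false and
(leu x 15) folds to true.  */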
4720 if (HWI_COMPUTABLE_MODE_P (mode)
4721 && CONST_INT_P (trueop1))
4722 {
4723 int sign;
4724 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4725 HOST_WIDE_INT val = INTVAL (trueop1);
4726 HOST_WIDE_INT mmin, mmax;
4727
4728 if (code == GEU
4729 || code == LEU
4730 || code == GTU
4731 || code == LTU)
4732 sign = 0;
4733 else
4734 sign = 1;
4735
4736 /* Get a reduced range if the sign bit is zero. */
4737 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4738 {
4739 mmin = 0;
4740 mmax = nonzero;
4741 }
4742 else
4743 {
4744 rtx mmin_rtx, mmax_rtx;
4745 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4746
4747 mmin = INTVAL (mmin_rtx);
4748 mmax = INTVAL (mmax_rtx);
4749 if (sign)
4750 {
4751 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4752
4753 mmin >>= (sign_copies - 1);
4754 mmax >>= (sign_copies - 1);
4755 }
4756 }
4757
4758 switch (code)
4759 {
4760 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4761 case GEU:
4762 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4763 return const_true_rtx;
4764 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4765 return const0_rtx;
4766 break;
4767 case GE:
4768 if (val <= mmin)
4769 return const_true_rtx;
4770 if (val > mmax)
4771 return const0_rtx;
4772 break;
4773
4774 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4775 case LEU:
4776 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4777 return const_true_rtx;
4778 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4779 return const0_rtx;
4780 break;
4781 case LE:
4782 if (val >= mmax)
4783 return const_true_rtx;
4784 if (val < mmin)
4785 return const0_rtx;
4786 break;
4787
4788 case EQ:
4789 /* x == y is always false for y out of range. */
4790 if (val < mmin || val > mmax)
4791 return const0_rtx;
4792 break;
4793
4794 /* x > y is always false for y >= mmax, always true for y < mmin. */
4795 case GTU:
4796 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4797 return const0_rtx;
4798 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4799 return const_true_rtx;
4800 break;
4801 case GT:
4802 if (val >= mmax)
4803 return const0_rtx;
4804 if (val < mmin)
4805 return const_true_rtx;
4806 break;
4807
4808 /* x < y is always false for y <= mmin, always true for y > mmax. */
4809 case LTU:
4810 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4811 return const0_rtx;
4812 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4813 return const_true_rtx;
4814 break;
4815 case LT:
4816 if (val <= mmin)
4817 return const0_rtx;
4818 if (val > mmax)
4819 return const_true_rtx;
4820 break;
4821
4822 case NE:
4823 /* x != y is always true for y out of range. */
4824 if (val < mmin || val > mmax)
4825 return const_true_rtx;
4826 break;
4827
4828 default:
4829 break;
4830 }
4831 }
4832
4833 /* Optimize integer comparisons with zero. */
4834 if (trueop1 == const0_rtx)
4835 {
4836 /* Some addresses are known to be nonzero. We don't know
4837 their sign, but equality comparisons are known. */
4838 if (nonzero_address_p (trueop0))
4839 {
4840 if (code == EQ || code == LEU)
4841 return const0_rtx;
4842 if (code == NE || code == GTU)
4843 return const_true_rtx;
4844 }
4845
4846 /* See if the first operand is an IOR with a constant. If so, we
4847 may be able to determine the result of this comparison. */
4848 if (GET_CODE (op0) == IOR)
4849 {
4850 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4851 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4852 {
4853 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4854 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4855 && (UINTVAL (inner_const)
4856 & ((unsigned HOST_WIDE_INT) 1
4857 << sign_bitnum)));
4858
4859 switch (code)
4860 {
4861 case EQ:
4862 case LEU:
4863 return const0_rtx;
4864 case NE:
4865 case GTU:
4866 return const_true_rtx;
4867 case LT:
4868 case LE:
4869 if (has_sign)
4870 return const_true_rtx;
4871 break;
4872 case GT:
4873 case GE:
4874 if (has_sign)
4875 return const0_rtx;
4876 break;
4877 default:
4878 break;
4879 }
4880 }
4881 }
4882 }
4883
4884 /* Optimize comparison of ABS with zero. */
4885 if (trueop1 == CONST0_RTX (mode)
4886 && (GET_CODE (trueop0) == ABS
4887 || (GET_CODE (trueop0) == FLOAT_EXTEND
4888 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4889 {
4890 switch (code)
4891 {
4892 case LT:
4893 /* Optimize abs(x) < 0.0. */
4894 if (!HONOR_SNANS (mode)
4895 && (!INTEGRAL_MODE_P (mode)
4896 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4897 {
4898 if (INTEGRAL_MODE_P (mode)
4899 && (issue_strict_overflow_warning
4900 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4901 warning (OPT_Wstrict_overflow,
4902 ("assuming signed overflow does not occur when "
4903 "assuming abs (x) < 0 is false"));
4904 return const0_rtx;
4905 }
4906 break;
4907
4908 case GE:
4909 /* Optimize abs(x) >= 0.0. */
4910 if (!HONOR_NANS (mode)
4911 && (!INTEGRAL_MODE_P (mode)
4912 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4913 {
4914 if (INTEGRAL_MODE_P (mode)
4915 && (issue_strict_overflow_warning
4916 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4917 warning (OPT_Wstrict_overflow,
4918 ("assuming signed overflow does not occur when "
4919 "assuming abs (x) >= 0 is true"));
4920 return const_true_rtx;
4921 }
4922 break;
4923
4924 case UNGE:
4925 /* Optimize ! (abs(x) < 0.0). */
4926 return const_true_rtx;
4927
4928 default:
4929 break;
4930 }
4931 }
4932
4933 return 0;
4934 }
4935 \f
4936 /* Simplify CODE, an operation with result mode MODE and three operands,
4937 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4938 a constant. Return 0 if no simplification is possible. */
4939
4940 rtx
4941 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4942 enum machine_mode op0_mode, rtx op0, rtx op1,
4943 rtx op2)
4944 {
4945 unsigned int width = GET_MODE_PRECISION (mode);
4946 bool any_change = false;
4947 rtx tem;
4948
4949 /* VOIDmode means "infinite" precision. */
4950 if (width == 0)
4951 width = HOST_BITS_PER_WIDE_INT;
4952
4953 switch (code)
4954 {
4955 case FMA:
4956 /* Simplify negations around the multiplication. */
4957 /* -a * -b + c => a * b + c. */
4958 if (GET_CODE (op0) == NEG)
4959 {
4960 tem = simplify_unary_operation (NEG, mode, op1, mode);
4961 if (tem)
4962 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4963 }
4964 else if (GET_CODE (op1) == NEG)
4965 {
4966 tem = simplify_unary_operation (NEG, mode, op0, mode);
4967 if (tem)
4968 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4969 }
4970
4971 /* Canonicalize the two multiplication operands. */
4972 /* a * -b + c => -b * a + c. */
4973 if (swap_commutative_operands_p (op0, op1))
4974 tem = op0, op0 = op1, op1 = tem, any_change = true;
4975
4976 if (any_change)
4977 return gen_rtx_FMA (mode, op0, op1, op2);
4978 return NULL_RTX;
4979
4980 case SIGN_EXTRACT:
4981 case ZERO_EXTRACT:
4982 if (CONST_INT_P (op0)
4983 && CONST_INT_P (op1)
4984 && CONST_INT_P (op2)
4985 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4986 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4987 {
4988 /* Extracting a bit-field from a constant */
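/* For example (with !BITS_BIG_ENDIAN), extracting a 4-bit field at
position 4 from the constant 0xab yields 0xa for ZERO_EXTRACT and,
once the field's sign bit is propagated, -6 for SIGN_EXTRACT.  */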
4989 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4990 HOST_WIDE_INT op1val = INTVAL (op1);
4991 HOST_WIDE_INT op2val = INTVAL (op2);
4992 if (BITS_BIG_ENDIAN)
4993 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4994 else
4995 val >>= op2val;
4996
4997 if (HOST_BITS_PER_WIDE_INT != op1val)
4998 {
4999 /* First zero-extend. */
5000 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5001 /* If desired, propagate sign bit. */
5002 if (code == SIGN_EXTRACT
5003 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5004 != 0)
5005 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5006 }
5007
5008 return gen_int_mode (val, mode);
5009 }
5010 break;
5011
5012 case IF_THEN_ELSE:
5013 if (CONST_INT_P (op0))
5014 return op0 != const0_rtx ? op1 : op2;
5015
5016 /* Convert c ? a : a into "a". */
5017 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5018 return op1;
5019
5020 /* Convert a != b ? a : b into "a". */
5021 if (GET_CODE (op0) == NE
5022 && ! side_effects_p (op0)
5023 && ! HONOR_NANS (mode)
5024 && ! HONOR_SIGNED_ZEROS (mode)
5025 && ((rtx_equal_p (XEXP (op0, 0), op1)
5026 && rtx_equal_p (XEXP (op0, 1), op2))
5027 || (rtx_equal_p (XEXP (op0, 0), op2)
5028 && rtx_equal_p (XEXP (op0, 1), op1))))
5029 return op1;
5030
5031 /* Convert a == b ? a : b into "b". */
5032 if (GET_CODE (op0) == EQ
5033 && ! side_effects_p (op0)
5034 && ! HONOR_NANS (mode)
5035 && ! HONOR_SIGNED_ZEROS (mode)
5036 && ((rtx_equal_p (XEXP (op0, 0), op1)
5037 && rtx_equal_p (XEXP (op0, 1), op2))
5038 || (rtx_equal_p (XEXP (op0, 0), op2)
5039 && rtx_equal_p (XEXP (op0, 1), op1))))
5040 return op2;
5041
5042 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5043 {
5044 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5045 ? GET_MODE (XEXP (op0, 1))
5046 : GET_MODE (XEXP (op0, 0)));
5047 rtx temp;
5048
5049 /* Look for happy constants in op1 and op2. */
5050 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5051 {
5052 HOST_WIDE_INT t = INTVAL (op1);
5053 HOST_WIDE_INT f = INTVAL (op2);
5054
5055 if (t == STORE_FLAG_VALUE && f == 0)
5056 code = GET_CODE (op0);
5057 else if (t == 0 && f == STORE_FLAG_VALUE)
5058 {
5059 enum rtx_code tmp;
5060 tmp = reversed_comparison_code (op0, NULL_RTX);
5061 if (tmp == UNKNOWN)
5062 break;
5063 code = tmp;
5064 }
5065 else
5066 break;
5067
5068 return simplify_gen_relational (code, mode, cmp_mode,
5069 XEXP (op0, 0), XEXP (op0, 1));
5070 }
5071
5072 if (cmp_mode == VOIDmode)
5073 cmp_mode = op0_mode;
5074 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5075 cmp_mode, XEXP (op0, 0),
5076 XEXP (op0, 1));
5077
5078 /* See if any simplifications were possible. */
5079 if (temp)
5080 {
5081 if (CONST_INT_P (temp))
5082 return temp == const0_rtx ? op2 : op1;
5083 else if (temp)
5084 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5085 }
5086 }
5087 break;
5088
5089 case VEC_MERGE:
5090 gcc_assert (GET_MODE (op0) == mode);
5091 gcc_assert (GET_MODE (op1) == mode);
5092 gcc_assert (VECTOR_MODE_P (mode));
5093 op2 = avoid_constant_pool_reference (op2);
5094 if (CONST_INT_P (op2))
5095 {
5096 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5097 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5098 int mask = (1 << n_elts) - 1;
5099
5100 if (!(INTVAL (op2) & mask))
5101 return op1;
5102 if ((INTVAL (op2) & mask) == mask)
5103 return op0;
5104
5105 op0 = avoid_constant_pool_reference (op0);
5106 op1 = avoid_constant_pool_reference (op1);
5107 if (GET_CODE (op0) == CONST_VECTOR
5108 && GET_CODE (op1) == CONST_VECTOR)
5109 {
5110 rtvec v = rtvec_alloc (n_elts);
5111 unsigned int i;
5112
5113 for (i = 0; i < n_elts; i++)
5114 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5115 ? CONST_VECTOR_ELT (op0, i)
5116 : CONST_VECTOR_ELT (op1, i));
5117 return gen_rtx_CONST_VECTOR (mode, v);
5118 }
5119 }
5120 break;
5121
5122 default:
5123 gcc_unreachable ();
5124 }
5125
5126 return 0;
5127 }
5128
5129 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5130 or CONST_VECTOR,
5131 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5132
5133 Works by unpacking OP into a collection of 8-bit values
5134 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5135 and then repacking them again for OUTERMODE. */
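/* For example, on a little-endian target a lowpart SUBREG:HI of the
SImode constant 0x12345678 unpacks into the byte sequence 78 56 34 12
(least significant first), selects the two bytes at offset 0, and
repacks them into the HImode constant 0x5678.  */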
5136
5137 static rtx
5138 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5139 enum machine_mode innermode, unsigned int byte)
5140 {
5141 /* We support up to 512-bit values (for V8DFmode). */
5142 enum {
5143 max_bitsize = 512,
5144 value_bit = 8,
5145 value_mask = (1 << value_bit) - 1
5146 };
5147 unsigned char value[max_bitsize / value_bit];
5148 int value_start;
5149 int i;
5150 int elem;
5151
5152 int num_elem;
5153 rtx * elems;
5154 int elem_bitsize;
5155 rtx result_s;
5156 rtvec result_v = NULL;
5157 enum mode_class outer_class;
5158 enum machine_mode outer_submode;
5159
5160 /* Some ports misuse CCmode. */
5161 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5162 return op;
5163
5164 /* We have no way to represent a complex constant at the rtl level. */
5165 if (COMPLEX_MODE_P (outermode))
5166 return NULL_RTX;
5167
5168 /* Unpack the value. */
5169
5170 if (GET_CODE (op) == CONST_VECTOR)
5171 {
5172 num_elem = CONST_VECTOR_NUNITS (op);
5173 elems = &CONST_VECTOR_ELT (op, 0);
5174 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5175 }
5176 else
5177 {
5178 num_elem = 1;
5179 elems = &op;
5180 elem_bitsize = max_bitsize;
5181 }
5182 /* If this asserts, it is too complicated; reducing value_bit may help. */
5183 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5184 /* I don't know how to handle endianness of sub-units. */
5185 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5186
5187 for (elem = 0; elem < num_elem; elem++)
5188 {
5189 unsigned char * vp;
5190 rtx el = elems[elem];
5191
5192 /* Vectors are kept in target memory order. (This is probably
5193 a mistake.) */
5194 {
5195 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5196 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5197 / BITS_PER_UNIT);
5198 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5199 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5200 unsigned bytele = (subword_byte % UNITS_PER_WORD
5201 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5202 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5203 }
5204
5205 switch (GET_CODE (el))
5206 {
5207 case CONST_INT:
5208 for (i = 0;
5209 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5210 i += value_bit)
5211 *vp++ = INTVAL (el) >> i;
5212 /* CONST_INTs are always logically sign-extended. */
5213 for (; i < elem_bitsize; i += value_bit)
5214 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5215 break;
5216
5217 case CONST_DOUBLE:
5218 if (GET_MODE (el) == VOIDmode)
5219 {
5220 unsigned char extend = 0;
5221 /* If this triggers, someone should have generated a
5222 CONST_INT instead. */
5223 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5224
5225 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5226 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5227 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5228 {
5229 *vp++
5230 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5231 i += value_bit;
5232 }
5233
5234 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5235 extend = -1;
5236 for (; i < elem_bitsize; i += value_bit)
5237 *vp++ = extend;
5238 }
5239 else
5240 {
5241 long tmp[max_bitsize / 32];
5242 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5243
5244 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5245 gcc_assert (bitsize <= elem_bitsize);
5246 gcc_assert (bitsize % value_bit == 0);
5247
5248 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5249 GET_MODE (el));
5250
5251 /* real_to_target produces its result in words affected by
5252 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5253 and use WORDS_BIG_ENDIAN instead; see the documentation
5254 of SUBREG in rtl.texi. */
5255 for (i = 0; i < bitsize; i += value_bit)
5256 {
5257 int ibase;
5258 if (WORDS_BIG_ENDIAN)
5259 ibase = bitsize - 1 - i;
5260 else
5261 ibase = i;
5262 *vp++ = tmp[ibase / 32] >> i % 32;
5263 }
5264
5265 /* It shouldn't matter what's done here, so fill it with
5266 zero. */
5267 for (; i < elem_bitsize; i += value_bit)
5268 *vp++ = 0;
5269 }
5270 break;
5271
5272 case CONST_FIXED:
5273 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5274 {
5275 for (i = 0; i < elem_bitsize; i += value_bit)
5276 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5277 }
5278 else
5279 {
5280 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5281 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5282 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5283 i += value_bit)
5284 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5285 >> (i - HOST_BITS_PER_WIDE_INT);
5286 for (; i < elem_bitsize; i += value_bit)
5287 *vp++ = 0;
5288 }
5289 break;
5290
5291 default:
5292 gcc_unreachable ();
5293 }
5294 }
5295
5296 /* Now, pick the right byte to start with. */
5297 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5298 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5299 will already have offset 0. */
5300 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5301 {
5302 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5303 - byte);
5304 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5305 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5306 byte = (subword_byte % UNITS_PER_WORD
5307 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5308 }
5309
5310 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5311 so if it's become negative it will instead be very large.) */
5312 gcc_assert (byte < GET_MODE_SIZE (innermode));
5313
5314 /* Convert from bytes to chunks of size value_bit. */
5315 value_start = byte * (BITS_PER_UNIT / value_bit);
5316
5317 /* Re-pack the value. */
5318
5319 if (VECTOR_MODE_P (outermode))
5320 {
5321 num_elem = GET_MODE_NUNITS (outermode);
5322 result_v = rtvec_alloc (num_elem);
5323 elems = &RTVEC_ELT (result_v, 0);
5324 outer_submode = GET_MODE_INNER (outermode);
5325 }
5326 else
5327 {
5328 num_elem = 1;
5329 elems = &result_s;
5330 outer_submode = outermode;
5331 }
5332
5333 outer_class = GET_MODE_CLASS (outer_submode);
5334 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5335
5336 gcc_assert (elem_bitsize % value_bit == 0);
5337 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5338
5339 for (elem = 0; elem < num_elem; elem++)
5340 {
5341 unsigned char *vp;
5342
5343 /* Vectors are stored in target memory order. (This is probably
5344 a mistake.) */
5345 {
5346 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5347 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5348 / BITS_PER_UNIT);
5349 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5350 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5351 unsigned bytele = (subword_byte % UNITS_PER_WORD
5352 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5353 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5354 }
5355
5356 switch (outer_class)
5357 {
5358 case MODE_INT:
5359 case MODE_PARTIAL_INT:
5360 {
5361 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5362
5363 for (i = 0;
5364 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5365 i += value_bit)
5366 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5367 for (; i < elem_bitsize; i += value_bit)
5368 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5369 << (i - HOST_BITS_PER_WIDE_INT);
5370
5371 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5372 know why. */
5373 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5374 elems[elem] = gen_int_mode (lo, outer_submode);
5375 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5376 elems[elem] = immed_double_const (lo, hi, outer_submode);
5377 else
5378 return NULL_RTX;
5379 }
5380 break;
5381
5382 case MODE_FLOAT:
5383 case MODE_DECIMAL_FLOAT:
5384 {
5385 REAL_VALUE_TYPE r;
5386 long tmp[max_bitsize / 32];
5387
5388 /* real_from_target wants its input in words affected by
5389 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5390 and use WORDS_BIG_ENDIAN instead; see the documentation
5391 of SUBREG in rtl.texi. */
5392 for (i = 0; i < max_bitsize / 32; i++)
5393 tmp[i] = 0;
5394 for (i = 0; i < elem_bitsize; i += value_bit)
5395 {
5396 int ibase;
5397 if (WORDS_BIG_ENDIAN)
5398 ibase = elem_bitsize - 1 - i;
5399 else
5400 ibase = i;
5401 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5402 }
5403
5404 real_from_target (&r, tmp, outer_submode);
5405 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5406 }
5407 break;
5408
5409 case MODE_FRACT:
5410 case MODE_UFRACT:
5411 case MODE_ACCUM:
5412 case MODE_UACCUM:
5413 {
5414 FIXED_VALUE_TYPE f;
5415 f.data.low = 0;
5416 f.data.high = 0;
5417 f.mode = outer_submode;
5418
5419 for (i = 0;
5420 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5421 i += value_bit)
5422 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5423 for (; i < elem_bitsize; i += value_bit)
5424 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5425 << (i - HOST_BITS_PER_WIDE_INT));
5426
5427 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5428 }
5429 break;
5430
5431 default:
5432 gcc_unreachable ();
5433 }
5434 }
5435 if (VECTOR_MODE_P (outermode))
5436 return gen_rtx_CONST_VECTOR (outermode, result_v);
5437 else
5438 return result_s;
5439 }
5440
5441 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5442 Return 0 if no simplifications are possible. */
5443 rtx
5444 simplify_subreg (enum machine_mode outermode, rtx op,
5445 enum machine_mode innermode, unsigned int byte)
5446 {
5447 /* Little bit of sanity checking. */
5448 gcc_assert (innermode != VOIDmode);
5449 gcc_assert (outermode != VOIDmode);
5450 gcc_assert (innermode != BLKmode);
5451 gcc_assert (outermode != BLKmode);
5452
5453 gcc_assert (GET_MODE (op) == innermode
5454 || GET_MODE (op) == VOIDmode);
5455
5456 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5457 gcc_assert (byte < GET_MODE_SIZE (innermode));
5458
5459 if (outermode == innermode && !byte)
5460 return op;
5461
5462 if (CONST_INT_P (op)
5463 || CONST_DOUBLE_P (op)
5464 || GET_CODE (op) == CONST_FIXED
5465 || GET_CODE (op) == CONST_VECTOR)
5466 return simplify_immed_subreg (outermode, op, innermode, byte);
5467
5468 /* Changing mode twice with SUBREG => just change it once,
5469 or not at all if changing back to the op's starting mode. */
5470 if (GET_CODE (op) == SUBREG)
5471 {
5472 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5473 int final_offset = byte + SUBREG_BYTE (op);
5474 rtx newx;
5475
5476 if (outermode == innermostmode
5477 && byte == 0 && SUBREG_BYTE (op) == 0)
5478 return SUBREG_REG (op);
5479
5480 /* The SUBREG_BYTE represents an offset, as if the value were stored
5481 in memory. The irritating exception is a paradoxical subreg, where
5482 we define SUBREG_BYTE to be 0; on big-endian machines, this
5483 value should really be negative. For a moment, undo this exception. */
5484 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5485 {
5486 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5487 if (WORDS_BIG_ENDIAN)
5488 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5489 if (BYTES_BIG_ENDIAN)
5490 final_offset += difference % UNITS_PER_WORD;
5491 }
5492 if (SUBREG_BYTE (op) == 0
5493 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5494 {
5495 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5496 if (WORDS_BIG_ENDIAN)
5497 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5498 if (BYTES_BIG_ENDIAN)
5499 final_offset += difference % UNITS_PER_WORD;
5500 }
5501
5502 /* See whether the resulting subreg will be paradoxical. */
5503 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5504 {
5505 /* In nonparadoxical subregs we can't handle negative offsets. */
5506 if (final_offset < 0)
5507 return NULL_RTX;
5508 /* Bail out in case resulting subreg would be incorrect. */
5509 if (final_offset % GET_MODE_SIZE (outermode)
5510 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5511 return NULL_RTX;
5512 }
5513 else
5514 {
5515 int offset = 0;
5516 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5517
5518 /* In a paradoxical subreg, see if we are still looking at the lower part.
5519 If so, our SUBREG_BYTE will be 0. */
5520 if (WORDS_BIG_ENDIAN)
5521 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5522 if (BYTES_BIG_ENDIAN)
5523 offset += difference % UNITS_PER_WORD;
5524 if (offset == final_offset)
5525 final_offset = 0;
5526 else
5527 return NULL_RTX;
5528 }
5529
5530 /* Recurse for further possible simplifications. */
5531 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5532 final_offset);
5533 if (newx)
5534 return newx;
5535 if (validate_subreg (outermode, innermostmode,
5536 SUBREG_REG (op), final_offset))
5537 {
5538 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5539 if (SUBREG_PROMOTED_VAR_P (op)
5540 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5541 && GET_MODE_CLASS (outermode) == MODE_INT
5542 && IN_RANGE (GET_MODE_SIZE (outermode),
5543 GET_MODE_SIZE (innermode),
5544 GET_MODE_SIZE (innermostmode))
5545 && subreg_lowpart_p (newx))
5546 {
5547 SUBREG_PROMOTED_VAR_P (newx) = 1;
5548 SUBREG_PROMOTED_UNSIGNED_SET
5549 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5550 }
5551 return newx;
5552 }
5553 return NULL_RTX;
5554 }
5555
5556 /* Merge implicit and explicit truncations. */
5557
5558 if (GET_CODE (op) == TRUNCATE
5559 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5560 && subreg_lowpart_offset (outermode, innermode) == byte)
5561 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5562 GET_MODE (XEXP (op, 0)));
5563
5564 /* SUBREG of a hard register => just change the register number
5565 and/or mode. If the hard register is not valid in that mode,
5566 suppress this simplification. If the hard register is the stack,
5567 frame, or argument pointer, leave this as a SUBREG. */
5568
5569 if (REG_P (op) && HARD_REGISTER_P (op))
5570 {
5571 unsigned int regno, final_regno;
5572
5573 regno = REGNO (op);
5574 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5575 if (HARD_REGISTER_NUM_P (final_regno))
5576 {
5577 rtx x;
5578 int final_offset = byte;
5579
5580 /* Adjust offset for paradoxical subregs. */
5581 if (byte == 0
5582 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5583 {
5584 int difference = (GET_MODE_SIZE (innermode)
5585 - GET_MODE_SIZE (outermode));
5586 if (WORDS_BIG_ENDIAN)
5587 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5588 if (BYTES_BIG_ENDIAN)
5589 final_offset += difference % UNITS_PER_WORD;
5590 }
5591
5592 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5593
5594 /* Propagate the original regno. We don't have any way to specify
5595 the offset inside the original regno, so do so only for the lowpart.
5596 The information is used only by alias analysis, which cannot
5597 grok partial registers anyway. */
5598
5599 if (subreg_lowpart_offset (outermode, innermode) == byte)
5600 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5601 return x;
5602 }
5603 }
5604
5605 /* If we have a SUBREG of a register that we are replacing and we are
5606 replacing it with a MEM, make a new MEM and try replacing the
5607 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5608 or if we would be widening it. */
5609
5610 if (MEM_P (op)
5611 && ! mode_dependent_address_p (XEXP (op, 0))
5612 /* Allow splitting of volatile memory references in case we don't
5613 have an instruction to move the whole thing. */
5614 && (! MEM_VOLATILE_P (op)
5615 || ! have_insn_for (SET, innermode))
5616 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5617 return adjust_address_nv (op, outermode, byte);
5618
5619 /* Handle complex values represented as CONCAT
5620 of real and imaginary part. */
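/* For example, with a 4-byte SFmode, (subreg:SF (concat:SC (reg:SF 1)
   (reg:SF 2)) 4) selects the imaginary part and folds to (reg:SF 2);
   the register numbers are illustrative.  */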
5621 if (GET_CODE (op) == CONCAT)
5622 {
5623 unsigned int part_size, final_offset;
5624 rtx part, res;
5625
5626 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5627 if (byte < part_size)
5628 {
5629 part = XEXP (op, 0);
5630 final_offset = byte;
5631 }
5632 else
5633 {
5634 part = XEXP (op, 1);
5635 final_offset = byte - part_size;
5636 }
5637
5638 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5639 return NULL_RTX;
5640
5641 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5642 if (res)
5643 return res;
5644 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5645 return gen_rtx_SUBREG (outermode, part, final_offset);
5646 return NULL_RTX;
5647 }
5648
5649 /* Optimize SUBREG truncations of zero and sign extended values. */
5650 if ((GET_CODE (op) == ZERO_EXTEND
5651 || GET_CODE (op) == SIGN_EXTEND)
5652 && SCALAR_INT_MODE_P (innermode)
5653 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5654 {
5655 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5656
5657 /* If we're requesting the lowpart of a zero or sign extension,
5658 there are three possibilities. If the outermode is the same
5659 as the origmode, we can omit both the extension and the subreg.
5660 If the outermode is not larger than the origmode, we can apply
5661 the truncation without the extension. Finally, if the outermode
5662 is larger than the origmode, but both are integer modes, we
5663 can just extend to the appropriate mode. */
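/* Concretely, assuming a little-endian lowpart at byte 0:
   (subreg:HI (zero_extend:SI (reg:HI 1)) 0) folds to (reg:HI 1),
   (subreg:QI (sign_extend:SI (reg:HI 1)) 0) to (subreg:QI (reg:HI 1) 0),
   and (subreg:HI (zero_extend:SI (reg:QI 1)) 0) to
   (zero_extend:HI (reg:QI 1)).  */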
5664 if (bitpos == 0)
5665 {
5666 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5667 if (outermode == origmode)
5668 return XEXP (op, 0);
5669 if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5670 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5671 subreg_lowpart_offset (outermode,
5672 origmode));
5673 if (SCALAR_INT_MODE_P (outermode))
5674 return simplify_gen_unary (GET_CODE (op), outermode,
5675 XEXP (op, 0), origmode);
5676 }
5677
5678 /* A SUBREG resulting from a zero extension may fold to zero if
5679 it extracts higher bits than the ZERO_EXTEND's source bits. */
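/* For instance, (subreg:QI (zero_extend:SI (reg:QI 1)) 2) extracts
   bits 16-23 of the zero extension, which are known to be zero, so it
   folds to (const_int 0) on a little-endian target.  */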
5680 if (GET_CODE (op) == ZERO_EXTEND
5681 && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5682 return CONST0_RTX (outermode);
5683 }
5684
5685 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
5686 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5687 the outer subreg is effectively a truncation to the original mode. */
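/* For example, (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI 1))
   (const_int 3)) 0) becomes (ashiftrt:QI (reg:QI 1) (const_int 3)),
   assuming byte 0 is the lowpart.  */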
5688 if ((GET_CODE (op) == LSHIFTRT
5689 || GET_CODE (op) == ASHIFTRT)
5690 && SCALAR_INT_MODE_P (outermode)
5691 && SCALAR_INT_MODE_P (innermode)
5692 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5693 to avoid the possibility that an outer LSHIFTRT shifts by more
5694 than the sign extension's sign_bit_copies and introduces zeros
5695 into the high bits of the result. */
5696 && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5697 && CONST_INT_P (XEXP (op, 1))
5698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5700 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5701 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5702 return simplify_gen_binary (ASHIFTRT, outermode,
5703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5704
5705 /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
5706 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5707 the outer subreg is effectively a truncation to the original mode. */
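/* E.g. (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI 1))
   (const_int 3)) 0) becomes (lshiftrt:QI (reg:QI 1) (const_int 3)).  */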
5708 if ((GET_CODE (op) == LSHIFTRT
5709 || GET_CODE (op) == ASHIFTRT)
5710 && SCALAR_INT_MODE_P (outermode)
5711 && SCALAR_INT_MODE_P (innermode)
5712 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5713 && CONST_INT_P (XEXP (op, 1))
5714 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5715 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5716 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5717 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5718 return simplify_gen_binary (LSHIFTRT, outermode,
5719 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5720
5721 /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
5722 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5723 the outer subreg is effectively a truncation to the original mode. */
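/* E.g. (subreg:QI (ashift:SI (zero_extend:SI (reg:QI 1))
   (const_int 3)) 0) becomes (ashift:QI (reg:QI 1) (const_int 3)).  */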
5724 if (GET_CODE (op) == ASHIFT
5725 && SCALAR_INT_MODE_P (outermode)
5726 && SCALAR_INT_MODE_P (innermode)
5727 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5728 && CONST_INT_P (XEXP (op, 1))
5729 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5730 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5731 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5732 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5733 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5734 return simplify_gen_binary (ASHIFT, outermode,
5735 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5736
5737 /* Recognize a word extraction from a multi-word subreg. */
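/* For instance, with 32-bit words and a little-endian layout,
   (subreg:SI (lshiftrt:DI (reg:DI 1) (const_int 32)) 0) becomes
   (subreg:SI (reg:DI 1) 4), i.e. the high word of the DImode value.  */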
5738 if ((GET_CODE (op) == LSHIFTRT
5739 || GET_CODE (op) == ASHIFTRT)
5740 && SCALAR_INT_MODE_P (innermode)
5741 && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5742 && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5743 && CONST_INT_P (XEXP (op, 1))
5744 && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5745 && INTVAL (XEXP (op, 1)) >= 0
5746 && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5747 && byte == subreg_lowpart_offset (outermode, innermode))
5748 {
5749 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5750 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5751 (WORDS_BIG_ENDIAN
5752 ? byte - shifted_bytes
5753 : byte + shifted_bytes));
5754 }
5755
5756 /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5757 and try replacing the SUBREG and shift with it. Don't do this if
5758 the MEM has a mode-dependent address or if we would be widening it. */
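/* For instance, on a 32-bit little-endian target,
   (subreg:SI (lshiftrt:DI (mem:DI A) (const_int 32)) 0) can be
   rewritten as an SImode load four bytes into A, where A stands for
   some non-volatile, non-mode-dependent address.  */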
5759
5760 if ((GET_CODE (op) == LSHIFTRT
5761 || GET_CODE (op) == ASHIFTRT)
5762 && SCALAR_INT_MODE_P (innermode)
5763 && MEM_P (XEXP (op, 0))
5764 && CONST_INT_P (XEXP (op, 1))
5765 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5766 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5767 && INTVAL (XEXP (op, 1)) > 0
5768 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5769 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5770 && ! MEM_VOLATILE_P (XEXP (op, 0))
5771 && byte == subreg_lowpart_offset (outermode, innermode)
5772 && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5773 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5774 {
5775 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5776 return adjust_address_nv (XEXP (op, 0), outermode,
5777 (WORDS_BIG_ENDIAN
5778 ? byte - shifted_bytes
5779 : byte + shifted_bytes));
5780 }
5781
5782 return NULL_RTX;
5783 }
5784
5785 /* Make a SUBREG operation or equivalent if it folds. */
5786
5787 rtx
5788 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5789 enum machine_mode innermode, unsigned int byte)
5790 {
5791 rtx newx;
5792
5793 newx = simplify_subreg (outermode, op, innermode, byte);
5794 if (newx)
5795 return newx;
5796
5797 if (GET_CODE (op) == SUBREG
5798 || GET_CODE (op) == CONCAT
5799 || GET_MODE (op) == VOIDmode)
5800 return NULL_RTX;
5801
5802 if (validate_subreg (outermode, innermode, op, byte))
5803 return gen_rtx_SUBREG (outermode, op, byte);
5804
5805 return NULL_RTX;
5806 }
5807
5808 /* Simplify X, an rtx expression.
5809
5810 Return the simplified expression or NULL if no simplifications
5811 were possible.
5812
5813 This is the preferred entry point into the simplification routines;
5814 however, we still allow passes to call the more specific routines.
5815
5816 Right now GCC has three (yes, three) major bodies of RTL simplification
5817 code that need to be unified.
5818
5819 1. fold_rtx in cse.c. This code uses various CSE specific
5820 information to aid in RTL simplification.
5821
5822 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5823 it uses combine specific information to aid in RTL
5824 simplification.
5825
5826 3. The routines in this file.
5827
5828
5829 Long term we want to only have one body of simplification code; to
5830 get to that state I recommend the following steps:
5831
5832 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5833 which do not depend on pass-specific state into these routines.
5834
5835 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5836 use this routine whenever possible.
5837
5838 3. Allow for pass dependent state to be provided to these
5839 routines and add simplifications based on the pass dependent
5840 state. Remove code from cse.c & combine.c that becomes
5841 redundant/dead.
5842
5843 It will take time, but ultimately the compiler will be easier to
5844 maintain and improve. It's totally silly that when we add a
5845 simplification it needs to be added to 4 places (3 for RTL
5846 simplification and 1 for tree simplification). */
5847
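/* A typical (hypothetical) caller does something like:

       rtx tem = simplify_rtx (x);
       if (tem)
         x = tem;

   i.e. it must be prepared for a NULL return when nothing folds.  */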
5848 rtx
5849 simplify_rtx (const_rtx x)
5850 {
5851 const enum rtx_code code = GET_CODE (x);
5852 const enum machine_mode mode = GET_MODE (x);
5853
5854 switch (GET_RTX_CLASS (code))
5855 {
5856 case RTX_UNARY:
5857 return simplify_unary_operation (code, mode,
5858 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5859 case RTX_COMM_ARITH:
5860 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5861 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5862
5863 /* Fall through. */
5864
5865 case RTX_BIN_ARITH:
5866 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5867
5868 case RTX_TERNARY:
5869 case RTX_BITFIELD_OPS:
5870 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5871 XEXP (x, 0), XEXP (x, 1),
5872 XEXP (x, 2));
5873
5874 case RTX_COMPARE:
5875 case RTX_COMM_COMPARE:
5876 return simplify_relational_operation (code, mode,
5877 ((GET_MODE (XEXP (x, 0))
5878 != VOIDmode)
5879 ? GET_MODE (XEXP (x, 0))
5880 : GET_MODE (XEXP (x, 1))),
5881 XEXP (x, 0),
5882 XEXP (x, 1));
5883
5884 case RTX_EXTRA:
5885 if (code == SUBREG)
5886 return simplify_subreg (mode, SUBREG_REG (x),
5887 GET_MODE (SUBREG_REG (x)),
5888 SUBREG_BYTE (x));
5889 break;
5890
5891 case RTX_OBJ:
5892 if (code == LO_SUM)
5893 {
5894 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5895 if (GET_CODE (XEXP (x, 0)) == HIGH
5896 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5897 return XEXP (x, 1);
5898 }
5899 break;
5900
5901 default:
5902 break;
5903 }
5904 return NULL;
5905 }