1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
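/* For example, if LOW has its sign bit set, HWI_SIGN_EXTEND (LOW) is -1
   (all ones), otherwise it is 0, so the pair (LOW, HWI_SIGN_EXTEND (LOW))
   represents LOW sign-extended to twice the host-wide-int width.  */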
51
52 static rtx neg_const_int (enum machine_mode, const_rtx);
53 static bool plus_minus_operand_p (const_rtx);
54 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
65 \f
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, const_rtx i)
70 {
71 return gen_int_mode (- INTVAL (i), mode);
72 }
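/* For instance, negating QImode (const_int -128) yields (const_int -128)
   again: 128 does not fit in QImode, so gen_int_mode truncates it back
   to -128.  */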
73
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
76
77 bool
78 mode_signbit_p (enum machine_mode mode, const_rtx x)
79 {
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
82
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
85
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
89
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && CONST_INT_P (x))
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
96 {
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
99 }
100 else
101 return false;
102
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 }
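/* For example, for a 32-bit integer mode this accepts a constant whose low
   32 bits are 0x80000000 and rejects everything else; bits above the mode's
   width are masked off before the comparison.  */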
107 \f
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
110
111 rtx
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
114 {
115 rtx tem;
116
117 /* If this simplifies, do it. */
118 tem = simplify_binary_operation (code, mode, op0, op1);
119 if (tem)
120 return tem;
121
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0, op1))
125 tem = op0, op0 = op1, op1 = tem;
126
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
128 }
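/* For example, simplify_gen_binary (PLUS, SImode, reg, const0_rtx) returns
   REG itself rather than building (plus:SI reg (const_int 0)), because the
   addition folds in simplify_binary_operation.  */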
129 \f
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
132 rtx
133 avoid_constant_pool_reference (rtx x)
134 {
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
138
139 switch (GET_CODE (x))
140 {
141 case MEM:
142 break;
143
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
149 {
150 REAL_VALUE_TYPE d;
151
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
154 }
155 return x;
156
157 default:
158 return x;
159 }
160
161 if (GET_MODE (x) == BLKmode)
162 return x;
163
164 addr = XEXP (x, 0);
165
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr = targetm.delegitimize_address (addr);
168
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr) == CONST
171 && GET_CODE (XEXP (addr, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
173 {
174 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
175 addr = XEXP (XEXP (addr, 0), 0);
176 }
177
178 if (GET_CODE (addr) == LO_SUM)
179 addr = XEXP (addr, 1);
180
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr))
185 {
186 c = get_pool_constant (addr);
187 cmode = get_pool_mode (addr);
188
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset != 0 || cmode != GET_MODE (x))
193 {
194 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
195 if (tem && CONSTANT_P (tem))
196 return tem;
197 }
198 else
199 return c;
200 }
201
202 return x;
203 }
204 \f
205 /* Simplify a MEM based on its attributes. This is the default
206 delegitimize_address target hook, and it's recommended that every
207 overrider call it. */
208
209 rtx
210 delegitimize_mem_from_attrs (rtx x)
211 {
212 if (MEM_P (x)
213 && MEM_EXPR (x)
214 && (!MEM_OFFSET (x)
215 || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
216 {
217 tree decl = MEM_EXPR (x);
218 enum machine_mode mode = GET_MODE (x);
219 HOST_WIDE_INT offset = 0;
220
221 switch (TREE_CODE (decl))
222 {
223 default:
224 decl = NULL;
225 break;
226
227 case VAR_DECL:
228 break;
229
230 case ARRAY_REF:
231 case ARRAY_RANGE_REF:
232 case COMPONENT_REF:
233 case BIT_FIELD_REF:
234 case REALPART_EXPR:
235 case IMAGPART_EXPR:
236 case VIEW_CONVERT_EXPR:
237 {
238 HOST_WIDE_INT bitsize, bitpos;
239 tree toffset;
240 int unsignedp = 0, volatilep = 0;
241
242 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
243 &mode, &unsignedp, &volatilep, false);
244 if (bitsize != GET_MODE_BITSIZE (mode)
245 || (bitpos % BITS_PER_UNIT)
246 || (toffset && !host_integerp (toffset, 0)))
247 decl = NULL;
248 else
249 {
250 offset += bitpos / BITS_PER_UNIT;
251 if (toffset)
252 offset += TREE_INT_CST_LOW (toffset);
253 }
254 break;
255 }
256 }
257
258 if (decl
259 && mode == GET_MODE (x)
260 && TREE_CODE (decl) == VAR_DECL
261 && (TREE_STATIC (decl)
262 || DECL_THREAD_LOCAL_P (decl))
263 && DECL_RTL_SET_P (decl)
264 && MEM_P (DECL_RTL (decl)))
265 {
266 rtx newx;
267
268 if (MEM_OFFSET (x))
269 offset += INTVAL (MEM_OFFSET (x));
270
271 newx = DECL_RTL (decl);
272
273 if (MEM_P (newx))
274 {
275 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
276
277 /* Avoid creating a new MEM needlessly if we already had
278 			 the same address.  We do so if there's no OFFSET and the
279 old address X is identical to NEWX, or if X is of the
280 form (plus NEWX OFFSET), or the NEWX is of the form
281 (plus Y (const_int Z)) and X is that with the offset
282 added: (plus Y (const_int Z+OFFSET)). */
283 if (!((offset == 0
284 || (GET_CODE (o) == PLUS
285 && GET_CODE (XEXP (o, 1)) == CONST_INT
286 && (offset == INTVAL (XEXP (o, 1))
287 || (GET_CODE (n) == PLUS
288 && GET_CODE (XEXP (n, 1)) == CONST_INT
289 && (INTVAL (XEXP (n, 1)) + offset
290 == INTVAL (XEXP (o, 1)))
291 && (n = XEXP (n, 0))))
292 && (o = XEXP (o, 0))))
293 && rtx_equal_p (o, n)))
294 x = adjust_address_nv (newx, mode, offset);
295 }
296 else if (GET_MODE (x) == GET_MODE (newx)
297 && offset == 0)
298 x = newx;
299 }
300 }
301
302 return x;
303 }
304 \f
305 /* Make a unary operation by first seeing if it folds and otherwise making
306 the specified operation. */
307
308 rtx
309 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
310 enum machine_mode op_mode)
311 {
312 rtx tem;
313
314 /* If this simplifies, use it. */
315 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
316 return tem;
317
318 return gen_rtx_fmt_e (code, mode, op);
319 }
320
321 /* Likewise for ternary operations. */
322
323 rtx
324 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
325 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
326 {
327 rtx tem;
328
329 /* If this simplifies, use it. */
330 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
331 op0, op1, op2)))
332 return tem;
333
334 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
335 }
336
337 /* Likewise, for relational operations.
338 CMP_MODE specifies mode comparison is done in. */
339
340 rtx
341 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
342 enum machine_mode cmp_mode, rtx op0, rtx op1)
343 {
344 rtx tem;
345
346 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
347 op0, op1)))
348 return tem;
349
350 return gen_rtx_fmt_ee (code, mode, op0, op1);
351 }
352 \f
353 /* Replace all occurrences of OLD_RTX in X with FN (X', DATA), where X'
354 is an expression in X that is equal to OLD_RTX. Canonicalize and
355 simplify the result.
356
357 If FN is null, assume FN (X', DATA) == copy_rtx (DATA). */
358
359 rtx
360 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
361 rtx (*fn) (rtx, void *), void *data)
362 {
363 enum rtx_code code = GET_CODE (x);
364 enum machine_mode mode = GET_MODE (x);
365 enum machine_mode op_mode;
366 const char *fmt;
367 rtx op0, op1, op2, newx, op;
368 rtvec vec, newvec;
369 int i, j;
370
371   /* If X is OLD_RTX, return FN (X, DATA), or a copy of DATA if FN is null.
372      Otherwise, if this is an expression, try to build a new expression by
373      substituting recursively.  If we can't do anything, return our input.  */
374
375 if (rtx_equal_p (x, old_rtx))
376 {
377 if (fn)
378 return fn (x, data);
379 else
380 return copy_rtx ((rtx) data);
381 }
382
383 switch (GET_RTX_CLASS (code))
384 {
385 case RTX_UNARY:
386 op0 = XEXP (x, 0);
387 op_mode = GET_MODE (op0);
388 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
389 if (op0 == XEXP (x, 0))
390 return x;
391 return simplify_gen_unary (code, mode, op0, op_mode);
392
393 case RTX_BIN_ARITH:
394 case RTX_COMM_ARITH:
395 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
396 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
397 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
398 return x;
399 return simplify_gen_binary (code, mode, op0, op1);
400
401 case RTX_COMPARE:
402 case RTX_COMM_COMPARE:
403 op0 = XEXP (x, 0);
404 op1 = XEXP (x, 1);
405 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
406 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
407 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
408 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
409 return x;
410 return simplify_gen_relational (code, mode, op_mode, op0, op1);
411
412 case RTX_TERNARY:
413 case RTX_BITFIELD_OPS:
414 op0 = XEXP (x, 0);
415 op_mode = GET_MODE (op0);
416 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
417 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
418 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
419 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
420 return x;
421 if (op_mode == VOIDmode)
422 op_mode = GET_MODE (op0);
423 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
424
425 case RTX_EXTRA:
426 if (code == SUBREG)
427 {
428 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
429 if (op0 == SUBREG_REG (x))
430 return x;
431 op0 = simplify_gen_subreg (GET_MODE (x), op0,
432 GET_MODE (SUBREG_REG (x)),
433 SUBREG_BYTE (x));
434 return op0 ? op0 : x;
435 }
436 break;
437
438 case RTX_OBJ:
439 if (code == MEM)
440 {
441 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
442 if (op0 == XEXP (x, 0))
443 return x;
444 return replace_equiv_address_nv (x, op0);
445 }
446 else if (code == LO_SUM)
447 {
448 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
449 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
450
451 /* (lo_sum (high x) x) -> x */
452 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
453 return op1;
454
455 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
456 return x;
457 return gen_rtx_LO_SUM (mode, op0, op1);
458 }
459 break;
460
461 default:
462 break;
463 }
464
465 newx = x;
466 fmt = GET_RTX_FORMAT (code);
467 for (i = 0; fmt[i]; i++)
468 switch (fmt[i])
469 {
470 case 'E':
471 vec = XVEC (x, i);
472 newvec = XVEC (newx, i);
473 for (j = 0; j < GET_NUM_ELEM (vec); j++)
474 {
475 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
476 old_rtx, fn, data);
477 if (op != RTVEC_ELT (vec, j))
478 {
479 if (newvec == vec)
480 {
481 newvec = shallow_copy_rtvec (vec);
482 if (x == newx)
483 newx = shallow_copy_rtx (x);
484 XVEC (newx, i) = newvec;
485 }
486 RTVEC_ELT (newvec, j) = op;
487 }
488 }
489 break;
490
491 case 'e':
492 if (XEXP (x, i))
493 {
494 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
495 if (op != XEXP (x, i))
496 {
497 if (x == newx)
498 newx = shallow_copy_rtx (x);
499 XEXP (newx, i) = op;
500 }
501 }
502 break;
503 }
504 return newx;
505 }
506
507 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
508 resulting RTX. Return a new RTX which is as simplified as possible. */
509
510 rtx
511 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
512 {
513 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
514 }
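/* For example, replacing (reg A) with (reg B) in (plus (reg A) (const_int 4))
   yields (plus (reg B) (const_int 4)), with any simplifications enabled by
   the new operand applied along the way.  */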
515 \f
516 /* Try to simplify a unary operation CODE whose output mode is to be
517 MODE with input operand OP whose mode was originally OP_MODE.
518 Return zero if no simplification can be made. */
519 rtx
520 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
521 rtx op, enum machine_mode op_mode)
522 {
523 rtx trueop, tem;
524
525 trueop = avoid_constant_pool_reference (op);
526
527 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
528 if (tem)
529 return tem;
530
531 return simplify_unary_operation_1 (code, mode, op);
532 }
533
534 /* Perform some simplifications we can do even if the operands
535 aren't constant. */
536 static rtx
537 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
538 {
539 enum rtx_code reversed;
540 rtx temp;
541
542 switch (code)
543 {
544 case NOT:
545 /* (not (not X)) == X. */
546 if (GET_CODE (op) == NOT)
547 return XEXP (op, 0);
548
549 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
550 comparison is all ones. */
551 if (COMPARISON_P (op)
552 && (mode == BImode || STORE_FLAG_VALUE == -1)
553 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
554 return simplify_gen_relational (reversed, mode, VOIDmode,
555 XEXP (op, 0), XEXP (op, 1));
556
557 /* (not (plus X -1)) can become (neg X). */
558 if (GET_CODE (op) == PLUS
559 && XEXP (op, 1) == constm1_rtx)
560 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
561
562 /* Similarly, (not (neg X)) is (plus X -1). */
563 if (GET_CODE (op) == NEG)
564 return plus_constant (XEXP (op, 0), -1);
565
566 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
567 if (GET_CODE (op) == XOR
568 && CONST_INT_P (XEXP (op, 1))
569 && (temp = simplify_unary_operation (NOT, mode,
570 XEXP (op, 1), mode)) != 0)
571 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
572
573 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
574 if (GET_CODE (op) == PLUS
575 && CONST_INT_P (XEXP (op, 1))
576 && mode_signbit_p (mode, XEXP (op, 1))
577 && (temp = simplify_unary_operation (NOT, mode,
578 XEXP (op, 1), mode)) != 0)
579 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
580
581
582 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
583 operands other than 1, but that is not valid. We could do a
584 similar simplification for (not (lshiftrt C X)) where C is
585 just the sign bit, but this doesn't seem common enough to
586 bother with. */
587 if (GET_CODE (op) == ASHIFT
588 && XEXP (op, 0) == const1_rtx)
589 {
590 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
591 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
592 }
593
594 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
595 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
596 so we can perform the above simplification. */
597
598 if (STORE_FLAG_VALUE == -1
599 && GET_CODE (op) == ASHIFTRT
600 	  && CONST_INT_P (XEXP (op, 1))
601 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
602 return simplify_gen_relational (GE, mode, VOIDmode,
603 XEXP (op, 0), const0_rtx);
604
605
606 if (GET_CODE (op) == SUBREG
607 && subreg_lowpart_p (op)
608 && (GET_MODE_SIZE (GET_MODE (op))
609 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
610 && GET_CODE (SUBREG_REG (op)) == ASHIFT
611 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
612 {
613 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
614 rtx x;
615
616 x = gen_rtx_ROTATE (inner_mode,
617 simplify_gen_unary (NOT, inner_mode, const1_rtx,
618 inner_mode),
619 XEXP (SUBREG_REG (op), 1));
620 return rtl_hooks.gen_lowpart_no_emit (mode, x);
621 }
622
623 /* Apply De Morgan's laws to reduce number of patterns for machines
624 with negating logical insns (and-not, nand, etc.). If result has
625 only one NOT, put it first, since that is how the patterns are
626 coded. */
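      /* Concretely, (not (and X Y)) becomes (ior (not X) (not Y)) and
	 (not (ior X Y)) becomes (and (not X) (not Y)).  */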
627
628 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
629 {
630 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
631 enum machine_mode op_mode;
632
633 op_mode = GET_MODE (in1);
634 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
635
636 op_mode = GET_MODE (in2);
637 if (op_mode == VOIDmode)
638 op_mode = mode;
639 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
640
641 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
642 {
643 rtx tem = in2;
644 in2 = in1; in1 = tem;
645 }
646
647 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
648 mode, in1, in2);
649 }
650 break;
651
652 case NEG:
653 /* (neg (neg X)) == X. */
654 if (GET_CODE (op) == NEG)
655 return XEXP (op, 0);
656
657 /* (neg (plus X 1)) can become (not X). */
658 if (GET_CODE (op) == PLUS
659 && XEXP (op, 1) == const1_rtx)
660 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
661
662 /* Similarly, (neg (not X)) is (plus X 1). */
663 if (GET_CODE (op) == NOT)
664 return plus_constant (XEXP (op, 0), 1);
665
666 /* (neg (minus X Y)) can become (minus Y X). This transformation
667 isn't safe for modes with signed zeros, since if X and Y are
668 both +0, (minus Y X) is the same as (minus X Y). If the
669 rounding mode is towards +infinity (or -infinity) then the two
670 expressions will be rounded differently. */
671 if (GET_CODE (op) == MINUS
672 && !HONOR_SIGNED_ZEROS (mode)
673 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
674 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
675
676 if (GET_CODE (op) == PLUS
677 && !HONOR_SIGNED_ZEROS (mode)
678 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
679 {
680 /* (neg (plus A C)) is simplified to (minus -C A). */
681 if (CONST_INT_P (XEXP (op, 1))
682 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
683 {
684 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
685 if (temp)
686 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
687 }
688
689 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
690 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
691 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
692 }
693
694 /* (neg (mult A B)) becomes (mult (neg A) B).
695 This works even for floating-point values. */
696 if (GET_CODE (op) == MULT
697 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
698 {
699 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
700 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
701 }
702
703 /* NEG commutes with ASHIFT since it is multiplication. Only do
704 this if we can then eliminate the NEG (e.g., if the operand
705 is a constant). */
706 if (GET_CODE (op) == ASHIFT)
707 {
708 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
709 if (temp)
710 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
711 }
712
713 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
714 C is equal to the width of MODE minus 1. */
715 if (GET_CODE (op) == ASHIFTRT
716 && CONST_INT_P (XEXP (op, 1))
717 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
718 return simplify_gen_binary (LSHIFTRT, mode,
719 XEXP (op, 0), XEXP (op, 1));
720
721 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
722 C is equal to the width of MODE minus 1. */
723 if (GET_CODE (op) == LSHIFTRT
724 && CONST_INT_P (XEXP (op, 1))
725 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
726 return simplify_gen_binary (ASHIFTRT, mode,
727 XEXP (op, 0), XEXP (op, 1));
728
729 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
730 if (GET_CODE (op) == XOR
731 && XEXP (op, 1) == const1_rtx
732 && nonzero_bits (XEXP (op, 0), mode) == 1)
733 return plus_constant (XEXP (op, 0), -1);
734
735 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
736 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
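      /* For instance, with STORE_FLAG_VALUE == 1 and X in SImode, a negated
	 (lt X (const_int 0)) in SImode becomes (ashiftrt:SI X (const_int 31)).  */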
737 if (GET_CODE (op) == LT
738 && XEXP (op, 1) == const0_rtx
739 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
740 {
741 enum machine_mode inner = GET_MODE (XEXP (op, 0));
742 int isize = GET_MODE_BITSIZE (inner);
743 if (STORE_FLAG_VALUE == 1)
744 {
745 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
746 GEN_INT (isize - 1));
747 if (mode == inner)
748 return temp;
749 if (GET_MODE_BITSIZE (mode) > isize)
750 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
751 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
752 }
753 else if (STORE_FLAG_VALUE == -1)
754 {
755 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
756 GEN_INT (isize - 1));
757 if (mode == inner)
758 return temp;
759 if (GET_MODE_BITSIZE (mode) > isize)
760 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
761 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
762 }
763 }
764 break;
765
766 case TRUNCATE:
767 /* We can't handle truncation to a partial integer mode here
768 because we don't know the real bitsize of the partial
769 integer mode. */
770 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
771 break;
772
773 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
774 if ((GET_CODE (op) == SIGN_EXTEND
775 || GET_CODE (op) == ZERO_EXTEND)
776 && GET_MODE (XEXP (op, 0)) == mode)
777 return XEXP (op, 0);
778
779 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
780 (OP:SI foo:SI) if OP is NEG or ABS. */
781 if ((GET_CODE (op) == ABS
782 || GET_CODE (op) == NEG)
783 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
784 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
785 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
786 return simplify_gen_unary (GET_CODE (op), mode,
787 XEXP (XEXP (op, 0), 0), mode);
788
789 /* (truncate:A (subreg:B (truncate:C X) 0)) is
790 (truncate:A X). */
791 if (GET_CODE (op) == SUBREG
792 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
793 && subreg_lowpart_p (op))
794 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
795 GET_MODE (XEXP (SUBREG_REG (op), 0)));
796
797 /* If we know that the value is already truncated, we can
798 replace the TRUNCATE with a SUBREG. Note that this is also
799 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
800 	 modes; we just have to apply a different definition for
801 truncation. But don't do this for an (LSHIFTRT (MULT ...))
802 since this will cause problems with the umulXi3_highpart
803 patterns. */
804 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
805 GET_MODE_BITSIZE (GET_MODE (op)))
806 ? (num_sign_bit_copies (op, GET_MODE (op))
807 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
808 - GET_MODE_BITSIZE (mode)))
809 : truncated_to_mode (mode, op))
810 && ! (GET_CODE (op) == LSHIFTRT
811 && GET_CODE (XEXP (op, 0)) == MULT))
812 return rtl_hooks.gen_lowpart_no_emit (mode, op);
813
814 /* A truncate of a comparison can be replaced with a subreg if
815 STORE_FLAG_VALUE permits. This is like the previous test,
816 but it works even if the comparison is done in a mode larger
817 than HOST_BITS_PER_WIDE_INT. */
818 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
819 && COMPARISON_P (op)
820 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
821 return rtl_hooks.gen_lowpart_no_emit (mode, op);
822 break;
823
824 case FLOAT_TRUNCATE:
825 if (DECIMAL_FLOAT_MODE_P (mode))
826 break;
827
828 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
829 if (GET_CODE (op) == FLOAT_EXTEND
830 && GET_MODE (XEXP (op, 0)) == mode)
831 return XEXP (op, 0);
832
833 /* (float_truncate:SF (float_truncate:DF foo:XF))
834 = (float_truncate:SF foo:XF).
835 This may eliminate double rounding, so it is unsafe.
836
837 (float_truncate:SF (float_extend:XF foo:DF))
838 = (float_truncate:SF foo:DF).
839
840 (float_truncate:DF (float_extend:XF foo:SF))
841 	 = (float_extend:DF foo:SF).  */
842 if ((GET_CODE (op) == FLOAT_TRUNCATE
843 && flag_unsafe_math_optimizations)
844 || GET_CODE (op) == FLOAT_EXTEND)
845 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
846 0)))
847 > GET_MODE_SIZE (mode)
848 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
849 mode,
850 XEXP (op, 0), mode);
851
852 /* (float_truncate (float x)) is (float x) */
853 if (GET_CODE (op) == FLOAT
854 && (flag_unsafe_math_optimizations
855 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
856 && ((unsigned)significand_size (GET_MODE (op))
857 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
858 - num_sign_bit_copies (XEXP (op, 0),
859 GET_MODE (XEXP (op, 0))))))))
860 return simplify_gen_unary (FLOAT, mode,
861 XEXP (op, 0),
862 GET_MODE (XEXP (op, 0)));
863
864       /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
865 (OP:SF foo:SF) if OP is NEG or ABS. */
866 if ((GET_CODE (op) == ABS
867 || GET_CODE (op) == NEG)
868 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
869 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
870 return simplify_gen_unary (GET_CODE (op), mode,
871 XEXP (XEXP (op, 0), 0), mode);
872
873 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
874 	 is (float_truncate:SF X).  */
875 if (GET_CODE (op) == SUBREG
876 && subreg_lowpart_p (op)
877 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
878 return SUBREG_REG (op);
879 break;
880
881 case FLOAT_EXTEND:
882 if (DECIMAL_FLOAT_MODE_P (mode))
883 break;
884
885 /* (float_extend (float_extend x)) is (float_extend x)
886
887 (float_extend (float x)) is (float x) assuming that double
888 rounding can't happen.
889 */
890 if (GET_CODE (op) == FLOAT_EXTEND
891 || (GET_CODE (op) == FLOAT
892 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
893 && ((unsigned)significand_size (GET_MODE (op))
894 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
895 - num_sign_bit_copies (XEXP (op, 0),
896 GET_MODE (XEXP (op, 0)))))))
897 return simplify_gen_unary (GET_CODE (op), mode,
898 XEXP (op, 0),
899 GET_MODE (XEXP (op, 0)));
900
901 break;
902
903 case ABS:
904 /* (abs (neg <foo>)) -> (abs <foo>) */
905 if (GET_CODE (op) == NEG)
906 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
907 GET_MODE (XEXP (op, 0)));
908
909 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
910 do nothing. */
911 if (GET_MODE (op) == VOIDmode)
912 break;
913
914 /* If operand is something known to be positive, ignore the ABS. */
915 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
916 || ((GET_MODE_BITSIZE (GET_MODE (op))
917 <= HOST_BITS_PER_WIDE_INT)
918 && ((nonzero_bits (op, GET_MODE (op))
919 & ((HOST_WIDE_INT) 1
920 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
921 == 0)))
922 return op;
923
924 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
925 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
926 return gen_rtx_NEG (mode, op);
927
928 break;
929
930 case FFS:
931 /* (ffs (*_extend <X>)) = (ffs <X>) */
932 if (GET_CODE (op) == SIGN_EXTEND
933 || GET_CODE (op) == ZERO_EXTEND)
934 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
935 GET_MODE (XEXP (op, 0)));
936 break;
937
938 case POPCOUNT:
939 switch (GET_CODE (op))
940 {
941 case BSWAP:
942 case ZERO_EXTEND:
943 /* (popcount (zero_extend <X>)) = (popcount <X>) */
944 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
945 GET_MODE (XEXP (op, 0)));
946
947 case ROTATE:
948 case ROTATERT:
949 /* Rotations don't affect popcount. */
950 if (!side_effects_p (XEXP (op, 1)))
951 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
952 GET_MODE (XEXP (op, 0)));
953 break;
954
955 default:
956 break;
957 }
958 break;
959
960 case PARITY:
961 switch (GET_CODE (op))
962 {
963 case NOT:
964 case BSWAP:
965 case ZERO_EXTEND:
966 case SIGN_EXTEND:
967 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
968 GET_MODE (XEXP (op, 0)));
969
970 case ROTATE:
971 case ROTATERT:
972 /* Rotations don't affect parity. */
973 if (!side_effects_p (XEXP (op, 1)))
974 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
975 GET_MODE (XEXP (op, 0)));
976 break;
977
978 default:
979 break;
980 }
981 break;
982
983 case BSWAP:
984 /* (bswap (bswap x)) -> x. */
985 if (GET_CODE (op) == BSWAP)
986 return XEXP (op, 0);
987 break;
988
989 case FLOAT:
990 /* (float (sign_extend <X>)) = (float <X>). */
991 if (GET_CODE (op) == SIGN_EXTEND)
992 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
993 GET_MODE (XEXP (op, 0)));
994 break;
995
996 case SIGN_EXTEND:
997 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
998 becomes just the MINUS if its mode is MODE. This allows
999 folding switch statements on machines using casesi (such as
1000 the VAX). */
1001 if (GET_CODE (op) == TRUNCATE
1002 && GET_MODE (XEXP (op, 0)) == mode
1003 && GET_CODE (XEXP (op, 0)) == MINUS
1004 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1005 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1006 return XEXP (op, 0);
1007
1008 /* Check for a sign extension of a subreg of a promoted
1009 variable, where the promotion is sign-extended, and the
1010 	 target mode is no wider than the variable's promoted mode.  */
1011 if (GET_CODE (op) == SUBREG
1012 && SUBREG_PROMOTED_VAR_P (op)
1013 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1014 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1015 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1016
1017 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1018       /* As we do not know which address space the pointer is referring to,
1019 we can do this only if the target does not support different pointer
1020 or address modes depending on the address space. */
1021 if (target_default_pointer_address_modes_p ()
1022 && ! POINTERS_EXTEND_UNSIGNED
1023 && mode == Pmode && GET_MODE (op) == ptr_mode
1024 && (CONSTANT_P (op)
1025 || (GET_CODE (op) == SUBREG
1026 && REG_P (SUBREG_REG (op))
1027 && REG_POINTER (SUBREG_REG (op))
1028 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1029 return convert_memory_address (Pmode, op);
1030 #endif
1031 break;
1032
1033 case ZERO_EXTEND:
1034 /* Check for a zero extension of a subreg of a promoted
1035 variable, where the promotion is zero-extended, and the
1036 	 target mode is no wider than the variable's promoted mode.  */
1037 if (GET_CODE (op) == SUBREG
1038 && SUBREG_PROMOTED_VAR_P (op)
1039 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1040 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1041 return rtl_hooks.gen_lowpart_no_emit (mode, op);
1042
1043 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1044       /* As we do not know which address space the pointer is referring to,
1045 we can do this only if the target does not support different pointer
1046 or address modes depending on the address space. */
1047 if (target_default_pointer_address_modes_p ()
1048 && POINTERS_EXTEND_UNSIGNED > 0
1049 && mode == Pmode && GET_MODE (op) == ptr_mode
1050 && (CONSTANT_P (op)
1051 || (GET_CODE (op) == SUBREG
1052 && REG_P (SUBREG_REG (op))
1053 && REG_POINTER (SUBREG_REG (op))
1054 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1055 return convert_memory_address (Pmode, op);
1056 #endif
1057 break;
1058
1059 default:
1060 break;
1061 }
1062
1063 return 0;
1064 }
1065
1066 /* Try to compute the value of a unary operation CODE whose output mode is to
1067 be MODE with input operand OP whose mode was originally OP_MODE.
1068 Return zero if the value cannot be computed. */
1069 rtx
1070 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1071 rtx op, enum machine_mode op_mode)
1072 {
1073 unsigned int width = GET_MODE_BITSIZE (mode);
1074
1075 if (code == VEC_DUPLICATE)
1076 {
1077 gcc_assert (VECTOR_MODE_P (mode));
1078 if (GET_MODE (op) != VOIDmode)
1079 {
1080 if (!VECTOR_MODE_P (GET_MODE (op)))
1081 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1082 else
1083 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1084 (GET_MODE (op)));
1085 }
1086 if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1087 || GET_CODE (op) == CONST_VECTOR)
1088 {
1089 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1090 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1091 rtvec v = rtvec_alloc (n_elts);
1092 unsigned int i;
1093
1094 if (GET_CODE (op) != CONST_VECTOR)
1095 for (i = 0; i < n_elts; i++)
1096 RTVEC_ELT (v, i) = op;
1097 else
1098 {
1099 enum machine_mode inmode = GET_MODE (op);
1100 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1101 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1102
1103 gcc_assert (in_n_elts < n_elts);
1104 gcc_assert ((n_elts % in_n_elts) == 0);
1105 for (i = 0; i < n_elts; i++)
1106 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1107 }
1108 return gen_rtx_CONST_VECTOR (mode, v);
1109 }
1110 }
1111
1112 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1113 {
1114 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1115 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1116 enum machine_mode opmode = GET_MODE (op);
1117 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1118 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1119 rtvec v = rtvec_alloc (n_elts);
1120 unsigned int i;
1121
1122 gcc_assert (op_n_elts == n_elts);
1123 for (i = 0; i < n_elts; i++)
1124 {
1125 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1126 CONST_VECTOR_ELT (op, i),
1127 GET_MODE_INNER (opmode));
1128 if (!x)
1129 return 0;
1130 RTVEC_ELT (v, i) = x;
1131 }
1132 return gen_rtx_CONST_VECTOR (mode, v);
1133 }
1134
1135 /* The order of these tests is critical so that, for example, we don't
1136 check the wrong mode (input vs. output) for a conversion operation,
1137 such as FIX. At some point, this should be simplified. */
1138
1139 if (code == FLOAT && GET_MODE (op) == VOIDmode
1140 && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1141 {
1142 HOST_WIDE_INT hv, lv;
1143 REAL_VALUE_TYPE d;
1144
1145 if (CONST_INT_P (op))
1146 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1147 else
1148 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1149
1150 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1151 d = real_value_truncate (mode, d);
1152 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1153 }
1154 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1155 && (GET_CODE (op) == CONST_DOUBLE
1156 || CONST_INT_P (op)))
1157 {
1158 HOST_WIDE_INT hv, lv;
1159 REAL_VALUE_TYPE d;
1160
1161 if (CONST_INT_P (op))
1162 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1163 else
1164 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1165
1166 if (op_mode == VOIDmode)
1167 {
1168 /* We don't know how to interpret negative-looking numbers in
1169 this case, so don't try to fold those. */
1170 if (hv < 0)
1171 return 0;
1172 }
1173 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1174 ;
1175 else
1176 hv = 0, lv &= GET_MODE_MASK (op_mode);
1177
1178 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1179 d = real_value_truncate (mode, d);
1180 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1181 }
1182
1183 if (CONST_INT_P (op)
1184 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1185 {
1186 HOST_WIDE_INT arg0 = INTVAL (op);
1187 HOST_WIDE_INT val;
1188
1189 switch (code)
1190 {
1191 case NOT:
1192 val = ~ arg0;
1193 break;
1194
1195 case NEG:
1196 val = - arg0;
1197 break;
1198
1199 case ABS:
1200 val = (arg0 >= 0 ? arg0 : - arg0);
1201 break;
1202
1203 case FFS:
1204 /* Don't use ffs here. Instead, get low order bit and then its
1205 number. If arg0 is zero, this will return 0, as desired. */
1206 arg0 &= GET_MODE_MASK (mode);
1207 val = exact_log2 (arg0 & (- arg0)) + 1;
1208 break;
1209
1210 case CLZ:
1211 arg0 &= GET_MODE_MASK (mode);
1212 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1213 ;
1214 else
1215 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1216 break;
1217
1218 case CTZ:
1219 arg0 &= GET_MODE_MASK (mode);
1220 if (arg0 == 0)
1221 {
1222 /* Even if the value at zero is undefined, we have to come
1223 up with some replacement. Seems good enough. */
1224 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1225 val = GET_MODE_BITSIZE (mode);
1226 }
1227 else
1228 val = exact_log2 (arg0 & -arg0);
1229 break;
1230
1231 case POPCOUNT:
1232 arg0 &= GET_MODE_MASK (mode);
1233 val = 0;
1234 while (arg0)
1235 val++, arg0 &= arg0 - 1;
1236 break;
1237
1238 case PARITY:
1239 arg0 &= GET_MODE_MASK (mode);
1240 val = 0;
1241 while (arg0)
1242 val++, arg0 &= arg0 - 1;
1243 val &= 1;
1244 break;
1245
1246 case BSWAP:
1247 {
1248 unsigned int s;
1249
1250 val = 0;
1251 for (s = 0; s < width; s += 8)
1252 {
1253 unsigned int d = width - s - 8;
1254 unsigned HOST_WIDE_INT byte;
1255 byte = (arg0 >> s) & 0xff;
1256 val |= byte << d;
1257 }
1258 }
1259 break;
1260
1261 case TRUNCATE:
1262 val = arg0;
1263 break;
1264
1265 case ZERO_EXTEND:
1266 /* When zero-extending a CONST_INT, we need to know its
1267 original mode. */
1268 gcc_assert (op_mode != VOIDmode);
1269 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1270 {
1271 /* If we were really extending the mode,
1272 we would have to distinguish between zero-extension
1273 and sign-extension. */
1274 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1275 val = arg0;
1276 }
1277 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1278 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1279 else
1280 return 0;
1281 break;
1282
1283 case SIGN_EXTEND:
1284 if (op_mode == VOIDmode)
1285 op_mode = mode;
1286 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1287 {
1288 /* If we were really extending the mode,
1289 we would have to distinguish between zero-extension
1290 and sign-extension. */
1291 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1292 val = arg0;
1293 }
1294 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1295 {
1296 val
1297 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1298 if (val
1299 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1300 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1301 }
1302 else
1303 return 0;
1304 break;
1305
1306 case SQRT:
1307 case FLOAT_EXTEND:
1308 case FLOAT_TRUNCATE:
1309 case SS_TRUNCATE:
1310 case US_TRUNCATE:
1311 case SS_NEG:
1312 case US_NEG:
1313 case SS_ABS:
1314 return 0;
1315
1316 default:
1317 gcc_unreachable ();
1318 }
1319
1320 return gen_int_mode (val, mode);
1321 }
1322
1323 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1324 for a DImode operation on a CONST_INT. */
1325 else if (GET_MODE (op) == VOIDmode
1326 && width <= HOST_BITS_PER_WIDE_INT * 2
1327 && (GET_CODE (op) == CONST_DOUBLE
1328 || CONST_INT_P (op)))
1329 {
1330 unsigned HOST_WIDE_INT l1, lv;
1331 HOST_WIDE_INT h1, hv;
1332
1333 if (GET_CODE (op) == CONST_DOUBLE)
1334 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1335 else
1336 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1337
1338 switch (code)
1339 {
1340 case NOT:
1341 lv = ~ l1;
1342 hv = ~ h1;
1343 break;
1344
1345 case NEG:
1346 neg_double (l1, h1, &lv, &hv);
1347 break;
1348
1349 case ABS:
1350 if (h1 < 0)
1351 neg_double (l1, h1, &lv, &hv);
1352 else
1353 lv = l1, hv = h1;
1354 break;
1355
1356 case FFS:
1357 hv = 0;
1358 if (l1 == 0)
1359 {
1360 if (h1 == 0)
1361 lv = 0;
1362 else
1363 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1364 }
1365 else
1366 lv = exact_log2 (l1 & -l1) + 1;
1367 break;
1368
1369 case CLZ:
1370 hv = 0;
1371 if (h1 != 0)
1372 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1373 - HOST_BITS_PER_WIDE_INT;
1374 else if (l1 != 0)
1375 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1376 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1377 lv = GET_MODE_BITSIZE (mode);
1378 break;
1379
1380 case CTZ:
1381 hv = 0;
1382 if (l1 != 0)
1383 lv = exact_log2 (l1 & -l1);
1384 else if (h1 != 0)
1385 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1386 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1387 lv = GET_MODE_BITSIZE (mode);
1388 break;
1389
1390 case POPCOUNT:
1391 hv = 0;
1392 lv = 0;
1393 while (l1)
1394 lv++, l1 &= l1 - 1;
1395 while (h1)
1396 lv++, h1 &= h1 - 1;
1397 break;
1398
1399 case PARITY:
1400 hv = 0;
1401 lv = 0;
1402 while (l1)
1403 lv++, l1 &= l1 - 1;
1404 while (h1)
1405 lv++, h1 &= h1 - 1;
1406 lv &= 1;
1407 break;
1408
1409 case BSWAP:
1410 {
1411 unsigned int s;
1412
1413 hv = 0;
1414 lv = 0;
1415 for (s = 0; s < width; s += 8)
1416 {
1417 unsigned int d = width - s - 8;
1418 unsigned HOST_WIDE_INT byte;
1419
1420 if (s < HOST_BITS_PER_WIDE_INT)
1421 byte = (l1 >> s) & 0xff;
1422 else
1423 byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1424
1425 if (d < HOST_BITS_PER_WIDE_INT)
1426 lv |= byte << d;
1427 else
1428 hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1429 }
1430 }
1431 break;
1432
1433 case TRUNCATE:
1434 /* This is just a change-of-mode, so do nothing. */
1435 lv = l1, hv = h1;
1436 break;
1437
1438 case ZERO_EXTEND:
1439 gcc_assert (op_mode != VOIDmode);
1440
1441 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1442 return 0;
1443
1444 hv = 0;
1445 lv = l1 & GET_MODE_MASK (op_mode);
1446 break;
1447
1448 case SIGN_EXTEND:
1449 if (op_mode == VOIDmode
1450 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1451 return 0;
1452 else
1453 {
1454 lv = l1 & GET_MODE_MASK (op_mode);
1455 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1456 && (lv & ((HOST_WIDE_INT) 1
1457 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1458 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1459
1460 hv = HWI_SIGN_EXTEND (lv);
1461 }
1462 break;
1463
1464 case SQRT:
1465 return 0;
1466
1467 default:
1468 return 0;
1469 }
1470
1471 return immed_double_const (lv, hv, mode);
1472 }
1473
1474 else if (GET_CODE (op) == CONST_DOUBLE
1475 && SCALAR_FLOAT_MODE_P (mode))
1476 {
1477 REAL_VALUE_TYPE d, t;
1478 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1479
1480 switch (code)
1481 {
1482 case SQRT:
1483 if (HONOR_SNANS (mode) && real_isnan (&d))
1484 return 0;
1485 real_sqrt (&t, mode, &d);
1486 d = t;
1487 break;
1488 case ABS:
1489 d = REAL_VALUE_ABS (d);
1490 break;
1491 case NEG:
1492 d = REAL_VALUE_NEGATE (d);
1493 break;
1494 case FLOAT_TRUNCATE:
1495 d = real_value_truncate (mode, d);
1496 break;
1497 case FLOAT_EXTEND:
1498 /* All this does is change the mode. */
1499 break;
1500 case FIX:
1501 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1502 break;
1503 case NOT:
1504 {
1505 long tmp[4];
1506 int i;
1507
1508 real_to_target (tmp, &d, GET_MODE (op));
1509 for (i = 0; i < 4; i++)
1510 tmp[i] = ~tmp[i];
1511 real_from_target (&d, tmp, mode);
1512 break;
1513 }
1514 default:
1515 gcc_unreachable ();
1516 }
1517 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1518 }
1519
1520 else if (GET_CODE (op) == CONST_DOUBLE
1521 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1522 && GET_MODE_CLASS (mode) == MODE_INT
1523 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1524 {
1525 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1526 operators are intentionally left unspecified (to ease implementation
1527 by target backends), for consistency, this routine implements the
1528 same semantics for constant folding as used by the middle-end. */
1529
1530 /* This was formerly used only for non-IEEE float.
1531 eggert@twinsun.com says it is safe for IEEE also. */
1532 HOST_WIDE_INT xh, xl, th, tl;
1533 REAL_VALUE_TYPE x, t;
1534 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1535 switch (code)
1536 {
1537 case FIX:
1538 if (REAL_VALUE_ISNAN (x))
1539 return const0_rtx;
1540
1541 /* Test against the signed upper bound. */
1542 if (width > HOST_BITS_PER_WIDE_INT)
1543 {
1544 th = ((unsigned HOST_WIDE_INT) 1
1545 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1546 tl = -1;
1547 }
1548 else
1549 {
1550 th = 0;
1551 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1552 }
1553 real_from_integer (&t, VOIDmode, tl, th, 0);
1554 if (REAL_VALUES_LESS (t, x))
1555 {
1556 xh = th;
1557 xl = tl;
1558 break;
1559 }
1560
1561 /* Test against the signed lower bound. */
1562 if (width > HOST_BITS_PER_WIDE_INT)
1563 {
1564 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1565 tl = 0;
1566 }
1567 else
1568 {
1569 th = -1;
1570 tl = (HOST_WIDE_INT) -1 << (width - 1);
1571 }
1572 real_from_integer (&t, VOIDmode, tl, th, 0);
1573 if (REAL_VALUES_LESS (x, t))
1574 {
1575 xh = th;
1576 xl = tl;
1577 break;
1578 }
1579 REAL_VALUE_TO_INT (&xl, &xh, x);
1580 break;
1581
1582 case UNSIGNED_FIX:
1583 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1584 return const0_rtx;
1585
1586 /* Test against the unsigned upper bound. */
1587 if (width == 2*HOST_BITS_PER_WIDE_INT)
1588 {
1589 th = -1;
1590 tl = -1;
1591 }
1592 else if (width >= HOST_BITS_PER_WIDE_INT)
1593 {
1594 th = ((unsigned HOST_WIDE_INT) 1
1595 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1596 tl = -1;
1597 }
1598 else
1599 {
1600 th = 0;
1601 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1602 }
1603 real_from_integer (&t, VOIDmode, tl, th, 1);
1604 if (REAL_VALUES_LESS (t, x))
1605 {
1606 xh = th;
1607 xl = tl;
1608 break;
1609 }
1610
1611 REAL_VALUE_TO_INT (&xl, &xh, x);
1612 break;
1613
1614 default:
1615 gcc_unreachable ();
1616 }
1617 return immed_double_const (xl, xh, mode);
1618 }
1619
1620 return NULL_RTX;
1621 }
1622 \f
1623 /* Subroutine of simplify_binary_operation to simplify a commutative,
1624 associative binary operation CODE with result mode MODE, operating
1625 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1626 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1627 canonicalization is possible. */
1628
1629 static rtx
1630 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1631 rtx op0, rtx op1)
1632 {
1633 rtx tem;
1634
1635 /* Linearize the operator to the left. */
1636 if (GET_CODE (op1) == code)
1637 {
1638 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1639 if (GET_CODE (op0) == code)
1640 {
1641 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1642 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1643 }
1644
1645 /* "a op (b op c)" becomes "(b op c) op a". */
1646 if (! swap_commutative_operands_p (op1, op0))
1647 return simplify_gen_binary (code, mode, op1, op0);
1648
1649 tem = op0;
1650 op0 = op1;
1651 op1 = tem;
1652 }
1653
1654 if (GET_CODE (op0) == code)
1655 {
1656 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1657 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1658 {
1659 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1660 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1661 }
1662
1663 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1664 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1665 if (tem != 0)
1666 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1667
1668 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1669 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1670 if (tem != 0)
1671 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1672 }
1673
1674 return 0;
1675 }
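/* As an example of the canonicalizations above, (plus (plus x (const_int 4)) y)
   becomes (plus (plus x y) (const_int 4)), keeping the constant outermost so
   that later folding can combine it with other constants.  */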
1676
1677
1678 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1679 and OP1. Return 0 if no simplification is possible.
1680
1681 Don't use this for relational operations such as EQ or LT.
1682 Use simplify_relational_operation instead. */
1683 rtx
1684 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1685 rtx op0, rtx op1)
1686 {
1687 rtx trueop0, trueop1;
1688 rtx tem;
1689
1690 /* Relational operations don't work here. We must know the mode
1691 of the operands in order to do the comparison correctly.
1692 Assuming a full word can give incorrect results.
1693 Consider comparing 128 with -128 in QImode. */
1694 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1695 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1696
1697 /* Make sure the constant is second. */
1698 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1699 && swap_commutative_operands_p (op0, op1))
1700 {
1701 tem = op0, op0 = op1, op1 = tem;
1702 }
1703
1704 trueop0 = avoid_constant_pool_reference (op0);
1705 trueop1 = avoid_constant_pool_reference (op1);
1706
1707 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1708 if (tem)
1709 return tem;
1710 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1711 }
1712
1713 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1714 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1715 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1716 actual constants. */
1717
1718 static rtx
1719 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1720 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1721 {
1722 rtx tem, reversed, opleft, opright;
1723 HOST_WIDE_INT val;
1724 unsigned int width = GET_MODE_BITSIZE (mode);
1725
1726 /* Even if we can't compute a constant result,
1727 there are some cases worth simplifying. */
1728
1729 switch (code)
1730 {
1731 case PLUS:
1732 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1733 when x is NaN, infinite, or finite and nonzero. They aren't
1734 when x is -0 and the rounding mode is not towards -infinity,
1735 since (-0) + 0 is then 0. */
1736 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1737 return op0;
1738
1739 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1740 transformations are safe even for IEEE. */
1741 if (GET_CODE (op0) == NEG)
1742 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1743 else if (GET_CODE (op1) == NEG)
1744 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1745
1746 /* (~a) + 1 -> -a */
1747 if (INTEGRAL_MODE_P (mode)
1748 && GET_CODE (op0) == NOT
1749 && trueop1 == const1_rtx)
1750 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1751
1752 /* Handle both-operands-constant cases. We can only add
1753 CONST_INTs to constants since the sum of relocatable symbols
1754 can't be handled by most assemblers. Don't add CONST_INT
1755 to CONST_INT since overflow won't be computed properly if wider
1756 than HOST_BITS_PER_WIDE_INT. */
1757
1758 if ((GET_CODE (op0) == CONST
1759 || GET_CODE (op0) == SYMBOL_REF
1760 || GET_CODE (op0) == LABEL_REF)
1761 && CONST_INT_P (op1))
1762 return plus_constant (op0, INTVAL (op1));
1763 else if ((GET_CODE (op1) == CONST
1764 || GET_CODE (op1) == SYMBOL_REF
1765 || GET_CODE (op1) == LABEL_REF)
1766 && CONST_INT_P (op0))
1767 return plus_constant (op1, INTVAL (op0));
1768
1769 /* See if this is something like X * C - X or vice versa or
1770 if the multiplication is written as a shift. If so, we can
1771 distribute and make a new multiply, shift, or maybe just
1772 have X (if C is 2 in the example above). But don't make
1773 something more expensive than we had before. */
1774
1775 if (SCALAR_INT_MODE_P (mode))
1776 {
1777 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1778 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1779 rtx lhs = op0, rhs = op1;
1780
1781 if (GET_CODE (lhs) == NEG)
1782 {
1783 coeff0l = -1;
1784 coeff0h = -1;
1785 lhs = XEXP (lhs, 0);
1786 }
1787 else if (GET_CODE (lhs) == MULT
1788 && CONST_INT_P (XEXP (lhs, 1)))
1789 {
1790 coeff0l = INTVAL (XEXP (lhs, 1));
1791 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1792 lhs = XEXP (lhs, 0);
1793 }
1794 else if (GET_CODE (lhs) == ASHIFT
1795 && CONST_INT_P (XEXP (lhs, 1))
1796 && INTVAL (XEXP (lhs, 1)) >= 0
1797 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1798 {
1799 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1800 coeff0h = 0;
1801 lhs = XEXP (lhs, 0);
1802 }
1803
1804 if (GET_CODE (rhs) == NEG)
1805 {
1806 coeff1l = -1;
1807 coeff1h = -1;
1808 rhs = XEXP (rhs, 0);
1809 }
1810 else if (GET_CODE (rhs) == MULT
1811 && CONST_INT_P (XEXP (rhs, 1)))
1812 {
1813 coeff1l = INTVAL (XEXP (rhs, 1));
1814 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1815 rhs = XEXP (rhs, 0);
1816 }
1817 else if (GET_CODE (rhs) == ASHIFT
1818 && CONST_INT_P (XEXP (rhs, 1))
1819 && INTVAL (XEXP (rhs, 1)) >= 0
1820 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1821 {
1822 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1823 coeff1h = 0;
1824 rhs = XEXP (rhs, 0);
1825 }
1826
1827 if (rtx_equal_p (lhs, rhs))
1828 {
1829 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1830 rtx coeff;
1831 unsigned HOST_WIDE_INT l;
1832 HOST_WIDE_INT h;
1833 bool speed = optimize_function_for_speed_p (cfun);
1834
1835 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1836 coeff = immed_double_const (l, h, mode);
1837
1838 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1839 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
1840 ? tem : 0;
1841 }
1842 }
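      /* For example, the code above combines (plus (mult X (const_int 3)) X)
	 into X multiplied by 4 (possibly expressed as a shift), provided the
	 result is not more costly than the original expression.  */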
1843
1844 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1845 if ((CONST_INT_P (op1)
1846 || GET_CODE (op1) == CONST_DOUBLE)
1847 && GET_CODE (op0) == XOR
1848 && (CONST_INT_P (XEXP (op0, 1))
1849 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1850 && mode_signbit_p (mode, op1))
1851 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1852 simplify_gen_binary (XOR, mode, op1,
1853 XEXP (op0, 1)));
1854
1855 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1856 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
1857 && GET_CODE (op0) == MULT
1858 && GET_CODE (XEXP (op0, 0)) == NEG)
1859 {
1860 rtx in1, in2;
1861
1862 in1 = XEXP (XEXP (op0, 0), 0);
1863 in2 = XEXP (op0, 1);
1864 return simplify_gen_binary (MINUS, mode, op1,
1865 simplify_gen_binary (MULT, mode,
1866 in1, in2));
1867 }
1868
1869 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1870 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1871 is 1. */
1872 if (COMPARISON_P (op0)
1873 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1874 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1875 && (reversed = reversed_comparison (op0, mode)))
1876 return
1877 simplify_gen_unary (NEG, mode, reversed, mode);
1878
1879 /* If one of the operands is a PLUS or a MINUS, see if we can
1880 simplify this by the associative law.
1881 Don't use the associative law for floating point.
1882 The inaccuracy makes it nonassociative,
1883 and subtle programs can break if operations are associated. */
1884
1885 if (INTEGRAL_MODE_P (mode)
1886 && (plus_minus_operand_p (op0)
1887 || plus_minus_operand_p (op1))
1888 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1889 return tem;
1890
1891 /* Reassociate floating point addition only when the user
1892 specifies associative math operations. */
1893 if (FLOAT_MODE_P (mode)
1894 && flag_associative_math)
1895 {
1896 tem = simplify_associative_operation (code, mode, op0, op1);
1897 if (tem)
1898 return tem;
1899 }
1900 break;
1901
1902 case COMPARE:
1903 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1904 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1905 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1906 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1907 {
1908 rtx xop00 = XEXP (op0, 0);
1909 rtx xop10 = XEXP (op1, 0);
1910
1911 #ifdef HAVE_cc0
1912 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1913 #else
1914 if (REG_P (xop00) && REG_P (xop10)
1915 && GET_MODE (xop00) == GET_MODE (xop10)
1916 && REGNO (xop00) == REGNO (xop10)
1917 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1918 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1919 #endif
1920 return xop00;
1921 }
1922 break;
1923
1924 case MINUS:
1925 /* We can't assume x-x is 0 even with non-IEEE floating point,
1926 but since it is zero except in very strange circumstances, we
1927 will treat it as zero with -ffinite-math-only. */
1928 if (rtx_equal_p (trueop0, trueop1)
1929 && ! side_effects_p (op0)
1930 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
1931 return CONST0_RTX (mode);
1932
1933 /* Change subtraction from zero into negation. (0 - x) is the
1934 same as -x when x is NaN, infinite, or finite and nonzero.
1935 But if the mode has signed zeros, and does not round towards
1936 -infinity, then 0 - 0 is 0, not -0. */
1937 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1938 return simplify_gen_unary (NEG, mode, op1, mode);
1939
1940 /* (-1 - a) is ~a. */
1941 if (trueop0 == constm1_rtx)
1942 return simplify_gen_unary (NOT, mode, op1, mode);
1943
1944 /* Subtracting 0 has no effect unless the mode has signed zeros
1945 and supports rounding towards -infinity. In such a case,
1946 0 - 0 is -0. */
1947 if (!(HONOR_SIGNED_ZEROS (mode)
1948 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1949 && trueop1 == CONST0_RTX (mode))
1950 return op0;
1951
1952 /* See if this is something like X * C - X or vice versa or
1953 if the multiplication is written as a shift. If so, we can
1954 distribute and make a new multiply, shift, or maybe just
1955 have X (if C is 2 in the example above). But don't make
1956 something more expensive than we had before. */
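/* For example, (minus (mult X 3) X) reduces to (mult X 2), and
   (minus (ashift X 2) X) to (mult X 3), provided the rtx_cost check
   below finds the new form no more expensive than the old one.  */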
1957
1958 if (SCALAR_INT_MODE_P (mode))
1959 {
1960 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1961 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1962 rtx lhs = op0, rhs = op1;
1963
1964 if (GET_CODE (lhs) == NEG)
1965 {
1966 coeff0l = -1;
1967 coeff0h = -1;
1968 lhs = XEXP (lhs, 0);
1969 }
1970 else if (GET_CODE (lhs) == MULT
1971 && CONST_INT_P (XEXP (lhs, 1)))
1972 {
1973 coeff0l = INTVAL (XEXP (lhs, 1));
1974 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1975 lhs = XEXP (lhs, 0);
1976 }
1977 else if (GET_CODE (lhs) == ASHIFT
1978 && CONST_INT_P (XEXP (lhs, 1))
1979 && INTVAL (XEXP (lhs, 1)) >= 0
1980 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1981 {
1982 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1983 coeff0h = 0;
1984 lhs = XEXP (lhs, 0);
1985 }
1986
1987 if (GET_CODE (rhs) == NEG)
1988 {
1989 negcoeff1l = 1;
1990 negcoeff1h = 0;
1991 rhs = XEXP (rhs, 0);
1992 }
1993 else if (GET_CODE (rhs) == MULT
1994 && CONST_INT_P (XEXP (rhs, 1)))
1995 {
1996 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1997 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1998 rhs = XEXP (rhs, 0);
1999 }
2000 else if (GET_CODE (rhs) == ASHIFT
2001 && CONST_INT_P (XEXP (rhs, 1))
2002 && INTVAL (XEXP (rhs, 1)) >= 0
2003 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2004 {
2005 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
2006 negcoeff1h = -1;
2007 rhs = XEXP (rhs, 0);
2008 }
2009
2010 if (rtx_equal_p (lhs, rhs))
2011 {
2012 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2013 rtx coeff;
2014 unsigned HOST_WIDE_INT l;
2015 HOST_WIDE_INT h;
2016 bool speed = optimize_function_for_speed_p (cfun);
2017
2018 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
2019 coeff = immed_double_const (l, h, mode);
2020
2021 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2022 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
2023 ? tem : 0;
2024 }
2025 }
2026
2027 /* (a - (-b)) -> (a + b). True even for IEEE. */
2028 if (GET_CODE (op1) == NEG)
2029 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2030
2031 /* (-x - c) may be simplified as (-c - x). */
2032 if (GET_CODE (op0) == NEG
2033 && (CONST_INT_P (op1)
2034 || GET_CODE (op1) == CONST_DOUBLE))
2035 {
2036 tem = simplify_unary_operation (NEG, mode, op1, mode);
2037 if (tem)
2038 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2039 }
2040
2041 /* Don't let a relocatable value get a negative coeff. */
2042 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2043 return simplify_gen_binary (PLUS, mode,
2044 op0,
2045 neg_const_int (mode, op1));
2046
2047 /* (x - (x & y)) -> (x & ~y) */
2048 if (GET_CODE (op1) == AND)
2049 {
2050 if (rtx_equal_p (op0, XEXP (op1, 0)))
2051 {
2052 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2053 GET_MODE (XEXP (op1, 1)));
2054 return simplify_gen_binary (AND, mode, op0, tem);
2055 }
2056 if (rtx_equal_p (op0, XEXP (op1, 1)))
2057 {
2058 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2059 GET_MODE (XEXP (op1, 0)));
2060 return simplify_gen_binary (AND, mode, op0, tem);
2061 }
2062 }
2063
2064 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2065 by reversing the comparison code if valid. */
2066 if (STORE_FLAG_VALUE == 1
2067 && trueop0 == const1_rtx
2068 && COMPARISON_P (op1)
2069 && (reversed = reversed_comparison (op1, mode)))
2070 return reversed;
2071
2072 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2073 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2074 && GET_CODE (op1) == MULT
2075 && GET_CODE (XEXP (op1, 0)) == NEG)
2076 {
2077 rtx in1, in2;
2078
2079 in1 = XEXP (XEXP (op1, 0), 0);
2080 in2 = XEXP (op1, 1);
2081 return simplify_gen_binary (PLUS, mode,
2082 simplify_gen_binary (MULT, mode,
2083 in1, in2),
2084 op0);
2085 }
2086
2087 /* Canonicalize (minus (neg A) (mult B C)) to
2088 (minus (mult (neg B) C) A). */
2089 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2090 && GET_CODE (op1) == MULT
2091 && GET_CODE (op0) == NEG)
2092 {
2093 rtx in1, in2;
2094
2095 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2096 in2 = XEXP (op1, 1);
2097 return simplify_gen_binary (MINUS, mode,
2098 simplify_gen_binary (MULT, mode,
2099 in1, in2),
2100 XEXP (op0, 0));
2101 }
2102
2103 /* If one of the operands is a PLUS or a MINUS, see if we can
2104 simplify this by the associative law. This will, for example,
2105 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2106 Don't use the associative law for floating point.
2107 The inaccuracy makes it nonassociative,
2108 and subtle programs can break if operations are associated. */
2109
2110 if (INTEGRAL_MODE_P (mode)
2111 && (plus_minus_operand_p (op0)
2112 || plus_minus_operand_p (op1))
2113 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2114 return tem;
2115 break;
2116
2117 case MULT:
2118 if (trueop1 == constm1_rtx)
2119 return simplify_gen_unary (NEG, mode, op0, mode);
2120
2121 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2122 x is NaN, since x * 0 is then also NaN. Nor is it valid
2123 when the mode has signed zeros, since multiplying a negative
2124 number by 0 will give -0, not 0. */
2125 if (!HONOR_NANS (mode)
2126 && !HONOR_SIGNED_ZEROS (mode)
2127 && trueop1 == CONST0_RTX (mode)
2128 && ! side_effects_p (op0))
2129 return op1;
2130
2131 /* In IEEE floating point, x*1 is not equivalent to x for
2132 signalling NaNs. */
2133 if (!HONOR_SNANS (mode)
2134 && trueop1 == CONST1_RTX (mode))
2135 return op0;
2136
2137 /* Convert multiply by constant power of two into shift unless
2138 we are still generating RTL. This test is a kludge. */
2139 if (CONST_INT_P (trueop1)
2140 && (val = exact_log2 (INTVAL (trueop1))) >= 0
2141 /* If the mode is larger than the host word size, and the
2142 uppermost bit is set, then this isn't a power of two due
2143 to implicit sign extension. */
2144 && (width <= HOST_BITS_PER_WIDE_INT
2145 || val != HOST_BITS_PER_WIDE_INT - 1))
2146 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
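/* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)).  A
   constant whose only set bit is the host-word sign bit is rejected
   above when the mode is wider than HOST_WIDE_INT, since implicit sign
   extension makes it negative rather than a power of two.  */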
2147
2148 /* Likewise for multipliers wider than a word. */
2149 if (GET_CODE (trueop1) == CONST_DOUBLE
2150 && (GET_MODE (trueop1) == VOIDmode
2151 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2152 && GET_MODE (op0) == mode
2153 && CONST_DOUBLE_LOW (trueop1) == 0
2154 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2155 return simplify_gen_binary (ASHIFT, mode, op0,
2156 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2157
2158 /* x*2 is x+x and x*(-1) is -x */
2159 if (GET_CODE (trueop1) == CONST_DOUBLE
2160 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2161 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2162 && GET_MODE (op0) == mode)
2163 {
2164 REAL_VALUE_TYPE d;
2165 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2166
2167 if (REAL_VALUES_EQUAL (d, dconst2))
2168 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2169
2170 if (!HONOR_SNANS (mode)
2171 && REAL_VALUES_EQUAL (d, dconstm1))
2172 return simplify_gen_unary (NEG, mode, op0, mode);
2173 }
2174
2175 /* Optimize -x * -x as x * x. */
2176 if (FLOAT_MODE_P (mode)
2177 && GET_CODE (op0) == NEG
2178 && GET_CODE (op1) == NEG
2179 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2180 && !side_effects_p (XEXP (op0, 0)))
2181 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2182
2183 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2184 if (SCALAR_FLOAT_MODE_P (mode)
2185 && GET_CODE (op0) == ABS
2186 && GET_CODE (op1) == ABS
2187 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2188 && !side_effects_p (XEXP (op0, 0)))
2189 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2190
2191 /* Reassociate multiplication, but for floating point MULTs
2192 only when the user specifies unsafe math optimizations. */
2193 if (! FLOAT_MODE_P (mode)
2194 || flag_unsafe_math_optimizations)
2195 {
2196 tem = simplify_associative_operation (code, mode, op0, op1);
2197 if (tem)
2198 return tem;
2199 }
2200 break;
2201
2202 case IOR:
2203 if (trueop1 == const0_rtx)
2204 return op0;
2205 if (CONST_INT_P (trueop1)
2206 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2207 == GET_MODE_MASK (mode)))
2208 return op1;
2209 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2210 return op0;
2211 /* A | (~A) -> -1 */
2212 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2213 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2214 && ! side_effects_p (op0)
2215 && SCALAR_INT_MODE_P (mode))
2216 return constm1_rtx;
2217
2218 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2219 if (CONST_INT_P (op1)
2220 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2221 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2222 return op1;
2223
2224 /* Canonicalize (X & C1) | C2. */
2225 if (GET_CODE (op0) == AND
2226 && CONST_INT_P (trueop1)
2227 && CONST_INT_P (XEXP (op0, 1)))
2228 {
2229 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2230 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2231 HOST_WIDE_INT c2 = INTVAL (trueop1);
2232
2233 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2234 if ((c1 & c2) == c1
2235 && !side_effects_p (XEXP (op0, 0)))
2236 return trueop1;
2237
2238 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2239 if (((c1|c2) & mask) == mask)
2240 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2241
2242 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2243 if (((c1 & ~c2) & mask) != (c1 & mask))
2244 {
2245 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2246 gen_int_mode (c1 & ~c2, mode));
2247 return simplify_gen_binary (IOR, mode, tem, op1);
2248 }
2249 }
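/* Examples of the canonicalization above: (ior (and X 0x0C) 0x3C) is
   just 0x3C because C1 is contained in C2; in QImode
   (ior (and X 0x0F) 0xF0) becomes (ior X 0xF0) because C1|C2 covers
   the whole mode; and (ior (and X 0xF0) 0x3C) becomes
   (ior (and X 0xC0) 0x3C), dropping the C1 bits that C2 forces to 1.  */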
2250
2251 /* Convert (A & B) | A to A. */
2252 if (GET_CODE (op0) == AND
2253 && (rtx_equal_p (XEXP (op0, 0), op1)
2254 || rtx_equal_p (XEXP (op0, 1), op1))
2255 && ! side_effects_p (XEXP (op0, 0))
2256 && ! side_effects_p (XEXP (op0, 1)))
2257 return op1;
2258
2259 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2260 mode size to (rotate A CX). */
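/* E.g. in SImode, (ior (ashift A 8) (lshiftrt A 24)) becomes
   (rotate A 8), the two shift counts summing to the 32-bit mode size.  */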
2261
2262 if (GET_CODE (op1) == ASHIFT
2263 || GET_CODE (op1) == SUBREG)
2264 {
2265 opleft = op1;
2266 opright = op0;
2267 }
2268 else
2269 {
2270 opright = op1;
2271 opleft = op0;
2272 }
2273
2274 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2275 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2276 && CONST_INT_P (XEXP (opleft, 1))
2277 && CONST_INT_P (XEXP (opright, 1))
2278 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2279 == GET_MODE_BITSIZE (mode)))
2280 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2281
2282 /* Same, but for ashift that has been "simplified" to a wider mode
2283 by simplify_shift_const. */
2284
2285 if (GET_CODE (opleft) == SUBREG
2286 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2287 && GET_CODE (opright) == LSHIFTRT
2288 && GET_CODE (XEXP (opright, 0)) == SUBREG
2289 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2290 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2291 && (GET_MODE_SIZE (GET_MODE (opleft))
2292 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2293 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2294 SUBREG_REG (XEXP (opright, 0)))
2295 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2296 && CONST_INT_P (XEXP (opright, 1))
2297 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2298 == GET_MODE_BITSIZE (mode)))
2299 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2300 XEXP (SUBREG_REG (opleft), 1));
2301
2302 /* If we have (ior (and X C1) C2), simplify this by making
2303 C1 as small as possible if C1 actually changes. */
2304 if (CONST_INT_P (op1)
2305 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2306 || INTVAL (op1) > 0)
2307 && GET_CODE (op0) == AND
2308 && CONST_INT_P (XEXP (op0, 1))
2309 && CONST_INT_P (op1)
2310 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2311 return simplify_gen_binary (IOR, mode,
2312 simplify_gen_binary
2313 (AND, mode, XEXP (op0, 0),
2314 GEN_INT (INTVAL (XEXP (op0, 1))
2315 & ~INTVAL (op1))),
2316 op1);
2317
2318 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2319 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2320 the PLUS does not affect any of the bits in OP1: then we can do
2321 the IOR as a PLUS and we can associate. This is valid if OP1
2322 can be safely shifted left C bits. */
2323 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2324 && GET_CODE (XEXP (op0, 0)) == PLUS
2325 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2326 && CONST_INT_P (XEXP (op0, 1))
2327 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2328 {
2329 int count = INTVAL (XEXP (op0, 1));
2330 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2331
2332 if (mask >> count == INTVAL (trueop1)
2333 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2334 return simplify_gen_binary (ASHIFTRT, mode,
2335 plus_constant (XEXP (op0, 0), mask),
2336 XEXP (op0, 1));
2337 }
2338
2339 tem = simplify_associative_operation (code, mode, op0, op1);
2340 if (tem)
2341 return tem;
2342 break;
2343
2344 case XOR:
2345 if (trueop1 == const0_rtx)
2346 return op0;
2347 if (CONST_INT_P (trueop1)
2348 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2349 == GET_MODE_MASK (mode)))
2350 return simplify_gen_unary (NOT, mode, op0, mode);
2351 if (rtx_equal_p (trueop0, trueop1)
2352 && ! side_effects_p (op0)
2353 && GET_MODE_CLASS (mode) != MODE_CC)
2354 return CONST0_RTX (mode);
2355
2356 /* Canonicalize XOR of the most significant bit to PLUS. */
2357 if ((CONST_INT_P (op1)
2358 || GET_CODE (op1) == CONST_DOUBLE)
2359 && mode_signbit_p (mode, op1))
2360 return simplify_gen_binary (PLUS, mode, op0, op1);
2361 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2362 if ((CONST_INT_P (op1)
2363 || GET_CODE (op1) == CONST_DOUBLE)
2364 && GET_CODE (op0) == PLUS
2365 && (CONST_INT_P (XEXP (op0, 1))
2366 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2367 && mode_signbit_p (mode, XEXP (op0, 1)))
2368 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2369 simplify_gen_binary (XOR, mode, op1,
2370 XEXP (op0, 1)));
2371
2372 /* If we are XORing two things that have no bits in common,
2373 convert them into an IOR. This helps to detect rotation encoded
2374 using those methods and possibly other simplifications. */
2375
2376 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2377 && (nonzero_bits (op0, mode)
2378 & nonzero_bits (op1, mode)) == 0)
2379 return (simplify_gen_binary (IOR, mode, op0, op1));
2380
2381 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2382 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2383 (NOT y). */
2384 {
2385 int num_negated = 0;
2386
2387 if (GET_CODE (op0) == NOT)
2388 num_negated++, op0 = XEXP (op0, 0);
2389 if (GET_CODE (op1) == NOT)
2390 num_negated++, op1 = XEXP (op1, 0);
2391
2392 if (num_negated == 2)
2393 return simplify_gen_binary (XOR, mode, op0, op1);
2394 else if (num_negated == 1)
2395 return simplify_gen_unary (NOT, mode,
2396 simplify_gen_binary (XOR, mode, op0, op1),
2397 mode);
2398 }
2399
2400 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2401 correspond to a machine insn or result in further simplifications
2402 if B is a constant. */
2403
2404 if (GET_CODE (op0) == AND
2405 && rtx_equal_p (XEXP (op0, 1), op1)
2406 && ! side_effects_p (op1))
2407 return simplify_gen_binary (AND, mode,
2408 simplify_gen_unary (NOT, mode,
2409 XEXP (op0, 0), mode),
2410 op1);
2411
2412 else if (GET_CODE (op0) == AND
2413 && rtx_equal_p (XEXP (op0, 0), op1)
2414 && ! side_effects_p (op1))
2415 return simplify_gen_binary (AND, mode,
2416 simplify_gen_unary (NOT, mode,
2417 XEXP (op0, 1), mode),
2418 op1);
2419
2420 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2421 comparison if STORE_FLAG_VALUE is 1. */
2422 if (STORE_FLAG_VALUE == 1
2423 && trueop1 == const1_rtx
2424 && COMPARISON_P (op0)
2425 && (reversed = reversed_comparison (op0, mode)))
2426 return reversed;
2427
2428 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2429 is (lt foo (const_int 0)), so we can perform the above
2430 simplification if STORE_FLAG_VALUE is 1. */
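/* E.g. in SImode, (lshiftrt X 31) is 1 exactly when X is negative, so
   (xor (lshiftrt X 31) 1) is the reversed test (ge X 0).  */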
2431
2432 if (STORE_FLAG_VALUE == 1
2433 && trueop1 == const1_rtx
2434 && GET_CODE (op0) == LSHIFTRT
2435 && CONST_INT_P (XEXP (op0, 1))
2436 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2437 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2438
2439 /* (xor (comparison foo bar) (const_int sign-bit))
2440 when STORE_FLAG_VALUE is the sign bit. */
2441 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2442 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2443 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2444 && trueop1 == const_true_rtx
2445 && COMPARISON_P (op0)
2446 && (reversed = reversed_comparison (op0, mode)))
2447 return reversed;
2448
2449 tem = simplify_associative_operation (code, mode, op0, op1);
2450 if (tem)
2451 return tem;
2452 break;
2453
2454 case AND:
2455 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2456 return trueop1;
2457 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2458 {
2459 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2460 HOST_WIDE_INT nzop1;
2461 if (CONST_INT_P (trueop1))
2462 {
2463 HOST_WIDE_INT val1 = INTVAL (trueop1);
2464 /* If we are turning off bits already known off in OP0, we need
2465 not do an AND. */
2466 if ((nzop0 & ~val1) == 0)
2467 return op0;
2468 }
2469 nzop1 = nonzero_bits (trueop1, mode);
2470 /* If we are clearing all the nonzero bits, the result is zero. */
2471 if ((nzop1 & nzop0) == 0
2472 && !side_effects_p (op0) && !side_effects_p (op1))
2473 return CONST0_RTX (mode);
2474 }
2475 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2476 && GET_MODE_CLASS (mode) != MODE_CC)
2477 return op0;
2478 /* A & (~A) -> 0 */
2479 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2480 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2481 && ! side_effects_p (op0)
2482 && GET_MODE_CLASS (mode) != MODE_CC)
2483 return CONST0_RTX (mode);
2484
2485 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2486 there are no nonzero bits of C outside of X's mode. */
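/* E.g. (and:SI (sign_extend:SI X:QI) (const_int 0x7F)) becomes
   (zero_extend:SI (and:QI X 0x7F)): 0x7F has no bits outside QImode,
   and the surviving low bits are the same under either extension.  */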
2487 if ((GET_CODE (op0) == SIGN_EXTEND
2488 || GET_CODE (op0) == ZERO_EXTEND)
2489 && CONST_INT_P (trueop1)
2490 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2491 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2492 & INTVAL (trueop1)) == 0)
2493 {
2494 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2495 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2496 gen_int_mode (INTVAL (trueop1),
2497 imode));
2498 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2499 }
2500
2501 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2502 we might be able to further simplify the AND with X and potentially
2503 remove the truncation altogether. */
2504 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2505 {
2506 rtx x = XEXP (op0, 0);
2507 enum machine_mode xmode = GET_MODE (x);
2508 tem = simplify_gen_binary (AND, xmode, x,
2509 gen_int_mode (INTVAL (trueop1), xmode));
2510 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2511 }
2512
2513 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2514 if (GET_CODE (op0) == IOR
2515 && CONST_INT_P (trueop1)
2516 && CONST_INT_P (XEXP (op0, 1)))
2517 {
2518 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2519 return simplify_gen_binary (IOR, mode,
2520 simplify_gen_binary (AND, mode,
2521 XEXP (op0, 0), op1),
2522 gen_int_mode (tmp, mode));
2523 }
2524
2525 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2526 insn (and may simplify more). */
2527 if (GET_CODE (op0) == XOR
2528 && rtx_equal_p (XEXP (op0, 0), op1)
2529 && ! side_effects_p (op1))
2530 return simplify_gen_binary (AND, mode,
2531 simplify_gen_unary (NOT, mode,
2532 XEXP (op0, 1), mode),
2533 op1);
2534
2535 if (GET_CODE (op0) == XOR
2536 && rtx_equal_p (XEXP (op0, 1), op1)
2537 && ! side_effects_p (op1))
2538 return simplify_gen_binary (AND, mode,
2539 simplify_gen_unary (NOT, mode,
2540 XEXP (op0, 0), mode),
2541 op1);
2542
2543 /* Similarly for (~(A ^ B)) & A. */
2544 if (GET_CODE (op0) == NOT
2545 && GET_CODE (XEXP (op0, 0)) == XOR
2546 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2547 && ! side_effects_p (op1))
2548 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2549
2550 if (GET_CODE (op0) == NOT
2551 && GET_CODE (XEXP (op0, 0)) == XOR
2552 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2553 && ! side_effects_p (op1))
2554 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2555
2556 /* Convert (A | B) & A to A. */
2557 if (GET_CODE (op0) == IOR
2558 && (rtx_equal_p (XEXP (op0, 0), op1)
2559 || rtx_equal_p (XEXP (op0, 1), op1))
2560 && ! side_effects_p (XEXP (op0, 0))
2561 && ! side_effects_p (XEXP (op0, 1)))
2562 return op1;
2563
2564 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2565 ((A & N) + B) & M -> (A + B) & M
2566 Similarly if (N & M) == 0,
2567 ((A | N) + B) & M -> (A + B) & M
2568 and for - instead of + and/or ^ instead of |.
2569 Also, if (N & M) == 0, then
2570 (A +- N) & M -> A & M. */
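/* For instance, with M == 0xFF both ((A & 0x1FF) + B) & 0xFF and
   ((A | 0x100) + B) & 0xFF become (A + B) & 0xFF, because bits of A
   above the low byte cannot affect the low byte of the sum; similarly
   (A + 0x100) & 0xFF becomes A & 0xFF.  */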
2571 if (CONST_INT_P (trueop1)
2572 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2573 && ~INTVAL (trueop1)
2574 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2575 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2576 {
2577 rtx pmop[2];
2578 int which;
2579
2580 pmop[0] = XEXP (op0, 0);
2581 pmop[1] = XEXP (op0, 1);
2582
2583 if (CONST_INT_P (pmop[1])
2584 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2585 return simplify_gen_binary (AND, mode, pmop[0], op1);
2586
2587 for (which = 0; which < 2; which++)
2588 {
2589 tem = pmop[which];
2590 switch (GET_CODE (tem))
2591 {
2592 case AND:
2593 if (CONST_INT_P (XEXP (tem, 1))
2594 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2595 == INTVAL (trueop1))
2596 pmop[which] = XEXP (tem, 0);
2597 break;
2598 case IOR:
2599 case XOR:
2600 if (CONST_INT_P (XEXP (tem, 1))
2601 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2602 pmop[which] = XEXP (tem, 0);
2603 break;
2604 default:
2605 break;
2606 }
2607 }
2608
2609 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2610 {
2611 tem = simplify_gen_binary (GET_CODE (op0), mode,
2612 pmop[0], pmop[1]);
2613 return simplify_gen_binary (code, mode, tem, op1);
2614 }
2615 }
2616
2617 /* (and X (ior (not X) Y)) -> (and X Y) */
2618 if (GET_CODE (op1) == IOR
2619 && GET_CODE (XEXP (op1, 0)) == NOT
2620 && op0 == XEXP (XEXP (op1, 0), 0))
2621 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2622
2623 /* (and (ior (not X) Y) X) -> (and X Y) */
2624 if (GET_CODE (op0) == IOR
2625 && GET_CODE (XEXP (op0, 0)) == NOT
2626 && op1 == XEXP (XEXP (op0, 0), 0))
2627 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2628
2629 tem = simplify_associative_operation (code, mode, op0, op1);
2630 if (tem)
2631 return tem;
2632 break;
2633
2634 case UDIV:
2635 /* 0/x is 0 (or x&0 if x has side-effects). */
2636 if (trueop0 == CONST0_RTX (mode))
2637 {
2638 if (side_effects_p (op1))
2639 return simplify_gen_binary (AND, mode, op1, trueop0);
2640 return trueop0;
2641 }
2642 /* x/1 is x. */
2643 if (trueop1 == CONST1_RTX (mode))
2644 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2645 /* Convert divide by power of two into shift. */
2646 if (CONST_INT_P (trueop1)
2647 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2648 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2649 break;
2650
2651 case DIV:
2652 /* Handle floating point and integers separately. */
2653 if (SCALAR_FLOAT_MODE_P (mode))
2654 {
2655 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2656 safe for modes with NaNs, since 0.0 / 0.0 will then be
2657 NaN rather than 0.0. Nor is it safe for modes with signed
2658 zeros, since dividing 0 by a negative number gives -0.0. */
2659 if (trueop0 == CONST0_RTX (mode)
2660 && !HONOR_NANS (mode)
2661 && !HONOR_SIGNED_ZEROS (mode)
2662 && ! side_effects_p (op1))
2663 return op0;
2664 /* x/1.0 is x. */
2665 if (trueop1 == CONST1_RTX (mode)
2666 && !HONOR_SNANS (mode))
2667 return op0;
2668
2669 if (GET_CODE (trueop1) == CONST_DOUBLE
2670 && trueop1 != CONST0_RTX (mode))
2671 {
2672 REAL_VALUE_TYPE d;
2673 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2674
2675 /* x/-1.0 is -x. */
2676 if (REAL_VALUES_EQUAL (d, dconstm1)
2677 && !HONOR_SNANS (mode))
2678 return simplify_gen_unary (NEG, mode, op0, mode);
2679
2680 /* Change FP division by a constant into multiplication.
2681 Only do this with -freciprocal-math. */
2682 if (flag_reciprocal_math
2683 && !REAL_VALUES_EQUAL (d, dconst0))
2684 {
2685 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2686 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2687 return simplify_gen_binary (MULT, mode, op0, tem);
2688 }
2689 }
2690 }
2691 else
2692 {
2693 /* 0/x is 0 (or x&0 if x has side-effects). */
2694 if (trueop0 == CONST0_RTX (mode))
2695 {
2696 if (side_effects_p (op1))
2697 return simplify_gen_binary (AND, mode, op1, trueop0);
2698 return trueop0;
2699 }
2700 /* x/1 is x. */
2701 if (trueop1 == CONST1_RTX (mode))
2702 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2703 /* x/-1 is -x. */
2704 if (trueop1 == constm1_rtx)
2705 {
2706 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2707 return simplify_gen_unary (NEG, mode, x, mode);
2708 }
2709 }
2710 break;
2711
2712 case UMOD:
2713 /* 0%x is 0 (or x&0 if x has side-effects). */
2714 if (trueop0 == CONST0_RTX (mode))
2715 {
2716 if (side_effects_p (op1))
2717 return simplify_gen_binary (AND, mode, op1, trueop0);
2718 return trueop0;
2719 }
2720 /* x%1 is 0 (or x&0 if x has side-effects). */
2721 if (trueop1 == CONST1_RTX (mode))
2722 {
2723 if (side_effects_p (op0))
2724 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2725 return CONST0_RTX (mode);
2726 }
2727 /* Implement modulus by power of two as AND. */
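/* E.g. (umod X (const_int 8)) becomes (and X (const_int 7)): for an
   unsigned value the low three bits are exactly the remainder mod 8.  */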
2728 if (CONST_INT_P (trueop1)
2729 && exact_log2 (INTVAL (trueop1)) > 0)
2730 return simplify_gen_binary (AND, mode, op0,
2731 GEN_INT (INTVAL (op1) - 1));
2732 break;
2733
2734 case MOD:
2735 /* 0%x is 0 (or x&0 if x has side-effects). */
2736 if (trueop0 == CONST0_RTX (mode))
2737 {
2738 if (side_effects_p (op1))
2739 return simplify_gen_binary (AND, mode, op1, trueop0);
2740 return trueop0;
2741 }
2742 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2743 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2744 {
2745 if (side_effects_p (op0))
2746 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2747 return CONST0_RTX (mode);
2748 }
2749 break;
2750
2751 case ROTATERT:
2752 case ROTATE:
2753 case ASHIFTRT:
2754 if (trueop1 == CONST0_RTX (mode))
2755 return op0;
2756 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2757 return op0;
2758 /* Rotating ~0 always results in ~0. */
2759 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2760 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2761 && ! side_effects_p (op1))
2762 return op0;
2763 canonicalize_shift:
2764 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2765 {
2766 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2767 if (val != INTVAL (op1))
2768 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2769 }
2770 break;
2771
2772 case ASHIFT:
2773 case SS_ASHIFT:
2774 case US_ASHIFT:
2775 if (trueop1 == CONST0_RTX (mode))
2776 return op0;
2777 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2778 return op0;
2779 goto canonicalize_shift;
2780
2781 case LSHIFTRT:
2782 if (trueop1 == CONST0_RTX (mode))
2783 return op0;
2784 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2785 return op0;
2786 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
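/* E.g. in SImode, if CLZ of zero is defined as 32, (lshiftrt (clz X) 5)
   is 1 only when (clz X) == 32, i.e. only when X == 0, so it simplifies
   to (eq X 0).  */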
2787 if (GET_CODE (op0) == CLZ
2788 && CONST_INT_P (trueop1)
2789 && STORE_FLAG_VALUE == 1
2790 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2791 {
2792 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2793 unsigned HOST_WIDE_INT zero_val = 0;
2794
2795 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2796 && zero_val == GET_MODE_BITSIZE (imode)
2797 && INTVAL (trueop1) == exact_log2 (zero_val))
2798 return simplify_gen_relational (EQ, mode, imode,
2799 XEXP (op0, 0), const0_rtx);
2800 }
2801 goto canonicalize_shift;
2802
2803 case SMIN:
2804 if (width <= HOST_BITS_PER_WIDE_INT
2805 && CONST_INT_P (trueop1)
2806 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2807 && ! side_effects_p (op0))
2808 return op1;
2809 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2810 return op0;
2811 tem = simplify_associative_operation (code, mode, op0, op1);
2812 if (tem)
2813 return tem;
2814 break;
2815
2816 case SMAX:
2817 if (width <= HOST_BITS_PER_WIDE_INT
2818 && CONST_INT_P (trueop1)
2819 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2820 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2821 && ! side_effects_p (op0))
2822 return op1;
2823 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2824 return op0;
2825 tem = simplify_associative_operation (code, mode, op0, op1);
2826 if (tem)
2827 return tem;
2828 break;
2829
2830 case UMIN:
2831 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2832 return op1;
2833 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2834 return op0;
2835 tem = simplify_associative_operation (code, mode, op0, op1);
2836 if (tem)
2837 return tem;
2838 break;
2839
2840 case UMAX:
2841 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2842 return op1;
2843 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2844 return op0;
2845 tem = simplify_associative_operation (code, mode, op0, op1);
2846 if (tem)
2847 return tem;
2848 break;
2849
2850 case SS_PLUS:
2851 case US_PLUS:
2852 case SS_MINUS:
2853 case US_MINUS:
2854 case SS_MULT:
2855 case US_MULT:
2856 case SS_DIV:
2857 case US_DIV:
2858 /* ??? There are simplifications that can be done. */
2859 return 0;
2860
2861 case VEC_SELECT:
2862 if (!VECTOR_MODE_P (mode))
2863 {
2864 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2865 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2866 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2867 gcc_assert (XVECLEN (trueop1, 0) == 1);
2868 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2869
2870 if (GET_CODE (trueop0) == CONST_VECTOR)
2871 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2872 (trueop1, 0, 0)));
2873
2874 /* Extract a scalar element from a nested VEC_SELECT expression
2875 (with optional nested VEC_CONCAT expression). Some targets
2876 (i386) extract a scalar element from a vector using a chain of
2877 nested VEC_SELECT expressions. When the input operand is a memory
2878 operand, this operation can be simplified to a simple scalar
2879 load from the memory address plus an offset. */
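/* For example, assuming A and B are V4SFmode, the expression
   (vec_select:SF (vec_select:V4SF (vec_concat:V8SF A B)
   (parallel [4 5 6 7])) (parallel [2])) selects element 6 of the
   concatenation, i.e. element 2 of B, and so reduces to
   (vec_select:SF B (parallel [2])).  */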
2880 if (GET_CODE (trueop0) == VEC_SELECT)
2881 {
2882 rtx op0 = XEXP (trueop0, 0);
2883 rtx op1 = XEXP (trueop0, 1);
2884
2885 enum machine_mode opmode = GET_MODE (op0);
2886 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2887 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2888
2889 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2890 int elem;
2891
2892 rtvec vec;
2893 rtx tmp_op, tmp;
2894
2895 gcc_assert (GET_CODE (op1) == PARALLEL);
2896 gcc_assert (i < n_elts);
2897
2898 /* Select the element pointed to by the nested selector. */
2899 elem = INTVAL (XVECEXP (op1, 0, i));
2900
2901 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2902 if (GET_CODE (op0) == VEC_CONCAT)
2903 {
2904 rtx op00 = XEXP (op0, 0);
2905 rtx op01 = XEXP (op0, 1);
2906
2907 enum machine_mode mode00, mode01;
2908 int n_elts00, n_elts01;
2909
2910 mode00 = GET_MODE (op00);
2911 mode01 = GET_MODE (op01);
2912
2913 /* Find out number of elements of each operand. */
2914 if (VECTOR_MODE_P (mode00))
2915 {
2916 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2917 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2918 }
2919 else
2920 n_elts00 = 1;
2921
2922 if (VECTOR_MODE_P (mode01))
2923 {
2924 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2925 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2926 }
2927 else
2928 n_elts01 = 1;
2929
2930 gcc_assert (n_elts == n_elts00 + n_elts01);
2931
2932 /* Select correct operand of VEC_CONCAT
2933 and adjust selector. */
2934 if (elem < n_elts00)
2935 tmp_op = op00;
2936 else
2937 {
2938 tmp_op = op01;
2939 elem -= n_elts00;
2940 }
2941 }
2942 else
2943 tmp_op = op0;
2944
2945 vec = rtvec_alloc (1);
2946 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2947
2948 tmp = gen_rtx_fmt_ee (code, mode,
2949 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2950 return tmp;
2951 }
2952 if (GET_CODE (trueop0) == VEC_DUPLICATE
2953 && GET_MODE (XEXP (trueop0, 0)) == mode)
2954 return XEXP (trueop0, 0);
2955 }
2956 else
2957 {
2958 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2959 gcc_assert (GET_MODE_INNER (mode)
2960 == GET_MODE_INNER (GET_MODE (trueop0)));
2961 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2962
2963 if (GET_CODE (trueop0) == CONST_VECTOR)
2964 {
2965 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2966 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2967 rtvec v = rtvec_alloc (n_elts);
2968 unsigned int i;
2969
2970 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2971 for (i = 0; i < n_elts; i++)
2972 {
2973 rtx x = XVECEXP (trueop1, 0, i);
2974
2975 gcc_assert (CONST_INT_P (x));
2976 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2977 INTVAL (x));
2978 }
2979
2980 return gen_rtx_CONST_VECTOR (mode, v);
2981 }
2982 }
2983
2984 if (XVECLEN (trueop1, 0) == 1
2985 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2986 && GET_CODE (trueop0) == VEC_CONCAT)
2987 {
2988 rtx vec = trueop0;
2989 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2990
2991 /* Try to find the element in the VEC_CONCAT. */
2992 while (GET_MODE (vec) != mode
2993 && GET_CODE (vec) == VEC_CONCAT)
2994 {
2995 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2996 if (offset < vec_size)
2997 vec = XEXP (vec, 0);
2998 else
2999 {
3000 offset -= vec_size;
3001 vec = XEXP (vec, 1);
3002 }
3003 vec = avoid_constant_pool_reference (vec);
3004 }
3005
3006 if (GET_MODE (vec) == mode)
3007 return vec;
3008 }
3009
3010 return 0;
3011 case VEC_CONCAT:
3012 {
3013 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3014 ? GET_MODE (trueop0)
3015 : GET_MODE_INNER (mode));
3016 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3017 ? GET_MODE (trueop1)
3018 : GET_MODE_INNER (mode));
3019
3020 gcc_assert (VECTOR_MODE_P (mode));
3021 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3022 == GET_MODE_SIZE (mode));
3023
3024 if (VECTOR_MODE_P (op0_mode))
3025 gcc_assert (GET_MODE_INNER (mode)
3026 == GET_MODE_INNER (op0_mode));
3027 else
3028 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3029
3030 if (VECTOR_MODE_P (op1_mode))
3031 gcc_assert (GET_MODE_INNER (mode)
3032 == GET_MODE_INNER (op1_mode));
3033 else
3034 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3035
3036 if ((GET_CODE (trueop0) == CONST_VECTOR
3037 || CONST_INT_P (trueop0)
3038 || GET_CODE (trueop0) == CONST_DOUBLE)
3039 && (GET_CODE (trueop1) == CONST_VECTOR
3040 || CONST_INT_P (trueop1)
3041 || GET_CODE (trueop1) == CONST_DOUBLE))
3042 {
3043 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3044 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3045 rtvec v = rtvec_alloc (n_elts);
3046 unsigned int i;
3047 unsigned in_n_elts = 1;
3048
3049 if (VECTOR_MODE_P (op0_mode))
3050 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3051 for (i = 0; i < n_elts; i++)
3052 {
3053 if (i < in_n_elts)
3054 {
3055 if (!VECTOR_MODE_P (op0_mode))
3056 RTVEC_ELT (v, i) = trueop0;
3057 else
3058 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3059 }
3060 else
3061 {
3062 if (!VECTOR_MODE_P (op1_mode))
3063 RTVEC_ELT (v, i) = trueop1;
3064 else
3065 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3066 i - in_n_elts);
3067 }
3068 }
3069
3070 return gen_rtx_CONST_VECTOR (mode, v);
3071 }
3072 }
3073 return 0;
3074
3075 default:
3076 gcc_unreachable ();
3077 }
3078
3079 return 0;
3080 }
3081
3082 rtx
3083 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3084 rtx op0, rtx op1)
3085 {
3086 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3087 HOST_WIDE_INT val;
3088 unsigned int width = GET_MODE_BITSIZE (mode);
3089
3090 if (VECTOR_MODE_P (mode)
3091 && code != VEC_CONCAT
3092 && GET_CODE (op0) == CONST_VECTOR
3093 && GET_CODE (op1) == CONST_VECTOR)
3094 {
3095 unsigned n_elts = GET_MODE_NUNITS (mode);
3096 enum machine_mode op0mode = GET_MODE (op0);
3097 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3098 enum machine_mode op1mode = GET_MODE (op1);
3099 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3100 rtvec v = rtvec_alloc (n_elts);
3101 unsigned int i;
3102
3103 gcc_assert (op0_n_elts == n_elts);
3104 gcc_assert (op1_n_elts == n_elts);
3105 for (i = 0; i < n_elts; i++)
3106 {
3107 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3108 CONST_VECTOR_ELT (op0, i),
3109 CONST_VECTOR_ELT (op1, i));
3110 if (!x)
3111 return 0;
3112 RTVEC_ELT (v, i) = x;
3113 }
3114
3115 return gen_rtx_CONST_VECTOR (mode, v);
3116 }
3117
3118 if (VECTOR_MODE_P (mode)
3119 && code == VEC_CONCAT
3120 && (CONST_INT_P (op0)
3121 || GET_CODE (op0) == CONST_DOUBLE
3122 || GET_CODE (op0) == CONST_FIXED)
3123 && (CONST_INT_P (op1)
3124 || GET_CODE (op1) == CONST_DOUBLE
3125 || GET_CODE (op1) == CONST_FIXED))
3126 {
3127 unsigned n_elts = GET_MODE_NUNITS (mode);
3128 rtvec v = rtvec_alloc (n_elts);
3129
3130 gcc_assert (n_elts >= 2);
3131 if (n_elts == 2)
3132 {
3133 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3134 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3135
3136 RTVEC_ELT (v, 0) = op0;
3137 RTVEC_ELT (v, 1) = op1;
3138 }
3139 else
3140 {
3141 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3142 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3143 unsigned i;
3144
3145 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3146 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3147 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3148
3149 for (i = 0; i < op0_n_elts; ++i)
3150 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3151 for (i = 0; i < op1_n_elts; ++i)
3152 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3153 }
3154
3155 return gen_rtx_CONST_VECTOR (mode, v);
3156 }
3157
3158 if (SCALAR_FLOAT_MODE_P (mode)
3159 && GET_CODE (op0) == CONST_DOUBLE
3160 && GET_CODE (op1) == CONST_DOUBLE
3161 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3162 {
3163 if (code == AND
3164 || code == IOR
3165 || code == XOR)
3166 {
3167 long tmp0[4];
3168 long tmp1[4];
3169 REAL_VALUE_TYPE r;
3170 int i;
3171
3172 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3173 GET_MODE (op0));
3174 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3175 GET_MODE (op1));
3176 for (i = 0; i < 4; i++)
3177 {
3178 switch (code)
3179 {
3180 case AND:
3181 tmp0[i] &= tmp1[i];
3182 break;
3183 case IOR:
3184 tmp0[i] |= tmp1[i];
3185 break;
3186 case XOR:
3187 tmp0[i] ^= tmp1[i];
3188 break;
3189 default:
3190 gcc_unreachable ();
3191 }
3192 }
3193 real_from_target (&r, tmp0, mode);
3194 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3195 }
3196 else
3197 {
3198 REAL_VALUE_TYPE f0, f1, value, result;
3199 bool inexact;
3200
3201 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3202 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3203 real_convert (&f0, mode, &f0);
3204 real_convert (&f1, mode, &f1);
3205
3206 if (HONOR_SNANS (mode)
3207 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3208 return 0;
3209
3210 if (code == DIV
3211 && REAL_VALUES_EQUAL (f1, dconst0)
3212 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3213 return 0;
3214
3215 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3216 && flag_trapping_math
3217 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3218 {
3219 int s0 = REAL_VALUE_NEGATIVE (f0);
3220 int s1 = REAL_VALUE_NEGATIVE (f1);
3221
3222 switch (code)
3223 {
3224 case PLUS:
3225 /* Inf + -Inf = NaN plus exception. */
3226 if (s0 != s1)
3227 return 0;
3228 break;
3229 case MINUS:
3230 /* Inf - Inf = NaN plus exception. */
3231 if (s0 == s1)
3232 return 0;
3233 break;
3234 case DIV:
3235 /* Inf / Inf = NaN plus exception. */
3236 return 0;
3237 default:
3238 break;
3239 }
3240 }
3241
3242 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3243 && flag_trapping_math
3244 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3245 || (REAL_VALUE_ISINF (f1)
3246 && REAL_VALUES_EQUAL (f0, dconst0))))
3247 /* Inf * 0 = NaN plus exception. */
3248 return 0;
3249
3250 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3251 &f0, &f1);
3252 real_convert (&result, mode, &value);
3253
3254 /* Don't constant fold this floating point operation if
3255 the result has overflowed and flag_trapping_math. */
3256
3257 if (flag_trapping_math
3258 && MODE_HAS_INFINITIES (mode)
3259 && REAL_VALUE_ISINF (result)
3260 && !REAL_VALUE_ISINF (f0)
3261 && !REAL_VALUE_ISINF (f1))
3262 /* Overflow plus exception. */
3263 return 0;
3264
3265 /* Don't constant fold this floating point operation if the
3266 result may depend upon the run-time rounding mode and
3267 flag_rounding_math is set, or if GCC's software emulation
3268 is unable to accurately represent the result. */
3269
3270 if ((flag_rounding_math
3271 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3272 && (inexact || !real_identical (&result, &value)))
3273 return NULL_RTX;
3274
3275 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3276 }
3277 }
3278
3279 /* We can fold some multi-word operations. */
3280 if (GET_MODE_CLASS (mode) == MODE_INT
3281 && width == HOST_BITS_PER_WIDE_INT * 2
3282 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3283 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3284 {
3285 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3286 HOST_WIDE_INT h1, h2, hv, ht;
3287
3288 if (GET_CODE (op0) == CONST_DOUBLE)
3289 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3290 else
3291 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3292
3293 if (GET_CODE (op1) == CONST_DOUBLE)
3294 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3295 else
3296 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3297
3298 switch (code)
3299 {
3300 case MINUS:
3301 /* A - B == A + (-B). */
3302 neg_double (l2, h2, &lv, &hv);
3303 l2 = lv, h2 = hv;
3304
3305 /* Fall through.... */
3306
3307 case PLUS:
3308 add_double (l1, h1, l2, h2, &lv, &hv);
3309 break;
3310
3311 case MULT:
3312 mul_double (l1, h1, l2, h2, &lv, &hv);
3313 break;
3314
3315 case DIV:
3316 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3317 &lv, &hv, &lt, &ht))
3318 return 0;
3319 break;
3320
3321 case MOD:
3322 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3323 &lt, &ht, &lv, &hv))
3324 return 0;
3325 break;
3326
3327 case UDIV:
3328 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3329 &lv, &hv, &lt, &ht))
3330 return 0;
3331 break;
3332
3333 case UMOD:
3334 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3335 &lt, &ht, &lv, &hv))
3336 return 0;
3337 break;
3338
3339 case AND:
3340 lv = l1 & l2, hv = h1 & h2;
3341 break;
3342
3343 case IOR:
3344 lv = l1 | l2, hv = h1 | h2;
3345 break;
3346
3347 case XOR:
3348 lv = l1 ^ l2, hv = h1 ^ h2;
3349 break;
3350
3351 case SMIN:
3352 if (h1 < h2
3353 || (h1 == h2
3354 && ((unsigned HOST_WIDE_INT) l1
3355 < (unsigned HOST_WIDE_INT) l2)))
3356 lv = l1, hv = h1;
3357 else
3358 lv = l2, hv = h2;
3359 break;
3360
3361 case SMAX:
3362 if (h1 > h2
3363 || (h1 == h2
3364 && ((unsigned HOST_WIDE_INT) l1
3365 > (unsigned HOST_WIDE_INT) l2)))
3366 lv = l1, hv = h1;
3367 else
3368 lv = l2, hv = h2;
3369 break;
3370
3371 case UMIN:
3372 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3373 || (h1 == h2
3374 && ((unsigned HOST_WIDE_INT) l1
3375 < (unsigned HOST_WIDE_INT) l2)))
3376 lv = l1, hv = h1;
3377 else
3378 lv = l2, hv = h2;
3379 break;
3380
3381 case UMAX:
3382 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3383 || (h1 == h2
3384 && ((unsigned HOST_WIDE_INT) l1
3385 > (unsigned HOST_WIDE_INT) l2)))
3386 lv = l1, hv = h1;
3387 else
3388 lv = l2, hv = h2;
3389 break;
3390
3391 case LSHIFTRT: case ASHIFTRT:
3392 case ASHIFT:
3393 case ROTATE: case ROTATERT:
3394 if (SHIFT_COUNT_TRUNCATED)
3395 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3396
3397 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3398 return 0;
3399
3400 if (code == LSHIFTRT || code == ASHIFTRT)
3401 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3402 code == ASHIFTRT);
3403 else if (code == ASHIFT)
3404 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3405 else if (code == ROTATE)
3406 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3407 else /* code == ROTATERT */
3408 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3409 break;
3410
3411 default:
3412 return 0;
3413 }
3414
3415 return immed_double_const (lv, hv, mode);
3416 }
3417
3418 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3419 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3420 {
3421 /* Get the integer argument values in two forms:
3422 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
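/* E.g. for width == 8 and a constant whose low byte is 0xFF, ARG0 is
   255 while ARG0S is -1; the unsigned operations below (UDIV, UMOD,
   UMIN, UMAX) use the zero-extended forms, the signed ones the
   sign-extended forms.  */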
3423
3424 arg0 = INTVAL (op0);
3425 arg1 = INTVAL (op1);
3426
3427 if (width < HOST_BITS_PER_WIDE_INT)
3428 {
3429 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3430 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3431
3432 arg0s = arg0;
3433 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3434 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3435
3436 arg1s = arg1;
3437 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3438 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3439 }
3440 else
3441 {
3442 arg0s = arg0;
3443 arg1s = arg1;
3444 }
3445
3446 /* Compute the value of the arithmetic. */
3447
3448 switch (code)
3449 {
3450 case PLUS:
3451 val = arg0s + arg1s;
3452 break;
3453
3454 case MINUS:
3455 val = arg0s - arg1s;
3456 break;
3457
3458 case MULT:
3459 val = arg0s * arg1s;
3460 break;
3461
3462 case DIV:
3463 if (arg1s == 0
3464 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3465 && arg1s == -1))
3466 return 0;
3467 val = arg0s / arg1s;
3468 break;
3469
3470 case MOD:
3471 if (arg1s == 0
3472 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3473 && arg1s == -1))
3474 return 0;
3475 val = arg0s % arg1s;
3476 break;
3477
3478 case UDIV:
3479 if (arg1 == 0
3480 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3481 && arg1s == -1))
3482 return 0;
3483 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3484 break;
3485
3486 case UMOD:
3487 if (arg1 == 0
3488 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3489 && arg1s == -1))
3490 return 0;
3491 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3492 break;
3493
3494 case AND:
3495 val = arg0 & arg1;
3496 break;
3497
3498 case IOR:
3499 val = arg0 | arg1;
3500 break;
3501
3502 case XOR:
3503 val = arg0 ^ arg1;
3504 break;
3505
3506 case LSHIFTRT:
3507 case ASHIFT:
3508 case ASHIFTRT:
3509 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3510 the value is in range. We can't return any old value for
3511 out-of-range arguments because either the middle-end (via
3512 shift_truncation_mask) or the back-end might be relying on
3513 target-specific knowledge. Nor can we rely on
3514 shift_truncation_mask, since the shift might not be part of an
3515 ashlM3, lshrM3 or ashrM3 instruction. */
3516 if (SHIFT_COUNT_TRUNCATED)
3517 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3518 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3519 return 0;
3520
3521 val = (code == ASHIFT
3522 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3523 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3524
3525 /* Sign-extend the result for arithmetic right shifts. */
3526 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3527 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3528 break;
3529
3530 case ROTATERT:
3531 if (arg1 < 0)
3532 return 0;
3533
3534 arg1 %= width;
3535 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3536 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3537 break;
3538
3539 case ROTATE:
3540 if (arg1 < 0)
3541 return 0;
3542
3543 arg1 %= width;
3544 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3545 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3546 break;
3547
3548 case COMPARE:
3549 /* Do nothing here. */
3550 return 0;
3551
3552 case SMIN:
3553 val = arg0s <= arg1s ? arg0s : arg1s;
3554 break;
3555
3556 case UMIN:
3557 val = ((unsigned HOST_WIDE_INT) arg0
3558 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3559 break;
3560
3561 case SMAX:
3562 val = arg0s > arg1s ? arg0s : arg1s;
3563 break;
3564
3565 case UMAX:
3566 val = ((unsigned HOST_WIDE_INT) arg0
3567 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3568 break;
3569
3570 case SS_PLUS:
3571 case US_PLUS:
3572 case SS_MINUS:
3573 case US_MINUS:
3574 case SS_MULT:
3575 case US_MULT:
3576 case SS_DIV:
3577 case US_DIV:
3578 case SS_ASHIFT:
3579 case US_ASHIFT:
3580 /* ??? There are simplifications that can be done. */
3581 return 0;
3582
3583 default:
3584 gcc_unreachable ();
3585 }
3586
3587 return gen_int_mode (val, mode);
3588 }
3589
3590 return NULL_RTX;
3591 }
3592
3593
3594 \f
3595 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3596 PLUS or MINUS.
3597
3598 Rather than test for specific cases, we do this by a brute-force method
3599 and do all possible simplifications until no more changes occur. Then
3600 we rebuild the operation. */
3601
3602 struct simplify_plus_minus_op_data
3603 {
3604 rtx op;
3605 short neg;
3606 };
3607
3608 static bool
3609 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3610 {
3611 int result;
3612
3613 result = (commutative_operand_precedence (y)
3614 - commutative_operand_precedence (x));
3615 if (result)
3616 return result > 0;
3617
3618 /* Group together equal REGs to do more simplification. */
3619 if (REG_P (x) && REG_P (y))
3620 return REGNO (x) > REGNO (y);
3621 else
3622 return false;
3623 }
3624
3625 static rtx
3626 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3627 rtx op1)
3628 {
3629 struct simplify_plus_minus_op_data ops[8];
3630 rtx result, tem;
3631 int n_ops = 2, input_ops = 2;
3632 int changed, n_constants = 0, canonicalized = 0;
3633 int i, j;
3634
3635 memset (ops, 0, sizeof ops);
3636
3637 /* Set up the two operands and then expand them until nothing has been
3638 changed. If we run out of room in our array, give up; this should
3639 almost never happen. */
3640
3641 ops[0].op = op0;
3642 ops[0].neg = 0;
3643 ops[1].op = op1;
3644 ops[1].neg = (code == MINUS);
3645
3646 do
3647 {
3648 changed = 0;
3649
3650 for (i = 0; i < n_ops; i++)
3651 {
3652 rtx this_op = ops[i].op;
3653 int this_neg = ops[i].neg;
3654 enum rtx_code this_code = GET_CODE (this_op);
3655
3656 switch (this_code)
3657 {
3658 case PLUS:
3659 case MINUS:
3660 if (n_ops == 7)
3661 return NULL_RTX;
3662
3663 ops[n_ops].op = XEXP (this_op, 1);
3664 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3665 n_ops++;
3666
3667 ops[i].op = XEXP (this_op, 0);
3668 input_ops++;
3669 changed = 1;
3670 canonicalized |= this_neg;
3671 break;
3672
3673 case NEG:
3674 ops[i].op = XEXP (this_op, 0);
3675 ops[i].neg = ! this_neg;
3676 changed = 1;
3677 canonicalized = 1;
3678 break;
3679
3680 case CONST:
3681 if (n_ops < 7
3682 && GET_CODE (XEXP (this_op, 0)) == PLUS
3683 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3684 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3685 {
3686 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3687 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3688 ops[n_ops].neg = this_neg;
3689 n_ops++;
3690 changed = 1;
3691 canonicalized = 1;
3692 }
3693 break;
3694
3695 case NOT:
3696 /* ~a -> (-a - 1) */
3697 if (n_ops != 7)
3698 {
3699 ops[n_ops].op = constm1_rtx;
3700 ops[n_ops++].neg = this_neg;
3701 ops[i].op = XEXP (this_op, 0);
3702 ops[i].neg = !this_neg;
3703 changed = 1;
3704 canonicalized = 1;
3705 }
3706 break;
3707
3708 case CONST_INT:
3709 n_constants++;
3710 if (this_neg)
3711 {
3712 ops[i].op = neg_const_int (mode, this_op);
3713 ops[i].neg = 0;
3714 changed = 1;
3715 canonicalized = 1;
3716 }
3717 break;
3718
3719 default:
3720 break;
3721 }
3722 }
3723 }
3724 while (changed);
3725
3726 if (n_constants > 1)
3727 canonicalized = 1;
3728
3729 gcc_assert (n_ops >= 2);
3730
3731 /* If we only have two operands, we can avoid the loops. */
3732 if (n_ops == 2)
3733 {
3734 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3735 rtx lhs, rhs;
3736
3737 /* Get the two operands. Be careful with the order, especially for
3738 the cases where code == MINUS. */
3739 if (ops[0].neg && ops[1].neg)
3740 {
3741 lhs = gen_rtx_NEG (mode, ops[0].op);
3742 rhs = ops[1].op;
3743 }
3744 else if (ops[0].neg)
3745 {
3746 lhs = ops[1].op;
3747 rhs = ops[0].op;
3748 }
3749 else
3750 {
3751 lhs = ops[0].op;
3752 rhs = ops[1].op;
3753 }
3754
3755 return simplify_const_binary_operation (code, mode, lhs, rhs);
3756 }
3757
3758 /* Now simplify each pair of operands until nothing changes. */
3759 do
3760 {
3761 /* Insertion sort is good enough for an eight-element array. */
3762 for (i = 1; i < n_ops; i++)
3763 {
3764 struct simplify_plus_minus_op_data save;
3765 j = i - 1;
3766 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3767 continue;
3768
3769 canonicalized = 1;
3770 save = ops[i];
3771 do
3772 ops[j + 1] = ops[j];
3773 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3774 ops[j + 1] = save;
3775 }
3776
3777 changed = 0;
3778 for (i = n_ops - 1; i > 0; i--)
3779 for (j = i - 1; j >= 0; j--)
3780 {
3781 rtx lhs = ops[j].op, rhs = ops[i].op;
3782 int lneg = ops[j].neg, rneg = ops[i].neg;
3783
3784 if (lhs != 0 && rhs != 0)
3785 {
3786 enum rtx_code ncode = PLUS;
3787
3788 if (lneg != rneg)
3789 {
3790 ncode = MINUS;
3791 if (lneg)
3792 tem = lhs, lhs = rhs, rhs = tem;
3793 }
3794 else if (swap_commutative_operands_p (lhs, rhs))
3795 tem = lhs, lhs = rhs, rhs = tem;
3796
3797 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3798 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3799 {
3800 rtx tem_lhs, tem_rhs;
3801
3802 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3803 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3804 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3805
3806 if (tem && !CONSTANT_P (tem))
3807 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3808 }
3809 else
3810 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3811
3812 /* Reject "simplifications" that just wrap the two
3813 arguments in a CONST. Failure to do so can result
3814 in infinite recursion with simplify_binary_operation
3815 when it calls us to simplify CONST operations. */
3816 if (tem
3817 && ! (GET_CODE (tem) == CONST
3818 && GET_CODE (XEXP (tem, 0)) == ncode
3819 && XEXP (XEXP (tem, 0), 0) == lhs
3820 && XEXP (XEXP (tem, 0), 1) == rhs))
3821 {
3822 lneg &= rneg;
3823 if (GET_CODE (tem) == NEG)
3824 tem = XEXP (tem, 0), lneg = !lneg;
3825 if (CONST_INT_P (tem) && lneg)
3826 tem = neg_const_int (mode, tem), lneg = 0;
3827
3828 ops[i].op = tem;
3829 ops[i].neg = lneg;
3830 ops[j].op = NULL_RTX;
3831 changed = 1;
3832 canonicalized = 1;
3833 }
3834 }
3835 }
3836
3837 /* If nothing changed, fail. */
3838 if (!canonicalized)
3839 return NULL_RTX;
3840
3841 /* Pack all the operands to the lower-numbered entries. */
3842 for (i = 0, j = 0; j < n_ops; j++)
3843 if (ops[j].op)
3844 {
3845 ops[i] = ops[j];
3846 i++;
3847 }
3848 n_ops = i;
3849 }
3850 while (changed);
3851
3852 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3853 if (n_ops == 2
3854 && CONST_INT_P (ops[1].op)
3855 && CONSTANT_P (ops[0].op)
3856 && ops[0].neg)
3857 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3858
3859 /* We suppressed creation of trivial CONST expressions in the
3860 combination loop to avoid recursion. Create one manually now.
3861 The combination loop should have ensured that there is exactly
3862 one CONST_INT, and the sort will have ensured that it is last
3863 in the array and that any other constant will be next-to-last. */
3864
3865 if (n_ops > 1
3866 && CONST_INT_P (ops[n_ops - 1].op)
3867 && CONSTANT_P (ops[n_ops - 2].op))
3868 {
3869 rtx value = ops[n_ops - 1].op;
3870 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3871 value = neg_const_int (mode, value);
3872 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3873 n_ops--;
3874 }
3875
3876 /* Put a non-negated operand first, if possible. */
3877
3878 for (i = 0; i < n_ops && ops[i].neg; i++)
3879 continue;
3880 if (i == n_ops)
3881 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3882 else if (i != 0)
3883 {
3884 tem = ops[0].op;
3885 ops[0] = ops[i];
3886 ops[i].op = tem;
3887 ops[i].neg = 1;
3888 }
3889
3890 /* Now make the result by performing the requested operations. */
3891 result = ops[0].op;
3892 for (i = 1; i < n_ops; i++)
3893 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3894 mode, result, ops[i].op);
3895
3896 return result;
3897 }
3898
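/* An illustrative trace of simplify_plus_minus above, assuming X is a
   side-effect-free integer register: simplifying (plus (not X) X)
   flattens the operands to { -X, -1, X }; the X terms cancel in the
   pairwise loop and the routine returns (const_int -1).  */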
3899 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3900 static bool
3901 plus_minus_operand_p (const_rtx x)
3902 {
3903 return GET_CODE (x) == PLUS
3904 || GET_CODE (x) == MINUS
3905 || (GET_CODE (x) == CONST
3906 && GET_CODE (XEXP (x, 0)) == PLUS
3907 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3908 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3909 }
3910
3911 /* Like simplify_binary_operation except used for relational operators.
3912 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3913 not both be VOIDmode.
3914
3915 CMP_MODE specifies the mode in which the comparison is done, so it is
3916 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3917 the operands or, if both are VOIDmode, the operands are compared in
3918 "infinite precision". */
3919 rtx
3920 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3921 enum machine_mode cmp_mode, rtx op0, rtx op1)
3922 {
3923 rtx tem, trueop0, trueop1;
3924
3925 if (cmp_mode == VOIDmode)
3926 cmp_mode = GET_MODE (op0);
3927 if (cmp_mode == VOIDmode)
3928 cmp_mode = GET_MODE (op1);
3929
3930 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3931 if (tem)
3932 {
3933 if (SCALAR_FLOAT_MODE_P (mode))
3934 {
3935 if (tem == const0_rtx)
3936 return CONST0_RTX (mode);
3937 #ifdef FLOAT_STORE_FLAG_VALUE
3938 {
3939 REAL_VALUE_TYPE val;
3940 val = FLOAT_STORE_FLAG_VALUE (mode);
3941 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3942 }
3943 #else
3944 return NULL_RTX;
3945 #endif
3946 }
3947 if (VECTOR_MODE_P (mode))
3948 {
3949 if (tem == const0_rtx)
3950 return CONST0_RTX (mode);
3951 #ifdef VECTOR_STORE_FLAG_VALUE
3952 {
3953 int i, units;
3954 rtvec v;
3955
3956 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3957 if (val == NULL_RTX)
3958 return NULL_RTX;
3959 if (val == const1_rtx)
3960 return CONST1_RTX (mode);
3961
3962 units = GET_MODE_NUNITS (mode);
3963 v = rtvec_alloc (units);
3964 for (i = 0; i < units; i++)
3965 RTVEC_ELT (v, i) = val;
3966 return gen_rtx_raw_CONST_VECTOR (mode, v);
3967 }
3968 #else
3969 return NULL_RTX;
3970 #endif
3971 }
3972
3973 return tem;
3974 }
3975
3976 /* For the following tests, ensure const0_rtx is op1. */
3977 if (swap_commutative_operands_p (op0, op1)
3978 || (op0 == const0_rtx && op1 != const0_rtx))
3979 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3980
3981 /* If op0 is a compare, extract the comparison arguments from it. */
3982 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3983 return simplify_gen_relational (code, mode, VOIDmode,
3984 XEXP (op0, 0), XEXP (op0, 1));
3985
3986 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3987 || CC0_P (op0))
3988 return NULL_RTX;
3989
3990 trueop0 = avoid_constant_pool_reference (op0);
3991 trueop1 = avoid_constant_pool_reference (op1);
3992 return simplify_relational_operation_1 (code, mode, cmp_mode,
3993 trueop0, trueop1);
3994 }
3995
3996 /* This part of simplify_relational_operation is only used when CMP_MODE
3997 is not in class MODE_CC (i.e. it is a real comparison).
3998
3999 MODE is the mode of the result, while CMP_MODE specifies the mode in
4000 which the comparison is done, so it is the mode of the operands. */
4001
4002 static rtx
4003 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4004 enum machine_mode cmp_mode, rtx op0, rtx op1)
4005 {
4006 enum rtx_code op0code = GET_CODE (op0);
4007
4008 if (op1 == const0_rtx && COMPARISON_P (op0))
4009 {
4010 /* If op0 is a comparison, extract the comparison arguments
4011 from it. */
4012 if (code == NE)
4013 {
4014 if (GET_MODE (op0) == mode)
4015 return simplify_rtx (op0);
4016 else
4017 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4018 XEXP (op0, 0), XEXP (op0, 1));
4019 }
4020 else if (code == EQ)
4021 {
4022 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4023 if (new_code != UNKNOWN)
4024 return simplify_gen_relational (new_code, mode, VOIDmode,
4025 XEXP (op0, 0), XEXP (op0, 1));
4026 }
4027 }
4028
4029 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4030 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
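/* For instance, (ltu:SI (plus:SI a (const_int 4)) (const_int 4))
   becomes (geu:SI a (const_int -4)) here.  */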
4031 if ((code == LTU || code == GEU)
4032 && GET_CODE (op0) == PLUS
4033 && CONST_INT_P (XEXP (op0, 1))
4034 && (rtx_equal_p (op1, XEXP (op0, 0))
4035 || rtx_equal_p (op1, XEXP (op0, 1))))
4036 {
4037 rtx new_cmp
4038 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4039 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4040 cmp_mode, XEXP (op0, 0), new_cmp);
4041 }
4042
4043 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4044 if ((code == LTU || code == GEU)
4045 && GET_CODE (op0) == PLUS
4046 && rtx_equal_p (op1, XEXP (op0, 1))
4047 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4048 && !rtx_equal_p (op1, XEXP (op0, 0)))
4049 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
4050
4051 if (op1 == const0_rtx)
4052 {
4053 /* Canonicalize (GTU x 0) as (NE x 0). */
4054 if (code == GTU)
4055 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4056 /* Canonicalize (LEU x 0) as (EQ x 0). */
4057 if (code == LEU)
4058 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4059 }
4060 else if (op1 == const1_rtx)
4061 {
4062 switch (code)
4063 {
4064 case GE:
4065 /* Canonicalize (GE x 1) as (GT x 0). */
4066 return simplify_gen_relational (GT, mode, cmp_mode,
4067 op0, const0_rtx);
4068 case GEU:
4069 /* Canonicalize (GEU x 1) as (NE x 0). */
4070 return simplify_gen_relational (NE, mode, cmp_mode,
4071 op0, const0_rtx);
4072 case LT:
4073 /* Canonicalize (LT x 1) as (LE x 0). */
4074 return simplify_gen_relational (LE, mode, cmp_mode,
4075 op0, const0_rtx);
4076 case LTU:
4077 /* Canonicalize (LTU x 1) as (EQ x 0). */
4078 return simplify_gen_relational (EQ, mode, cmp_mode,
4079 op0, const0_rtx);
4080 default:
4081 break;
4082 }
4083 }
4084 else if (op1 == constm1_rtx)
4085 {
4086 /* Canonicalize (LE x -1) as (LT x 0). */
4087 if (code == LE)
4088 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4089 /* Canonicalize (GT x -1) as (GE x 0). */
4090 if (code == GT)
4091 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4092 }
4093
4094 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4095 if ((code == EQ || code == NE)
4096 && (op0code == PLUS || op0code == MINUS)
4097 && CONSTANT_P (op1)
4098 && CONSTANT_P (XEXP (op0, 1))
4099 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4100 {
4101 rtx x = XEXP (op0, 0);
4102 rtx c = XEXP (op0, 1);
4103
4104 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4105 cmp_mode, op1, c);
4106 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4107 }
4108
4109 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4110 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4111 if (code == NE
4112 && op1 == const0_rtx
4113 && GET_MODE_CLASS (mode) == MODE_INT
4114 && cmp_mode != VOIDmode
4115 /* ??? Work-around BImode bugs in the ia64 backend. */
4116 && mode != BImode
4117 && cmp_mode != BImode
4118 && nonzero_bits (op0, cmp_mode) == 1
4119 && STORE_FLAG_VALUE == 1)
4120 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4121 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4122 : lowpart_subreg (mode, op0, cmp_mode);
4123
4124 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4125 if ((code == EQ || code == NE)
4126 && op1 == const0_rtx
4127 && op0code == XOR)
4128 return simplify_gen_relational (code, mode, cmp_mode,
4129 XEXP (op0, 0), XEXP (op0, 1));
4130
4131 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4132 if ((code == EQ || code == NE)
4133 && op0code == XOR
4134 && rtx_equal_p (XEXP (op0, 0), op1)
4135 && !side_effects_p (XEXP (op0, 0)))
4136 return simplify_gen_relational (code, mode, cmp_mode,
4137 XEXP (op0, 1), const0_rtx);
4138
4139 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4140 if ((code == EQ || code == NE)
4141 && op0code == XOR
4142 && rtx_equal_p (XEXP (op0, 1), op1)
4143 && !side_effects_p (XEXP (op0, 1)))
4144 return simplify_gen_relational (code, mode, cmp_mode,
4145 XEXP (op0, 0), const0_rtx);
4146
4147 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4148 if ((code == EQ || code == NE)
4149 && op0code == XOR
4150 && (CONST_INT_P (op1)
4151 || GET_CODE (op1) == CONST_DOUBLE)
4152 && (CONST_INT_P (XEXP (op0, 1))
4153 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4154 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4155 simplify_gen_binary (XOR, cmp_mode,
4156 XEXP (op0, 1), op1));
4157
4158 if (op0code == POPCOUNT && op1 == const0_rtx)
4159 switch (code)
4160 {
4161 case EQ:
4162 case LE:
4163 case LEU:
4164 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4165 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4166 XEXP (op0, 0), const0_rtx);
4167
4168 case NE:
4169 case GT:
4170 case GTU:
4171 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4172 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4173 XEXP (op0, 0), const0_rtx);
4174
4175 default:
4176 break;
4177 }
4178
4179 return NULL_RTX;
4180 }
4181
4182 enum
4183 {
4184 CMP_EQ = 1,
4185 CMP_LT = 2,
4186 CMP_GT = 4,
4187 CMP_LTU = 8,
4188 CMP_GTU = 16
4189 };
4190
4191
4192 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4193 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4194 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4195 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4196 For floating-point comparisons, assume that the operands were ordered. */
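/* For example, if the operands are known to compare as op0 < op1 signed
   but op0 > op1 unsigned (say op0 == -1 and op1 == 1), KNOWN_RESULTS
   would be CMP_LT | CMP_GTU; LT then yields const_true_rtx while LTU
   yields const0_rtx.  */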
4197
4198 static rtx
4199 comparison_result (enum rtx_code code, int known_results)
4200 {
4201 switch (code)
4202 {
4203 case EQ:
4204 case UNEQ:
4205 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4206 case NE:
4207 case LTGT:
4208 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4209
4210 case LT:
4211 case UNLT:
4212 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4213 case GE:
4214 case UNGE:
4215 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4216
4217 case GT:
4218 case UNGT:
4219 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4220 case LE:
4221 case UNLE:
4222 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4223
4224 case LTU:
4225 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4226 case GEU:
4227 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4228
4229 case GTU:
4230 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4231 case LEU:
4232 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4233
4234 case ORDERED:
4235 return const_true_rtx;
4236 case UNORDERED:
4237 return const0_rtx;
4238 default:
4239 gcc_unreachable ();
4240 }
4241 }
4242
4243 /* Check if the given comparison (done in the given MODE) is actually a
4244 tautology or a contradiction.
4245 If no simplification is possible, this function returns zero.
4246 Otherwise, it returns either const_true_rtx or const0_rtx. */
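/* For instance, with a side-effect-free integer X, comparing X with
   itself folds: GT gives const0_rtx and GE gives const_true_rtx via
   the CMP_EQ path below.  */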
4247
4248 rtx
4249 simplify_const_relational_operation (enum rtx_code code,
4250 enum machine_mode mode,
4251 rtx op0, rtx op1)
4252 {
4253 rtx tem;
4254 rtx trueop0;
4255 rtx trueop1;
4256
4257 gcc_assert (mode != VOIDmode
4258 || (GET_MODE (op0) == VOIDmode
4259 && GET_MODE (op1) == VOIDmode));
4260
4261 /* If op0 is a compare, extract the comparison arguments from it. */
4262 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4263 {
4264 op1 = XEXP (op0, 1);
4265 op0 = XEXP (op0, 0);
4266
4267 if (GET_MODE (op0) != VOIDmode)
4268 mode = GET_MODE (op0);
4269 else if (GET_MODE (op1) != VOIDmode)
4270 mode = GET_MODE (op1);
4271 else
4272 return 0;
4273 }
4274
4275 /* We can't simplify MODE_CC values since we don't know what the
4276 actual comparison is. */
4277 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4278 return 0;
4279
4280 /* Make sure the constant is second. */
4281 if (swap_commutative_operands_p (op0, op1))
4282 {
4283 tem = op0, op0 = op1, op1 = tem;
4284 code = swap_condition (code);
4285 }
4286
4287 trueop0 = avoid_constant_pool_reference (op0);
4288 trueop1 = avoid_constant_pool_reference (op1);
4289
4290 /* For integer comparisons of A and B maybe we can simplify A - B and can
4291 then simplify a comparison of that with zero. If A and B are both either
4292 a register or a CONST_INT, this can't help; testing for these cases will
4293 prevent infinite recursion here and speed things up.
4294
4295 We can only do this for EQ and NE comparisons as otherwise we may
4296 lose or introduce overflow which we cannot disregard as undefined as
4297 we do not know the signedness of the operation on either the left or
4298 the right hand side of the comparison. */
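/* For example, (eq:SI (plus:SI x (const_int 1)) x): the MINUS below
   folds to (const_int 1), and the recursive comparison of that
   against zero yields const0_rtx.  */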
4299
4300 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4301 && (code == EQ || code == NE)
4302 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4303 && (REG_P (op1) || CONST_INT_P (trueop1)))
4304 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4305 /* We cannot do this if tem is a nonzero address. */
4306 && ! nonzero_address_p (tem))
4307 return simplify_const_relational_operation (signed_condition (code),
4308 mode, tem, const0_rtx);
4309
4310 if (! HONOR_NANS (mode) && code == ORDERED)
4311 return const_true_rtx;
4312
4313 if (! HONOR_NANS (mode) && code == UNORDERED)
4314 return const0_rtx;
4315
4316 /* For modes without NaNs, if the two operands are equal, we know the
4317 result except if they have side-effects. Even with NaNs we know
4318 the result of unordered comparisons and, if signaling NaNs are
4319 irrelevant, also the result of LT/GT/LTGT. */
4320 if ((! HONOR_NANS (GET_MODE (trueop0))
4321 || code == UNEQ || code == UNLE || code == UNGE
4322 || ((code == LT || code == GT || code == LTGT)
4323 && ! HONOR_SNANS (GET_MODE (trueop0))))
4324 && rtx_equal_p (trueop0, trueop1)
4325 && ! side_effects_p (trueop0))
4326 return comparison_result (code, CMP_EQ);
4327
4328 /* If the operands are floating-point constants, see if we can fold
4329 the result. */
4330 if (GET_CODE (trueop0) == CONST_DOUBLE
4331 && GET_CODE (trueop1) == CONST_DOUBLE
4332 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4333 {
4334 REAL_VALUE_TYPE d0, d1;
4335
4336 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4337 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4338
4339 /* Comparisons are unordered iff at least one of the values is NaN. */
4340 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4341 switch (code)
4342 {
4343 case UNEQ:
4344 case UNLT:
4345 case UNGT:
4346 case UNLE:
4347 case UNGE:
4348 case NE:
4349 case UNORDERED:
4350 return const_true_rtx;
4351 case EQ:
4352 case LT:
4353 case GT:
4354 case LE:
4355 case GE:
4356 case LTGT:
4357 case ORDERED:
4358 return const0_rtx;
4359 default:
4360 return 0;
4361 }
4362
4363 return comparison_result (code,
4364 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4365 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4366 }
4367
4368 /* Otherwise, see if the operands are both integers. */
4369 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4370 && (GET_CODE (trueop0) == CONST_DOUBLE
4371 || CONST_INT_P (trueop0))
4372 && (GET_CODE (trueop1) == CONST_DOUBLE
4373 || CONST_INT_P (trueop1)))
4374 {
4375 int width = GET_MODE_BITSIZE (mode);
4376 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4377 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4378
4379 /* Get the two words comprising each integer constant. */
4380 if (GET_CODE (trueop0) == CONST_DOUBLE)
4381 {
4382 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4383 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4384 }
4385 else
4386 {
4387 l0u = l0s = INTVAL (trueop0);
4388 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4389 }
4390
4391 if (GET_CODE (trueop1) == CONST_DOUBLE)
4392 {
4393 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4394 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4395 }
4396 else
4397 {
4398 l1u = l1s = INTVAL (trueop1);
4399 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4400 }
4401
4402 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4403 we have to sign or zero-extend the values. */
4404 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4405 {
4406 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4407 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4408
4409 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4410 l0s |= ((HOST_WIDE_INT) (-1) << width);
4411
4412 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4413 l1s |= ((HOST_WIDE_INT) (-1) << width);
4414 }
4415 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4416 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4417
4418 if (h0u == h1u && l0u == l1u)
4419 return comparison_result (code, CMP_EQ);
4420 else
4421 {
4422 int cr;
4423 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4424 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4425 return comparison_result (code, cr);
4426 }
4427 }
4428
4429 /* Optimize comparisons with upper and lower bounds. */
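/* For instance, if nonzero_bits shows that op0 fits in [0, 255]
   (say op0 is (zero_extend:SI (reg:QI r))), then (gtu op0 (const_int 255))
   folds to const0_rtx and (leu op0 (const_int 255)) to const_true_rtx.  */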
4430 if (SCALAR_INT_MODE_P (mode)
4431 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4432 && CONST_INT_P (trueop1))
4433 {
4434 int sign;
4435 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4436 HOST_WIDE_INT val = INTVAL (trueop1);
4437 HOST_WIDE_INT mmin, mmax;
4438
4439 if (code == GEU
4440 || code == LEU
4441 || code == GTU
4442 || code == LTU)
4443 sign = 0;
4444 else
4445 sign = 1;
4446
4447 /* Get a reduced range if the sign bit is zero. */
4448 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4449 {
4450 mmin = 0;
4451 mmax = nonzero;
4452 }
4453 else
4454 {
4455 rtx mmin_rtx, mmax_rtx;
4456 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4457
4458 mmin = INTVAL (mmin_rtx);
4459 mmax = INTVAL (mmax_rtx);
4460 if (sign)
4461 {
4462 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4463
4464 mmin >>= (sign_copies - 1);
4465 mmax >>= (sign_copies - 1);
4466 }
4467 }
4468
4469 switch (code)
4470 {
4471 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4472 case GEU:
4473 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4474 return const_true_rtx;
4475 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4476 return const0_rtx;
4477 break;
4478 case GE:
4479 if (val <= mmin)
4480 return const_true_rtx;
4481 if (val > mmax)
4482 return const0_rtx;
4483 break;
4484
4485 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4486 case LEU:
4487 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4488 return const_true_rtx;
4489 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4490 return const0_rtx;
4491 break;
4492 case LE:
4493 if (val >= mmax)
4494 return const_true_rtx;
4495 if (val < mmin)
4496 return const0_rtx;
4497 break;
4498
4499 case EQ:
4500 /* x == y is always false for y out of range. */
4501 if (val < mmin || val > mmax)
4502 return const0_rtx;
4503 break;
4504
4505 /* x > y is always false for y >= mmax, always true for y < mmin. */
4506 case GTU:
4507 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4508 return const0_rtx;
4509 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4510 return const_true_rtx;
4511 break;
4512 case GT:
4513 if (val >= mmax)
4514 return const0_rtx;
4515 if (val < mmin)
4516 return const_true_rtx;
4517 break;
4518
4519 /* x < y is always false for y <= mmin, always true for y > mmax. */
4520 case LTU:
4521 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4522 return const0_rtx;
4523 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4524 return const_true_rtx;
4525 break;
4526 case LT:
4527 if (val <= mmin)
4528 return const0_rtx;
4529 if (val > mmax)
4530 return const_true_rtx;
4531 break;
4532
4533 case NE:
4534 /* x != y is always true for y out of range. */
4535 if (val < mmin || val > mmax)
4536 return const_true_rtx;
4537 break;
4538
4539 default:
4540 break;
4541 }
4542 }
4543
4544 /* Optimize integer comparisons with zero. */
4545 if (trueop1 == const0_rtx)
4546 {
4547 /* Some addresses are known to be nonzero. We don't know
4548 their sign, but equality comparisons are known. */
4549 if (nonzero_address_p (trueop0))
4550 {
4551 if (code == EQ || code == LEU)
4552 return const0_rtx;
4553 if (code == NE || code == GTU)
4554 return const_true_rtx;
4555 }
4556
4557 /* See if the first operand is an IOR with a constant. If so, we
4558 may be able to determine the result of this comparison. */
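/* E.g. (ne (ior x (const_int 4)) (const_int 0)) is always true,
   since the IOR guarantees at least one nonzero bit.  */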
4559 if (GET_CODE (op0) == IOR)
4560 {
4561 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4562 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4563 {
4564 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4565 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4566 && (INTVAL (inner_const)
4567 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4568
4569 switch (code)
4570 {
4571 case EQ:
4572 case LEU:
4573 return const0_rtx;
4574 case NE:
4575 case GTU:
4576 return const_true_rtx;
4577 case LT:
4578 case LE:
4579 if (has_sign)
4580 return const_true_rtx;
4581 break;
4582 case GT:
4583 case GE:
4584 if (has_sign)
4585 return const0_rtx;
4586 break;
4587 default:
4588 break;
4589 }
4590 }
4591 }
4592 }
4593
4594 /* Optimize comparison of ABS with zero. */
4595 if (trueop1 == CONST0_RTX (mode)
4596 && (GET_CODE (trueop0) == ABS
4597 || (GET_CODE (trueop0) == FLOAT_EXTEND
4598 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4599 {
4600 switch (code)
4601 {
4602 case LT:
4603 /* Optimize abs(x) < 0.0. */
4604 if (!HONOR_SNANS (mode)
4605 && (!INTEGRAL_MODE_P (mode)
4606 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4607 {
4608 if (INTEGRAL_MODE_P (mode)
4609 && (issue_strict_overflow_warning
4610 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4611 warning (OPT_Wstrict_overflow,
4612 ("assuming signed overflow does not occur when "
4613 "assuming abs (x) < 0 is false"));
4614 return const0_rtx;
4615 }
4616 break;
4617
4618 case GE:
4619 /* Optimize abs(x) >= 0.0. */
4620 if (!HONOR_NANS (mode)
4621 && (!INTEGRAL_MODE_P (mode)
4622 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4623 {
4624 if (INTEGRAL_MODE_P (mode)
4625 && (issue_strict_overflow_warning
4626 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4627 warning (OPT_Wstrict_overflow,
4628 ("assuming signed overflow does not occur when "
4629 "assuming abs (x) >= 0 is true"));
4630 return const_true_rtx;
4631 }
4632 break;
4633
4634 case UNGE:
4635 /* Optimize ! (abs(x) < 0.0). */
4636 return const_true_rtx;
4637
4638 default:
4639 break;
4640 }
4641 }
4642
4643 return 0;
4644 }
4645 \f
4646 /* Simplify CODE, an operation with result mode MODE and three operands,
4647 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4648 a constant. Return 0 if no simplification is possible. */
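/* Illustration for the ZERO_EXTRACT case below, assuming BITS_BIG_ENDIAN
   is 0: extracting 4 bits starting at bit 1 from (const_int 0x5a) gives
   (0x5a >> 1) & 0xf = (const_int 13); the SIGN_EXTRACT variant
   sign-extends the same field to (const_int -3).  */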
4649
4650 rtx
4651 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4652 enum machine_mode op0_mode, rtx op0, rtx op1,
4653 rtx op2)
4654 {
4655 unsigned int width = GET_MODE_BITSIZE (mode);
4656
4657 /* VOIDmode means "infinite" precision. */
4658 if (width == 0)
4659 width = HOST_BITS_PER_WIDE_INT;
4660
4661 switch (code)
4662 {
4663 case SIGN_EXTRACT:
4664 case ZERO_EXTRACT:
4665 if (CONST_INT_P (op0)
4666 && CONST_INT_P (op1)
4667 && CONST_INT_P (op2)
4668 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4669 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4670 {
4671 /* Extracting a bit-field from a constant */
4672 HOST_WIDE_INT val = INTVAL (op0);
4673
4674 if (BITS_BIG_ENDIAN)
4675 val >>= (GET_MODE_BITSIZE (op0_mode)
4676 - INTVAL (op2) - INTVAL (op1));
4677 else
4678 val >>= INTVAL (op2);
4679
4680 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4681 {
4682 /* First zero-extend. */
4683 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4684 /* If desired, propagate sign bit. */
4685 if (code == SIGN_EXTRACT
4686 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4687 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4688 }
4689
4690 /* Clear the bits that don't belong in our mode,
4691 unless they and our sign bit are all one.
4692 So we get either a reasonable negative value or a reasonable
4693 unsigned value for this mode. */
4694 if (width < HOST_BITS_PER_WIDE_INT
4695 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4696 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4697 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4698
4699 return gen_int_mode (val, mode);
4700 }
4701 break;
4702
4703 case IF_THEN_ELSE:
4704 if (CONST_INT_P (op0))
4705 return op0 != const0_rtx ? op1 : op2;
4706
4707 /* Convert c ? a : a into "a". */
4708 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4709 return op1;
4710
4711 /* Convert a != b ? a : b into "a". */
4712 if (GET_CODE (op0) == NE
4713 && ! side_effects_p (op0)
4714 && ! HONOR_NANS (mode)
4715 && ! HONOR_SIGNED_ZEROS (mode)
4716 && ((rtx_equal_p (XEXP (op0, 0), op1)
4717 && rtx_equal_p (XEXP (op0, 1), op2))
4718 || (rtx_equal_p (XEXP (op0, 0), op2)
4719 && rtx_equal_p (XEXP (op0, 1), op1))))
4720 return op1;
4721
4722 /* Convert a == b ? a : b into "b". */
4723 if (GET_CODE (op0) == EQ
4724 && ! side_effects_p (op0)
4725 && ! HONOR_NANS (mode)
4726 && ! HONOR_SIGNED_ZEROS (mode)
4727 && ((rtx_equal_p (XEXP (op0, 0), op1)
4728 && rtx_equal_p (XEXP (op0, 1), op2))
4729 || (rtx_equal_p (XEXP (op0, 0), op2)
4730 && rtx_equal_p (XEXP (op0, 1), op1))))
4731 return op2;
4732
4733 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4734 {
4735 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4736 ? GET_MODE (XEXP (op0, 1))
4737 : GET_MODE (XEXP (op0, 0)));
4738 rtx temp;
4739
4740 /* See whether op1 and op2 are constants that let us replace the
4740 IF_THEN_ELSE with a plain comparison. */
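/* E.g. (if_then_else (lt a b) (const_int 1) (const_int 0)) with
   STORE_FLAG_VALUE == 1 collapses to (lt a b); with the constants
   swapped it becomes the reversed comparison (ge a b).  */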
4741 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4742 {
4743 HOST_WIDE_INT t = INTVAL (op1);
4744 HOST_WIDE_INT f = INTVAL (op2);
4745
4746 if (t == STORE_FLAG_VALUE && f == 0)
4747 code = GET_CODE (op0);
4748 else if (t == 0 && f == STORE_FLAG_VALUE)
4749 {
4750 enum rtx_code tmp;
4751 tmp = reversed_comparison_code (op0, NULL_RTX);
4752 if (tmp == UNKNOWN)
4753 break;
4754 code = tmp;
4755 }
4756 else
4757 break;
4758
4759 return simplify_gen_relational (code, mode, cmp_mode,
4760 XEXP (op0, 0), XEXP (op0, 1));
4761 }
4762
4763 if (cmp_mode == VOIDmode)
4764 cmp_mode = op0_mode;
4765 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4766 cmp_mode, XEXP (op0, 0),
4767 XEXP (op0, 1));
4768
4769 /* See if any simplifications were possible. */
4770 if (temp)
4771 {
4772 if (CONST_INT_P (temp))
4773 return temp == const0_rtx ? op2 : op1;
4774 else if (temp)
4775 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4776 }
4777 }
4778 break;
4779
4780 case VEC_MERGE:
4781 gcc_assert (GET_MODE (op0) == mode);
4782 gcc_assert (GET_MODE (op1) == mode);
4783 gcc_assert (VECTOR_MODE_P (mode));
4784 op2 = avoid_constant_pool_reference (op2);
4785 if (CONST_INT_P (op2))
4786 {
4787 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4788 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4789 int mask = (1 << n_elts) - 1;
4790
4791 if (!(INTVAL (op2) & mask))
4792 return op1;
4793 if ((INTVAL (op2) & mask) == mask)
4794 return op0;
4795
4796 op0 = avoid_constant_pool_reference (op0);
4797 op1 = avoid_constant_pool_reference (op1);
4798 if (GET_CODE (op0) == CONST_VECTOR
4799 && GET_CODE (op1) == CONST_VECTOR)
4800 {
4801 rtvec v = rtvec_alloc (n_elts);
4802 unsigned int i;
4803
4804 for (i = 0; i < n_elts; i++)
4805 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4806 ? CONST_VECTOR_ELT (op0, i)
4807 : CONST_VECTOR_ELT (op1, i));
4808 return gen_rtx_CONST_VECTOR (mode, v);
4809 }
4810 }
4811 break;
4812
4813 default:
4814 gcc_unreachable ();
4815 }
4816
4817 return 0;
4818 }
4819
4820 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4821 or CONST_VECTOR,
4822 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4823
4824 Works by unpacking OP into a collection of 8-bit values
4825 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4826 and then repacking them again for OUTERMODE. */
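/* For example, simplify_immed_subreg (QImode, GEN_INT (0x1234), HImode, 0)
   yields (const_int 0x34) on a little-endian target and (const_int 0x12)
   on a big-endian one.  */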
4827
4828 static rtx
4829 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4830 enum machine_mode innermode, unsigned int byte)
4831 {
4832 /* We support up to 512-bit values (for V8DFmode). */
4833 enum {
4834 max_bitsize = 512,
4835 value_bit = 8,
4836 value_mask = (1 << value_bit) - 1
4837 };
4838 unsigned char value[max_bitsize / value_bit];
4839 int value_start;
4840 int i;
4841 int elem;
4842
4843 int num_elem;
4844 rtx * elems;
4845 int elem_bitsize;
4846 rtx result_s;
4847 rtvec result_v = NULL;
4848 enum mode_class outer_class;
4849 enum machine_mode outer_submode;
4850
4851 /* Some ports misuse CCmode. */
4852 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
4853 return op;
4854
4855 /* We have no way to represent a complex constant at the rtl level. */
4856 if (COMPLEX_MODE_P (outermode))
4857 return NULL_RTX;
4858
4859 /* Unpack the value. */
4860
4861 if (GET_CODE (op) == CONST_VECTOR)
4862 {
4863 num_elem = CONST_VECTOR_NUNITS (op);
4864 elems = &CONST_VECTOR_ELT (op, 0);
4865 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4866 }
4867 else
4868 {
4869 num_elem = 1;
4870 elems = &op;
4871 elem_bitsize = max_bitsize;
4872 }
4873 /* If this asserts, it is too complicated; reducing value_bit may help. */
4874 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4875 /* I don't know how to handle endianness of sub-units. */
4876 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4877
4878 for (elem = 0; elem < num_elem; elem++)
4879 {
4880 unsigned char * vp;
4881 rtx el = elems[elem];
4882
4883 /* Vectors are kept in target memory order. (This is probably
4884 a mistake.) */
4885 {
4886 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4887 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4888 / BITS_PER_UNIT);
4889 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4890 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4891 unsigned bytele = (subword_byte % UNITS_PER_WORD
4892 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4893 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4894 }
4895
4896 switch (GET_CODE (el))
4897 {
4898 case CONST_INT:
4899 for (i = 0;
4900 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4901 i += value_bit)
4902 *vp++ = INTVAL (el) >> i;
4903 /* CONST_INTs are always logically sign-extended. */
4904 for (; i < elem_bitsize; i += value_bit)
4905 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4906 break;
4907
4908 case CONST_DOUBLE:
4909 if (GET_MODE (el) == VOIDmode)
4910 {
4911 /* If this triggers, someone should have generated a
4912 CONST_INT instead. */
4913 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4914
4915 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4916 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4917 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4918 {
4919 *vp++
4920 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4921 i += value_bit;
4922 }
4923 /* It shouldn't matter what's done here, so fill it with
4924 zero. */
4925 for (; i < elem_bitsize; i += value_bit)
4926 *vp++ = 0;
4927 }
4928 else
4929 {
4930 long tmp[max_bitsize / 32];
4931 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4932
4933 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4934 gcc_assert (bitsize <= elem_bitsize);
4935 gcc_assert (bitsize % value_bit == 0);
4936
4937 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4938 GET_MODE (el));
4939
4940 /* real_to_target produces its result in words affected by
4941 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4942 and use WORDS_BIG_ENDIAN instead; see the documentation
4943 of SUBREG in rtl.texi. */
4944 for (i = 0; i < bitsize; i += value_bit)
4945 {
4946 int ibase;
4947 if (WORDS_BIG_ENDIAN)
4948 ibase = bitsize - 1 - i;
4949 else
4950 ibase = i;
4951 *vp++ = tmp[ibase / 32] >> i % 32;
4952 }
4953
4954 /* It shouldn't matter what's done here, so fill it with
4955 zero. */
4956 for (; i < elem_bitsize; i += value_bit)
4957 *vp++ = 0;
4958 }
4959 break;
4960
4961 case CONST_FIXED:
4962 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4963 {
4964 for (i = 0; i < elem_bitsize; i += value_bit)
4965 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4966 }
4967 else
4968 {
4969 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4970 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
4971 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4972 i += value_bit)
4973 *vp++ = CONST_FIXED_VALUE_HIGH (el)
4974 >> (i - HOST_BITS_PER_WIDE_INT);
4975 for (; i < elem_bitsize; i += value_bit)
4976 *vp++ = 0;
4977 }
4978 break;
4979
4980 default:
4981 gcc_unreachable ();
4982 }
4983 }
4984
4985 /* Now, pick the right byte to start with. */
4986 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4987 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4988 will already have offset 0. */
4989 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4990 {
4991 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4992 - byte);
4993 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4994 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4995 byte = (subword_byte % UNITS_PER_WORD
4996 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4997 }
4998
4999 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5000 so if it's become negative it will instead be very large.) */
5001 gcc_assert (byte < GET_MODE_SIZE (innermode));
5002
5003 /* Convert from bytes to chunks of size value_bit. */
5004 value_start = byte * (BITS_PER_UNIT / value_bit);
5005
5006 /* Re-pack the value. */
5007
5008 if (VECTOR_MODE_P (outermode))
5009 {
5010 num_elem = GET_MODE_NUNITS (outermode);
5011 result_v = rtvec_alloc (num_elem);
5012 elems = &RTVEC_ELT (result_v, 0);
5013 outer_submode = GET_MODE_INNER (outermode);
5014 }
5015 else
5016 {
5017 num_elem = 1;
5018 elems = &result_s;
5019 outer_submode = outermode;
5020 }
5021
5022 outer_class = GET_MODE_CLASS (outer_submode);
5023 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5024
5025 gcc_assert (elem_bitsize % value_bit == 0);
5026 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5027
5028 for (elem = 0; elem < num_elem; elem++)
5029 {
5030 unsigned char *vp;
5031
5032 /* Vectors are stored in target memory order. (This is probably
5033 a mistake.) */
5034 {
5035 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5036 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5037 / BITS_PER_UNIT);
5038 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5039 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5040 unsigned bytele = (subword_byte % UNITS_PER_WORD
5041 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5042 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5043 }
5044
5045 switch (outer_class)
5046 {
5047 case MODE_INT:
5048 case MODE_PARTIAL_INT:
5049 {
5050 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5051
5052 for (i = 0;
5053 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5054 i += value_bit)
5055 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5056 for (; i < elem_bitsize; i += value_bit)
5057 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5058 << (i - HOST_BITS_PER_WIDE_INT));
5059
5060 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5061 know why. */
5062 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5063 elems[elem] = gen_int_mode (lo, outer_submode);
5064 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5065 elems[elem] = immed_double_const (lo, hi, outer_submode);
5066 else
5067 return NULL_RTX;
5068 }
5069 break;
5070
5071 case MODE_FLOAT:
5072 case MODE_DECIMAL_FLOAT:
5073 {
5074 REAL_VALUE_TYPE r;
5075 long tmp[max_bitsize / 32];
5076
5077 /* real_from_target wants its input in words affected by
5078 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5079 and use WORDS_BIG_ENDIAN instead; see the documentation
5080 of SUBREG in rtl.texi. */
5081 for (i = 0; i < max_bitsize / 32; i++)
5082 tmp[i] = 0;
5083 for (i = 0; i < elem_bitsize; i += value_bit)
5084 {
5085 int ibase;
5086 if (WORDS_BIG_ENDIAN)
5087 ibase = elem_bitsize - 1 - i;
5088 else
5089 ibase = i;
5090 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5091 }
5092
5093 real_from_target (&r, tmp, outer_submode);
5094 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5095 }
5096 break;
5097
5098 case MODE_FRACT:
5099 case MODE_UFRACT:
5100 case MODE_ACCUM:
5101 case MODE_UACCUM:
5102 {
5103 FIXED_VALUE_TYPE f;
5104 f.data.low = 0;
5105 f.data.high = 0;
5106 f.mode = outer_submode;
5107
5108 for (i = 0;
5109 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5110 i += value_bit)
5111 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5112 for (; i < elem_bitsize; i += value_bit)
5113 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5114 << (i - HOST_BITS_PER_WIDE_INT));
5115
5116 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5117 }
5118 break;
5119
5120 default:
5121 gcc_unreachable ();
5122 }
5123 }
5124 if (VECTOR_MODE_P (outermode))
5125 return gen_rtx_CONST_VECTOR (outermode, result_v);
5126 else
5127 return result_s;
5128 }
5129
5130 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5131 Return 0 if no simplifications are possible. */
5132 rtx
5133 simplify_subreg (enum machine_mode outermode, rtx op,
5134 enum machine_mode innermode, unsigned int byte)
5135 {
5136 /* Little bit of sanity checking. */
5137 gcc_assert (innermode != VOIDmode);
5138 gcc_assert (outermode != VOIDmode);
5139 gcc_assert (innermode != BLKmode);
5140 gcc_assert (outermode != BLKmode);
5141
5142 gcc_assert (GET_MODE (op) == innermode
5143 || GET_MODE (op) == VOIDmode);
5144
5145 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5146 gcc_assert (byte < GET_MODE_SIZE (innermode));
5147
5148 if (outermode == innermode && !byte)
5149 return op;
5150
5151 if (CONST_INT_P (op)
5152 || GET_CODE (op) == CONST_DOUBLE
5153 || GET_CODE (op) == CONST_FIXED
5154 || GET_CODE (op) == CONST_VECTOR)
5155 return simplify_immed_subreg (outermode, op, innermode, byte);
5156
5157 /* Changing mode twice with SUBREG => just change it once,
5158 or not at all if changing back to op's starting mode. */
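/* E.g. (subreg:QI (subreg:HI (reg:SI r) 0) 0) folds to
   (subreg:QI (reg:SI r) 0), and (subreg:SI (subreg:HI (reg:SI r) 0) 0)
   folds all the way back to (reg:SI r) (offsets shown for a
   little-endian target).  */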
5159 if (GET_CODE (op) == SUBREG)
5160 {
5161 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5162 int final_offset = byte + SUBREG_BYTE (op);
5163 rtx newx;
5164
5165 if (outermode == innermostmode
5166 && byte == 0 && SUBREG_BYTE (op) == 0)
5167 return SUBREG_REG (op);
5168
5169 /* The SUBREG_BYTE represents the offset, as if the value were stored
5170 in memory. The irritating exception is a paradoxical subreg, where
5171 we define SUBREG_BYTE to be 0; on big-endian machines this value
5172 should really be negative. For a moment, undo this exception. */
5173 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5174 {
5175 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5176 if (WORDS_BIG_ENDIAN)
5177 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5178 if (BYTES_BIG_ENDIAN)
5179 final_offset += difference % UNITS_PER_WORD;
5180 }
5181 if (SUBREG_BYTE (op) == 0
5182 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5183 {
5184 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5185 if (WORDS_BIG_ENDIAN)
5186 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5187 if (BYTES_BIG_ENDIAN)
5188 final_offset += difference % UNITS_PER_WORD;
5189 }
5190
5191 /* See whether the resulting subreg will be paradoxical. */
5192 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5193 {
5194 /* In nonparadoxical subregs we can't handle negative offsets. */
5195 if (final_offset < 0)
5196 return NULL_RTX;
5197 /* Bail out in case resulting subreg would be incorrect. */
5198 if (final_offset % GET_MODE_SIZE (outermode)
5199 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5200 return NULL_RTX;
5201 }
5202 else
5203 {
5204 int offset = 0;
5205 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5206
5207 /* In a paradoxical subreg, see if we are still looking at the lower part.
5208 If so, our SUBREG_BYTE will be 0. */
5209 if (WORDS_BIG_ENDIAN)
5210 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5211 if (BYTES_BIG_ENDIAN)
5212 offset += difference % UNITS_PER_WORD;
5213 if (offset == final_offset)
5214 final_offset = 0;
5215 else
5216 return NULL_RTX;
5217 }
5218
5219 /* Recurse for further possible simplifications. */
5220 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5221 final_offset);
5222 if (newx)
5223 return newx;
5224 if (validate_subreg (outermode, innermostmode,
5225 SUBREG_REG (op), final_offset))
5226 {
5227 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5228 if (SUBREG_PROMOTED_VAR_P (op)
5229 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5230 && GET_MODE_CLASS (outermode) == MODE_INT
5231 && IN_RANGE (GET_MODE_SIZE (outermode),
5232 GET_MODE_SIZE (innermode),
5233 GET_MODE_SIZE (innermostmode))
5234 && subreg_lowpart_p (newx))
5235 {
5236 SUBREG_PROMOTED_VAR_P (newx) = 1;
5237 SUBREG_PROMOTED_UNSIGNED_SET
5238 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5239 }
5240 return newx;
5241 }
5242 return NULL_RTX;
5243 }
5244
5245 /* Merge implicit and explicit truncations. */
5246
5247 if (GET_CODE (op) == TRUNCATE
5248 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5249 && subreg_lowpart_offset (outermode, innermode) == byte)
5250 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5251 GET_MODE (XEXP (op, 0)));
5252
5253 /* SUBREG of a hard register => just change the register number
5254 and/or mode. If the hard register is not valid in that mode,
5255 suppress this simplification. If the hard register is the stack,
5256 frame, or argument pointer, leave this as a SUBREG. */
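/* E.g. (subreg:HI (reg:SI 1) 0) can become (reg:HI 1) outright,
   provided hard register 1 is valid in HImode at that offset.  */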
5257
5258 if (REG_P (op) && HARD_REGISTER_P (op))
5259 {
5260 unsigned int regno, final_regno;
5261
5262 regno = REGNO (op);
5263 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5264 if (HARD_REGISTER_NUM_P (final_regno))
5265 {
5266 rtx x;
5267 int final_offset = byte;
5268
5269 /* Adjust offset for paradoxical subregs. */
5270 if (byte == 0
5271 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5272 {
5273 int difference = (GET_MODE_SIZE (innermode)
5274 - GET_MODE_SIZE (outermode));
5275 if (WORDS_BIG_ENDIAN)
5276 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5277 if (BYTES_BIG_ENDIAN)
5278 final_offset += difference % UNITS_PER_WORD;
5279 }
5280
5281 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5282
5283 /* Propagate the original regno. We don't have any way to specify
5284 the offset inside the original regno, so do so only for the lowpart.
5285 The information is used only by alias analysis, which cannot
5286 grok a partial register anyway. */
5287
5288 if (subreg_lowpart_offset (outermode, innermode) == byte)
5289 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5290 return x;
5291 }
5292 }
5293
5294 /* If we have a SUBREG of a register that we are replacing and we are
5295 replacing it with a MEM, make a new MEM and try replacing the
5296 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5297 or if we would be widening it. */
5298
5299 if (MEM_P (op)
5300 && ! mode_dependent_address_p (XEXP (op, 0))
5301 /* Allow splitting of volatile memory references in case we don't
5302 have an instruction to move the whole thing. */
5303 && (! MEM_VOLATILE_P (op)
5304 || ! have_insn_for (SET, innermode))
5305 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5306 return adjust_address_nv (op, outermode, byte);
5307
5308 /* Handle complex values represented as CONCAT
5309 of real and imaginary part. */
5310 if (GET_CODE (op) == CONCAT)
5311 {
5312 unsigned int part_size, final_offset;
5313 rtx part, res;
5314
5315 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5316 if (byte < part_size)
5317 {
5318 part = XEXP (op, 0);
5319 final_offset = byte;
5320 }
5321 else
5322 {
5323 part = XEXP (op, 1);
5324 final_offset = byte - part_size;
5325 }
5326
5327 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5328 return NULL_RTX;
5329
5330 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5331 if (res)
5332 return res;
5333 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5334 return gen_rtx_SUBREG (outermode, part, final_offset);
5335 return NULL_RTX;
5336 }
5337
5338 /* Optimize SUBREG truncations of zero and sign extended values. */
5339 if ((GET_CODE (op) == ZERO_EXTEND
5340 || GET_CODE (op) == SIGN_EXTEND)
5341 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5342 {
5343 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5344
5345 /* If we're requesting the lowpart of a zero or sign extension,
5346 there are three possibilities. If the outermode is the same
5347 as the origmode, we can omit both the extension and the subreg.
5348 If the outermode is not larger than the origmode, we can apply
5349 the truncation without the extension. Finally, if the outermode
5350 is larger than the origmode, but both are integer modes, we
5351 can just extend to the appropriate mode. */
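/* E.g. (subreg:QI (zero_extend:SI (reg:QI r)) 0) reduces to (reg:QI r),
   while (subreg:HI (zero_extend:SI (reg:QI r)) 0) becomes
   (zero_extend:HI (reg:QI r)), byte 0 being the lowpart on a
   little-endian target.  */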
5352 if (bitpos == 0)
5353 {
5354 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5355 if (outermode == origmode)
5356 return XEXP (op, 0);
5357 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5358 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5359 subreg_lowpart_offset (outermode,
5360 origmode));
5361 if (SCALAR_INT_MODE_P (outermode))
5362 return simplify_gen_unary (GET_CODE (op), outermode,
5363 XEXP (op, 0), origmode);
5364 }
5365
5366 /* A SUBREG resulting from a zero extension may fold to zero if
5367 it extracts bits that lie above the ZERO_EXTEND's source bits. */
5368 if (GET_CODE (op) == ZERO_EXTEND
5369 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
5370 return CONST0_RTX (outermode);
5371 }
5372
5373 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5374 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5375 the outer subreg is effectively a truncation to the original mode. */
5376 if ((GET_CODE (op) == LSHIFTRT
5377 || GET_CODE (op) == ASHIFTRT)
5378 && SCALAR_INT_MODE_P (outermode)
5379 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5380 to avoid the possibility that an outer LSHIFTRT shifts by more
5381 than the sign extension's sign_bit_copies and introduces zeros
5382 into the high bits of the result. */
5383 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5384 && CONST_INT_P (XEXP (op, 1))
5385 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5386 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5387 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5388 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5389 return simplify_gen_binary (ASHIFTRT, outermode,
5390 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5391
5392 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5393 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5394 the outer subreg is effectively a truncation to the original mode. */
5395 if ((GET_CODE (op) == LSHIFTRT
5396 || GET_CODE (op) == ASHIFTRT)
5397 && SCALAR_INT_MODE_P (outermode)
5398 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5399 && CONST_INT_P (XEXP (op, 1))
5400 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5401 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5402 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5403 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5404 return simplify_gen_binary (LSHIFTRT, outermode,
5405 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5406
5407 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5408 (ashift:QI (x:QI) C), where C is a suitable small constant and
5409 the outer subreg is effectively a truncation to the original mode. */
5410 if (GET_CODE (op) == ASHIFT
5411 && SCALAR_INT_MODE_P (outermode)
5412 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5413 && CONST_INT_P (XEXP (op, 1))
5414 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5415 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5416 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5417 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5418 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5419 return simplify_gen_binary (ASHIFT, outermode,
5420 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5421
5422 /* Recognize a word extraction from a multi-word subreg. */
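/* E.g. on a 32-bit little-endian target,
   (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 32)) 0) selects the
   high word and becomes (subreg:SI (reg:DI r) 4).  */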
5423 if ((GET_CODE (op) == LSHIFTRT
5424 || GET_CODE (op) == ASHIFTRT)
5425 && SCALAR_INT_MODE_P (outermode)
5426 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5427 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5428 && CONST_INT_P (XEXP (op, 1))
5429 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5430 && INTVAL (XEXP (op, 1)) >= 0
5431 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5432 && byte == subreg_lowpart_offset (outermode, innermode))
5433 {
5434 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5435 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5436 (WORDS_BIG_ENDIAN
5437 ? byte - shifted_bytes
5438 : byte + shifted_bytes));
5439 }
5440
5441 return NULL_RTX;
5442 }
5443
5444 /* Make a SUBREG operation or equivalent if it folds. */
5445
5446 rtx
5447 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5448 enum machine_mode innermode, unsigned int byte)
5449 {
5450 rtx newx;
5451
5452 newx = simplify_subreg (outermode, op, innermode, byte);
5453 if (newx)
5454 return newx;
5455
5456 if (GET_CODE (op) == SUBREG
5457 || GET_CODE (op) == CONCAT
5458 || GET_MODE (op) == VOIDmode)
5459 return NULL_RTX;
5460
5461 if (validate_subreg (outermode, innermode, op, byte))
5462 return gen_rtx_SUBREG (outermode, op, byte);
5463
5464 return NULL_RTX;
5465 }
5466
5467 /* Simplify X, an rtx expression.
5468
5469 Return the simplified expression or NULL if no simplifications
5470 were possible.
5471
5472 This is the preferred entry point into the simplification routines;
5473 however, we still allow passes to call the more specific routines.
5474
5475 Right now GCC has three (yes, three) major bodies of RTL simplification
5476 code that need to be unified.
5477
5478 1. fold_rtx in cse.c. This code uses various CSE specific
5479 information to aid in RTL simplification.
5480
5481 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5482 it uses combine specific information to aid in RTL
5483 simplification.
5484
5485 3. The routines in this file.
5486
5487
5488 Long term we want to only have one body of simplification code; to
5489 get to that state I recommend the following steps:
5490
5491 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5492 which are not pass dependent state into these routines.
5493
5494 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5495 use this routine whenever possible.
5496
5497 3. Allow for pass dependent state to be provided to these
5498 routines and add simplifications based on the pass dependent
5499 state. Remove code from cse.c & combine.c that becomes
5500 redundant/dead.
5501
5502 It will take time, but ultimately the compiler will be easier to
5503 maintain and improve. It's totally silly that when we add a
5504 simplification it needs to be added to 4 places (3 for RTL
5505 simplification and 1 for tree simplification). */
5506
5507 rtx
5508 simplify_rtx (const_rtx x)
5509 {
5510 const enum rtx_code code = GET_CODE (x);
5511 const enum machine_mode mode = GET_MODE (x);
5512
5513 switch (GET_RTX_CLASS (code))
5514 {
5515 case RTX_UNARY:
5516 return simplify_unary_operation (code, mode,
5517 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5518 case RTX_COMM_ARITH:
5519 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5520 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5521
5522 /* Fall through.... */
5523
5524 case RTX_BIN_ARITH:
5525 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5526
5527 case RTX_TERNARY:
5528 case RTX_BITFIELD_OPS:
5529 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5530 XEXP (x, 0), XEXP (x, 1),
5531 XEXP (x, 2));
5532
5533 case RTX_COMPARE:
5534 case RTX_COMM_COMPARE:
5535 return simplify_relational_operation (code, mode,
5536 ((GET_MODE (XEXP (x, 0))
5537 != VOIDmode)
5538 ? GET_MODE (XEXP (x, 0))
5539 : GET_MODE (XEXP (x, 1))),
5540 XEXP (x, 0),
5541 XEXP (x, 1));
5542
5543 case RTX_EXTRA:
5544 if (code == SUBREG)
5545 return simplify_subreg (mode, SUBREG_REG (x),
5546 GET_MODE (SUBREG_REG (x)),
5547 SUBREG_BYTE (x));
5548 break;
5549
5550 case RTX_OBJ:
5551 if (code == LO_SUM)
5552 {
5553 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5554 if (GET_CODE (XEXP (x, 0)) == HIGH
5555 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5556 return XEXP (x, 1);
5557 }
5558 break;
5559
5560 default:
5561 break;
5562 }
5563 return NULL;
5564 }