gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
42
43 /* Simplification and canonicalization of RTL. */
44
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
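/* As an illustration (assuming a 64-bit HOST_WIDE_INT): if the low word
   has its top bit set, it is negative when viewed as signed, so
   HWI_SIGN_EXTEND yields a high word of -1; otherwise it yields 0.  The
   (low, high) pair then holds the value that sign extending LOW to
   double width would produce.  */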
51
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 \f
61 /* Negate a CONST_INT rtx, truncating (because a conversion from a
62 maximally negative number can overflow). */
63 static rtx
64 neg_const_int (enum machine_mode mode, rtx i)
65 {
66 return gen_int_mode (- INTVAL (i), mode);
67 }
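/* Illustration of the truncation above: with MODE == QImode and
   I == (const_int -128), the arithmetic negation is +128, which does not
   fit in QImode; gen_int_mode truncates it back to -128, matching the
   wrap-around of two's complement negation.  */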
68
69 \f
70 /* Make a binary operation by properly ordering the operands and
71 seeing if the expression folds. */
72
73 rtx
74 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
75 rtx op1)
76 {
77 rtx tem;
78
79 /* Put complex operands first and constants second if commutative. */
80 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
81 && swap_commutative_operands_p (op0, op1))
82 tem = op0, op0 = op1, op1 = tem;
83
84 /* If this simplifies, do it. */
85 tem = simplify_binary_operation (code, mode, op0, op1);
86 if (tem)
87 return tem;
88
89 /* Handle addition and subtraction specially. Otherwise, just form
90 the operation. */
91
92 if (code == PLUS || code == MINUS)
93 {
94 tem = simplify_plus_minus (code, mode, op0, op1, 1);
95 if (tem)
96 return tem;
97 }
98
99 return gen_rtx_fmt_ee (code, mode, op0, op1);
100 }
101 \f
102 /* If X is a MEM referencing the constant pool, return the real value.
103 Otherwise return X. */
104 rtx
105 avoid_constant_pool_reference (rtx x)
106 {
107 rtx c, tmp, addr;
108 enum machine_mode cmode;
109
110 switch (GET_CODE (x))
111 {
112 case MEM:
113 break;
114
115 case FLOAT_EXTEND:
116 /* Handle float extensions of constant pool references. */
117 tmp = XEXP (x, 0);
118 c = avoid_constant_pool_reference (tmp);
119 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
120 {
121 REAL_VALUE_TYPE d;
122
123 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
124 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
125 }
126 return x;
127
128 default:
129 return x;
130 }
131
132 addr = XEXP (x, 0);
133
134 /* Call target hook to avoid the effects of -fpic etc.... */
135 addr = (*targetm.delegitimize_address) (addr);
136
137 if (GET_CODE (addr) == LO_SUM)
138 addr = XEXP (addr, 1);
139
140 if (GET_CODE (addr) != SYMBOL_REF
141 || ! CONSTANT_POOL_ADDRESS_P (addr))
142 return x;
143
144 c = get_pool_constant (addr);
145 cmode = get_pool_mode (addr);
146
147 /* If we're accessing the constant in a different mode than it was
148 originally stored, attempt to fix that up via subreg simplifications.
149 If that fails we have no choice but to return the original memory. */
150 if (cmode != GET_MODE (x))
151 {
152 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
153 return c ? c : x;
154 }
155
156 return c;
157 }
158 \f
159 /* Make a unary operation by first seeing if it folds and otherwise making
160 the specified operation. */
161
162 rtx
163 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
164 enum machine_mode op_mode)
165 {
166 rtx tem;
167
168 /* If this simplifies, use it. */
169 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
170 return tem;
171
172 return gen_rtx_fmt_e (code, mode, op);
173 }
174
175 /* Likewise for ternary operations. */
176
177 rtx
178 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
179 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
180 {
181 rtx tem;
182
183 /* If this simplifies, use it. */
184 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
185 op0, op1, op2)))
186 return tem;
187
188 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
189 }
190 \f
191 /* Likewise, for relational operations.
 192    CMP_MODE specifies the mode in which the comparison is done.
193 */
194
195 rtx
196 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
197 enum machine_mode cmp_mode, rtx op0, rtx op1)
198 {
199 rtx tem;
200
201 if (cmp_mode == VOIDmode)
202 cmp_mode = GET_MODE (op0);
203 if (cmp_mode == VOIDmode)
204 cmp_mode = GET_MODE (op1);
205
206 if (cmp_mode != VOIDmode)
207 {
208 tem = simplify_relational_operation (code, mode, cmp_mode, op0, op1);
209 if (tem)
210 return tem;
211 }
212
213 /* For the following tests, ensure const0_rtx is op1. */
214 if (swap_commutative_operands_p (op0, op1)
215 || (op0 == const0_rtx && op1 != const0_rtx))
216 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
217
218 /* If op0 is a compare, extract the comparison arguments from it. */
219 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
220 return simplify_gen_relational (code, mode, VOIDmode,
221 XEXP (op0, 0), XEXP (op0, 1));
222
 223   /* If op0 is a comparison, extract the comparison arguments from it.  */
224 if (COMPARISON_P (op0) && op1 == const0_rtx)
225 {
226 if (code == NE)
227 {
228 if (GET_MODE (op0) == mode)
229 return op0;
230 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
231 XEXP (op0, 0), XEXP (op0, 1));
232 }
233 else if (code == EQ)
234 {
235 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
236 if (new != UNKNOWN)
237 return simplify_gen_relational (new, mode, VOIDmode,
238 XEXP (op0, 0), XEXP (op0, 1));
239 }
240 }
241
242 return gen_rtx_fmt_ee (code, mode, op0, op1);
243 }
244 \f
245 /* Replace all occurrences of OLD in X with NEW and try to simplify the
246 resulting RTX. Return a new RTX which is as simplified as possible. */
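/* For example (with a purely illustrative register R): substituting
   (const_int 0) for (reg R) in (plus (reg R) (const_int 4)) recurses into
   the PLUS, rebuilds it with simplify_gen_binary, and the whole expression
   folds to (const_int 4).  */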
247
248 rtx
249 simplify_replace_rtx (rtx x, rtx old, rtx new)
250 {
251 enum rtx_code code = GET_CODE (x);
252 enum machine_mode mode = GET_MODE (x);
253 enum machine_mode op_mode;
254 rtx op0, op1, op2;
255
256 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
257 to build a new expression substituting recursively. If we can't do
258 anything, return our input. */
259
260 if (x == old)
261 return new;
262
263 switch (GET_RTX_CLASS (code))
264 {
265 case RTX_UNARY:
266 op0 = XEXP (x, 0);
267 op_mode = GET_MODE (op0);
268 op0 = simplify_replace_rtx (op0, old, new);
269 if (op0 == XEXP (x, 0))
270 return x;
271 return simplify_gen_unary (code, mode, op0, op_mode);
272
273 case RTX_BIN_ARITH:
274 case RTX_COMM_ARITH:
275 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
276 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
277 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
278 return x;
279 return simplify_gen_binary (code, mode, op0, op1);
280
281 case RTX_COMPARE:
282 case RTX_COMM_COMPARE:
283 op0 = XEXP (x, 0);
284 op1 = XEXP (x, 1);
285 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
286 op0 = simplify_replace_rtx (op0, old, new);
287 op1 = simplify_replace_rtx (op1, old, new);
288 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
289 return x;
290 return simplify_gen_relational (code, mode, op_mode, op0, op1);
291
292 case RTX_TERNARY:
293 case RTX_BITFIELD_OPS:
294 op0 = XEXP (x, 0);
295 op_mode = GET_MODE (op0);
296 op0 = simplify_replace_rtx (op0, old, new);
297 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
298 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
299 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
300 return x;
301 if (op_mode == VOIDmode)
302 op_mode = GET_MODE (op0);
303 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
304
305 case RTX_EXTRA:
306 /* The only case we try to handle is a SUBREG. */
307 if (code == SUBREG)
308 {
309 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
310 if (op0 == SUBREG_REG (x))
311 return x;
312 op0 = simplify_gen_subreg (GET_MODE (x), op0,
313 GET_MODE (SUBREG_REG (x)),
314 SUBREG_BYTE (x));
315 return op0 ? op0 : x;
316 }
317 break;
318
319 case RTX_OBJ:
320 if (code == MEM)
321 {
322 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
323 if (op0 == XEXP (x, 0))
324 return x;
325 return replace_equiv_address_nv (x, op0);
326 }
327 else if (code == LO_SUM)
328 {
329 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
330 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
331
332 /* (lo_sum (high x) x) -> x */
333 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
334 return op1;
335
336 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
337 return x;
338 return gen_rtx_LO_SUM (mode, op0, op1);
339 }
340 else if (code == REG)
341 {
342 if (REG_P (old) && REGNO (x) == REGNO (old))
343 return new;
344 }
345 break;
346
347 default:
348 break;
349 }
350 return x;
351 }
352 \f
353 /* Try to simplify a unary operation CODE whose output mode is to be
354 MODE with input operand OP whose mode was originally OP_MODE.
355 Return zero if no simplification can be made. */
356 rtx
357 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
358 rtx op, enum machine_mode op_mode)
359 {
360 unsigned int width = GET_MODE_BITSIZE (mode);
361 rtx trueop = avoid_constant_pool_reference (op);
362
363 if (code == VEC_DUPLICATE)
364 {
365 if (!VECTOR_MODE_P (mode))
366 abort ();
367 if (GET_MODE (trueop) != VOIDmode
368 && !VECTOR_MODE_P (GET_MODE (trueop))
369 && GET_MODE_INNER (mode) != GET_MODE (trueop))
370 abort ();
371 if (GET_MODE (trueop) != VOIDmode
372 && VECTOR_MODE_P (GET_MODE (trueop))
373 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
374 abort ();
375 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
376 || GET_CODE (trueop) == CONST_VECTOR)
377 {
378 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
379 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
380 rtvec v = rtvec_alloc (n_elts);
381 unsigned int i;
382
383 if (GET_CODE (trueop) != CONST_VECTOR)
384 for (i = 0; i < n_elts; i++)
385 RTVEC_ELT (v, i) = trueop;
386 else
387 {
388 enum machine_mode inmode = GET_MODE (trueop);
389 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
390 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
391
392 if (in_n_elts >= n_elts || n_elts % in_n_elts)
393 abort ();
394 for (i = 0; i < n_elts; i++)
395 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
396 }
397 return gen_rtx_CONST_VECTOR (mode, v);
398 }
399 }
400 else if (GET_CODE (op) == CONST)
401 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
402
403 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
404 {
405 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
406 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
407 enum machine_mode opmode = GET_MODE (trueop);
408 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
409 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
410 rtvec v = rtvec_alloc (n_elts);
411 unsigned int i;
412
413 if (op_n_elts != n_elts)
414 abort ();
415
416 for (i = 0; i < n_elts; i++)
417 {
418 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
419 CONST_VECTOR_ELT (trueop, i),
420 GET_MODE_INNER (opmode));
421 if (!x)
422 return 0;
423 RTVEC_ELT (v, i) = x;
424 }
425 return gen_rtx_CONST_VECTOR (mode, v);
426 }
427
428 /* The order of these tests is critical so that, for example, we don't
429 check the wrong mode (input vs. output) for a conversion operation,
430 such as FIX. At some point, this should be simplified. */
431
432 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
433 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
434 {
435 HOST_WIDE_INT hv, lv;
436 REAL_VALUE_TYPE d;
437
438 if (GET_CODE (trueop) == CONST_INT)
439 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
440 else
441 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
442
443 REAL_VALUE_FROM_INT (d, lv, hv, mode);
444 d = real_value_truncate (mode, d);
445 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
446 }
447 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
448 && (GET_CODE (trueop) == CONST_DOUBLE
449 || GET_CODE (trueop) == CONST_INT))
450 {
451 HOST_WIDE_INT hv, lv;
452 REAL_VALUE_TYPE d;
453
454 if (GET_CODE (trueop) == CONST_INT)
455 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
456 else
457 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
458
459 if (op_mode == VOIDmode)
460 {
461 /* We don't know how to interpret negative-looking numbers in
462 this case, so don't try to fold those. */
463 if (hv < 0)
464 return 0;
465 }
466 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
467 ;
468 else
469 hv = 0, lv &= GET_MODE_MASK (op_mode);
470
471 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
472 d = real_value_truncate (mode, d);
473 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
474 }
475
476 if (GET_CODE (trueop) == CONST_INT
477 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
478 {
479 HOST_WIDE_INT arg0 = INTVAL (trueop);
480 HOST_WIDE_INT val;
481
482 switch (code)
483 {
484 case NOT:
485 val = ~ arg0;
486 break;
487
488 case NEG:
489 val = - arg0;
490 break;
491
492 case ABS:
493 val = (arg0 >= 0 ? arg0 : - arg0);
494 break;
495
496 case FFS:
497 /* Don't use ffs here. Instead, get low order bit and then its
498 number. If arg0 is zero, this will return 0, as desired. */
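/* Illustration: for arg0 = 40 = 0b101000, arg0 & -arg0 = 0b1000 = 8,
   exact_log2 (8) = 3, and the result is 3 + 1 = 4, since the lowest
   set bit is bit 3 and FFS numbers bits starting from 1.  */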
499 arg0 &= GET_MODE_MASK (mode);
500 val = exact_log2 (arg0 & (- arg0)) + 1;
501 break;
502
503 case CLZ:
504 arg0 &= GET_MODE_MASK (mode);
505 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
506 ;
507 else
508 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
509 break;
510
511 case CTZ:
512 arg0 &= GET_MODE_MASK (mode);
513 if (arg0 == 0)
514 {
515 /* Even if the value at zero is undefined, we have to come
516 up with some replacement. Seems good enough. */
517 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
518 val = GET_MODE_BITSIZE (mode);
519 }
520 else
521 val = exact_log2 (arg0 & -arg0);
522 break;
523
524 case POPCOUNT:
525 arg0 &= GET_MODE_MASK (mode);
526 val = 0;
527 while (arg0)
528 val++, arg0 &= arg0 - 1;
529 break;
530
531 case PARITY:
532 arg0 &= GET_MODE_MASK (mode);
533 val = 0;
534 while (arg0)
535 val++, arg0 &= arg0 - 1;
536 val &= 1;
537 break;
538
539 case TRUNCATE:
540 val = arg0;
541 break;
542
543 case ZERO_EXTEND:
544 /* When zero-extending a CONST_INT, we need to know its
545 original mode. */
546 if (op_mode == VOIDmode)
547 abort ();
548 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
549 {
550 /* If we were really extending the mode,
551 we would have to distinguish between zero-extension
552 and sign-extension. */
553 if (width != GET_MODE_BITSIZE (op_mode))
554 abort ();
555 val = arg0;
556 }
557 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
558 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
559 else
560 return 0;
561 break;
562
563 case SIGN_EXTEND:
564 if (op_mode == VOIDmode)
565 op_mode = mode;
566 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
567 {
568 /* If we were really extending the mode,
569 we would have to distinguish between zero-extension
570 and sign-extension. */
571 if (width != GET_MODE_BITSIZE (op_mode))
572 abort ();
573 val = arg0;
574 }
575 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
576 {
577 val
578 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
579 if (val
580 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
581 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
582 }
583 else
584 return 0;
585 break;
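/* Worked example of the masking above, assuming OP_MODE is QImode
   (8 bits) and arg0 = 0xff: the mask ~((HOST_WIDE_INT) -1 << 8) leaves
   val = 0xff; bit 7 is set, so (HOST_WIDE_INT) 1 << 8 is subtracted and
   val becomes -1, the correctly sign-extended value.  */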
586
587 case SQRT:
588 case FLOAT_EXTEND:
589 case FLOAT_TRUNCATE:
590 case SS_TRUNCATE:
591 case US_TRUNCATE:
592 return 0;
593
594 default:
595 abort ();
596 }
597
598 val = trunc_int_for_mode (val, mode);
599
600 return GEN_INT (val);
601 }
602
603 /* We can do some operations on integer CONST_DOUBLEs. Also allow
604 for a DImode operation on a CONST_INT. */
605 else if (GET_MODE (trueop) == VOIDmode
606 && width <= HOST_BITS_PER_WIDE_INT * 2
607 && (GET_CODE (trueop) == CONST_DOUBLE
608 || GET_CODE (trueop) == CONST_INT))
609 {
610 unsigned HOST_WIDE_INT l1, lv;
611 HOST_WIDE_INT h1, hv;
612
613 if (GET_CODE (trueop) == CONST_DOUBLE)
614 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
615 else
616 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
617
618 switch (code)
619 {
620 case NOT:
621 lv = ~ l1;
622 hv = ~ h1;
623 break;
624
625 case NEG:
626 neg_double (l1, h1, &lv, &hv);
627 break;
628
629 case ABS:
630 if (h1 < 0)
631 neg_double (l1, h1, &lv, &hv);
632 else
633 lv = l1, hv = h1;
634 break;
635
636 case FFS:
637 hv = 0;
638 if (l1 == 0)
639 {
640 if (h1 == 0)
641 lv = 0;
642 else
643 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
644 }
645 else
646 lv = exact_log2 (l1 & -l1) + 1;
647 break;
648
649 case CLZ:
650 hv = 0;
651 if (h1 != 0)
652 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
653 - HOST_BITS_PER_WIDE_INT;
654 else if (l1 != 0)
655 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
656 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
657 lv = GET_MODE_BITSIZE (mode);
658 break;
659
660 case CTZ:
661 hv = 0;
662 if (l1 != 0)
663 lv = exact_log2 (l1 & -l1);
664 else if (h1 != 0)
665 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
666 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
667 lv = GET_MODE_BITSIZE (mode);
668 break;
669
670 case POPCOUNT:
671 hv = 0;
672 lv = 0;
673 while (l1)
674 lv++, l1 &= l1 - 1;
675 while (h1)
676 lv++, h1 &= h1 - 1;
677 break;
678
679 case PARITY:
680 hv = 0;
681 lv = 0;
682 while (l1)
683 lv++, l1 &= l1 - 1;
684 while (h1)
685 lv++, h1 &= h1 - 1;
686 lv &= 1;
687 break;
688
689 case TRUNCATE:
690 /* This is just a change-of-mode, so do nothing. */
691 lv = l1, hv = h1;
692 break;
693
694 case ZERO_EXTEND:
695 if (op_mode == VOIDmode)
696 abort ();
697
698 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
699 return 0;
700
701 hv = 0;
702 lv = l1 & GET_MODE_MASK (op_mode);
703 break;
704
705 case SIGN_EXTEND:
706 if (op_mode == VOIDmode
707 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
708 return 0;
709 else
710 {
711 lv = l1 & GET_MODE_MASK (op_mode);
712 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
713 && (lv & ((HOST_WIDE_INT) 1
714 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
715 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
716
717 hv = HWI_SIGN_EXTEND (lv);
718 }
719 break;
720
721 case SQRT:
722 return 0;
723
724 default:
725 return 0;
726 }
727
728 return immed_double_const (lv, hv, mode);
729 }
730
731 else if (GET_CODE (trueop) == CONST_DOUBLE
732 && GET_MODE_CLASS (mode) == MODE_FLOAT)
733 {
734 REAL_VALUE_TYPE d, t;
735 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
736
737 switch (code)
738 {
739 case SQRT:
740 if (HONOR_SNANS (mode) && real_isnan (&d))
741 return 0;
742 real_sqrt (&t, mode, &d);
743 d = t;
744 break;
745 case ABS:
746 d = REAL_VALUE_ABS (d);
747 break;
748 case NEG:
749 d = REAL_VALUE_NEGATE (d);
750 break;
751 case FLOAT_TRUNCATE:
752 d = real_value_truncate (mode, d);
753 break;
754 case FLOAT_EXTEND:
755 /* All this does is change the mode. */
756 break;
757 case FIX:
758 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
759 break;
760 case NOT:
761 {
762 long tmp[4];
763 int i;
764
765 real_to_target (tmp, &d, GET_MODE (trueop));
766 for (i = 0; i < 4; i++)
767 tmp[i] = ~tmp[i];
768 real_from_target (&d, tmp, mode);
 break;
 769 }
770 default:
771 abort ();
772 }
773 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
774 }
775
776 else if (GET_CODE (trueop) == CONST_DOUBLE
777 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
778 && GET_MODE_CLASS (mode) == MODE_INT
779 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
780 {
781 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
782 operators are intentionally left unspecified (to ease implementation
783 by target backends), for consistency, this routine implements the
784 same semantics for constant folding as used by the middle-end. */
785
786 HOST_WIDE_INT xh, xl, th, tl;
787 REAL_VALUE_TYPE x, t;
788 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
789 switch (code)
790 {
791 case FIX:
792 if (REAL_VALUE_ISNAN (x))
793 return const0_rtx;
794
795 /* Test against the signed upper bound. */
796 if (width > HOST_BITS_PER_WIDE_INT)
797 {
798 th = ((unsigned HOST_WIDE_INT) 1
799 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
800 tl = -1;
801 }
802 else
803 {
804 th = 0;
805 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
806 }
807 real_from_integer (&t, VOIDmode, tl, th, 0);
808 if (REAL_VALUES_LESS (t, x))
809 {
810 xh = th;
811 xl = tl;
812 break;
813 }
814
815 /* Test against the signed lower bound. */
816 if (width > HOST_BITS_PER_WIDE_INT)
817 {
818 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
819 tl = 0;
820 }
821 else
822 {
823 th = -1;
824 tl = (HOST_WIDE_INT) -1 << (width - 1);
825 }
826 real_from_integer (&t, VOIDmode, tl, th, 0);
827 if (REAL_VALUES_LESS (x, t))
828 {
829 xh = th;
830 xl = tl;
831 break;
832 }
833 REAL_VALUE_TO_INT (&xl, &xh, x);
834 break;
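/* E.g. fixing to a 32-bit integer mode with a 64-bit HOST_WIDE_INT:
   the signed upper bound is built as th = 0, tl = 0x7fffffff and the
   lower bound as th = -1, tl = (HOST_WIDE_INT) -1 << 31; out-of-range
   inputs saturate to those bounds, and NaNs were already mapped to 0
   above.  */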
835
836 case UNSIGNED_FIX:
837 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
838 return const0_rtx;
839
840 /* Test against the unsigned upper bound. */
841 if (width == 2*HOST_BITS_PER_WIDE_INT)
842 {
843 th = -1;
844 tl = -1;
845 }
846 else if (width >= HOST_BITS_PER_WIDE_INT)
847 {
848 th = ((unsigned HOST_WIDE_INT) 1
849 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
850 tl = -1;
851 }
852 else
853 {
854 th = 0;
855 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
856 }
857 real_from_integer (&t, VOIDmode, tl, th, 1);
858 if (REAL_VALUES_LESS (t, x))
859 {
860 xh = th;
861 xl = tl;
862 break;
863 }
864
865 REAL_VALUE_TO_INT (&xl, &xh, x);
866 break;
867
868 default:
869 abort ();
870 }
871 return immed_double_const (xl, xh, mode);
872 }
873
874 /* This was formerly used only for non-IEEE float.
875 eggert@twinsun.com says it is safe for IEEE also. */
876 else
877 {
878 enum rtx_code reversed;
879 rtx temp;
880
881 /* There are some simplifications we can do even if the operands
882 aren't constant. */
883 switch (code)
884 {
885 case NOT:
886 /* (not (not X)) == X. */
887 if (GET_CODE (op) == NOT)
888 return XEXP (op, 0);
889
890 /* (not (eq X Y)) == (ne X Y), etc. */
891 if (COMPARISON_P (op)
892 && (mode == BImode || STORE_FLAG_VALUE == -1)
893 && ((reversed = reversed_comparison_code (op, NULL_RTX))
894 != UNKNOWN))
895 return simplify_gen_relational (reversed, mode, VOIDmode,
896 XEXP (op, 0), XEXP (op, 1));
897
898 /* (not (plus X -1)) can become (neg X). */
899 if (GET_CODE (op) == PLUS
900 && XEXP (op, 1) == constm1_rtx)
901 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
902
903 /* Similarly, (not (neg X)) is (plus X -1). */
904 if (GET_CODE (op) == NEG)
905 return plus_constant (XEXP (op, 0), -1);
906
907 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
908 if (GET_CODE (op) == XOR
909 && GET_CODE (XEXP (op, 1)) == CONST_INT
910 && (temp = simplify_unary_operation (NOT, mode,
911 XEXP (op, 1),
912 mode)) != 0)
913 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
914
915
916 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
917 operands other than 1, but that is not valid. We could do a
918 similar simplification for (not (lshiftrt C X)) where C is
919 just the sign bit, but this doesn't seem common enough to
920 bother with. */
921 if (GET_CODE (op) == ASHIFT
922 && XEXP (op, 0) == const1_rtx)
923 {
924 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
925 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
926 }
927
928 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
929 by reversing the comparison code if valid. */
930 if (STORE_FLAG_VALUE == -1
931 && COMPARISON_P (op)
932 && (reversed = reversed_comparison_code (op, NULL_RTX))
933 != UNKNOWN)
934 return simplify_gen_relational (reversed, mode, VOIDmode,
935 XEXP (op, 0), XEXP (op, 1));
936
937 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
938 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
939 so we can perform the above simplification. */
940
941 if (STORE_FLAG_VALUE == -1
942 && GET_CODE (op) == ASHIFTRT
943 && GET_CODE (XEXP (op, 1)) == CONST_INT
944 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
945 return simplify_gen_relational (GE, mode, VOIDmode,
946 XEXP (op, 0), const0_rtx);
947
948 break;
949
950 case NEG:
951 /* (neg (neg X)) == X. */
952 if (GET_CODE (op) == NEG)
953 return XEXP (op, 0);
954
955 /* (neg (plus X 1)) can become (not X). */
956 if (GET_CODE (op) == PLUS
957 && XEXP (op, 1) == const1_rtx)
958 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
959
960 /* Similarly, (neg (not X)) is (plus X 1). */
961 if (GET_CODE (op) == NOT)
962 return plus_constant (XEXP (op, 0), 1);
963
964 /* (neg (minus X Y)) can become (minus Y X). This transformation
965 isn't safe for modes with signed zeros, since if X and Y are
966 both +0, (minus Y X) is the same as (minus X Y). If the
967 rounding mode is towards +infinity (or -infinity) then the two
968 expressions will be rounded differently. */
969 if (GET_CODE (op) == MINUS
970 && !HONOR_SIGNED_ZEROS (mode)
971 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
972 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
973 XEXP (op, 0));
974
975 if (GET_CODE (op) == PLUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
978 {
979 /* (neg (plus A C)) is simplified to (minus -C A). */
980 if (GET_CODE (XEXP (op, 1)) == CONST_INT
981 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
982 {
983 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
984 mode);
985 if (temp)
986 return simplify_gen_binary (MINUS, mode, temp,
987 XEXP (op, 0));
988 }
989
990 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
991 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
992 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
993 }
994
995 /* (neg (mult A B)) becomes (mult (neg A) B).
996 This works even for floating-point values. */
997 if (GET_CODE (op) == MULT
998 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
999 {
1000 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1001 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1002 }
1003
1004 /* NEG commutes with ASHIFT since it is multiplication. Only do
1005 this if we can then eliminate the NEG (e.g., if the operand
1006 is a constant). */
1007 if (GET_CODE (op) == ASHIFT)
1008 {
1009 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1010 mode);
1011 if (temp)
1012 return simplify_gen_binary (ASHIFT, mode, temp,
1013 XEXP (op, 1));
1014 }
1015
1016 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1017 C is equal to the width of MODE minus 1. */
1018 if (GET_CODE (op) == ASHIFTRT
1019 && GET_CODE (XEXP (op, 1)) == CONST_INT
1020 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1021 return simplify_gen_binary (LSHIFTRT, mode,
1022 XEXP (op, 0), XEXP (op, 1));
1023
1024 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1025 C is equal to the width of MODE minus 1. */
1026 if (GET_CODE (op) == LSHIFTRT
1027 && GET_CODE (XEXP (op, 1)) == CONST_INT
1028 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1029 return simplify_gen_binary (ASHIFTRT, mode,
1030 XEXP (op, 0), XEXP (op, 1));
1031
1032 break;
1033
1034 case SIGN_EXTEND:
1035 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1036 becomes just the MINUS if its mode is MODE. This allows
1037 folding switch statements on machines using casesi (such as
1038 the VAX). */
1039 if (GET_CODE (op) == TRUNCATE
1040 && GET_MODE (XEXP (op, 0)) == mode
1041 && GET_CODE (XEXP (op, 0)) == MINUS
1042 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1043 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1044 return XEXP (op, 0);
1045
1046 /* Check for a sign extension of a subreg of a promoted
1047 variable, where the promotion is sign-extended, and the
1048 target mode is the same as the variable's promotion. */
1049 if (GET_CODE (op) == SUBREG
1050 && SUBREG_PROMOTED_VAR_P (op)
1051 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1052 && GET_MODE (XEXP (op, 0)) == mode)
1053 return XEXP (op, 0);
1054
1055 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1056 if (! POINTERS_EXTEND_UNSIGNED
1057 && mode == Pmode && GET_MODE (op) == ptr_mode
1058 && (CONSTANT_P (op)
1059 || (GET_CODE (op) == SUBREG
1060 && GET_CODE (SUBREG_REG (op)) == REG
1061 && REG_POINTER (SUBREG_REG (op))
1062 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1063 return convert_memory_address (Pmode, op);
1064 #endif
1065 break;
1066
1067 case ZERO_EXTEND:
1068 /* Check for a zero extension of a subreg of a promoted
1069 variable, where the promotion is zero-extended, and the
1070 target mode is the same as the variable's promotion. */
1071 if (GET_CODE (op) == SUBREG
1072 && SUBREG_PROMOTED_VAR_P (op)
1073 && SUBREG_PROMOTED_UNSIGNED_P (op)
1074 && GET_MODE (XEXP (op, 0)) == mode)
1075 return XEXP (op, 0);
1076
1077 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1078 if (POINTERS_EXTEND_UNSIGNED > 0
1079 && mode == Pmode && GET_MODE (op) == ptr_mode
1080 && (CONSTANT_P (op)
1081 || (GET_CODE (op) == SUBREG
1082 && GET_CODE (SUBREG_REG (op)) == REG
1083 && REG_POINTER (SUBREG_REG (op))
1084 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1085 return convert_memory_address (Pmode, op);
1086 #endif
1087 break;
1088
1089 default:
1090 break;
1091 }
1092
1093 return 0;
1094 }
1095 }
1096 \f
1097 /* Subroutine of simplify_binary_operation to simplify a commutative,
1098 associative binary operation CODE with result mode MODE, operating
1099 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1100 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1101 canonicalization is possible. */
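/* For instance, with CODE == PLUS this turns
   (plus (plus x (const_int 5)) (const_int 7)) into
   (plus x (const_int 12)): the inner and outer constants meet via the
   "a op (b op c)" rewrite below and fold through
   simplify_binary_operation.  */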
1102
1103 static rtx
1104 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1105 rtx op0, rtx op1)
1106 {
1107 rtx tem;
1108
1109 /* Linearize the operator to the left. */
1110 if (GET_CODE (op1) == code)
1111 {
1112 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1113 if (GET_CODE (op0) == code)
1114 {
1115 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1116 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1117 }
1118
1119 /* "a op (b op c)" becomes "(b op c) op a". */
1120 if (! swap_commutative_operands_p (op1, op0))
1121 return simplify_gen_binary (code, mode, op1, op0);
1122
1123 tem = op0;
1124 op0 = op1;
1125 op1 = tem;
1126 }
1127
1128 if (GET_CODE (op0) == code)
1129 {
1130 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1131 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1132 {
1133 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1134 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1135 }
1136
1137 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1138 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1139 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1140 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1141 if (tem != 0)
1142 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1143
1144 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1145 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1146 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1147 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1148 if (tem != 0)
1149 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1150 }
1151
1152 return 0;
1153 }
1154
1155 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1156 and OP1. Return 0 if no simplification is possible.
1157
1158 Don't use this for relational operations such as EQ or LT.
1159 Use simplify_relational_operation instead. */
1160
1161 rtx
1162 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1163 rtx op0, rtx op1)
1164 {
1165 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1166 HOST_WIDE_INT val;
1167 unsigned int width = GET_MODE_BITSIZE (mode);
1168 rtx trueop0, trueop1;
1169 rtx tem;
1170
1171 #ifdef ENABLE_CHECKING
1172 /* Relational operations don't work here. We must know the mode
1173 of the operands in order to do the comparison correctly.
1174 Assuming a full word can give incorrect results.
1175 Consider comparing 128 with -128 in QImode. */
1176
1177 if (GET_RTX_CLASS (code) == RTX_COMPARE
1178 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
1179 abort ();
1180 #endif
1181
1182 /* Make sure the constant is second. */
1183 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1184 && swap_commutative_operands_p (op0, op1))
1185 {
1186 tem = op0, op0 = op1, op1 = tem;
1187 }
1188
1189 trueop0 = avoid_constant_pool_reference (op0);
1190 trueop1 = avoid_constant_pool_reference (op1);
1191
1192 if (VECTOR_MODE_P (mode)
1193 && GET_CODE (trueop0) == CONST_VECTOR
1194 && GET_CODE (trueop1) == CONST_VECTOR)
1195 {
1196 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1197 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1198 enum machine_mode op0mode = GET_MODE (trueop0);
1199 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1200 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1201 enum machine_mode op1mode = GET_MODE (trueop1);
1202 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1203 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1204 rtvec v = rtvec_alloc (n_elts);
1205 unsigned int i;
1206
1207 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1208 abort ();
1209
1210 for (i = 0; i < n_elts; i++)
1211 {
1212 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1213 CONST_VECTOR_ELT (trueop0, i),
1214 CONST_VECTOR_ELT (trueop1, i));
1215 if (!x)
1216 return 0;
1217 RTVEC_ELT (v, i) = x;
1218 }
1219
1220 return gen_rtx_CONST_VECTOR (mode, v);
1221 }
1222
1223 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1224 && GET_CODE (trueop0) == CONST_DOUBLE
1225 && GET_CODE (trueop1) == CONST_DOUBLE
1226 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1227 {
1228 if (code == AND
1229 || code == IOR
1230 || code == XOR)
1231 {
1232 long tmp0[4];
1233 long tmp1[4];
1234 REAL_VALUE_TYPE r;
1235 int i;
1236
1237 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1238 GET_MODE (op0));
1239 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1240 GET_MODE (op1));
1241 for (i = 0; i < 4; i++)
1242 {
1243 if (code == AND)
1244 tmp0[i] &= tmp1[i];
1245 else if (code == IOR)
1246 tmp0[i] |= tmp1[i];
1247 else if (code == XOR)
1248 tmp0[i] ^= tmp1[i];
1249 else
1250 abort ();
1251 }
1252 real_from_target (&r, tmp0, mode);
1253 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1254 }
1255 else
1256 {
1257 REAL_VALUE_TYPE f0, f1, value;
1258
1259 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1260 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1261 f0 = real_value_truncate (mode, f0);
1262 f1 = real_value_truncate (mode, f1);
1263
1264 if (HONOR_SNANS (mode)
1265 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1266 return 0;
1267
1268 if (code == DIV
1269 && REAL_VALUES_EQUAL (f1, dconst0)
1270 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1271 return 0;
1272
1273 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1274
1275 value = real_value_truncate (mode, value);
1276 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1277 }
1278 }
1279
1280 /* We can fold some multi-word operations. */
1281 if (GET_MODE_CLASS (mode) == MODE_INT
1282 && width == HOST_BITS_PER_WIDE_INT * 2
1283 && (GET_CODE (trueop0) == CONST_DOUBLE
1284 || GET_CODE (trueop0) == CONST_INT)
1285 && (GET_CODE (trueop1) == CONST_DOUBLE
1286 || GET_CODE (trueop1) == CONST_INT))
1287 {
1288 unsigned HOST_WIDE_INT l1, l2, lv;
1289 HOST_WIDE_INT h1, h2, hv;
1290
1291 if (GET_CODE (trueop0) == CONST_DOUBLE)
1292 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1293 else
1294 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1295
1296 if (GET_CODE (trueop1) == CONST_DOUBLE)
1297 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1298 else
1299 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1300
1301 switch (code)
1302 {
1303 case MINUS:
1304 /* A - B == A + (-B). */
1305 neg_double (l2, h2, &lv, &hv);
1306 l2 = lv, h2 = hv;
1307
1308 /* Fall through.... */
1309
1310 case PLUS:
1311 add_double (l1, h1, l2, h2, &lv, &hv);
1312 break;
1313
1314 case MULT:
1315 mul_double (l1, h1, l2, h2, &lv, &hv);
1316 break;
1317
1318 case DIV: case MOD: case UDIV: case UMOD:
1319 /* We'd need to include tree.h to do this and it doesn't seem worth
1320 it. */
1321 return 0;
1322
1323 case AND:
1324 lv = l1 & l2, hv = h1 & h2;
1325 break;
1326
1327 case IOR:
1328 lv = l1 | l2, hv = h1 | h2;
1329 break;
1330
1331 case XOR:
1332 lv = l1 ^ l2, hv = h1 ^ h2;
1333 break;
1334
1335 case SMIN:
1336 if (h1 < h2
1337 || (h1 == h2
1338 && ((unsigned HOST_WIDE_INT) l1
1339 < (unsigned HOST_WIDE_INT) l2)))
1340 lv = l1, hv = h1;
1341 else
1342 lv = l2, hv = h2;
1343 break;
1344
1345 case SMAX:
1346 if (h1 > h2
1347 || (h1 == h2
1348 && ((unsigned HOST_WIDE_INT) l1
1349 > (unsigned HOST_WIDE_INT) l2)))
1350 lv = l1, hv = h1;
1351 else
1352 lv = l2, hv = h2;
1353 break;
1354
1355 case UMIN:
1356 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1357 || (h1 == h2
1358 && ((unsigned HOST_WIDE_INT) l1
1359 < (unsigned HOST_WIDE_INT) l2)))
1360 lv = l1, hv = h1;
1361 else
1362 lv = l2, hv = h2;
1363 break;
1364
1365 case UMAX:
1366 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1367 || (h1 == h2
1368 && ((unsigned HOST_WIDE_INT) l1
1369 > (unsigned HOST_WIDE_INT) l2)))
1370 lv = l1, hv = h1;
1371 else
1372 lv = l2, hv = h2;
1373 break;
1374
1375 case LSHIFTRT: case ASHIFTRT:
1376 case ASHIFT:
1377 case ROTATE: case ROTATERT:
1378 if (SHIFT_COUNT_TRUNCATED)
1379 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1380
1381 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1382 return 0;
1383
1384 if (code == LSHIFTRT || code == ASHIFTRT)
1385 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1386 code == ASHIFTRT);
1387 else if (code == ASHIFT)
1388 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1389 else if (code == ROTATE)
1390 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1391 else /* code == ROTATERT */
1392 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1393 break;
1394
1395 default:
1396 return 0;
1397 }
1398
1399 return immed_double_const (lv, hv, mode);
1400 }
1401
1402 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1403 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1404 {
1405 /* Even if we can't compute a constant result,
1406 there are some cases worth simplifying. */
1407
1408 switch (code)
1409 {
1410 case PLUS:
1411 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1412 when x is NaN, infinite, or finite and nonzero. They aren't
1413 when x is -0 and the rounding mode is not towards -infinity,
1414 since (-0) + 0 is then 0. */
1415 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1416 return op0;
1417
1418 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1419 transformations are safe even for IEEE. */
1420 if (GET_CODE (op0) == NEG)
1421 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1422 else if (GET_CODE (op1) == NEG)
1423 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1424
1425 /* (~a) + 1 -> -a */
1426 if (INTEGRAL_MODE_P (mode)
1427 && GET_CODE (op0) == NOT
1428 && trueop1 == const1_rtx)
1429 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1430
1431 /* Handle both-operands-constant cases. We can only add
1432 CONST_INTs to constants since the sum of relocatable symbols
1433 can't be handled by most assemblers. Don't add CONST_INT
1434 to CONST_INT since overflow won't be computed properly if wider
1435 than HOST_BITS_PER_WIDE_INT. */
1436
1437 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1438 && GET_CODE (op1) == CONST_INT)
1439 return plus_constant (op0, INTVAL (op1));
1440 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1441 && GET_CODE (op0) == CONST_INT)
1442 return plus_constant (op1, INTVAL (op0));
1443
1444 /* See if this is something like X * C - X or vice versa or
1445 if the multiplication is written as a shift. If so, we can
1446 distribute and make a new multiply, shift, or maybe just
 1447          have X (if C is 2 in the example above).  But don't make a
 1448          real multiply if we didn't have one before.  */
1449
1450 if (! FLOAT_MODE_P (mode))
1451 {
1452 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1453 rtx lhs = op0, rhs = op1;
1454 int had_mult = 0;
1455
1456 if (GET_CODE (lhs) == NEG)
1457 coeff0 = -1, lhs = XEXP (lhs, 0);
1458 else if (GET_CODE (lhs) == MULT
1459 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1460 {
1461 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1462 had_mult = 1;
1463 }
1464 else if (GET_CODE (lhs) == ASHIFT
1465 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1466 && INTVAL (XEXP (lhs, 1)) >= 0
1467 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1468 {
1469 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1470 lhs = XEXP (lhs, 0);
1471 }
1472
1473 if (GET_CODE (rhs) == NEG)
1474 coeff1 = -1, rhs = XEXP (rhs, 0);
1475 else if (GET_CODE (rhs) == MULT
1476 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1477 {
1478 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1479 had_mult = 1;
1480 }
1481 else if (GET_CODE (rhs) == ASHIFT
1482 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1483 && INTVAL (XEXP (rhs, 1)) >= 0
1484 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1485 {
1486 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1487 rhs = XEXP (rhs, 0);
1488 }
1489
1490 if (rtx_equal_p (lhs, rhs))
1491 {
1492 tem = simplify_gen_binary (MULT, mode, lhs,
1493 GEN_INT (coeff0 + coeff1));
1494 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1495 }
1496 }
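	  /* E.g. (plus (mult x (const_int 3)) x): coeff0 becomes 3,
	     coeff1 stays 1, both sides reduce to x, and the sum folds to
	     (mult x (const_int 4)); had_mult is set, so returning a new
	     MULT is allowed.  */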
1497
1498 /* If one of the operands is a PLUS or a MINUS, see if we can
1499 simplify this by the associative law.
1500 Don't use the associative law for floating point.
1501 The inaccuracy makes it nonassociative,
1502 and subtle programs can break if operations are associated. */
1503
1504 if (INTEGRAL_MODE_P (mode)
1505 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1506 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1507 || (GET_CODE (op0) == CONST
1508 && GET_CODE (XEXP (op0, 0)) == PLUS)
1509 || (GET_CODE (op1) == CONST
1510 && GET_CODE (XEXP (op1, 0)) == PLUS))
1511 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1512 return tem;
1513
1514 /* Reassociate floating point addition only when the user
1515 specifies unsafe math optimizations. */
1516 if (FLOAT_MODE_P (mode)
1517 && flag_unsafe_math_optimizations)
1518 {
1519 tem = simplify_associative_operation (code, mode, op0, op1);
1520 if (tem)
1521 return tem;
1522 }
1523 break;
1524
1525 case COMPARE:
1526 #ifdef HAVE_cc0
1527 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1528 using cc0, in which case we want to leave it as a COMPARE
1529 so we can distinguish it from a register-register-copy.
1530
1531 In IEEE floating point, x-0 is not the same as x. */
1532
1533 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1534 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1535 && trueop1 == CONST0_RTX (mode))
1536 return op0;
1537 #endif
1538
1539 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1540 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1541 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1542 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1543 {
1544 rtx xop00 = XEXP (op0, 0);
1545 rtx xop10 = XEXP (op1, 0);
1546
1547 #ifdef HAVE_cc0
1548 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1549 #else
1550 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1551 && GET_MODE (xop00) == GET_MODE (xop10)
1552 && REGNO (xop00) == REGNO (xop10)
1553 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1554 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1555 #endif
1556 return xop00;
1557 }
1558 break;
1559
1560 case MINUS:
1561 /* We can't assume x-x is 0 even with non-IEEE floating point,
1562 but since it is zero except in very strange circumstances, we
1563 will treat it as zero with -funsafe-math-optimizations. */
1564 if (rtx_equal_p (trueop0, trueop1)
1565 && ! side_effects_p (op0)
1566 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1567 return CONST0_RTX (mode);
1568
1569 /* Change subtraction from zero into negation. (0 - x) is the
1570 same as -x when x is NaN, infinite, or finite and nonzero.
1571 But if the mode has signed zeros, and does not round towards
1572 -infinity, then 0 - 0 is 0, not -0. */
1573 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1574 return simplify_gen_unary (NEG, mode, op1, mode);
1575
1576 /* (-1 - a) is ~a. */
1577 if (trueop0 == constm1_rtx)
1578 return simplify_gen_unary (NOT, mode, op1, mode);
1579
1580 /* Subtracting 0 has no effect unless the mode has signed zeros
1581 and supports rounding towards -infinity. In such a case,
1582 0 - 0 is -0. */
1583 if (!(HONOR_SIGNED_ZEROS (mode)
1584 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1585 && trueop1 == CONST0_RTX (mode))
1586 return op0;
1587
1588 /* See if this is something like X * C - X or vice versa or
1589 if the multiplication is written as a shift. If so, we can
1590 distribute and make a new multiply, shift, or maybe just
 1591          have X (if C is 2 in the example above).  But don't make a
 1592          real multiply if we didn't have one before.  */
1593
1594 if (! FLOAT_MODE_P (mode))
1595 {
1596 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1597 rtx lhs = op0, rhs = op1;
1598 int had_mult = 0;
1599
1600 if (GET_CODE (lhs) == NEG)
1601 coeff0 = -1, lhs = XEXP (lhs, 0);
1602 else if (GET_CODE (lhs) == MULT
1603 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1604 {
1605 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1606 had_mult = 1;
1607 }
1608 else if (GET_CODE (lhs) == ASHIFT
1609 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1610 && INTVAL (XEXP (lhs, 1)) >= 0
1611 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1612 {
1613 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1614 lhs = XEXP (lhs, 0);
1615 }
1616
1617 if (GET_CODE (rhs) == NEG)
1618 coeff1 = - 1, rhs = XEXP (rhs, 0);
1619 else if (GET_CODE (rhs) == MULT
1620 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1621 {
1622 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1623 had_mult = 1;
1624 }
1625 else if (GET_CODE (rhs) == ASHIFT
1626 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1627 && INTVAL (XEXP (rhs, 1)) >= 0
1628 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1629 {
1630 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1631 rhs = XEXP (rhs, 0);
1632 }
1633
1634 if (rtx_equal_p (lhs, rhs))
1635 {
1636 tem = simplify_gen_binary (MULT, mode, lhs,
1637 GEN_INT (coeff0 - coeff1));
1638 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1639 }
1640 }
1641
1642 /* (a - (-b)) -> (a + b). True even for IEEE. */
1643 if (GET_CODE (op1) == NEG)
1644 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1645
1646 /* (-x - c) may be simplified as (-c - x). */
1647 if (GET_CODE (op0) == NEG
1648 && (GET_CODE (op1) == CONST_INT
1649 || GET_CODE (op1) == CONST_DOUBLE))
1650 {
1651 tem = simplify_unary_operation (NEG, mode, op1, mode);
1652 if (tem)
1653 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1654 }
1655
1656 /* If one of the operands is a PLUS or a MINUS, see if we can
1657 simplify this by the associative law.
1658 Don't use the associative law for floating point.
1659 The inaccuracy makes it nonassociative,
1660 and subtle programs can break if operations are associated. */
1661
1662 if (INTEGRAL_MODE_P (mode)
1663 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1664 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1665 || (GET_CODE (op0) == CONST
1666 && GET_CODE (XEXP (op0, 0)) == PLUS)
1667 || (GET_CODE (op1) == CONST
1668 && GET_CODE (XEXP (op1, 0)) == PLUS))
1669 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1670 return tem;
1671
1672 /* Don't let a relocatable value get a negative coeff. */
1673 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1674 return simplify_gen_binary (PLUS, mode,
1675 op0,
1676 neg_const_int (mode, op1));
1677
1678 /* (x - (x & y)) -> (x & ~y) */
1679 if (GET_CODE (op1) == AND)
1680 {
1681 if (rtx_equal_p (op0, XEXP (op1, 0)))
1682 {
1683 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1684 GET_MODE (XEXP (op1, 1)));
1685 return simplify_gen_binary (AND, mode, op0, tem);
1686 }
1687 if (rtx_equal_p (op0, XEXP (op1, 1)))
1688 {
1689 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1690 GET_MODE (XEXP (op1, 0)));
1691 return simplify_gen_binary (AND, mode, op0, tem);
1692 }
1693 }
1694 break;
1695
1696 case MULT:
1697 if (trueop1 == constm1_rtx)
1698 return simplify_gen_unary (NEG, mode, op0, mode);
1699
1700 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1701 x is NaN, since x * 0 is then also NaN. Nor is it valid
1702 when the mode has signed zeros, since multiplying a negative
1703 number by 0 will give -0, not 0. */
1704 if (!HONOR_NANS (mode)
1705 && !HONOR_SIGNED_ZEROS (mode)
1706 && trueop1 == CONST0_RTX (mode)
1707 && ! side_effects_p (op0))
1708 return op1;
1709
1710 /* In IEEE floating point, x*1 is not equivalent to x for
1711 signalling NaNs. */
1712 if (!HONOR_SNANS (mode)
1713 && trueop1 == CONST1_RTX (mode))
1714 return op0;
1715
1716 /* Convert multiply by constant power of two into shift unless
1717 we are still generating RTL. This test is a kludge. */
1718 if (GET_CODE (trueop1) == CONST_INT
1719 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1720 /* If the mode is larger than the host word size, and the
1721 uppermost bit is set, then this isn't a power of two due
1722 to implicit sign extension. */
1723 && (width <= HOST_BITS_PER_WIDE_INT
1724 || val != HOST_BITS_PER_WIDE_INT - 1)
1725 && ! rtx_equal_function_value_matters)
1726 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1727
1728 /* x*2 is x+x and x*(-1) is -x */
1729 if (GET_CODE (trueop1) == CONST_DOUBLE
1730 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1731 && GET_MODE (op0) == mode)
1732 {
1733 REAL_VALUE_TYPE d;
1734 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1735
1736 if (REAL_VALUES_EQUAL (d, dconst2))
1737 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1738
1739 if (REAL_VALUES_EQUAL (d, dconstm1))
1740 return simplify_gen_unary (NEG, mode, op0, mode);
1741 }
1742
1743 /* Reassociate multiplication, but for floating point MULTs
1744 only when the user specifies unsafe math optimizations. */
1745 if (! FLOAT_MODE_P (mode)
1746 || flag_unsafe_math_optimizations)
1747 {
1748 tem = simplify_associative_operation (code, mode, op0, op1);
1749 if (tem)
1750 return tem;
1751 }
1752 break;
1753
1754 case IOR:
1755 if (trueop1 == const0_rtx)
1756 return op0;
1757 if (GET_CODE (trueop1) == CONST_INT
1758 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1759 == GET_MODE_MASK (mode)))
1760 return op1;
1761 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1762 return op0;
1763 /* A | (~A) -> -1 */
1764 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1765 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1766 && ! side_effects_p (op0)
1767 && GET_MODE_CLASS (mode) != MODE_CC)
1768 return constm1_rtx;
1769 tem = simplify_associative_operation (code, mode, op0, op1);
1770 if (tem)
1771 return tem;
1772 break;
1773
1774 case XOR:
1775 if (trueop1 == const0_rtx)
1776 return op0;
1777 if (GET_CODE (trueop1) == CONST_INT
1778 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1779 == GET_MODE_MASK (mode)))
1780 return simplify_gen_unary (NOT, mode, op0, mode);
1781 if (trueop0 == trueop1 && ! side_effects_p (op0)
1782 && GET_MODE_CLASS (mode) != MODE_CC)
1783 return const0_rtx;
1784 tem = simplify_associative_operation (code, mode, op0, op1);
1785 if (tem)
1786 return tem;
1787 break;
1788
1789 case AND:
1790 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1791 return const0_rtx;
1792 if (GET_CODE (trueop1) == CONST_INT
1793 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1794 == GET_MODE_MASK (mode)))
1795 return op0;
1796 if (trueop0 == trueop1 && ! side_effects_p (op0)
1797 && GET_MODE_CLASS (mode) != MODE_CC)
1798 return op0;
1799 /* A & (~A) -> 0 */
1800 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1801 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1802 && ! side_effects_p (op0)
1803 && GET_MODE_CLASS (mode) != MODE_CC)
1804 return const0_rtx;
1805 tem = simplify_associative_operation (code, mode, op0, op1);
1806 if (tem)
1807 return tem;
1808 break;
1809
1810 case UDIV:
1811 /* Convert divide by power of two into shift (divide by 1 handled
1812 below). */
1813 if (GET_CODE (trueop1) == CONST_INT
1814 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1815 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1816
1817 /* Fall through.... */
1818
1819 case DIV:
1820 if (trueop1 == CONST1_RTX (mode))
1821 {
1822 /* On some platforms DIV uses narrower mode than its
1823 operands. */
1824 rtx x = gen_lowpart_common (mode, op0);
1825 if (x)
1826 return x;
1827 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1828 return gen_lowpart_SUBREG (mode, op0);
1829 else
1830 return op0;
1831 }
1832
1833 /* Maybe change 0 / x to 0. This transformation isn't safe for
1834 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1835 Nor is it safe for modes with signed zeros, since dividing
1836 0 by a negative number gives -0, not 0. */
1837 if (!HONOR_NANS (mode)
1838 && !HONOR_SIGNED_ZEROS (mode)
1839 && trueop0 == CONST0_RTX (mode)
1840 && ! side_effects_p (op1))
1841 return op0;
1842
1843 /* Change division by a constant into multiplication. Only do
1844 this with -funsafe-math-optimizations. */
1845 else if (GET_CODE (trueop1) == CONST_DOUBLE
1846 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1847 && trueop1 != CONST0_RTX (mode)
1848 && flag_unsafe_math_optimizations)
1849 {
1850 REAL_VALUE_TYPE d;
1851 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1852
1853 if (! REAL_VALUES_EQUAL (d, dconst0))
1854 {
1855 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1856 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1857 return simplify_gen_binary (MULT, mode, op0, tem);
1858 }
1859 }
1860 break;
1861
1862 case UMOD:
1863 /* Handle modulus by power of two (mod with 1 handled below). */
1864 if (GET_CODE (trueop1) == CONST_INT
1865 && exact_log2 (INTVAL (trueop1)) > 0)
1866 return simplify_gen_binary (AND, mode, op0,
1867 GEN_INT (INTVAL (op1) - 1));
1868
1869 /* Fall through.... */
1870
1871 case MOD:
1872 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1873 && ! side_effects_p (op0) && ! side_effects_p (op1))
1874 return const0_rtx;
1875 break;
1876
1877 case ROTATERT:
1878 case ROTATE:
1879 case ASHIFTRT:
1880 /* Rotating ~0 always results in ~0. */
1881 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1882 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1883 && ! side_effects_p (op1))
1884 return op0;
1885
1886 /* Fall through.... */
1887
1888 case ASHIFT:
1889 case LSHIFTRT:
1890 if (trueop1 == const0_rtx)
1891 return op0;
1892 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1893 return op0;
1894 break;
1895
1896 case SMIN:
1897 if (width <= HOST_BITS_PER_WIDE_INT
1898 && GET_CODE (trueop1) == CONST_INT
1899 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1900 && ! side_effects_p (op0))
1901 return op1;
1902 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1903 return op0;
1904 tem = simplify_associative_operation (code, mode, op0, op1);
1905 if (tem)
1906 return tem;
1907 break;
1908
1909 case SMAX:
1910 if (width <= HOST_BITS_PER_WIDE_INT
1911 && GET_CODE (trueop1) == CONST_INT
1912 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1913 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1914 && ! side_effects_p (op0))
1915 return op1;
1916 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1917 return op0;
1918 tem = simplify_associative_operation (code, mode, op0, op1);
1919 if (tem)
1920 return tem;
1921 break;
1922
1923 case UMIN:
1924 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1925 return op1;
1926 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1927 return op0;
1928 tem = simplify_associative_operation (code, mode, op0, op1);
1929 if (tem)
1930 return tem;
1931 break;
1932
1933 case UMAX:
1934 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1935 return op1;
1936 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1937 return op0;
1938 tem = simplify_associative_operation (code, mode, op0, op1);
1939 if (tem)
1940 return tem;
1941 break;
1942
1943 case SS_PLUS:
1944 case US_PLUS:
1945 case SS_MINUS:
1946 case US_MINUS:
1947 /* ??? There are simplifications that can be done. */
1948 return 0;
1949
1950 case VEC_SELECT:
1951 if (!VECTOR_MODE_P (mode))
1952 {
1953 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1954 || (mode
1955 != GET_MODE_INNER (GET_MODE (trueop0)))
1956 || GET_CODE (trueop1) != PARALLEL
1957 || XVECLEN (trueop1, 0) != 1
1958 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1959 abort ();
1960
1961 if (GET_CODE (trueop0) == CONST_VECTOR)
1962 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1963 }
1964 else
1965 {
1966 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1967 || (GET_MODE_INNER (mode)
1968 != GET_MODE_INNER (GET_MODE (trueop0)))
1969 || GET_CODE (trueop1) != PARALLEL)
1970 abort ();
1971
1972 if (GET_CODE (trueop0) == CONST_VECTOR)
1973 {
1974 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1975 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1976 rtvec v = rtvec_alloc (n_elts);
1977 unsigned int i;
1978
1979 if (XVECLEN (trueop1, 0) != (int) n_elts)
1980 abort ();
1981 for (i = 0; i < n_elts; i++)
1982 {
1983 rtx x = XVECEXP (trueop1, 0, i);
1984
1985 if (GET_CODE (x) != CONST_INT)
1986 abort ();
1987 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1988 }
1989
1990 return gen_rtx_CONST_VECTOR (mode, v);
1991 }
1992 }
1993 return 0;
1994 case VEC_CONCAT:
1995 {
1996 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1997 ? GET_MODE (trueop0)
1998 : GET_MODE_INNER (mode));
1999 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2000 ? GET_MODE (trueop1)
2001 : GET_MODE_INNER (mode));
2002
2003 if (!VECTOR_MODE_P (mode)
2004 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2005 != GET_MODE_SIZE (mode)))
2006 abort ();
2007
2008 if ((VECTOR_MODE_P (op0_mode)
2009 && (GET_MODE_INNER (mode)
2010 != GET_MODE_INNER (op0_mode)))
2011 || (!VECTOR_MODE_P (op0_mode)
2012 && GET_MODE_INNER (mode) != op0_mode))
2013 abort ();
2014
2015 if ((VECTOR_MODE_P (op1_mode)
2016 && (GET_MODE_INNER (mode)
2017 != GET_MODE_INNER (op1_mode)))
2018 || (!VECTOR_MODE_P (op1_mode)
2019 && GET_MODE_INNER (mode) != op1_mode))
2020 abort ();
2021
2022 if ((GET_CODE (trueop0) == CONST_VECTOR
2023 || GET_CODE (trueop0) == CONST_INT
2024 || GET_CODE (trueop0) == CONST_DOUBLE)
2025 && (GET_CODE (trueop1) == CONST_VECTOR
2026 || GET_CODE (trueop1) == CONST_INT
2027 || GET_CODE (trueop1) == CONST_DOUBLE))
2028 {
2029 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2030 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2031 rtvec v = rtvec_alloc (n_elts);
2032 unsigned int i;
2033 unsigned in_n_elts = 1;
2034
2035 if (VECTOR_MODE_P (op0_mode))
2036 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2037 for (i = 0; i < n_elts; i++)
2038 {
2039 if (i < in_n_elts)
2040 {
2041 if (!VECTOR_MODE_P (op0_mode))
2042 RTVEC_ELT (v, i) = trueop0;
2043 else
2044 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2045 }
2046 else
2047 {
2048 if (!VECTOR_MODE_P (op1_mode))
2049 RTVEC_ELT (v, i) = trueop1;
2050 else
2051 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2052 i - in_n_elts);
2053 }
2054 }
2055
2056 return gen_rtx_CONST_VECTOR (mode, v);
2057 }
2058 }
2059 return 0;
2060
2061 default:
2062 abort ();
2063 }
2064
2065 return 0;
2066 }
2067
2068 /* Get the integer argument values in two forms:
2069 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
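/* Worked example (illustrative): for an 8-bit MODE on a host with a
   64-bit HOST_WIDE_INT, the CONST_INT -1 gives ARG0 == 0xff after the
   masking below and ARG0S == -1 after the sign extension.  */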
2070
2071 arg0 = INTVAL (trueop0);
2072 arg1 = INTVAL (trueop1);
2073
2074 if (width < HOST_BITS_PER_WIDE_INT)
2075 {
2076 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2077 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2078
2079 arg0s = arg0;
2080 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2081 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2082
2083 arg1s = arg1;
2084 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2085 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2086 }
2087 else
2088 {
2089 arg0s = arg0;
2090 arg1s = arg1;
2091 }
2092
2093 /* Compute the value of the arithmetic. */
2094
2095 switch (code)
2096 {
2097 case PLUS:
2098 val = arg0s + arg1s;
2099 break;
2100
2101 case MINUS:
2102 val = arg0s - arg1s;
2103 break;
2104
2105 case MULT:
2106 val = arg0s * arg1s;
2107 break;
2108
2109 case DIV:
2110 if (arg1s == 0
2111 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2112 && arg1s == -1))
2113 return 0;
2114 val = arg0s / arg1s;
2115 break;
2116
2117 case MOD:
2118 if (arg1s == 0
2119 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2120 && arg1s == -1))
2121 return 0;
2122 val = arg0s % arg1s;
2123 break;
2124
2125 case UDIV:
2126 if (arg1 == 0
2127 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2128 && arg1s == -1))
2129 return 0;
2130 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2131 break;
2132
2133 case UMOD:
2134 if (arg1 == 0
2135 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2136 && arg1s == -1))
2137 return 0;
2138 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2139 break;
2140
2141 case AND:
2142 val = arg0 & arg1;
2143 break;
2144
2145 case IOR:
2146 val = arg0 | arg1;
2147 break;
2148
2149 case XOR:
2150 val = arg0 ^ arg1;
2151 break;
2152
2153 case LSHIFTRT:
2154 /* If shift count is undefined, don't fold it; let the machine do
2155 what it wants. But truncate it if the machine will do that. */
2156 if (arg1 < 0)
2157 return 0;
2158
2159 if (SHIFT_COUNT_TRUNCATED)
2160 arg1 %= width;
2161
2162 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2163 break;
2164
2165 case ASHIFT:
2166 if (arg1 < 0)
2167 return 0;
2168
2169 if (SHIFT_COUNT_TRUNCATED)
2170 arg1 %= width;
2171
2172 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2173 break;
2174
2175 case ASHIFTRT:
2176 if (arg1 < 0)
2177 return 0;
2178
2179 if (SHIFT_COUNT_TRUNCATED)
2180 arg1 %= width;
2181
2182 val = arg0s >> arg1;
2183
2184 /* The bootstrap compiler may not have sign-extended the right shift.
2185 Manually extend the sign to ensure the bootstrap cc matches gcc. */
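/* For instance (illustrative), with ARG0S == -2 and ARG1 == 1 a compiler
   that implements >> of a negative value as a logical shift would leave a
   large positive VAL; the OR below restores the expected -1.  */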
2186 if (arg0s < 0 && arg1 > 0)
2187 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2188
2189 break;
2190
2191 case ROTATERT:
2192 if (arg1 < 0)
2193 return 0;
2194
2195 arg1 %= width;
2196 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2197 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2198 break;
2199
2200 case ROTATE:
2201 if (arg1 < 0)
2202 return 0;
2203
2204 arg1 %= width;
2205 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2206 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2207 break;
2208
2209 case COMPARE:
2210 /* Do nothing here. */
2211 return 0;
2212
2213 case SMIN:
2214 val = arg0s <= arg1s ? arg0s : arg1s;
2215 break;
2216
2217 case UMIN:
2218 val = ((unsigned HOST_WIDE_INT) arg0
2219 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2220 break;
2221
2222 case SMAX:
2223 val = arg0s > arg1s ? arg0s : arg1s;
2224 break;
2225
2226 case UMAX:
2227 val = ((unsigned HOST_WIDE_INT) arg0
2228 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2229 break;
2230
2231 case SS_PLUS:
2232 case US_PLUS:
2233 case SS_MINUS:
2234 case US_MINUS:
2235 /* ??? There are simplifications that can be done. */
2236 return 0;
2237
2238 default:
2239 abort ();
2240 }
2241
2242 val = trunc_int_for_mode (val, mode);
2243
2244 return GEN_INT (val);
2245 }
2246 \f
2247 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2248 PLUS or MINUS.
2249
2250 Rather than testing for specific cases, we use a brute-force method:
2251 apply all possible simplifications until no more changes occur, then
2252 rebuild the operation.
2253
2254 If FORCE is true, then always generate the rtx. This is used to
2255 canonicalize stuff emitted from simplify_gen_binary. Note that this
2256 can still fail if the rtx is too complex. It won't fail just because
2257 the result is not 'simpler' than the input, however. */
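/* A sketch of the approach (illustrative; the register names are made up):
   simplifying

       (plus (minus (reg A) (reg B)) (plus (reg C) (const_int 3)))

   first flattens the nested PLUS/MINUS rtxes into the ops[] array as
   { A, + } { B, - } { C, + } { 3, + }, then repeatedly feeds pairs of
   operands to simplify_binary_operation, and finally rebuilds a chain of
   PLUS and MINUS expressions from whatever operands remain.  */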
2258
2259 struct simplify_plus_minus_op_data
2260 {
2261 rtx op;
2262 int neg;
2263 };
2264
2265 static int
2266 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2267 {
2268 const struct simplify_plus_minus_op_data *d1 = p1;
2269 const struct simplify_plus_minus_op_data *d2 = p2;
2270
2271 return (commutative_operand_precedence (d2->op)
2272 - commutative_operand_precedence (d1->op));
2273 }
2274
2275 static rtx
2276 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2277 rtx op1, int force)
2278 {
2279 struct simplify_plus_minus_op_data ops[8];
2280 rtx result, tem;
2281 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2282 int first, changed;
2283 int i, j;
2284
2285 memset (ops, 0, sizeof ops);
2286
2287 /* Set up the two operands and then expand them until nothing has been
2288 changed. If we run out of room in our array, give up; this should
2289 almost never happen. */
2290
2291 ops[0].op = op0;
2292 ops[0].neg = 0;
2293 ops[1].op = op1;
2294 ops[1].neg = (code == MINUS);
2295
2296 do
2297 {
2298 changed = 0;
2299
2300 for (i = 0; i < n_ops; i++)
2301 {
2302 rtx this_op = ops[i].op;
2303 int this_neg = ops[i].neg;
2304 enum rtx_code this_code = GET_CODE (this_op);
2305
2306 switch (this_code)
2307 {
2308 case PLUS:
2309 case MINUS:
2310 if (n_ops == 7)
2311 return NULL_RTX;
2312
2313 ops[n_ops].op = XEXP (this_op, 1);
2314 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2315 n_ops++;
2316
2317 ops[i].op = XEXP (this_op, 0);
2318 input_ops++;
2319 changed = 1;
2320 break;
2321
2322 case NEG:
2323 ops[i].op = XEXP (this_op, 0);
2324 ops[i].neg = ! this_neg;
2325 changed = 1;
2326 break;
2327
2328 case CONST:
2329 if (n_ops < 7
2330 && GET_CODE (XEXP (this_op, 0)) == PLUS
2331 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2332 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2333 {
2334 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2335 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2336 ops[n_ops].neg = this_neg;
2337 n_ops++;
2338 input_consts++;
2339 changed = 1;
2340 }
2341 break;
2342
2343 case NOT:
2344 /* ~a -> (-a - 1) */
2345 if (n_ops != 7)
2346 {
2347 ops[n_ops].op = constm1_rtx;
2348 ops[n_ops++].neg = this_neg;
2349 ops[i].op = XEXP (this_op, 0);
2350 ops[i].neg = !this_neg;
2351 changed = 1;
2352 }
2353 break;
2354
2355 case CONST_INT:
2356 if (this_neg)
2357 {
2358 ops[i].op = neg_const_int (mode, this_op);
2359 ops[i].neg = 0;
2360 changed = 1;
2361 }
2362 break;
2363
2364 default:
2365 break;
2366 }
2367 }
2368 }
2369 while (changed);
2370
2371 /* If we only have two operands, we can't do anything. */
2372 if (n_ops <= 2 && !force)
2373 return NULL_RTX;
2374
2375 /* Count the number of CONSTs we didn't split above. */
2376 for (i = 0; i < n_ops; i++)
2377 if (GET_CODE (ops[i].op) == CONST)
2378 input_consts++;
2379
2380 /* Now simplify each pair of operands until nothing changes. The first
2381 time through just simplify constants against each other. */
2382
2383 first = 1;
2384 do
2385 {
2386 changed = first;
2387
2388 for (i = 0; i < n_ops - 1; i++)
2389 for (j = i + 1; j < n_ops; j++)
2390 {
2391 rtx lhs = ops[i].op, rhs = ops[j].op;
2392 int lneg = ops[i].neg, rneg = ops[j].neg;
2393
2394 if (lhs != 0 && rhs != 0
2395 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2396 {
2397 enum rtx_code ncode = PLUS;
2398
2399 if (lneg != rneg)
2400 {
2401 ncode = MINUS;
2402 if (lneg)
2403 tem = lhs, lhs = rhs, rhs = tem;
2404 }
2405 else if (swap_commutative_operands_p (lhs, rhs))
2406 tem = lhs, lhs = rhs, rhs = tem;
2407
2408 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2409
2410 /* Reject "simplifications" that just wrap the two
2411 arguments in a CONST. Failure to do so can result
2412 in infinite recursion with simplify_binary_operation
2413 when it calls us to simplify CONST operations. */
2414 if (tem
2415 && ! (GET_CODE (tem) == CONST
2416 && GET_CODE (XEXP (tem, 0)) == ncode
2417 && XEXP (XEXP (tem, 0), 0) == lhs
2418 && XEXP (XEXP (tem, 0), 1) == rhs)
2419 /* Don't allow -x + -1 -> ~x simplifications in the
2420 first pass. This allows us the chance to combine
2421 the -1 with other constants. */
2422 && ! (first
2423 && GET_CODE (tem) == NOT
2424 && XEXP (tem, 0) == rhs))
2425 {
2426 lneg &= rneg;
2427 if (GET_CODE (tem) == NEG)
2428 tem = XEXP (tem, 0), lneg = !lneg;
2429 if (GET_CODE (tem) == CONST_INT && lneg)
2430 tem = neg_const_int (mode, tem), lneg = 0;
2431
2432 ops[i].op = tem;
2433 ops[i].neg = lneg;
2434 ops[j].op = NULL_RTX;
2435 changed = 1;
2436 }
2437 }
2438 }
2439
2440 first = 0;
2441 }
2442 while (changed);
2443
2444 /* Pack all the operands to the lower-numbered entries. */
2445 for (i = 0, j = 0; j < n_ops; j++)
2446 if (ops[j].op)
2447 ops[i++] = ops[j];
2448 n_ops = i;
2449
2450 /* Sort the operations based on swap_commutative_operands_p. */
2451 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2452
2453 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2454 if (n_ops == 2
2455 && GET_CODE (ops[1].op) == CONST_INT
2456 && CONSTANT_P (ops[0].op)
2457 && ops[0].neg)
2458 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2459
2460 /* We suppressed creation of trivial CONST expressions in the
2461 combination loop to avoid recursion. Create one manually now.
2462 The combination loop should have ensured that there is exactly
2463 one CONST_INT, and the sort will have ensured that it is last
2464 in the array and that any other constant will be next-to-last. */
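/* For example (illustrative), an ops[] tail of { (symbol_ref "x"), + }
   { (const_int 4), + } is folded by plus_constant into the single
   operand (const (plus (symbol_ref "x") (const_int 4))).  */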
2465
2466 if (n_ops > 1
2467 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2468 && CONSTANT_P (ops[n_ops - 2].op))
2469 {
2470 rtx value = ops[n_ops - 1].op;
2471 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2472 value = neg_const_int (mode, value);
2473 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2474 n_ops--;
2475 }
2476
2477 /* Count the number of CONSTs that we generated. */
2478 n_consts = 0;
2479 for (i = 0; i < n_ops; i++)
2480 if (GET_CODE (ops[i].op) == CONST)
2481 n_consts++;
2482
2483 /* Give up if we didn't reduce the number of operands we had. Make
2484 sure we count a CONST as two operands. If we have the same
2485 number of operands, but have made more CONSTs than before, this
2486 is also an improvement, so accept it. */
2487 if (!force
2488 && (n_ops + n_consts > input_ops
2489 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2490 return NULL_RTX;
2491
2492 /* Put a non-negated operand first, if possible. */
2493
2494 for (i = 0; i < n_ops && ops[i].neg; i++)
2495 continue;
2496 if (i == n_ops)
2497 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2498 else if (i != 0)
2499 {
2500 tem = ops[0].op;
2501 ops[0] = ops[i];
2502 ops[i].op = tem;
2503 ops[i].neg = 1;
2504 }
2505
2506 /* Now make the result by performing the requested operations. */
2507 result = ops[0].op;
2508 for (i = 1; i < n_ops; i++)
2509 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2510 mode, result, ops[i].op);
2511
2512 return result;
2513 }
2514
2515 /* Like simplify_binary_operation except used for relational operators.
2516 MODE is the mode of the operands, not that of the result. If MODE
2517 is VOIDmode, both operands must also be VOIDmode and we compare the
2518 operands in "infinite precision".
2519
2520 If no simplification is possible, this function returns zero.
2521 Otherwise, it returns either const_true_rtx or const0_rtx. */
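/* Usage sketch (illustrative): simplify_const_relational_operation
   (LTU, SImode, const1_rtx, GEN_INT (2)) folds to const_true_rtx,
   while comparing two unrelated pseudo registers returns 0 because
   nothing is known about their values.  */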
2522
2523 rtx
2524 simplify_const_relational_operation (enum rtx_code code,
2525 enum machine_mode mode,
2526 rtx op0, rtx op1)
2527 {
2528 int equal, op0lt, op0ltu, op1lt, op1ltu;
2529 rtx tem;
2530 rtx trueop0;
2531 rtx trueop1;
2532
2533 if (mode == VOIDmode
2534 && (GET_MODE (op0) != VOIDmode
2535 || GET_MODE (op1) != VOIDmode))
2536 abort ();
2537
2538 /* If op0 is a compare, extract the comparison arguments from it. */
2539 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2540 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2541
2542 /* We can't simplify MODE_CC values since we don't know what the
2543 actual comparison is. */
2544 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2545 return 0;
2546
2547 /* Make sure the constant is second. */
2548 if (swap_commutative_operands_p (op0, op1))
2549 {
2550 tem = op0, op0 = op1, op1 = tem;
2551 code = swap_condition (code);
2552 }
2553
2554 trueop0 = avoid_constant_pool_reference (op0);
2555 trueop1 = avoid_constant_pool_reference (op1);
2556
2557 /* For integer comparisons of A and B maybe we can simplify A - B and can
2558 then simplify a comparison of that with zero. If A and B are both either
2559 a register or a CONST_INT, this can't help; testing for these cases will
2560 prevent infinite recursion here and speed things up.
2561
2562 If CODE is an unsigned comparison, then we can never do this optimization,
2563 because it gives an incorrect result if the subtraction wraps around zero.
2564 ANSI C defines unsigned operations such that they never overflow, and
2565 thus such cases cannot be ignored; nor can we do it even for
2566 signed comparisons in languages such as Java, so test flag_wrapv. */
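/* Example of the unsigned hazard (illustrative): with 32-bit operands,
   3 LTU 0xffffffff is true, but (minus 3 0xffffffff) wraps to 4, and
   4 LT 0 is false, so rewriting the comparison against zero would give
   the wrong answer.  */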
2567
2568 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2569 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2570 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2571 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2572 /* We cannot do this for == or != if tem is a nonzero address. */
2573 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2574 && code != GTU && code != GEU && code != LTU && code != LEU)
2575 return simplify_const_relational_operation (signed_condition (code),
2576 mode, tem, const0_rtx);
2577
2578 if (flag_unsafe_math_optimizations && code == ORDERED)
2579 return const_true_rtx;
2580
2581 if (flag_unsafe_math_optimizations && code == UNORDERED)
2582 return const0_rtx;
2583
2584 /* For modes without NaNs, if the two operands are equal, we know the
2585 result except if they have side-effects. */
2586 if (! HONOR_NANS (GET_MODE (trueop0))
2587 && rtx_equal_p (trueop0, trueop1)
2588 && ! side_effects_p (trueop0))
2589 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2590
2591 /* If the operands are floating-point constants, see if we can fold
2592 the result. */
2593 else if (GET_CODE (trueop0) == CONST_DOUBLE
2594 && GET_CODE (trueop1) == CONST_DOUBLE
2595 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2596 {
2597 REAL_VALUE_TYPE d0, d1;
2598
2599 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2600 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2601
2602 /* Comparisons are unordered iff at least one of the values is NaN. */
2603 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2604 switch (code)
2605 {
2606 case UNEQ:
2607 case UNLT:
2608 case UNGT:
2609 case UNLE:
2610 case UNGE:
2611 case NE:
2612 case UNORDERED:
2613 return const_true_rtx;
2614 case EQ:
2615 case LT:
2616 case GT:
2617 case LE:
2618 case GE:
2619 case LTGT:
2620 case ORDERED:
2621 return const0_rtx;
2622 default:
2623 return 0;
2624 }
2625
2626 equal = REAL_VALUES_EQUAL (d0, d1);
2627 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2628 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2629 }
2630
2631 /* Otherwise, see if the operands are both integers. */
2632 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2633 && (GET_CODE (trueop0) == CONST_DOUBLE
2634 || GET_CODE (trueop0) == CONST_INT)
2635 && (GET_CODE (trueop1) == CONST_DOUBLE
2636 || GET_CODE (trueop1) == CONST_INT))
2637 {
2638 int width = GET_MODE_BITSIZE (mode);
2639 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2640 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2641
2642 /* Get the two words comprising each integer constant. */
2643 if (GET_CODE (trueop0) == CONST_DOUBLE)
2644 {
2645 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2646 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2647 }
2648 else
2649 {
2650 l0u = l0s = INTVAL (trueop0);
2651 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2652 }
2653
2654 if (GET_CODE (trueop1) == CONST_DOUBLE)
2655 {
2656 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2657 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2658 }
2659 else
2660 {
2661 l1u = l1s = INTVAL (trueop1);
2662 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2663 }
2664
2665 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2666 we have to sign or zero-extend the values. */
2667 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2668 {
2669 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2670 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2671
2672 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2673 l0s |= ((HOST_WIDE_INT) (-1) << width);
2674
2675 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2676 l1s |= ((HOST_WIDE_INT) (-1) << width);
2677 }
2678 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2679 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2680
2681 equal = (h0u == h1u && l0u == l1u);
2682 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2683 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2684 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2685 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2686 }
2687
2688 /* Otherwise, there are some code-specific tests we can make. */
2689 else
2690 {
2691 switch (code)
2692 {
2693 case EQ:
2694 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2695 return const0_rtx;
2696 break;
2697
2698 case NE:
2699 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2700 return const_true_rtx;
2701 break;
2702
2703 case GEU:
2704 /* Unsigned values are never negative. */
2705 if (trueop1 == const0_rtx)
2706 return const_true_rtx;
2707 break;
2708
2709 case LTU:
2710 if (trueop1 == const0_rtx)
2711 return const0_rtx;
2712 break;
2713
2714 case LEU:
2715 /* Unsigned values are never greater than the largest
2716 unsigned value. */
2717 if (GET_CODE (trueop1) == CONST_INT
2718 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2719 && INTEGRAL_MODE_P (mode))
2720 return const_true_rtx;
2721 break;
2722
2723 case GTU:
2724 if (GET_CODE (trueop1) == CONST_INT
2725 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2726 && INTEGRAL_MODE_P (mode))
2727 return const0_rtx;
2728 break;
2729
2730 case LT:
2731 /* Optimize abs(x) < 0.0. */
2732 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2733 {
2734 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2735 : trueop0;
2736 if (GET_CODE (tem) == ABS)
2737 return const0_rtx;
2738 }
2739 break;
2740
2741 case GE:
2742 /* Optimize abs(x) >= 0.0. */
2743 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2744 {
2745 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2746 : trueop0;
2747 if (GET_CODE (tem) == ABS)
2748 return const_true_rtx;
2749 }
2750 break;
2751
2752 case UNGE:
2753 /* Optimize ! (abs(x) < 0.0). */
2754 if (trueop1 == CONST0_RTX (mode))
2755 {
2756 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2757 : trueop0;
2758 if (GET_CODE (tem) == ABS)
2759 return const_true_rtx;
2760 }
2761 break;
2762
2763 default:
2764 break;
2765 }
2766
2767 return 0;
2768 }
2769
2770 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2771 as appropriate. */
2772 switch (code)
2773 {
2774 case EQ:
2775 case UNEQ:
2776 return equal ? const_true_rtx : const0_rtx;
2777 case NE:
2778 case LTGT:
2779 return ! equal ? const_true_rtx : const0_rtx;
2780 case LT:
2781 case UNLT:
2782 return op0lt ? const_true_rtx : const0_rtx;
2783 case GT:
2784 case UNGT:
2785 return op1lt ? const_true_rtx : const0_rtx;
2786 case LTU:
2787 return op0ltu ? const_true_rtx : const0_rtx;
2788 case GTU:
2789 return op1ltu ? const_true_rtx : const0_rtx;
2790 case LE:
2791 case UNLE:
2792 return equal || op0lt ? const_true_rtx : const0_rtx;
2793 case GE:
2794 case UNGE:
2795 return equal || op1lt ? const_true_rtx : const0_rtx;
2796 case LEU:
2797 return equal || op0ltu ? const_true_rtx : const0_rtx;
2798 case GEU:
2799 return equal || op1ltu ? const_true_rtx : const0_rtx;
2800 case ORDERED:
2801 return const_true_rtx;
2802 case UNORDERED:
2803 return const0_rtx;
2804 default:
2805 abort ();
2806 }
2807 }
2808
2809 /* Like simplify_binary_operation except used for relational operators.
2810 MODE is the mode of the result, and CMP_MODE is the mode of the operands.
2811 If CMP_MODE is VOIDmode, both operands must also be VOIDmode and we
2812 compare the operands in "infinite precision". */
2813
2814 rtx
2815 simplify_relational_operation (enum rtx_code code,
2816 enum machine_mode mode ATTRIBUTE_UNUSED,
2817 enum machine_mode cmp_mode, rtx op0, rtx op1)
2818 {
2819 rtx tmp;
2820
2821 tmp = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2822 if (tmp)
2823 {
2824 #ifdef FLOAT_STORE_FLAG_VALUE
2825 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2826 {
2827 if (tmp == const0_rtx)
2828 return CONST0_RTX (mode);
2829 return CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
2830 mode);
2831 }
2832 #endif
2833 return tmp;
2834 }
2835
2836 return NULL_RTX;
2837 }
2838 \f
2839 /* Simplify CODE, an operation with result mode MODE and three operands,
2840 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2841 a constant. Return 0 if no simplification is possible. */
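/* Usage sketch (illustrative, assumes !BITS_BIG_ENDIAN):
   simplify_ternary_operation (ZERO_EXTRACT, SImode, SImode,
   GEN_INT (0xf0), GEN_INT (4), GEN_INT (4)) extracts the 4-bit field
   starting at bit 4 of 0xf0 and folds to (const_int 15).  */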
2842
2843 rtx
2844 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2845 enum machine_mode op0_mode, rtx op0, rtx op1,
2846 rtx op2)
2847 {
2848 unsigned int width = GET_MODE_BITSIZE (mode);
2849
2850 /* VOIDmode means "infinite" precision. */
2851 if (width == 0)
2852 width = HOST_BITS_PER_WIDE_INT;
2853
2854 switch (code)
2855 {
2856 case SIGN_EXTRACT:
2857 case ZERO_EXTRACT:
2858 if (GET_CODE (op0) == CONST_INT
2859 && GET_CODE (op1) == CONST_INT
2860 && GET_CODE (op2) == CONST_INT
2861 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2862 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2863 {
2864 /* Extracting a bit-field from a constant. */
2865 HOST_WIDE_INT val = INTVAL (op0);
2866
2867 if (BITS_BIG_ENDIAN)
2868 val >>= (GET_MODE_BITSIZE (op0_mode)
2869 - INTVAL (op2) - INTVAL (op1));
2870 else
2871 val >>= INTVAL (op2);
2872
2873 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2874 {
2875 /* First zero-extend. */
2876 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2877 /* If desired, propagate sign bit. */
2878 if (code == SIGN_EXTRACT
2879 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2880 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2881 }
2882
2883 /* Clear the bits that don't belong in our mode,
2884 unless they and our sign bit are all one.
2885 So we get either a reasonable negative value or a reasonable
2886 unsigned value for this mode. */
2887 if (width < HOST_BITS_PER_WIDE_INT
2888 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2889 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2890 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2891
2892 return GEN_INT (val);
2893 }
2894 break;
2895
2896 case IF_THEN_ELSE:
2897 if (GET_CODE (op0) == CONST_INT)
2898 return op0 != const0_rtx ? op1 : op2;
2899
2900 /* Convert c ? a : a into "a". */
2901 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2902 return op1;
2903
2904 /* Convert a != b ? a : b into "a". */
2905 if (GET_CODE (op0) == NE
2906 && ! side_effects_p (op0)
2907 && ! HONOR_NANS (mode)
2908 && ! HONOR_SIGNED_ZEROS (mode)
2909 && ((rtx_equal_p (XEXP (op0, 0), op1)
2910 && rtx_equal_p (XEXP (op0, 1), op2))
2911 || (rtx_equal_p (XEXP (op0, 0), op2)
2912 && rtx_equal_p (XEXP (op0, 1), op1))))
2913 return op1;
2914
2915 /* Convert a == b ? a : b into "b". */
2916 if (GET_CODE (op0) == EQ
2917 && ! side_effects_p (op0)
2918 && ! HONOR_NANS (mode)
2919 && ! HONOR_SIGNED_ZEROS (mode)
2920 && ((rtx_equal_p (XEXP (op0, 0), op1)
2921 && rtx_equal_p (XEXP (op0, 1), op2))
2922 || (rtx_equal_p (XEXP (op0, 0), op2)
2923 && rtx_equal_p (XEXP (op0, 1), op1))))
2924 return op2;
2925
2926 if (COMPARISON_P (op0) && ! side_effects_p (op0))
2927 {
2928 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2929 ? GET_MODE (XEXP (op0, 1))
2930 : GET_MODE (XEXP (op0, 0)));
2931 rtx temp;
2932 if (cmp_mode == VOIDmode)
2933 cmp_mode = op0_mode;
2934 temp = simplify_const_relational_operation (GET_CODE (op0),
2935 cmp_mode,
2936 XEXP (op0, 0),
2937 XEXP (op0, 1));
2938
2939 /* See if any simplifications were possible. */
2940 if (temp == const0_rtx)
2941 return op2;
2942 else if (temp == const_true_rtx)
2943 return op1;
2944 else if (temp)
2945 abort ();
2946
2947 /* Look for useful constants (STORE_FLAG_VALUE and zero) in op1 and op2. */
2948 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2949 {
2950 HOST_WIDE_INT t = INTVAL (op1);
2951 HOST_WIDE_INT f = INTVAL (op2);
2952
2953 if (t == STORE_FLAG_VALUE && f == 0)
2954 code = GET_CODE (op0);
2955 else if (t == 0 && f == STORE_FLAG_VALUE)
2956 {
2957 enum rtx_code tmp;
2958 tmp = reversed_comparison_code (op0, NULL_RTX);
2959 if (tmp == UNKNOWN)
2960 break;
2961 code = tmp;
2962 }
2963 else
2964 break;
2965
2966 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2967 }
2968 }
2969 break;
2970
2971 case VEC_MERGE:
2972 if (GET_MODE (op0) != mode
2973 || GET_MODE (op1) != mode
2974 || !VECTOR_MODE_P (mode))
2975 abort ();
2976 op2 = avoid_constant_pool_reference (op2);
2977 if (GET_CODE (op2) == CONST_INT)
2978 {
2979 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2980 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2981 int mask = (1 << n_elts) - 1;
2982
2983 if (!(INTVAL (op2) & mask))
2984 return op1;
2985 if ((INTVAL (op2) & mask) == mask)
2986 return op0;
2987
2988 op0 = avoid_constant_pool_reference (op0);
2989 op1 = avoid_constant_pool_reference (op1);
2990 if (GET_CODE (op0) == CONST_VECTOR
2991 && GET_CODE (op1) == CONST_VECTOR)
2992 {
2993 rtvec v = rtvec_alloc (n_elts);
2994 unsigned int i;
2995
2996 for (i = 0; i < n_elts; i++)
2997 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2998 ? CONST_VECTOR_ELT (op0, i)
2999 : CONST_VECTOR_ELT (op1, i));
3000 return gen_rtx_CONST_VECTOR (mode, v);
3001 }
3002 }
3003 break;
3004
3005 default:
3006 abort ();
3007 }
3008
3009 return 0;
3010 }
3011
3012 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3013 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3014
3015 Works by unpacking OP into a collection of 8-bit values
3016 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3017 and then repacking them again for OUTERMODE. */
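/* Usage sketch (illustrative, assumes a little-endian target):
   simplify_immed_subreg (QImode, GEN_INT (0x1234), HImode, 0) unpacks
   0x1234 into bytes, renumbers BYTE so that byte 0 is the least
   significant byte, and repacks the selected byte as (const_int 0x34).  */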
3018
3019 static rtx
3020 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3021 enum machine_mode innermode, unsigned int byte)
3022 {
3023 /* We support up to 512-bit values (for V8DFmode). */
3024 enum {
3025 max_bitsize = 512,
3026 value_bit = 8,
3027 value_mask = (1 << value_bit) - 1
3028 };
3029 unsigned char value[max_bitsize / value_bit];
3030 int value_start;
3031 int i;
3032 int elem;
3033
3034 int num_elem;
3035 rtx * elems;
3036 int elem_bitsize;
3037 rtx result_s;
3038 rtvec result_v = NULL;
3039 enum mode_class outer_class;
3040 enum machine_mode outer_submode;
3041
3042 /* Some ports misuse CCmode. */
3043 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3044 return op;
3045
3046 /* Unpack the value. */
3047
3048 if (GET_CODE (op) == CONST_VECTOR)
3049 {
3050 num_elem = CONST_VECTOR_NUNITS (op);
3051 elems = &CONST_VECTOR_ELT (op, 0);
3052 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3053 }
3054 else
3055 {
3056 num_elem = 1;
3057 elems = &op;
3058 elem_bitsize = max_bitsize;
3059 }
3060
3061 if (BITS_PER_UNIT % value_bit != 0)
3062 abort (); /* Too complicated; reducing value_bit may help. */
3063 if (elem_bitsize % BITS_PER_UNIT != 0)
3064 abort (); /* I don't know how to handle endianness of sub-units. */
3065
3066 for (elem = 0; elem < num_elem; elem++)
3067 {
3068 unsigned char * vp;
3069 rtx el = elems[elem];
3070
3071 /* Vectors are kept in target memory order. (This is probably
3072 a mistake.) */
3073 {
3074 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3075 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3076 / BITS_PER_UNIT);
3077 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3078 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3079 unsigned bytele = (subword_byte % UNITS_PER_WORD
3080 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3081 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3082 }
3083
3084 switch (GET_CODE (el))
3085 {
3086 case CONST_INT:
3087 for (i = 0;
3088 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3089 i += value_bit)
3090 *vp++ = INTVAL (el) >> i;
3091 /* CONST_INTs are always logically sign-extended. */
3092 for (; i < elem_bitsize; i += value_bit)
3093 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3094 break;
3095
3096 case CONST_DOUBLE:
3097 if (GET_MODE (el) == VOIDmode)
3098 {
3099 /* If this triggers, someone should have generated a
3100 CONST_INT instead. */
3101 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3102 abort ();
3103
3104 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3105 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3106 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3107 {
3108 *vp++
3109 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3110 i += value_bit;
3111 }
3112 /* It shouldn't matter what's done here, so fill it with
3113 zero. */
3114 for (; i < max_bitsize; i += value_bit)
3115 *vp++ = 0;
3116 }
3117 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3118 {
3119 long tmp[max_bitsize / 32];
3120 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3121
3122 if (bitsize > elem_bitsize)
3123 abort ();
3124 if (bitsize % value_bit != 0)
3125 abort ();
3126
3127 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3128 GET_MODE (el));
3129
3130 /* real_to_target produces its result in words affected by
3131 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3132 and use WORDS_BIG_ENDIAN instead; see the documentation
3133 of SUBREG in rtl.texi. */
3134 for (i = 0; i < bitsize; i += value_bit)
3135 {
3136 int ibase;
3137 if (WORDS_BIG_ENDIAN)
3138 ibase = bitsize - 1 - i;
3139 else
3140 ibase = i;
3141 *vp++ = tmp[ibase / 32] >> i % 32;
3142 }
3143
3144 /* It shouldn't matter what's done here, so fill it with
3145 zero. */
3146 for (; i < elem_bitsize; i += value_bit)
3147 *vp++ = 0;
3148 }
3149 else
3150 abort ();
3151 break;
3152
3153 default:
3154 abort ();
3155 }
3156 }
3157
3158 /* Now, pick the right byte to start with. */
3159 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3160 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3161 will already have offset 0. */
3162 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3163 {
3164 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3165 - byte);
3166 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3167 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3168 byte = (subword_byte % UNITS_PER_WORD
3169 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3170 }
3171
3172 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3173 so if it's become negative it will instead be very large.) */
3174 if (byte >= GET_MODE_SIZE (innermode))
3175 abort ();
3176
3177 /* Convert from bytes to chunks of size value_bit. */
3178 value_start = byte * (BITS_PER_UNIT / value_bit);
3179
3180 /* Re-pack the value. */
3181
3182 if (VECTOR_MODE_P (outermode))
3183 {
3184 num_elem = GET_MODE_NUNITS (outermode);
3185 result_v = rtvec_alloc (num_elem);
3186 elems = &RTVEC_ELT (result_v, 0);
3187 outer_submode = GET_MODE_INNER (outermode);
3188 }
3189 else
3190 {
3191 num_elem = 1;
3192 elems = &result_s;
3193 outer_submode = outermode;
3194 }
3195
3196 outer_class = GET_MODE_CLASS (outer_submode);
3197 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3198
3199 if (elem_bitsize % value_bit != 0)
3200 abort ();
3201 if (elem_bitsize + value_start * value_bit > max_bitsize)
3202 abort ();
3203
3204 for (elem = 0; elem < num_elem; elem++)
3205 {
3206 unsigned char *vp;
3207
3208 /* Vectors are stored in target memory order. (This is probably
3209 a mistake.) */
3210 {
3211 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3212 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3213 / BITS_PER_UNIT);
3214 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3215 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3216 unsigned bytele = (subword_byte % UNITS_PER_WORD
3217 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3218 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3219 }
3220
3221 switch (outer_class)
3222 {
3223 case MODE_INT:
3224 case MODE_PARTIAL_INT:
3225 {
3226 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3227
3228 for (i = 0;
3229 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3230 i += value_bit)
3231 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3232 for (; i < elem_bitsize; i += value_bit)
3233 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3234 << (i - HOST_BITS_PER_WIDE_INT));
3235
3236 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3237 know why. */
3238 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3239 elems[elem] = gen_int_mode (lo, outer_submode);
3240 else
3241 elems[elem] = immed_double_const (lo, hi, outer_submode);
3242 }
3243 break;
3244
3245 case MODE_FLOAT:
3246 {
3247 REAL_VALUE_TYPE r;
3248 long tmp[max_bitsize / 32];
3249
3250 /* real_from_target wants its input in words affected by
3251 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3252 and use WORDS_BIG_ENDIAN instead; see the documentation
3253 of SUBREG in rtl.texi. */
3254 for (i = 0; i < max_bitsize / 32; i++)
3255 tmp[i] = 0;
3256 for (i = 0; i < elem_bitsize; i += value_bit)
3257 {
3258 int ibase;
3259 if (WORDS_BIG_ENDIAN)
3260 ibase = elem_bitsize - 1 - i;
3261 else
3262 ibase = i;
3263 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3264 }
3265
3266 real_from_target (&r, tmp, outer_submode);
3267 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3268 }
3269 break;
3270
3271 default:
3272 abort ();
3273 }
3274 }
3275 if (VECTOR_MODE_P (outermode))
3276 return gen_rtx_CONST_VECTOR (outermode, result_v);
3277 else
3278 return result_s;
3279 }
3280
3281 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3282 Return 0 if no simplifications are possible. */
3283 rtx
3284 simplify_subreg (enum machine_mode outermode, rtx op,
3285 enum machine_mode innermode, unsigned int byte)
3286 {
3287 /* Little bit of sanity checking. */
3288 if (innermode == VOIDmode || outermode == VOIDmode
3289 || innermode == BLKmode || outermode == BLKmode)
3290 abort ();
3291
3292 if (GET_MODE (op) != innermode
3293 && GET_MODE (op) != VOIDmode)
3294 abort ();
3295
3296 if (byte % GET_MODE_SIZE (outermode)
3297 || byte >= GET_MODE_SIZE (innermode))
3298 abort ();
3299
3300 if (outermode == innermode && !byte)
3301 return op;
3302
3303 if (GET_CODE (op) == CONST_INT
3304 || GET_CODE (op) == CONST_DOUBLE
3305 || GET_CODE (op) == CONST_VECTOR)
3306 return simplify_immed_subreg (outermode, op, innermode, byte);
3307
3308 /* Changing mode twice with SUBREG => just change it once,
3309 or not at all if changing back to op's starting mode. */
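/* For example (illustrative), (subreg:QI (subreg:HI (reg:SI R) 0) 0)
   collapses to (subreg:QI (reg:SI R) 0), and a subreg that changes
   back to the innermost register's own mode at offset 0 simply
   returns that register.  */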
3310 if (GET_CODE (op) == SUBREG)
3311 {
3312 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3313 int final_offset = byte + SUBREG_BYTE (op);
3314 rtx new;
3315
3316 if (outermode == innermostmode
3317 && byte == 0 && SUBREG_BYTE (op) == 0)
3318 return SUBREG_REG (op);
3319
3320 /* The SUBREG_BYTE represents the offset, as if the value were stored
3321 in memory. An irritating exception is the paradoxical subreg, where
3322 we define SUBREG_BYTE to be 0. On big-endian machines, this
3323 value should be negative. For a moment, undo this exception. */
3324 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3325 {
3326 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3327 if (WORDS_BIG_ENDIAN)
3328 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3329 if (BYTES_BIG_ENDIAN)
3330 final_offset += difference % UNITS_PER_WORD;
3331 }
3332 if (SUBREG_BYTE (op) == 0
3333 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3334 {
3335 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3336 if (WORDS_BIG_ENDIAN)
3337 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3338 if (BYTES_BIG_ENDIAN)
3339 final_offset += difference % UNITS_PER_WORD;
3340 }
3341
3342 /* See whether resulting subreg will be paradoxical. */
3343 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3344 {
3345 /* In nonparadoxical subregs we can't handle negative offsets. */
3346 if (final_offset < 0)
3347 return NULL_RTX;
3348 /* Bail out in case resulting subreg would be incorrect. */
3349 if (final_offset % GET_MODE_SIZE (outermode)
3350 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3351 return NULL_RTX;
3352 }
3353 else
3354 {
3355 int offset = 0;
3356 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3357
3358 /* In a paradoxical subreg, see if we are still looking at the lower part.
3359 If so, our SUBREG_BYTE will be 0. */
3360 if (WORDS_BIG_ENDIAN)
3361 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3362 if (BYTES_BIG_ENDIAN)
3363 offset += difference % UNITS_PER_WORD;
3364 if (offset == final_offset)
3365 final_offset = 0;
3366 else
3367 return NULL_RTX;
3368 }
3369
3370 /* Recurse for further possible simplifications. */
3371 new = simplify_subreg (outermode, SUBREG_REG (op),
3372 GET_MODE (SUBREG_REG (op)),
3373 final_offset);
3374 if (new)
3375 return new;
3376 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3377 }
3378
3379 /* SUBREG of a hard register => just change the register number
3380 and/or mode. If the hard register is not valid in that mode,
3381 suppress this simplification. If the hard register is the stack,
3382 frame, or argument pointer, leave this as a SUBREG. */
3383
3384 if (REG_P (op)
3385 && (! REG_FUNCTION_VALUE_P (op)
3386 || ! rtx_equal_function_value_matters)
3387 && REGNO (op) < FIRST_PSEUDO_REGISTER
3388 #ifdef CANNOT_CHANGE_MODE_CLASS
3389 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3390 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3391 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3392 #endif
3393 && ((reload_completed && !frame_pointer_needed)
3394 || (REGNO (op) != FRAME_POINTER_REGNUM
3395 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3396 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3397 #endif
3398 ))
3399 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3400 && REGNO (op) != ARG_POINTER_REGNUM
3401 #endif
3402 && REGNO (op) != STACK_POINTER_REGNUM
3403 && subreg_offset_representable_p (REGNO (op), innermode,
3404 byte, outermode))
3405 {
3406 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3407 int final_regno = subreg_hard_regno (tem, 0);
3408
3409 /* ??? We do allow it if the current REG is not valid for
3410 its mode. This is a kludge to work around how float/complex
3411 arguments are passed on 32-bit SPARC and should be fixed. */
3412 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3413 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3414 {
3415 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3416
3417 /* Propagate the original regno. We don't have any way to specify
3418 the offset inside the original regno, so do so only for the lowpart.
3419 The information is used only by alias analysis, which cannot
3420 grok partial registers anyway. */
3421
3422 if (subreg_lowpart_offset (outermode, innermode) == byte)
3423 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3424 return x;
3425 }
3426 }
3427
3428 /* If we have a SUBREG of a register that we are replacing and we are
3429 replacing it with a MEM, make a new MEM and try replacing the
3430 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3431 or if we would be widening it. */
3432
3433 if (GET_CODE (op) == MEM
3434 && ! mode_dependent_address_p (XEXP (op, 0))
3435 /* Allow splitting of volatile memory references in case we don't
3436 have an instruction to move the whole thing. */
3437 && (! MEM_VOLATILE_P (op)
3438 || ! have_insn_for (SET, innermode))
3439 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3440 return adjust_address_nv (op, outermode, byte);
3441
3442 /* Handle complex values represented as CONCAT
3443 of real and imaginary part. */
3444 if (GET_CODE (op) == CONCAT)
3445 {
3446 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3447 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3448 unsigned int final_offset;
3449 rtx res;
3450
3451 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3452 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3453 if (res)
3454 return res;
3455 /* We can at least simplify it by referring directly to the
3456 relevant part. */
3457 return gen_rtx_SUBREG (outermode, part, final_offset);
3458 }
3459
3460 /* Optimize SUBREG truncations of zero and sign extended values. */
3461 if ((GET_CODE (op) == ZERO_EXTEND
3462 || GET_CODE (op) == SIGN_EXTEND)
3463 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3464 {
3465 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3466
3467 /* If we're requesting the lowpart of a zero or sign extension,
3468 there are three possibilities. If the outermode is the same
3469 as the origmode, we can omit both the extension and the subreg.
3470 If the outermode is not larger than the origmode, we can apply
3471 the truncation without the extension. Finally, if the outermode
3472 is larger than the origmode, but both are integer modes, we
3473 can just extend to the appropriate mode. */
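/* For instance (illustrative, little-endian byte numbering), the
   lowpart (subreg:HI (zero_extend:SI (reg:HI X)) 0) is just (reg:HI X),
   because the outer mode matches the mode of the extended value.  */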
3474 if (bitpos == 0)
3475 {
3476 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3477 if (outermode == origmode)
3478 return XEXP (op, 0);
3479 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3480 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3481 subreg_lowpart_offset (outermode,
3482 origmode));
3483 if (SCALAR_INT_MODE_P (outermode))
3484 return simplify_gen_unary (GET_CODE (op), outermode,
3485 XEXP (op, 0), origmode);
3486 }
3487
3488 /* A SUBREG resulting from a zero extension may fold to zero if
3489 it extracts higher bits than the ZERO_EXTEND's source provides. */
3490 if (GET_CODE (op) == ZERO_EXTEND
3491 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3492 return CONST0_RTX (outermode);
3493 }
3494
3495 return NULL_RTX;
3496 }
3497
3498 /* Make a SUBREG operation or equivalent if it folds. */
3499
3500 rtx
3501 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3502 enum machine_mode innermode, unsigned int byte)
3503 {
3504 rtx new;
3505 /* Little bit of sanity checking. */
3506 if (innermode == VOIDmode || outermode == VOIDmode
3507 || innermode == BLKmode || outermode == BLKmode)
3508 abort ();
3509
3510 if (GET_MODE (op) != innermode
3511 && GET_MODE (op) != VOIDmode)
3512 abort ();
3513
3514 if (byte % GET_MODE_SIZE (outermode)
3515 || byte >= GET_MODE_SIZE (innermode))
3516 abort ();
3517
3518 if (GET_CODE (op) == QUEUED)
3519 return NULL_RTX;
3520
3521 new = simplify_subreg (outermode, op, innermode, byte);
3522 if (new)
3523 return new;
3524
3525 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3526 return NULL_RTX;
3527
3528 return gen_rtx_SUBREG (outermode, op, byte);
3529 }
3530 /* Simplify X, an rtx expression.
3531
3532 Return the simplified expression or NULL if no simplifications
3533 were possible.
3534
3535 This is the preferred entry point into the simplification routines;
3536 however, we still allow passes to call the more specific routines.
3537
3538 Right now GCC has three (yes, three) major bodies of RTL simplification
3539 code that need to be unified.
3540
3541 1. fold_rtx in cse.c. This code uses various CSE specific
3542 information to aid in RTL simplification.
3543
3544 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3545 it uses combine specific information to aid in RTL
3546 simplification.
3547
3548 3. The routines in this file.
3549
3550
3551 Long term we want to only have one body of simplification code; to
3552 get to that state I recommend the following steps:
3553
3554 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3555 which do not depend on pass-specific state into these routines.
3556
3557 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3558 use this routine whenever possible.
3559
3560 3. Allow for pass dependent state to be provided to these
3561 routines and add simplifications based on the pass dependent
3562 state. Remove code from cse.c & combine.c that becomes
3563 redundant/dead.
3564
3565 It will take time, but ultimately the compiler will be easier to
3566 maintain and improve. It's totally silly that when we add a
3567 simplification it needs to be added to 4 places (3 for RTL
3568 simplification and 1 for tree simplification). */
3569
3570 rtx
3571 simplify_rtx (rtx x)
3572 {
3573 enum rtx_code code = GET_CODE (x);
3574 enum machine_mode mode = GET_MODE (x);
3575 rtx temp;
3576
3577 switch (GET_RTX_CLASS (code))
3578 {
3579 case RTX_UNARY:
3580 return simplify_unary_operation (code, mode,
3581 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3582 case RTX_COMM_ARITH:
3583 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3584 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3585
3586 /* Fall through.... */
3587
3588 case RTX_BIN_ARITH:
3589 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3590
3591 case RTX_TERNARY:
3592 case RTX_BITFIELD_OPS:
3593 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3594 XEXP (x, 0), XEXP (x, 1),
3595 XEXP (x, 2));
3596
3597 case RTX_COMPARE:
3598 case RTX_COMM_COMPARE:
3599 temp = simplify_relational_operation (code, mode,
3600 ((GET_MODE (XEXP (x, 0))
3601 != VOIDmode)
3602 ? GET_MODE (XEXP (x, 0))
3603 : GET_MODE (XEXP (x, 1))),
3604 XEXP (x, 0), XEXP (x, 1));
3605 return temp;
3606
3607 case RTX_EXTRA:
3608 if (code == SUBREG)
3609 return simplify_gen_subreg (mode, SUBREG_REG (x),
3610 GET_MODE (SUBREG_REG (x)),
3611 SUBREG_BYTE (x));
3612 if (code == CONSTANT_P_RTX)
3613 {
3614 if (CONSTANT_P (XEXP (x, 0)))
3615 return const1_rtx;
3616 }
3617 break;
3618
3619 case RTX_OBJ:
3620 if (code == LO_SUM)
3621 {
3622 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3623 if (GET_CODE (XEXP (x, 0)) == HIGH
3624 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3625 return XEXP (x, 1);
3626 }
3627 break;
3628
3629 default:
3630 break;
3631 }
3632 return NULL;
3633 }