1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include <setjmp.h>
26
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "ggc.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
46
47      ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48      a header file so that their definitions can be shared with the
49      equivalent code in cse.c.  Until then, do not change these
50      macros without also changing the copy in cse.c.  */
51
52 #define FIXED_BASE_PLUS_P(X) \
53 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
54 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
55 || (X) == virtual_stack_vars_rtx \
56 || (X) == virtual_incoming_args_rtx \
57 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
58 && (XEXP (X, 0) == frame_pointer_rtx \
59 || XEXP (X, 0) == hard_frame_pointer_rtx \
60 	       || (XEXP (X, 0) == arg_pointer_rtx			\
61 && fixed_regs[ARG_POINTER_REGNUM]) \
62 || XEXP (X, 0) == virtual_stack_vars_rtx \
63 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
64 || GET_CODE (X) == ADDRESSOF)
65
66 /* Similar, but also allows reference to the stack pointer.
67
68 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
71
72 #define NONZERO_BASE_PLUS_P(X) \
73 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
74 || (X) == virtual_stack_vars_rtx \
75 || (X) == virtual_incoming_args_rtx \
76 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
77 && (XEXP (X, 0) == frame_pointer_rtx \
78 || XEXP (X, 0) == hard_frame_pointer_rtx \
79 	       || (XEXP (X, 0) == arg_pointer_rtx			\
80 && fixed_regs[ARG_POINTER_REGNUM]) \
81 || XEXP (X, 0) == virtual_stack_vars_rtx \
82 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
83 || (X) == stack_pointer_rtx \
84 || (X) == virtual_stack_dynamic_rtx \
85 || (X) == virtual_outgoing_args_rtx \
86 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
87 && (XEXP (X, 0) == stack_pointer_rtx \
88 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
89 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
90 || GET_CODE (X) == ADDRESSOF)
91
92 /* Much code operates on (low, high) pairs; the low value is an
93 unsigned wide int, the high value a signed wide int. We
94 occasionally need to sign extend from low to high as if low were a
95 signed wide int. */
96 #define HWI_SIGN_EXTEND(low) \
97 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
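
/* Illustrative example (not from the original source): on a host where
   HOST_WIDE_INT is 32 bits, the DImode constant -5 is carried as
   low = 0xfffffffb with high = HWI_SIGN_EXTEND (low) = -1, whereas the
   unsigned value 0xfffffffb on its own would pair with high = 0.  */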
98
99 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
100 enum machine_mode, rtx, rtx));
101 static void check_fold_consts PARAMS ((PTR));
102 static rtx avoid_constant_pool_reference PARAMS ((rtx));
103 \f
104 /* Make a binary operation by properly ordering the operands and
105 seeing if the expression folds. */
106
107 rtx
108 simplify_gen_binary (code, mode, op0, op1)
109 enum rtx_code code;
110 enum machine_mode mode;
111 rtx op0, op1;
112 {
113 rtx tem;
114
115 /* Put complex operands first and constants second if commutative. */
116 if (GET_RTX_CLASS (code) == 'c'
117 && swap_commutative_operands_p (op0, op1))
118 tem = op0, op0 = op1, op1 = tem;
119
120 /* If this simplifies, do it. */
121 tem = simplify_binary_operation (code, mode, op0, op1);
122
123 if (tem)
124 return tem;
125
126 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
127 just form the operation. */
128
129 if (code == PLUS && GET_CODE (op1) == CONST_INT
130 && GET_MODE (op0) != VOIDmode)
131 return plus_constant (op0, INTVAL (op1));
132 else if (code == MINUS && GET_CODE (op1) == CONST_INT
133 && GET_MODE (op0) != VOIDmode)
134 return plus_constant (op0, - INTVAL (op1));
135 else
136 return gen_rtx_fmt_ee (code, mode, op0, op1);
137 }
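
/* A usage sketch (hypothetical caller, not part of this file):

     simplify_gen_binary (PLUS, SImode, x, const0_rtx)

   folds to X itself for integral modes; a PLUS or MINUS of X and a
   CONST_INT that does not otherwise fold is rebuilt via plus_constant;
   anything else yields a fresh rtx with commutative operands in
   canonical order.  */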
138 \f
139 /* In case X is MEM referencing constant pool, return the real value.
140 Otherwise return X. */
141 static rtx
142 avoid_constant_pool_reference (x)
143 rtx x;
144 {
145 if (GET_CODE (x) != MEM)
146 return x;
147 if (GET_CODE (XEXP (x, 0)) != SYMBOL_REF
148 || !CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
149 return x;
150 return get_pool_constant (XEXP (x, 0));
151 }
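
/* For example, a (mem (symbol_ref ...)) whose address satisfies
   CONSTANT_POOL_ADDRESS_P yields the pooled constant itself, such as
   the CONST_DOUBLE behind a floating-point literal; any other rtx is
   returned unchanged.  */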
152 \f
153 /* Make a unary operation by first seeing if it folds and otherwise making
154 the specified operation. */
155
156 rtx
157 simplify_gen_unary (code, mode, op, op_mode)
158 enum rtx_code code;
159 enum machine_mode mode;
160 rtx op;
161 enum machine_mode op_mode;
162 {
163 rtx tem;
164
165 /* If this simplifies, use it. */
166 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
167 return tem;
168
169 return gen_rtx_fmt_e (code, mode, op);
170 }
171
172 /* Likewise for ternary operations. */
173
174 rtx
175 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
176 enum rtx_code code;
177 enum machine_mode mode, op0_mode;
178 rtx op0, op1, op2;
179 {
180 rtx tem;
181
182 /* If this simplifies, use it. */
183 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
184 op0, op1, op2)))
185 return tem;
186
187 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
188 }
189 \f
190 /* Likewise, for relational operations.
191    CMP_MODE specifies the mode in which the comparison is done.  */
193
194 rtx
195 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
196 enum rtx_code code;
197 enum machine_mode mode;
198 enum machine_mode cmp_mode;
199 rtx op0, op1;
200 {
201 rtx tem;
202
203 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
204 return tem;
205
206 /* Put complex operands first and constants second. */
207 if (swap_commutative_operands_p (op0, op1))
208 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
209
210 return gen_rtx_fmt_ee (code, mode, op0, op1);
211 }
212 \f
213 /* Replace all occurrences of OLD in X with NEW and try to simplify the
214 resulting RTX. Return a new RTX which is as simplified as possible. */
215
216 rtx
217 simplify_replace_rtx (x, old, new)
218 rtx x;
219 rtx old;
220 rtx new;
221 {
222 enum rtx_code code = GET_CODE (x);
223 enum machine_mode mode = GET_MODE (x);
224
225 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
226 to build a new expression substituting recursively. If we can't do
227 anything, return our input. */
228
229 if (x == old)
230 return new;
231
232 switch (GET_RTX_CLASS (code))
233 {
234 case '1':
235 {
236 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
237 rtx op = (XEXP (x, 0) == old
238 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
239
240 return simplify_gen_unary (code, mode, op, op_mode);
241 }
242
243 case '2':
244 case 'c':
245 return
246 simplify_gen_binary (code, mode,
247 simplify_replace_rtx (XEXP (x, 0), old, new),
248 simplify_replace_rtx (XEXP (x, 1), old, new));
249 case '<':
250 {
251 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
252 ? GET_MODE (XEXP (x, 0))
253 : GET_MODE (XEXP (x, 1)));
254 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
255 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
256
257 return
258 simplify_gen_relational (code, mode,
259 (op_mode != VOIDmode
260 ? op_mode
261 : GET_MODE (op0) != VOIDmode
262 ? GET_MODE (op0)
263 : GET_MODE (op1)),
264 op0, op1);
265 }
266
267 case '3':
268 case 'b':
269 {
270 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
271 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
272
273 return
274 simplify_gen_ternary (code, mode,
275 (op_mode != VOIDmode
276 ? op_mode
277 : GET_MODE (op0)),
278 op0,
279 simplify_replace_rtx (XEXP (x, 1), old, new),
280 simplify_replace_rtx (XEXP (x, 2), old, new));
281 }
282
283 case 'x':
284 /* The only case we try to handle is a SUBREG. */
285 if (code == SUBREG)
286 {
287 rtx exp;
288 exp = simplify_gen_subreg (GET_MODE (x),
289 simplify_replace_rtx (SUBREG_REG (x),
290 old, new),
291 GET_MODE (SUBREG_REG (x)),
292 SUBREG_BYTE (x));
293 if (exp)
294 x = exp;
295 }
296 return x;
297
298 default:
299 if (GET_CODE (x) == MEM)
300 return
301 replace_equiv_address_nv (x,
302 simplify_replace_rtx (XEXP (x, 0),
303 old, new));
304
305 return x;
306 }
307 return x;
308 }
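
/* A sketch of the effect, with hypothetical operands: replacing
   (reg A) with (const_int 0) in (plus (reg A) (reg B)), in an integer
   mode, rebuilds the PLUS through simplify_gen_binary, which then
   folds the whole expression down to just (reg B).  */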
309 \f
310 /* Try to simplify a unary operation CODE whose output mode is to be
311 MODE with input operand OP whose mode was originally OP_MODE.
312 Return zero if no simplification can be made. */
313
314 rtx
315 simplify_unary_operation (code, mode, op, op_mode)
316 enum rtx_code code;
317 enum machine_mode mode;
318 rtx op;
319 enum machine_mode op_mode;
320 {
321 unsigned int width = GET_MODE_BITSIZE (mode);
322 rtx trueop = avoid_constant_pool_reference (op);
323
324 /* The order of these tests is critical so that, for example, we don't
325 check the wrong mode (input vs. output) for a conversion operation,
326 such as FIX. At some point, this should be simplified. */
327
328 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
329
330 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
331 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
332 {
333 HOST_WIDE_INT hv, lv;
334 REAL_VALUE_TYPE d;
335
336 if (GET_CODE (trueop) == CONST_INT)
337 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
338 else
339 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
340
341 #ifdef REAL_ARITHMETIC
342 REAL_VALUE_FROM_INT (d, lv, hv, mode);
343 #else
344 if (hv < 0)
345 {
346 d = (double) (~ hv);
347 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
348 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
349 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
350 d = (- d - 1.0);
351 }
352 else
353 {
354 d = (double) hv;
355 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
356 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
357 d += (double) (unsigned HOST_WIDE_INT) lv;
358 }
359 #endif /* REAL_ARITHMETIC */
360 d = real_value_truncate (mode, d);
361 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
362 }
363 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
364 && (GET_CODE (trueop) == CONST_DOUBLE
365 || GET_CODE (trueop) == CONST_INT))
366 {
367 HOST_WIDE_INT hv, lv;
368 REAL_VALUE_TYPE d;
369
370 if (GET_CODE (trueop) == CONST_INT)
371 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
372 else
373 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
374
375 if (op_mode == VOIDmode)
376 {
377 /* We don't know how to interpret negative-looking numbers in
378 this case, so don't try to fold those. */
379 if (hv < 0)
380 return 0;
381 }
382 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
383 ;
384 else
385 hv = 0, lv &= GET_MODE_MASK (op_mode);
386
387 #ifdef REAL_ARITHMETIC
388 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
389 #else
390
391 d = (double) (unsigned HOST_WIDE_INT) hv;
392 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
393 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
394 d += (double) (unsigned HOST_WIDE_INT) lv;
395 #endif /* REAL_ARITHMETIC */
396 d = real_value_truncate (mode, d);
397 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
398 }
399 #endif
400
401 if (GET_CODE (trueop) == CONST_INT
402 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
403 {
404 register HOST_WIDE_INT arg0 = INTVAL (trueop);
405 register HOST_WIDE_INT val;
406
407 switch (code)
408 {
409 case NOT:
410 val = ~ arg0;
411 break;
412
413 case NEG:
414 val = - arg0;
415 break;
416
417 case ABS:
418 val = (arg0 >= 0 ? arg0 : - arg0);
419 break;
420
421 case FFS:
422 /* Don't use ffs here. Instead, get low order bit and then its
423 number. If arg0 is zero, this will return 0, as desired. */
424 arg0 &= GET_MODE_MASK (mode);
425 val = exact_log2 (arg0 & (- arg0)) + 1;
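	  /* Worked example: arg0 == 12 (binary 1100) gives
	     arg0 & -arg0 == 4 and exact_log2 (4) == 2, so val == 3;
	     arg0 == 0 gives exact_log2 (0) == -1 and hence val == 0.  */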
426 break;
427
428 case TRUNCATE:
429 val = arg0;
430 break;
431
432 case ZERO_EXTEND:
433 if (op_mode == VOIDmode)
434 op_mode = mode;
435 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
436 {
437 /* If we were really extending the mode,
438 we would have to distinguish between zero-extension
439 and sign-extension. */
440 if (width != GET_MODE_BITSIZE (op_mode))
441 abort ();
442 val = arg0;
443 }
444 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
445 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
446 else
447 return 0;
448 break;
449
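	  /* Worked example for the narrow case below (QImode, 8 bits):
	     arg0 == 0xff masks to val == 0xff; bit 7 is set, so
	     val -= 0x100, leaving the sign-extended value -1.  */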
450 case SIGN_EXTEND:
451 if (op_mode == VOIDmode)
452 op_mode = mode;
453 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
454 {
455 /* If we were really extending the mode,
456 we would have to distinguish between zero-extension
457 and sign-extension. */
458 if (width != GET_MODE_BITSIZE (op_mode))
459 abort ();
460 val = arg0;
461 }
462 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
463 {
464 val
465 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
466 if (val
467 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
468 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
469 }
470 else
471 return 0;
472 break;
473
474 case SQRT:
475 case FLOAT_EXTEND:
476 case FLOAT_TRUNCATE:
477 return 0;
478
479 default:
480 abort ();
481 }
482
483 val = trunc_int_for_mode (val, mode);
484
485 return GEN_INT (val);
486 }
487
488 /* We can do some operations on integer CONST_DOUBLEs. Also allow
489 for a DImode operation on a CONST_INT. */
490 else if (GET_MODE (trueop) == VOIDmode && width <= HOST_BITS_PER_INT * 2
491 && (GET_CODE (trueop) == CONST_DOUBLE
492 || GET_CODE (trueop) == CONST_INT))
493 {
494 unsigned HOST_WIDE_INT l1, lv;
495 HOST_WIDE_INT h1, hv;
496
497 if (GET_CODE (trueop) == CONST_DOUBLE)
498 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
499 else
500 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
501
502 switch (code)
503 {
504 case NOT:
505 lv = ~ l1;
506 hv = ~ h1;
507 break;
508
509 case NEG:
510 neg_double (l1, h1, &lv, &hv);
511 break;
512
513 case ABS:
514 if (h1 < 0)
515 neg_double (l1, h1, &lv, &hv);
516 else
517 lv = l1, hv = h1;
518 break;
519
520 case FFS:
521 hv = 0;
522 if (l1 == 0)
523 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
524 else
525 lv = exact_log2 (l1 & (-l1)) + 1;
526 break;
527
528 case TRUNCATE:
529 /* This is just a change-of-mode, so do nothing. */
530 lv = l1, hv = h1;
531 break;
532
533 case ZERO_EXTEND:
534 if (op_mode == VOIDmode
535 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
536 return 0;
537
538 hv = 0;
539 lv = l1 & GET_MODE_MASK (op_mode);
540 break;
541
542 case SIGN_EXTEND:
543 if (op_mode == VOIDmode
544 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
545 return 0;
546 else
547 {
548 lv = l1 & GET_MODE_MASK (op_mode);
549 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
550 && (lv & ((HOST_WIDE_INT) 1
551 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
552 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
553
554 hv = HWI_SIGN_EXTEND (lv);
555 }
556 break;
557
558 case SQRT:
559 return 0;
560
561 default:
562 return 0;
563 }
564
565 return immed_double_const (lv, hv, mode);
566 }
567
568 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
569 else if (GET_CODE (trueop) == CONST_DOUBLE
570 && GET_MODE_CLASS (mode) == MODE_FLOAT)
571 {
572 REAL_VALUE_TYPE d;
573 jmp_buf handler;
574 rtx x;
575
576 if (setjmp (handler))
577 /* There used to be a warning here, but that is inadvisable.
578 People may want to cause traps, and the natural way
579 to do it should not get a warning. */
580 return 0;
581
582 set_float_handler (handler);
583
584 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
585
586 switch (code)
587 {
588 case NEG:
589 d = REAL_VALUE_NEGATE (d);
590 break;
591
592 case ABS:
593 if (REAL_VALUE_NEGATIVE (d))
594 d = REAL_VALUE_NEGATE (d);
595 break;
596
597 case FLOAT_TRUNCATE:
598 d = real_value_truncate (mode, d);
599 break;
600
601 case FLOAT_EXTEND:
602 /* All this does is change the mode. */
603 break;
604
605 case FIX:
606 d = REAL_VALUE_RNDZINT (d);
607 break;
608
609 case UNSIGNED_FIX:
610 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
611 break;
612
613 case SQRT:
614 return 0;
615
616 default:
617 abort ();
618 }
619
620 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
621 set_float_handler (NULL);
622 return x;
623 }
624
625 else if (GET_CODE (trueop) == CONST_DOUBLE
626 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
627 && GET_MODE_CLASS (mode) == MODE_INT
628 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
629 {
630 REAL_VALUE_TYPE d;
631 jmp_buf handler;
632 HOST_WIDE_INT val;
633
634 if (setjmp (handler))
635 return 0;
636
637 set_float_handler (handler);
638
639 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
640
641 switch (code)
642 {
643 case FIX:
644 val = REAL_VALUE_FIX (d);
645 break;
646
647 case UNSIGNED_FIX:
648 val = REAL_VALUE_UNSIGNED_FIX (d);
649 break;
650
651 default:
652 abort ();
653 }
654
655 set_float_handler (NULL);
656
657 val = trunc_int_for_mode (val, mode);
658
659 return GEN_INT (val);
660 }
661 #endif
662 /* This was formerly used only for non-IEEE float.
663 eggert@twinsun.com says it is safe for IEEE also. */
664 else
665 {
666 enum rtx_code reversed;
667 /* There are some simplifications we can do even if the operands
668 aren't constant. */
669 switch (code)
670 {
671 case NOT:
672 /* (not (not X)) == X. */
673 if (GET_CODE (op) == NOT)
674 return XEXP (op, 0);
675
676 /* (not (eq X Y)) == (ne X Y), etc. */
677 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
678 && ((reversed = reversed_comparison_code (op, NULL_RTX))
679 != UNKNOWN))
680 return gen_rtx_fmt_ee (reversed,
681 op_mode, XEXP (op, 0), XEXP (op, 1));
682 break;
683
684 case NEG:
685 /* (neg (neg X)) == X. */
686 if (GET_CODE (op) == NEG)
687 return XEXP (op, 0);
688 break;
689
690 case SIGN_EXTEND:
691 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
692 becomes just the MINUS if its mode is MODE. This allows
693 folding switch statements on machines using casesi (such as
694 	 the VAX).  */
695 if (GET_CODE (op) == TRUNCATE
696 && GET_MODE (XEXP (op, 0)) == mode
697 && GET_CODE (XEXP (op, 0)) == MINUS
698 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
699 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
700 return XEXP (op, 0);
701
702 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
703 if (! POINTERS_EXTEND_UNSIGNED
704 && mode == Pmode && GET_MODE (op) == ptr_mode
705 && (CONSTANT_P (op)
706 || (GET_CODE (op) == SUBREG
707 && GET_CODE (SUBREG_REG (op)) == REG
708 && REG_POINTER (SUBREG_REG (op))
709 && GET_MODE (SUBREG_REG (op)) == Pmode)))
710 return convert_memory_address (Pmode, op);
711 #endif
712 break;
713
714 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
715 case ZERO_EXTEND:
716 if (POINTERS_EXTEND_UNSIGNED > 0
717 && mode == Pmode && GET_MODE (op) == ptr_mode
718 && (CONSTANT_P (op)
719 || (GET_CODE (op) == SUBREG
720 && GET_CODE (SUBREG_REG (op)) == REG
721 && REG_POINTER (SUBREG_REG (op))
722 && GET_MODE (SUBREG_REG (op)) == Pmode)))
723 return convert_memory_address (Pmode, op);
724 break;
725 #endif
726
727 default:
728 break;
729 }
730
731 return 0;
732 }
733 }
734 \f
735 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
736 and OP1. Return 0 if no simplification is possible.
737
738 Don't use this for relational operations such as EQ or LT.
739 Use simplify_relational_operation instead. */
740
741 rtx
742 simplify_binary_operation (code, mode, op0, op1)
743 enum rtx_code code;
744 enum machine_mode mode;
745 rtx op0, op1;
746 {
747 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
748 HOST_WIDE_INT val;
749 unsigned int width = GET_MODE_BITSIZE (mode);
750 rtx tem;
751 rtx trueop0 = avoid_constant_pool_reference (op0);
752 rtx trueop1 = avoid_constant_pool_reference (op1);
753
754 /* Relational operations don't work here. We must know the mode
755 of the operands in order to do the comparison correctly.
756 Assuming a full word can give incorrect results.
757 Consider comparing 128 with -128 in QImode. */
758
759 if (GET_RTX_CLASS (code) == '<')
760 abort ();
761
762 /* Make sure the constant is second. */
763 if (GET_RTX_CLASS (code) == 'c'
764 && swap_commutative_operands_p (trueop0, trueop1))
765 {
766 tem = op0, op0 = op1, op1 = tem;
767 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
768 }
769
770 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
771 if (GET_MODE_CLASS (mode) == MODE_FLOAT
772 && GET_CODE (trueop0) == CONST_DOUBLE
773 && GET_CODE (trueop1) == CONST_DOUBLE
774 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
775 {
776 REAL_VALUE_TYPE f0, f1, value;
777 jmp_buf handler;
778
779 if (setjmp (handler))
780 return 0;
781
782 set_float_handler (handler);
783
784 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
785 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
786 f0 = real_value_truncate (mode, f0);
787 f1 = real_value_truncate (mode, f1);
788
789 #ifdef REAL_ARITHMETIC
790 #ifndef REAL_INFINITY
791 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
792 return 0;
793 #endif
794 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
795 #else
796 switch (code)
797 {
798 case PLUS:
799 value = f0 + f1;
800 break;
801 case MINUS:
802 value = f0 - f1;
803 break;
804 case MULT:
805 value = f0 * f1;
806 break;
807 case DIV:
808 #ifndef REAL_INFINITY
809 if (f1 == 0)
810 return 0;
811 #endif
812 value = f0 / f1;
813 break;
814 case SMIN:
815 value = MIN (f0, f1);
816 break;
817 case SMAX:
818 value = MAX (f0, f1);
819 break;
820 default:
821 abort ();
822 }
823 #endif
824
825 value = real_value_truncate (mode, value);
826 set_float_handler (NULL);
827 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
828 }
829 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
830
831 /* We can fold some multi-word operations. */
832 if (GET_MODE_CLASS (mode) == MODE_INT
833 && width == HOST_BITS_PER_WIDE_INT * 2
834 && (GET_CODE (trueop0) == CONST_DOUBLE
835 || GET_CODE (trueop0) == CONST_INT)
836 && (GET_CODE (trueop1) == CONST_DOUBLE
837 || GET_CODE (trueop1) == CONST_INT))
838 {
839 unsigned HOST_WIDE_INT l1, l2, lv;
840 HOST_WIDE_INT h1, h2, hv;
841
842 if (GET_CODE (trueop0) == CONST_DOUBLE)
843 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
844 else
845 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
846
847 if (GET_CODE (trueop1) == CONST_DOUBLE)
848 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
849 else
850 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
851
852 switch (code)
853 {
854 case MINUS:
855 /* A - B == A + (-B). */
856 neg_double (l2, h2, &lv, &hv);
857 l2 = lv, h2 = hv;
858
859 	  /* ... fall through ...  */
860
861 case PLUS:
862 add_double (l1, h1, l2, h2, &lv, &hv);
863 break;
864
865 case MULT:
866 mul_double (l1, h1, l2, h2, &lv, &hv);
867 break;
868
869 case DIV: case MOD: case UDIV: case UMOD:
870 /* We'd need to include tree.h to do this and it doesn't seem worth
871 it. */
872 return 0;
873
874 case AND:
875 lv = l1 & l2, hv = h1 & h2;
876 break;
877
878 case IOR:
879 lv = l1 | l2, hv = h1 | h2;
880 break;
881
882 case XOR:
883 lv = l1 ^ l2, hv = h1 ^ h2;
884 break;
885
886 case SMIN:
887 if (h1 < h2
888 || (h1 == h2
889 && ((unsigned HOST_WIDE_INT) l1
890 < (unsigned HOST_WIDE_INT) l2)))
891 lv = l1, hv = h1;
892 else
893 lv = l2, hv = h2;
894 break;
895
896 case SMAX:
897 if (h1 > h2
898 || (h1 == h2
899 && ((unsigned HOST_WIDE_INT) l1
900 > (unsigned HOST_WIDE_INT) l2)))
901 lv = l1, hv = h1;
902 else
903 lv = l2, hv = h2;
904 break;
905
906 case UMIN:
907 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
908 || (h1 == h2
909 && ((unsigned HOST_WIDE_INT) l1
910 < (unsigned HOST_WIDE_INT) l2)))
911 lv = l1, hv = h1;
912 else
913 lv = l2, hv = h2;
914 break;
915
916 case UMAX:
917 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
918 || (h1 == h2
919 && ((unsigned HOST_WIDE_INT) l1
920 > (unsigned HOST_WIDE_INT) l2)))
921 lv = l1, hv = h1;
922 else
923 lv = l2, hv = h2;
924 break;
925
926 case LSHIFTRT: case ASHIFTRT:
927 case ASHIFT:
928 case ROTATE: case ROTATERT:
929 #ifdef SHIFT_COUNT_TRUNCATED
930 if (SHIFT_COUNT_TRUNCATED)
931 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
932 #endif
933
934 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
935 return 0;
936
937 if (code == LSHIFTRT || code == ASHIFTRT)
938 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
939 code == ASHIFTRT);
940 else if (code == ASHIFT)
941 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
942 else if (code == ROTATE)
943 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
944 else /* code == ROTATERT */
945 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
946 break;
947
948 default:
949 return 0;
950 }
951
952 return immed_double_const (lv, hv, mode);
953 }
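
  /* Worked example for the multi-word fold above, assuming a host with
     a 32-bit HOST_WIDE_INT: adding the DImode constant 0xffffffff (a
     CONST_DOUBLE with low 0xffffffff, high 0) to (const_int 1) runs
     add_double on the (low, high) pairs and produces low 0, high 1,
     i.e. 0x100000000.  */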
954
955 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
956 || width > HOST_BITS_PER_WIDE_INT || width == 0)
957 {
958 /* Even if we can't compute a constant result,
959 there are some cases worth simplifying. */
960
961 switch (code)
962 {
963 case PLUS:
964 /* In IEEE floating point, x+0 is not the same as x. Similarly
965 for the other optimizations below. */
966 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
967 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
968 break;
969
970 if (trueop1 == CONST0_RTX (mode))
971 return op0;
972
973 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
974 if (GET_CODE (op0) == NEG)
975 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
976 else if (GET_CODE (op1) == NEG)
977 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
978
979 /* (~a) + 1 -> -a */
980 if (INTEGRAL_MODE_P (mode)
981 && GET_CODE (op0) == NOT
982 && trueop1 == const1_rtx)
983 return gen_rtx_NEG (mode, XEXP (op0, 0));
984
985 /* Handle both-operands-constant cases. We can only add
986 CONST_INTs to constants since the sum of relocatable symbols
987 can't be handled by most assemblers. Don't add CONST_INT
988 to CONST_INT since overflow won't be computed properly if wider
989 than HOST_BITS_PER_WIDE_INT. */
990
991 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
992 && GET_CODE (op1) == CONST_INT)
993 return plus_constant (op0, INTVAL (op1));
994 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
995 && GET_CODE (op0) == CONST_INT)
996 return plus_constant (op1, INTVAL (op0));
997
998 /* See if this is something like X * C - X or vice versa or
999 if the multiplication is written as a shift. If so, we can
1000 distribute and make a new multiply, shift, or maybe just
1001 have X (if C is 2 in the example above). But don't make
1002 real multiply if we didn't have one before. */
1003
1004 if (! FLOAT_MODE_P (mode))
1005 {
1006 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1007 rtx lhs = op0, rhs = op1;
1008 int had_mult = 0;
1009
1010 if (GET_CODE (lhs) == NEG)
1011 coeff0 = -1, lhs = XEXP (lhs, 0);
1012 else if (GET_CODE (lhs) == MULT
1013 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1014 {
1015 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1016 had_mult = 1;
1017 }
1018 else if (GET_CODE (lhs) == ASHIFT
1019 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1020 && INTVAL (XEXP (lhs, 1)) >= 0
1021 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1022 {
1023 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1024 lhs = XEXP (lhs, 0);
1025 }
1026
1027 if (GET_CODE (rhs) == NEG)
1028 coeff1 = -1, rhs = XEXP (rhs, 0);
1029 else if (GET_CODE (rhs) == MULT
1030 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1031 {
1032 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1033 had_mult = 1;
1034 }
1035 else if (GET_CODE (rhs) == ASHIFT
1036 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1037 && INTVAL (XEXP (rhs, 1)) >= 0
1038 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1039 {
1040 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1041 rhs = XEXP (rhs, 0);
1042 }
1043
1044 if (rtx_equal_p (lhs, rhs))
1045 {
1046 tem = simplify_gen_binary (MULT, mode, lhs,
1047 GEN_INT (coeff0 + coeff1));
1048 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1049 }
1050 }
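
	  /* E.g. the block above turns (plus (mult X 3) X) into
	     (mult X 4), since a MULT was already present, and folds
	     (plus (ashift X 1) (neg X)) all the way down to X; but a
	     shift form whose only simplification would be a brand-new
	     multiply is left alone.  */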
1051
1052 /* If one of the operands is a PLUS or a MINUS, see if we can
1053 simplify this by the associative law.
1054 Don't use the associative law for floating point.
1055 The inaccuracy makes it nonassociative,
1056 and subtle programs can break if operations are associated. */
1057
1058 if (INTEGRAL_MODE_P (mode)
1059 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1060 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1061 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1062 return tem;
1063 break;
1064
1065 case COMPARE:
1066 #ifdef HAVE_cc0
1067 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1068 using cc0, in which case we want to leave it as a COMPARE
1069 so we can distinguish it from a register-register-copy.
1070
1071 In IEEE floating point, x-0 is not the same as x. */
1072
1073 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1074 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1075 && trueop1 == CONST0_RTX (mode))
1076 return op0;
1077 #endif
1078
1079 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1080 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1081 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1082 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1083 {
1084 rtx xop00 = XEXP (op0, 0);
1085 rtx xop10 = XEXP (op1, 0);
1086
1087 #ifdef HAVE_cc0
1088 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1089 #else
1090 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1091 && GET_MODE (xop00) == GET_MODE (xop10)
1092 && REGNO (xop00) == REGNO (xop10)
1093 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1094 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1095 #endif
1096 return xop00;
1097 }
1098
1099 break;
1100 case MINUS:
1101 /* None of these optimizations can be done for IEEE
1102 floating point. */
1103 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1104 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1105 break;
1106
1107 /* We can't assume x-x is 0 even with non-IEEE floating point,
1108 but since it is zero except in very strange circumstances, we
1109 will treat it as zero with -funsafe-math-optimizations. */
1110 if (rtx_equal_p (trueop0, trueop1)
1111 && ! side_effects_p (op0)
1112 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1113 return CONST0_RTX (mode);
1114
1115 /* Change subtraction from zero into negation. */
1116 if (trueop0 == CONST0_RTX (mode))
1117 return gen_rtx_NEG (mode, op1);
1118
1119 /* (-1 - a) is ~a. */
1120 if (trueop0 == constm1_rtx)
1121 return gen_rtx_NOT (mode, op1);
1122
1123 /* Subtracting 0 has no effect. */
1124 if (trueop1 == CONST0_RTX (mode))
1125 return op0;
1126
1127 /* See if this is something like X * C - X or vice versa or
1128 if the multiplication is written as a shift. If so, we can
1129 distribute and make a new multiply, shift, or maybe just
1130 have X (if C is 2 in the example above). But don't make
1131 real multiply if we didn't have one before. */
1132
1133 if (! FLOAT_MODE_P (mode))
1134 {
1135 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1136 rtx lhs = op0, rhs = op1;
1137 int had_mult = 0;
1138
1139 if (GET_CODE (lhs) == NEG)
1140 coeff0 = -1, lhs = XEXP (lhs, 0);
1141 else if (GET_CODE (lhs) == MULT
1142 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1143 {
1144 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1145 had_mult = 1;
1146 }
1147 else if (GET_CODE (lhs) == ASHIFT
1148 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1149 && INTVAL (XEXP (lhs, 1)) >= 0
1150 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1151 {
1152 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1153 lhs = XEXP (lhs, 0);
1154 }
1155
1156 if (GET_CODE (rhs) == NEG)
1157 coeff1 = - 1, rhs = XEXP (rhs, 0);
1158 else if (GET_CODE (rhs) == MULT
1159 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1160 {
1161 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1162 had_mult = 1;
1163 }
1164 else if (GET_CODE (rhs) == ASHIFT
1165 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1166 && INTVAL (XEXP (rhs, 1)) >= 0
1167 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1168 {
1169 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1170 rhs = XEXP (rhs, 0);
1171 }
1172
1173 if (rtx_equal_p (lhs, rhs))
1174 {
1175 tem = simplify_gen_binary (MULT, mode, lhs,
1176 GEN_INT (coeff0 - coeff1));
1177 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1178 }
1179 }
1180
1181 /* (a - (-b)) -> (a + b). */
1182 if (GET_CODE (op1) == NEG)
1183 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1184
1185 /* If one of the operands is a PLUS or a MINUS, see if we can
1186 simplify this by the associative law.
1187 Don't use the associative law for floating point.
1188 The inaccuracy makes it nonassociative,
1189 and subtle programs can break if operations are associated. */
1190
1191 if (INTEGRAL_MODE_P (mode)
1192 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1193 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1194 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1195 return tem;
1196
1197 /* Don't let a relocatable value get a negative coeff. */
1198 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1199 return plus_constant (op0, - INTVAL (op1));
1200
1201 /* (x - (x & y)) -> (x & ~y) */
1202 if (GET_CODE (op1) == AND)
1203 {
1204 if (rtx_equal_p (op0, XEXP (op1, 0)))
1205 return simplify_gen_binary (AND, mode, op0,
1206 gen_rtx_NOT (mode, XEXP (op1, 1)));
1207 if (rtx_equal_p (op0, XEXP (op1, 1)))
1208 return simplify_gen_binary (AND, mode, op0,
1209 gen_rtx_NOT (mode, XEXP (op1, 0)));
1210 }
1211 break;
1212
1213 case MULT:
1214 if (trueop1 == constm1_rtx)
1215 {
1216 tem = simplify_unary_operation (NEG, mode, op0, mode);
1217
1218 return tem ? tem : gen_rtx_NEG (mode, op0);
1219 }
1220
1221 /* In IEEE floating point, x*0 is not always 0. */
1222 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1223 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1224 && trueop1 == CONST0_RTX (mode)
1225 && ! side_effects_p (op0))
1226 return op1;
1227
1228 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1229 However, ANSI says we can drop signals,
1230 so we can do this anyway. */
1231 if (trueop1 == CONST1_RTX (mode))
1232 return op0;
1233
1234 /* Convert multiply by constant power of two into shift unless
1235 we are still generating RTL. This test is a kludge. */
1236 if (GET_CODE (trueop1) == CONST_INT
1237 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1238 /* If the mode is larger than the host word size, and the
1239 uppermost bit is set, then this isn't a power of two due
1240 to implicit sign extension. */
1241 && (width <= HOST_BITS_PER_WIDE_INT
1242 || val != HOST_BITS_PER_WIDE_INT - 1)
1243 && ! rtx_equal_function_value_matters)
1244 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1245
1246 if (GET_CODE (trueop1) == CONST_DOUBLE
1247 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
1248 {
1249 REAL_VALUE_TYPE d;
1250 jmp_buf handler;
1251 int op1is2, op1ism1;
1252
1253 if (setjmp (handler))
1254 return 0;
1255
1256 set_float_handler (handler);
1257 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1258 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1259 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1260 set_float_handler (NULL);
1261
1262 /* x*2 is x+x and x*(-1) is -x */
1263 if (op1is2 && GET_MODE (op0) == mode)
1264 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1265
1266 else if (op1ism1 && GET_MODE (op0) == mode)
1267 return gen_rtx_NEG (mode, op0);
1268 }
1269 break;
1270
1271 case IOR:
1272 if (trueop1 == const0_rtx)
1273 return op0;
1274 if (GET_CODE (trueop1) == CONST_INT
1275 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1276 == GET_MODE_MASK (mode)))
1277 return op1;
1278 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1279 return op0;
1280 /* A | (~A) -> -1 */
1281 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1282 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1283 && ! side_effects_p (op0)
1284 && GET_MODE_CLASS (mode) != MODE_CC)
1285 return constm1_rtx;
1286 break;
1287
1288 case XOR:
1289 if (trueop1 == const0_rtx)
1290 return op0;
1291 if (GET_CODE (trueop1) == CONST_INT
1292 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1293 == GET_MODE_MASK (mode)))
1294 return gen_rtx_NOT (mode, op0);
1295 if (trueop0 == trueop1 && ! side_effects_p (op0)
1296 && GET_MODE_CLASS (mode) != MODE_CC)
1297 return const0_rtx;
1298 break;
1299
1300 case AND:
1301 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1302 return const0_rtx;
1303 if (GET_CODE (trueop1) == CONST_INT
1304 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1305 == GET_MODE_MASK (mode)))
1306 return op0;
1307 if (trueop0 == trueop1 && ! side_effects_p (op0)
1308 && GET_MODE_CLASS (mode) != MODE_CC)
1309 return op0;
1310 /* A & (~A) -> 0 */
1311 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1312 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1313 && ! side_effects_p (op0)
1314 && GET_MODE_CLASS (mode) != MODE_CC)
1315 return const0_rtx;
1316 break;
1317
1318 case UDIV:
1319 /* Convert divide by power of two into shift (divide by 1 handled
1320 below). */
1321 if (GET_CODE (trueop1) == CONST_INT
1322 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1323 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1324
1325 /* ... fall through ... */
1326
1327 case DIV:
1328 if (trueop1 == CONST1_RTX (mode))
1329 return op0;
1330
1331 /* In IEEE floating point, 0/x is not always 0. */
1332 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1333 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1334 && trueop0 == CONST0_RTX (mode)
1335 && ! side_effects_p (op1))
1336 return op0;
1337
1338 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1339 /* Change division by a constant into multiplication. Only do
1340 this with -funsafe-math-optimizations. */
1341 else if (GET_CODE (trueop1) == CONST_DOUBLE
1342 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1343 && trueop1 != CONST0_RTX (mode)
1344 && flag_unsafe_math_optimizations)
1345 {
1346 REAL_VALUE_TYPE d;
1347 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1348
1349 if (! REAL_VALUES_EQUAL (d, dconst0))
1350 {
1351 #if defined (REAL_ARITHMETIC)
1352 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1353 return gen_rtx_MULT (mode, op0,
1354 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1355 #else
1356 return
1357 gen_rtx_MULT (mode, op0,
1358 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1359 #endif
1360 }
1361 }
1362 #endif
1363 break;
1364
1365 case UMOD:
1366 /* Handle modulus by power of two (mod with 1 handled below). */
1367 if (GET_CODE (trueop1) == CONST_INT
1368 && exact_log2 (INTVAL (trueop1)) > 0)
1369 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1370
1371 /* ... fall through ... */
1372
1373 case MOD:
1374 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1375 && ! side_effects_p (op0) && ! side_effects_p (op1))
1376 return const0_rtx;
1377 break;
1378
1379 case ROTATERT:
1380 case ROTATE:
1381 /* Rotating ~0 always results in ~0. */
1382 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1383 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1384 && ! side_effects_p (op1))
1385 return op0;
1386
1387 /* ... fall through ... */
1388
1389 case ASHIFT:
1390 case ASHIFTRT:
1391 case LSHIFTRT:
1392 if (trueop1 == const0_rtx)
1393 return op0;
1394 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1395 return op0;
1396 break;
1397
1398 case SMIN:
1399 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1400 	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1401 && ! side_effects_p (op0))
1402 return op1;
1403 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1404 return op0;
1405 break;
1406
1407 case SMAX:
1408 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1409 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1410 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1411 && ! side_effects_p (op0))
1412 return op1;
1413 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1414 return op0;
1415 break;
1416
1417 case UMIN:
1418 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1419 return op1;
1420 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1421 return op0;
1422 break;
1423
1424 case UMAX:
1425 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1426 return op1;
1427 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1428 return op0;
1429 break;
1430
1431 default:
1432 abort ();
1433 }
1434
1435 return 0;
1436 }
1437
1438 /* Get the integer argument values in two forms:
1439 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1440
1441 arg0 = INTVAL (trueop0);
1442 arg1 = INTVAL (trueop1);
1443
1444 if (width < HOST_BITS_PER_WIDE_INT)
1445 {
1446 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1447 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1448
1449 arg0s = arg0;
1450 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1451 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1452
1453 arg1s = arg1;
1454 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1455 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1456 }
1457 else
1458 {
1459 arg0s = arg0;
1460 arg1s = arg1;
1461 }
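
  /* E.g. for width == 8, a CONST_INT whose low byte is all ones gives
     arg0 == 0xff (zero-extended) but arg0s == -1 (sign-extended); the
     unsigned operators below use the former, the signed ones the
     latter.  */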
1462
1463 /* Compute the value of the arithmetic. */
1464
1465 switch (code)
1466 {
1467 case PLUS:
1468 val = arg0s + arg1s;
1469 break;
1470
1471 case MINUS:
1472 val = arg0s - arg1s;
1473 break;
1474
1475 case MULT:
1476 val = arg0s * arg1s;
1477 break;
1478
1479 case DIV:
1480 if (arg1s == 0
1481 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1482 && arg1s == -1))
1483 return 0;
1484 val = arg0s / arg1s;
1485 break;
1486
1487 case MOD:
1488 if (arg1s == 0
1489 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1490 && arg1s == -1))
1491 return 0;
1492 val = arg0s % arg1s;
1493 break;
1494
1495 case UDIV:
1496 if (arg1 == 0
1497 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1498 && arg1s == -1))
1499 return 0;
1500 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1501 break;
1502
1503 case UMOD:
1504 if (arg1 == 0
1505 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1506 && arg1s == -1))
1507 return 0;
1508 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1509 break;
1510
1511 case AND:
1512 val = arg0 & arg1;
1513 break;
1514
1515 case IOR:
1516 val = arg0 | arg1;
1517 break;
1518
1519 case XOR:
1520 val = arg0 ^ arg1;
1521 break;
1522
1523 case LSHIFTRT:
1524 /* If shift count is undefined, don't fold it; let the machine do
1525 what it wants. But truncate it if the machine will do that. */
1526 if (arg1 < 0)
1527 return 0;
1528
1529 #ifdef SHIFT_COUNT_TRUNCATED
1530 if (SHIFT_COUNT_TRUNCATED)
1531 arg1 %= width;
1532 #endif
1533
1534 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1535 break;
1536
1537 case ASHIFT:
1538 if (arg1 < 0)
1539 return 0;
1540
1541 #ifdef SHIFT_COUNT_TRUNCATED
1542 if (SHIFT_COUNT_TRUNCATED)
1543 arg1 %= width;
1544 #endif
1545
1546 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1547 break;
1548
1549 case ASHIFTRT:
1550 if (arg1 < 0)
1551 return 0;
1552
1553 #ifdef SHIFT_COUNT_TRUNCATED
1554 if (SHIFT_COUNT_TRUNCATED)
1555 arg1 %= width;
1556 #endif
1557
1558 val = arg0s >> arg1;
1559
1560       /* The bootstrap compiler may not have sign extended the right shift.
1561 	 Manually extend the sign to ensure the bootstrap cc matches gcc.  */
1562 if (arg0s < 0 && arg1 > 0)
1563 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1564
1565 break;
1566
1567 case ROTATERT:
1568 if (arg1 < 0)
1569 return 0;
1570
1571 arg1 %= width;
1572 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1573 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1574 break;
1575
1576 case ROTATE:
1577 if (arg1 < 0)
1578 return 0;
1579
1580 arg1 %= width;
1581 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1582 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1583 break;
1584
1585 case COMPARE:
1586 /* Do nothing here. */
1587 return 0;
1588
1589 case SMIN:
1590 val = arg0s <= arg1s ? arg0s : arg1s;
1591 break;
1592
1593 case UMIN:
1594 val = ((unsigned HOST_WIDE_INT) arg0
1595 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1596 break;
1597
1598 case SMAX:
1599 val = arg0s > arg1s ? arg0s : arg1s;
1600 break;
1601
1602 case UMAX:
1603 val = ((unsigned HOST_WIDE_INT) arg0
1604 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1605 break;
1606
1607 default:
1608 abort ();
1609 }
1610
1611 val = trunc_int_for_mode (val, mode);
1612
1613 return GEN_INT (val);
1614 }
1615 \f
1616 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1617 PLUS or MINUS.
1618
1619    Rather than test for specific cases, we do this by a brute-force method
1620 and do all possible simplifications until no more changes occur. Then
1621 we rebuild the operation. */
1622
1623 static rtx
1624 simplify_plus_minus (code, mode, op0, op1)
1625 enum rtx_code code;
1626 enum machine_mode mode;
1627 rtx op0, op1;
1628 {
1629 rtx ops[8];
1630 int negs[8];
1631 rtx result, tem;
1632 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1633 int first = 1, negate = 0, changed;
1634 int i, j;
1635
1636 memset ((char *) ops, 0, sizeof ops);
1637
1638 /* Set up the two operands and then expand them until nothing has been
1639 changed. If we run out of room in our array, give up; this should
1640 almost never happen. */
1641
1642 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1643
1644 changed = 1;
1645 while (changed)
1646 {
1647 changed = 0;
1648
1649 for (i = 0; i < n_ops; i++)
1650 switch (GET_CODE (ops[i]))
1651 {
1652 case PLUS:
1653 case MINUS:
1654 if (n_ops == 7)
1655 return 0;
1656
1657 ops[n_ops] = XEXP (ops[i], 1);
1658 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1659 ops[i] = XEXP (ops[i], 0);
1660 input_ops++;
1661 changed = 1;
1662 break;
1663
1664 case NEG:
1665 ops[i] = XEXP (ops[i], 0);
1666 negs[i] = ! negs[i];
1667 changed = 1;
1668 break;
1669
1670 case CONST:
1671 ops[i] = XEXP (ops[i], 0);
1672 input_consts++;
1673 changed = 1;
1674 break;
1675
1676 case NOT:
1677 /* ~a -> (-a - 1) */
1678 if (n_ops != 7)
1679 {
1680 ops[n_ops] = constm1_rtx;
1681 negs[n_ops++] = negs[i];
1682 ops[i] = XEXP (ops[i], 0);
1683 negs[i] = ! negs[i];
1684 changed = 1;
1685 }
1686 break;
1687
1688 case CONST_INT:
1689 if (negs[i])
1690 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1691 break;
1692
1693 default:
1694 break;
1695 }
1696 }
1697
1698 /* If we only have two operands, we can't do anything. */
1699 if (n_ops <= 2)
1700 return 0;
1701
1702 /* Now simplify each pair of operands until nothing changes. The first
1703 time through just simplify constants against each other. */
1704
1705 changed = 1;
1706 while (changed)
1707 {
1708 changed = first;
1709
1710 for (i = 0; i < n_ops - 1; i++)
1711 for (j = i + 1; j < n_ops; j++)
1712 if (ops[i] != 0 && ops[j] != 0
1713 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1714 {
1715 rtx lhs = ops[i], rhs = ops[j];
1716 enum rtx_code ncode = PLUS;
1717
1718 if (negs[i] && ! negs[j])
1719 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1720 else if (! negs[i] && negs[j])
1721 ncode = MINUS;
1722
1723 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1724 if (tem)
1725 {
1726 ops[i] = tem, ops[j] = 0;
1727 negs[i] = negs[i] && negs[j];
1728 if (GET_CODE (tem) == NEG)
1729 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1730
1731 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1732 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1733 changed = 1;
1734 }
1735 }
1736
1737 first = 0;
1738 }
1739
1740 /* Pack all the operands to the lower-numbered entries and give up if
1741 we didn't reduce the number of operands we had. Make sure we
1742 count a CONST as two operands. If we have the same number of
1743 operands, but have made more CONSTs than we had, this is also
1744 an improvement, so accept it. */
1745
1746 for (i = 0, j = 0; j < n_ops; j++)
1747 if (ops[j] != 0)
1748 {
1749 ops[i] = ops[j], negs[i++] = negs[j];
1750 if (GET_CODE (ops[j]) == CONST)
1751 n_consts++;
1752 }
1753
1754 if (i + n_consts > input_ops
1755 || (i + n_consts == input_ops && n_consts <= input_consts))
1756 return 0;
1757
1758 n_ops = i;
1759
1760 /* If we have a CONST_INT, put it last. */
1761 for (i = 0; i < n_ops - 1; i++)
1762 if (GET_CODE (ops[i]) == CONST_INT)
1763 {
1764 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1765 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1766 }
1767
1768 /* Put a non-negated operand first. If there aren't any, make all
1769 operands positive and negate the whole thing later. */
1770 for (i = 0; i < n_ops && negs[i]; i++)
1771 ;
1772
1773 if (i == n_ops)
1774 {
1775 for (i = 0; i < n_ops; i++)
1776 negs[i] = 0;
1777 negate = 1;
1778 }
1779 else if (i != 0)
1780 {
1781 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1782 j = negs[0], negs[0] = negs[i], negs[i] = j;
1783 }
1784
1785 /* Now make the result by performing the requested operations. */
1786 result = ops[0];
1787 for (i = 1; i < n_ops; i++)
1788 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1789
1790 return negate ? gen_rtx_NEG (mode, result) : result;
1791 }
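
/* For instance, simplify_plus_minus (MINUS, SImode, (plus A B), B)
   decomposes the input into the terms {A, B, -B}, cancels B against
   -B via simplify_binary_operation, and rebuilds just A.  */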
1792
1793 struct cfc_args
1794 {
1795 rtx op0, op1; /* Input */
1796 int equal, op0lt, op1lt; /* Output */
1797 int unordered;
1798 };
1799
1800 static void
1801 check_fold_consts (data)
1802 PTR data;
1803 {
1804 struct cfc_args *args = (struct cfc_args *) data;
1805 REAL_VALUE_TYPE d0, d1;
1806
1807   /* We may raise an exception while reading the value.  */
1808 args->unordered = 1;
1809 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1810 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1811
1812   /* Only a NaN operand makes the comparison unordered; Inf versus Inf is ordered.  */
1813 if (REAL_VALUE_ISNAN (d0)
1814 || REAL_VALUE_ISNAN (d1))
1815 return;
1816 args->equal = REAL_VALUES_EQUAL (d0, d1);
1817 args->op0lt = REAL_VALUES_LESS (d0, d1);
1818 args->op1lt = REAL_VALUES_LESS (d1, d0);
1819 args->unordered = 0;
1820 }
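
/* check_fold_consts is only invoked through do_float_handler (see
   below), so a floating-point trap raised while reading the values
   simply leaves the comparison marked unordered.  */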
1821
1822 /* Like simplify_binary_operation except used for relational operators.
1823 MODE is the mode of the operands, not that of the result. If MODE
1824 is VOIDmode, both operands must also be VOIDmode and we compare the
1825 operands in "infinite precision".
1826
1827 If no simplification is possible, this function returns zero. Otherwise,
1828 it returns either const_true_rtx or const0_rtx. */
1829
1830 rtx
1831 simplify_relational_operation (code, mode, op0, op1)
1832 enum rtx_code code;
1833 enum machine_mode mode;
1834 rtx op0, op1;
1835 {
1836 int equal, op0lt, op0ltu, op1lt, op1ltu;
1837 rtx tem;
1838 rtx trueop0;
1839 rtx trueop1;
1840
1841 if (mode == VOIDmode
1842 && (GET_MODE (op0) != VOIDmode
1843 || GET_MODE (op1) != VOIDmode))
1844 abort ();
1845
1846 /* If op0 is a compare, extract the comparison arguments from it. */
1847 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1848 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1849
1850 trueop0 = avoid_constant_pool_reference (op0);
1851 trueop1 = avoid_constant_pool_reference (op1);
1852
1853 /* We can't simplify MODE_CC values since we don't know what the
1854 actual comparison is. */
1855 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1856 #ifdef HAVE_cc0
1857 || op0 == cc0_rtx
1858 #endif
1859 )
1860 return 0;
1861
1862 /* Make sure the constant is second. */
1863 if (swap_commutative_operands_p (trueop0, trueop1))
1864 {
1865 tem = op0, op0 = op1, op1 = tem;
1866 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1867 code = swap_condition (code);
1868 }
1869
1870 /* For integer comparisons of A and B maybe we can simplify A - B and can
1871 then simplify a comparison of that with zero. If A and B are both either
1872 a register or a CONST_INT, this can't help; testing for these cases will
1873 prevent infinite recursion here and speed things up.
1874
1875 If CODE is an unsigned comparison, then we can never do this optimization,
1876 because it gives an incorrect result if the subtraction wraps around zero.
1877 ANSI C defines unsigned operations such that they never overflow, and
1878 thus such cases can not be ignored. */
1879
1880 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
1881 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
1882 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
1883 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1884 && code != GTU && code != GEU && code != LTU && code != LEU)
1885 return simplify_relational_operation (signed_condition (code),
1886 mode, tem, const0_rtx);
1887
1888 if (flag_unsafe_math_optimizations && code == ORDERED)
1889 return const_true_rtx;
1890
1891 if (flag_unsafe_math_optimizations && code == UNORDERED)
1892 return const0_rtx;
1893
1894 /* For non-IEEE floating-point, if the two operands are equal, we know the
1895 result. */
1896 if (rtx_equal_p (trueop0, trueop1)
1897 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1898 || ! FLOAT_MODE_P (GET_MODE (trueop0))
1899 || flag_unsafe_math_optimizations))
1900 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1901
1902 /* If the operands are floating-point constants, see if we can fold
1903 the result. */
1904 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1905 else if (GET_CODE (trueop0) == CONST_DOUBLE
1906 && GET_CODE (trueop1) == CONST_DOUBLE
1907 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
1908 {
1909 struct cfc_args args;
1910
1911       /* Set up the input for check_fold_consts ().  */
1912       args.op0 = trueop0;
1913       args.op1 = trueop1;
1914 
1916 if (!do_float_handler (check_fold_consts, (PTR) &args))
1917 args.unordered = 1;
1918
1919 if (args.unordered)
1920 switch (code)
1921 {
1922 case UNEQ:
1923 case UNLT:
1924 case UNGT:
1925 case UNLE:
1926 case UNGE:
1927 case NE:
1928 case UNORDERED:
1929 return const_true_rtx;
1930 case EQ:
1931 case LT:
1932 case GT:
1933 case LE:
1934 case GE:
1935 case LTGT:
1936 case ORDERED:
1937 return const0_rtx;
1938 default:
1939 return 0;
1940 }
1941
1942       /* Receive the output from check_fold_consts ().  */
1943 equal = args.equal;
1944 op0lt = op0ltu = args.op0lt;
1945 op1lt = op1ltu = args.op1lt;
1946 }
1947 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1948
1949 /* Otherwise, see if the operands are both integers. */
1950 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1951 && (GET_CODE (trueop0) == CONST_DOUBLE
1952 || GET_CODE (trueop0) == CONST_INT)
1953 && (GET_CODE (trueop1) == CONST_DOUBLE
1954 || GET_CODE (trueop1) == CONST_INT))
1955 {
1956 int width = GET_MODE_BITSIZE (mode);
1957 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1958 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1959
1960 /* Get the two words comprising each integer constant. */
1961 if (GET_CODE (trueop0) == CONST_DOUBLE)
1962 {
1963 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
1964 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
1965 }
1966 else
1967 {
1968 l0u = l0s = INTVAL (trueop0);
1969 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1970 }
1971
1972 if (GET_CODE (trueop1) == CONST_DOUBLE)
1973 {
1974 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
1975 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
1976 }
1977 else
1978 {
1979 l1u = l1s = INTVAL (trueop1);
1980 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1981 }
1982
1983 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1984 we have to sign or zero-extend the values. */
1985 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1986 {
1987 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1988 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1989
1990 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1991 l0s |= ((HOST_WIDE_INT) (-1) << width);
1992
1993 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1994 l1s |= ((HOST_WIDE_INT) (-1) << width);
1995 }
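/* For example, with QImode (width 8) and the constant 0xff: l0u is masked
   to 0xff, while l0s has bit 7 set and so is sign-extended to -1.  The
   unsigned comparisons below then see 255 and the signed ones see -1.  */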
1996 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1997 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
1998
1999 equal = (h0u == h1u && l0u == l1u);
2000 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2001 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2002 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2003 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2004 }
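/* Illustrative sketch: comparing the DImode constants -1 and 1 on a host
   where HOST_WIDE_INT is 32 bits gives h0s = -1, l0u = 0xffffffff versus
   h1s = 0, l1u = 1.  The signed order compares the high words as signed
   (-1 < 0, so op0lt), the unsigned order compares them as unsigned
   (0xffffffff > 0, so op1ltu), falling back to the unsigned low words
   only on ties.  */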
2005
2006 /* Otherwise, there are some code-specific tests we can make. */
2007 else
2008 {
2009 switch (code)
2010 {
2011 case EQ:
2012 /* References to the frame plus a constant or labels cannot
2013 be zero, but a SYMBOL_REF can due to #pragma weak. */
2014 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2015 || GET_CODE (trueop0) == LABEL_REF)
2016 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2017 /* On some machines, the arg pointer register can sometimes be 0. */
2018 && op0 != arg_pointer_rtx
2019 #endif
2020 )
2021 return const0_rtx;
2022 break;
2023
2024 case NE:
2025 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2026 || GET_CODE (trueop0) == LABEL_REF)
2027 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2028 && op0 != arg_pointer_rtx
2029 #endif
2030 )
2031 return const_true_rtx;
2032 break;
2033
2034 case GEU:
2035 /* Unsigned values are never negative. */
2036 if (trueop1 == const0_rtx)
2037 return const_true_rtx;
2038 break;
2039
2040 case LTU:
2041 if (trueop1 == const0_rtx)
2042 return const0_rtx;
2043 break;
2044
2045 case LEU:
2046 /* Unsigned values are never greater than the largest
2047 unsigned value. */
2048 if (GET_CODE (trueop1) == CONST_INT
2049 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2050 && INTEGRAL_MODE_P (mode))
2051 return const_true_rtx;
2052 break;
2053
2054 case GTU:
2055 if (GET_CODE (trueop1) == CONST_INT
2056 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2057 && INTEGRAL_MODE_P (mode))
2058 return const0_rtx;
2059 break;
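/* For example, in QImode GET_MODE_MASK is 0xff, so (leu x (const_int 255))
   folds to true and (gtu x (const_int 255)) folds to false, since no
   QImode value can exceed 255.  */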
2060
2061 default:
2062 break;
2063 }
2064
2065 return 0;
2066 }
2067
2068 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2069 as appropriate. */
2070 switch (code)
2071 {
2072 case EQ:
2073 case UNEQ:
2074 return equal ? const_true_rtx : const0_rtx;
2075 case NE:
2076 case LTGT:
2077 return ! equal ? const_true_rtx : const0_rtx;
2078 case LT:
2079 case UNLT:
2080 return op0lt ? const_true_rtx : const0_rtx;
2081 case GT:
2082 case UNGT:
2083 return op1lt ? const_true_rtx : const0_rtx;
2084 case LTU:
2085 return op0ltu ? const_true_rtx : const0_rtx;
2086 case GTU:
2087 return op1ltu ? const_true_rtx : const0_rtx;
2088 case LE:
2089 case UNLE:
2090 return equal || op0lt ? const_true_rtx : const0_rtx;
2091 case GE:
2092 case UNGE:
2093 return equal || op1lt ? const_true_rtx : const0_rtx;
2094 case LEU:
2095 return equal || op0ltu ? const_true_rtx : const0_rtx;
2096 case GEU:
2097 return equal || op1ltu ? const_true_rtx : const0_rtx;
2098 case ORDERED:
2099 return const_true_rtx;
2100 case UNORDERED:
2101 return const0_rtx;
2102 default:
2103 abort ();
2104 }
2105 }
2106 \f
2107 /* Simplify CODE, an operation with result mode MODE and three operands,
2108 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2109 a constant. Return 0 if no simplification is possible. */
2110
2111 rtx
2112 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2113 enum rtx_code code;
2114 enum machine_mode mode, op0_mode;
2115 rtx op0, op1, op2;
2116 {
2117 unsigned int width = GET_MODE_BITSIZE (mode);
2118
2119 /* VOIDmode means "infinite" precision. */
2120 if (width == 0)
2121 width = HOST_BITS_PER_WIDE_INT;
2122
2123 switch (code)
2124 {
2125 case SIGN_EXTRACT:
2126 case ZERO_EXTRACT:
2127 if (GET_CODE (op0) == CONST_INT
2128 && GET_CODE (op1) == CONST_INT
2129 && GET_CODE (op2) == CONST_INT
2130 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2131 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2132 {
2133 /* Extracting a bit-field from a constant */
2134 HOST_WIDE_INT val = INTVAL (op0);
2135
2136 if (BITS_BIG_ENDIAN)
2137 val >>= (GET_MODE_BITSIZE (op0_mode)
2138 - INTVAL (op2) - INTVAL (op1));
2139 else
2140 val >>= INTVAL (op2);
2141
2142 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2143 {
2144 /* First zero-extend. */
2145 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2146 /* If desired, propagate sign bit. */
2147 if (code == SIGN_EXTRACT
2148 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2149 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2150 }
2151
2152 /* Clear the bits that don't belong in our mode,
2153 unless they and our sign bit are all one.
2154 So we get either a reasonable negative value or a reasonable
2155 unsigned value for this mode. */
2156 if (width < HOST_BITS_PER_WIDE_INT
2157 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2158 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2159 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2160
2161 return GEN_INT (val);
2162 }
2163 break;
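/* Worked example, assuming BITS_BIG_ENDIAN is 0: extracting 4 bits starting
   at bit 2 of 0x1234 shifts right by 2 to get 0x48d and masks to 0xd, so
   (zero_extract (const_int 0x1234) (const_int 4) (const_int 2)) yields 13;
   the sign_extract of the same field sees bit 3 set and yields -3.  */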
2164
2165 case IF_THEN_ELSE:
2166 if (GET_CODE (op0) == CONST_INT)
2167 return op0 != const0_rtx ? op1 : op2;
2168
2169 /* Convert a != b ? a : b or a == b ? b : a to "a". */
2170 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2171 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2172 && rtx_equal_p (XEXP (op0, 0), op1)
2173 && rtx_equal_p (XEXP (op0, 1), op2))
2174 return op1;
2175 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2176 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2177 && rtx_equal_p (XEXP (op0, 1), op1)
2178 && rtx_equal_p (XEXP (op0, 0), op2))
2179 return op2;
2180 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2181 {
2182 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2183 ? GET_MODE (XEXP (op0, 1))
2184 : GET_MODE (XEXP (op0, 0)));
2185 rtx temp;
2186 if (cmp_mode == VOIDmode)
2187 cmp_mode = op0_mode;
2188 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2189 XEXP (op0, 0), XEXP (op0, 1));
2190
2191 /* See if any simplifications were possible. */
2192 if (temp == const0_rtx)
2193 return op2;
2194 else if (temp == const1_rtx)
2195 return op1;
2196 else if (temp)
2197 op0 = temp;
2198
2199 /* See if OP1 and OP2 are store-flag constants that let us fold to a bare comparison. */
2200 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2201 {
2202 HOST_WIDE_INT t = INTVAL (op1);
2203 HOST_WIDE_INT f = INTVAL (op2);
2204
2205 if (t == STORE_FLAG_VALUE && f == 0)
2206 code = GET_CODE (op0);
2207 else if (t == 0 && f == STORE_FLAG_VALUE)
2208 {
2209 enum rtx_code tmp;
2210 tmp = reversed_comparison_code (op0, NULL_RTX);
2211 if (tmp == UNKNOWN)
2212 break;
2213 code = tmp;
2214 }
2215 else
2216 break;
2217
2218 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2219 }
2220 }
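/* Sketch of the constant-arm folds above, assuming STORE_FLAG_VALUE is 1:
   (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes (lt x y),
   and with the arms swapped it becomes the reversed comparison (ge x y),
   provided reversed_comparison_code can reverse it safely.  */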
2221 break;
2222
2223 default:
2224 abort ();
2225 }
2226
2227 return 0;
2228 }
2229
2230 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2231 Return 0 if no simplification is possible. */
2232 rtx
2233 simplify_subreg (outermode, op, innermode, byte)
2234 rtx op;
2235 unsigned int byte;
2236 enum machine_mode outermode, innermode;
2237 {
2238 /* Little bit of sanity checking. */
2239 if (innermode == VOIDmode || outermode == VOIDmode
2240 || innermode == BLKmode || outermode == BLKmode)
2241 abort ();
2242
2243 if (GET_MODE (op) != innermode
2244 && GET_MODE (op) != VOIDmode)
2245 abort ();
2246
2247 if (byte % GET_MODE_SIZE (outermode)
2248 || byte >= GET_MODE_SIZE (innermode))
2249 abort ();
2250
2251 if (outermode == innermode && !byte)
2252 return op;
2253
2254 /* Attempt to simplify constant to non-SUBREG expression. */
2255 if (CONSTANT_P (op))
2256 {
2257 int offset, part;
2258 unsigned HOST_WIDE_INT val = 0;
2259
2260 /* ??? This code is partly redundant with code below, but can handle
2261 the subregs of floats and similar corner cases.
2262 Later we should move all simplification code here and rewrite
2263 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2264 using SIMPLIFY_SUBREG. */
2265 if (subreg_lowpart_offset (outermode, innermode) == byte)
2266 {
2267 rtx new = gen_lowpart_if_possible (outermode, op);
2268 if (new)
2269 return new;
2270 }
2271
2272 /* The comment above applies here as well. */
2273 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2274 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2275 && GET_MODE_CLASS (outermode) == MODE_INT)
2276 {
2277 rtx new = constant_subword (op,
2278 (byte / UNITS_PER_WORD),
2279 innermode);
2280 if (new)
2281 return new;
2282 }
2283
2284 offset = byte * BITS_PER_UNIT;
2285 switch (GET_CODE (op))
2286 {
2287 case CONST_DOUBLE:
2288 if (GET_MODE (op) != VOIDmode)
2289 break;
2290
2291 /* We can't handle this case yet. */
2292 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2293 return NULL_RTX;
2294
2295 part = offset >= HOST_BITS_PER_WIDE_INT;
2296 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2297 && BYTES_BIG_ENDIAN)
2298 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2299 && WORDS_BIG_ENDIAN))
2300 part = !part;
2301 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2302 offset %= HOST_BITS_PER_WIDE_INT;
2303
2304 /* We've already picked the word we want from a double, so
2305 pretend this is actually an integer. */
2306 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2307
2308 /* FALLTHROUGH */
2309 case CONST_INT:
2310 if (GET_CODE (op) == CONST_INT)
2311 val = INTVAL (op);
2312
2313 /* We don't handle synthesizing non-integral constants yet. */
2314 if (GET_MODE_CLASS (outermode) != MODE_INT)
2315 return NULL_RTX;
2316
2317 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2318 {
2319 if (WORDS_BIG_ENDIAN)
2320 offset = (GET_MODE_BITSIZE (innermode)
2321 - GET_MODE_BITSIZE (outermode) - offset);
2322 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2323 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2324 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2325 - 2 * (offset % BITS_PER_WORD));
2326 }
2327
2328 if (offset >= HOST_BITS_PER_WIDE_INT)
2329 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2330 else
2331 {
2332 val >>= offset;
2333 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2334 val = trunc_int_for_mode (val, outermode);
2335 return GEN_INT (val);
2336 }
2337 default:
2338 break;
2339 }
2340 }
2341
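/* For instance, on a little-endian target (subreg:QI (const_int 0x1234) 0)
   takes the constant path above: byte 0 is the lowpart offset, so it folds
   to (const_int 0x34), the low byte, either directly through
   gen_lowpart_if_possible or by the shift-and-truncate in the switch.  */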
2342 /* Changing mode twice with SUBREG => just change it once,
2343 or not at all if changing back to the starting mode of OP. */
2344 if (GET_CODE (op) == SUBREG)
2345 {
2346 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2347 int final_offset = byte + SUBREG_BYTE (op);
2348 rtx new;
2349
2350 if (outermode == innermostmode
2351 && byte == 0 && SUBREG_BYTE (op) == 0)
2352 return SUBREG_REG (op);
2353
2354 /* The SUBREG_BYTE represents the offset, as if the value were stored
2355 in memory.  The irritating exception is a paradoxical subreg, where
2356 we define SUBREG_BYTE to be 0; on big-endian machines this value
2357 ought to be negative.  For the moment, undo this exception. */
2358 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2359 {
2360 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2361 if (WORDS_BIG_ENDIAN)
2362 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2363 if (BYTES_BIG_ENDIAN)
2364 final_offset += difference % UNITS_PER_WORD;
2365 }
2366 if (SUBREG_BYTE (op) == 0
2367 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2368 {
2369 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2370 if (WORDS_BIG_ENDIAN)
2371 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2372 if (BYTES_BIG_ENDIAN)
2373 final_offset += difference % UNITS_PER_WORD;
2374 }
2375
2376 /* See whether resulting subreg will be paradoxical. */
2377 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2378 {
2379 /* In nonparadoxical subregs we can't handle negative offsets. */
2380 if (final_offset < 0)
2381 return NULL_RTX;
2382 /* Bail out in case resulting subreg would be incorrect. */
2383 if (final_offset % GET_MODE_SIZE (outermode)
2384 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2385 return NULL_RTX;
2386 }
2387 else
2388 {
2389 int offset = 0;
2390 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2391
2392 /* In a paradoxical subreg, see if we are still looking at the lower part.
2393 If so, our SUBREG_BYTE will be 0. */
2394 if (WORDS_BIG_ENDIAN)
2395 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2396 if (BYTES_BIG_ENDIAN)
2397 offset += difference % UNITS_PER_WORD;
2398 if (offset == final_offset)
2399 final_offset = 0;
2400 else
2401 return NULL_RTX;
2402 }
2403
2404 /* Recurse for further possible simplifications. */
2405 new = simplify_subreg (outermode, SUBREG_REG (op),
2406 GET_MODE (SUBREG_REG (op)),
2407 final_offset);
2408 if (new)
2409 return new;
2410 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2411 }
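/* For example, (subreg:QI (subreg:HI (reg:SI x) 0) 0) combines the two
   offsets into 0 and becomes (subreg:QI (reg:SI x) 0); for a pseudo
   register the recursive call cannot fold further, so gen_rtx_SUBREG
   builds the single remaining subreg.  */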
2412
2413 /* SUBREG of a hard register => just change the register number
2414 and/or mode. If the hard register is not valid in that mode,
2415 suppress this simplification. If the hard register is the stack,
2416 frame, or argument pointer, leave this as a SUBREG. */
2417
2418 if (REG_P (op)
2419 && (! REG_FUNCTION_VALUE_P (op)
2420 || ! rtx_equal_function_value_matters)
2421 #ifdef CLASS_CANNOT_CHANGE_MODE
2422 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2423 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2424 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2425 && (TEST_HARD_REG_BIT
2426 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2427 REGNO (op))))
2428 #endif
2429 && REGNO (op) < FIRST_PSEUDO_REGISTER
2430 && ((reload_completed && !frame_pointer_needed)
2431 || (REGNO (op) != FRAME_POINTER_REGNUM
2432 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2433 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2434 #endif
2435 ))
2436 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2437 && REGNO (op) != ARG_POINTER_REGNUM
2438 #endif
2439 && REGNO (op) != STACK_POINTER_REGNUM)
2440 {
2441 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2442 0);
2443
2444 /* ??? We do allow it if the current REG is not valid for
2445 its mode. This is a kludge to work around how float/complex
2446 arguments are passed on 32-bit Sparc and should be fixed. */
2447 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2448 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2449 return gen_rtx_REG (outermode, final_regno);
2450 }
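/* As a sketch: on a 32-bit little-endian target where DImode occupies two
   consecutive hard registers, (subreg:SI (reg:DI 0) 4) would resolve via
   subreg_hard_regno to the second register and fold to (reg:SI 1),
   assuming that register is valid in SImode.  */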
2451
2452 /* If we have a SUBREG of a register that we are replacing and we are
2453 replacing it with a MEM, make a new MEM and try replacing the
2454 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2455 or if we would be widening it. */
2456
2457 if (GET_CODE (op) == MEM
2458 && ! mode_dependent_address_p (XEXP (op, 0))
2459 /* Allow splitting of volatile memory references in case we don't
2460 have instruction to move the whole thing. */
2461 && (! MEM_VOLATILE_P (op)
2462 || (mov_optab->handlers[(int) innermode].insn_code
2463 == CODE_FOR_nothing))
2464 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2465 return adjust_address_nv (op, outermode, byte);
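/* E.g. (subreg:QI (mem:SI addr) 3) is narrowed by adjust_address_nv to a
   one-byte load at a 3-byte offset, roughly
   (mem:QI (plus addr (const_int 3))), since the SUBREG_BYTE is already a
   memory-order offset.  */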
2466
2467 /* Handle complex values represented as CONCAT
2468 of real and imaginary part. */
2469 if (GET_CODE (op) == CONCAT)
2470 {
2471 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2472 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2473 unsigned int final_offset;
2474 rtx res;
2475
2476 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2477 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2478 if (res)
2479 return res;
2480 /* We can at least simplify it by referring directly to the relevant part. */
2481 return gen_rtx_SUBREG (outermode, part, final_offset);
2482 }
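/* For complex values, e.g. (subreg:SF (concat:SC re im) 4) with 4-byte
   SFmode parts: byte 4 is not below GET_MODE_UNIT_SIZE, so it selects the
   imaginary part with a final offset of 0 and simplifies to IM itself.  */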
2483
2484 return NULL_RTX;
2485 }
2486 /* Make a SUBREG operation or equivalent if it folds. */
2487
2488 rtx
2489 simplify_gen_subreg (outermode, op, innermode, byte)
2490 rtx op;
2491 unsigned int byte;
2492 enum machine_mode outermode, innermode;
2493 {
2494 rtx new;
2495 /* Little bit of sanity checking. */
2496 if (innermode == VOIDmode || outermode == VOIDmode
2497 || innermode == BLKmode || outermode == BLKmode)
2498 abort ();
2499
2500 if (GET_MODE (op) != innermode
2501 && GET_MODE (op) != VOIDmode)
2502 abort ();
2503
2504 if (byte % GET_MODE_SIZE (outermode)
2505 || byte >= GET_MODE_SIZE (innermode))
2506 abort ();
2507
2508 if (GET_CODE (op) == QUEUED)
2509 return NULL_RTX;
2510
2511 new = simplify_subreg (outermode, op, innermode, byte);
2512 if (new)
2513 return new;
2514
2515 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2516 return NULL_RTX;
2517
2518 return gen_rtx_SUBREG (outermode, op, byte);
2519 }
2520 /* Simplify X, an rtx expression.
2521
2522 Return the simplified expression or NULL if no simplifications
2523 were possible.
2524
2525 This is the preferred entry point into the simplification routines;
2526 however, we still allow passes to call the more specific routines.
2527
2528 Right now GCC has three (yes, three) major bodies of RTL simplification
2529 code that need to be unified.
2530
2531 1. fold_rtx in cse.c. This code uses various CSE specific
2532 information to aid in RTL simplification.
2533
2534 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2535 it uses combine specific information to aid in RTL
2536 simplification.
2537
2538 3. The routines in this file.
2539
2540
2541 Long term we want to only have one body of simplification code; to
2542 get to that state I recommend the following steps:
2543
2544 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2545 that do not depend on pass-specific state into these routines.
2546
2547 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2548 use this routine whenever possible.
2549
2550 3. Allow for pass dependent state to be provided to these
2551 routines and add simplifications based on the pass dependent
2552 state. Remove code from cse.c & combine.c that becomes
2553 redundant/dead.
2554
2555 It will take time, but ultimately the compiler will be easier to
2556 maintain and improve. It's totally silly that when we add a
2557 simplification it needs to be added to 4 places (3 for RTL
2558 simplification and 1 for tree simplification). */
2559
2560 rtx
2561 simplify_rtx (x)
2562 rtx x;
2563 {
2564 enum rtx_code code = GET_CODE (x);
2565 enum machine_mode mode = GET_MODE (x);
2566
2567 switch (GET_RTX_CLASS (code))
2568 {
2569 case '1':
2570 return simplify_unary_operation (code, mode,
2571 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2572 case 'c':
2573 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2574 {
2575 rtx tem;
2576
2577 tem = XEXP (x, 0);
2578 XEXP (x, 0) = XEXP (x, 1);
2579 XEXP (x, 1) = tem;
2580 return simplify_binary_operation (code, mode,
2581 XEXP (x, 0), XEXP (x, 1));
2582 }
2583
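/* Operands already in canonical order; FALLTHROUGH to the generic
   binary case.  */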
2584 case '2':
2585 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2586
2587 case '3':
2588 case 'b':
2589 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2590 XEXP (x, 0), XEXP (x, 1),
2591 XEXP (x, 2));
2592
2593 case '<':
2594 return simplify_relational_operation (code,
2595 ((GET_MODE (XEXP (x, 0))
2596 != VOIDmode)
2597 ? GET_MODE (XEXP (x, 0))
2598 : GET_MODE (XEXP (x, 1))),
2599 XEXP (x, 0), XEXP (x, 1));
2600 case 'x':
2601 /* The only case we try to handle is a SUBREG. */
2602 if (code == SUBREG)
2603 return simplify_gen_subreg (mode, SUBREG_REG (x),
2604 GET_MODE (SUBREG_REG (x)),
2605 SUBREG_BYTE (x));
2606 return NULL;
2607 default:
2608 return NULL;
2609 }
2610 }