]>
Commit | Line | Data |
---|---|---|
ef6c17ed | 1 | /* RTL simplification functions for GNU compiler. |
9daf6266 | 2 | Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, |
5377f687 | 3 | 1999, 2000, 2001 Free Software Foundation, Inc. |
af21a202 | 4 | |
f12b58b3 | 5 | This file is part of GCC. |
af21a202 | 6 | |
f12b58b3 | 7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free | |
9 | Software Foundation; either version 2, or (at your option) any later | |
10 | version. | |
af21a202 | 11 | |
f12b58b3 | 12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | for more details. | |
af21a202 | 16 | |
17 | You should have received a copy of the GNU General Public License | |
f12b58b3 | 18 | along with GCC; see the file COPYING. If not, write to the Free |
19 | Software Foundation, 59 Temple Place - Suite 330, Boston, MA | |
20 | 02111-1307, USA. */ | |
af21a202 | 21 | |
22 | ||
23 | #include "config.h" | |
af21a202 | 24 | #include "system.h" |
af21a202 | 25 | |
26 | #include "rtl.h" | |
27 | #include "tm_p.h" | |
28 | #include "regs.h" | |
29 | #include "hard-reg-set.h" | |
30 | #include "flags.h" | |
31 | #include "real.h" | |
32 | #include "insn-config.h" | |
33 | #include "recog.h" | |
34 | #include "function.h" | |
35 | #include "expr.h" | |
36 | #include "toplev.h" | |
37 | #include "output.h" | |
1617c276 | 38 | #include "ggc.h" |
af21a202 | 39 | |
40 | /* Simplification and canonicalization of RTL. */ | |
41 | ||
42 | /* Nonzero if X has the form (PLUS frame-pointer integer). We check for | |
43 | virtual regs here because the simplify_*_operation routines are called | |
44 | by integrate.c, which is called before virtual register instantiation. | |
45 | ||
46 | ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into | |
47 | a header file so that their definitions can be shared with the | |
48 | simplification routines in simplify-rtx.c. Until then, do not | |
49 | change these macros without also changing the copy in simplify-rtx.c. */ | |
50 | ||
/* Nonzero if X is the frame pointer (or a fixed arg pointer / virtual
   frame register), or a PLUS of one of those and a CONST_INT.

   Fixed: the test inside the PLUS arm used to read `(X) == arg_pointer_rtx',
   which compares the whole PLUS expression and so could never be true there;
   it must test XEXP (X, 0) like the surrounding alternatives.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
64 | ||
65 | /* Similar, but also allows reference to the stack pointer. | |
66 | ||
67 | This used to include FIXED_BASE_PLUS_P, however, we can't assume that | |
68 | arg_pointer_rtx by itself is nonzero, because on at least one machine, | |
69 | the i960, the arg pointer is zero when it is unused. */ | |
70 | ||
/* Like FIXED_BASE_PLUS_P, but also allows the stack pointer and the
   virtual stack/outgoing-args registers as bases.

   Fixed: as in FIXED_BASE_PLUS_P, the test inside the first PLUS arm
   used to read `(X) == arg_pointer_rtx', which can never be true for a
   PLUS; it must test XEXP (X, 0).  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
90 | ||
a8fb076f | 91 | /* Much code operates on (low, high) pairs; the low value is an |
92 | unsigned wide int, the high value a signed wide int. We | |
93 | occasionally need to sign extend from low to high as if low were a | |
94 | signed wide int. */ | |
/* Yield all-ones if LOW would be negative as a signed wide int, else zero.
   LOW is parenthesized so that an expression argument (e.g. `a + b') is
   cast as a whole rather than having the cast bind to its first operand.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
af21a202 | 97 | |
ef6c17ed | 98 | static rtx simplify_plus_minus PARAMS ((enum rtx_code, |
99 | enum machine_mode, rtx, rtx)); | |
100 | static void check_fold_consts PARAMS ((PTR)); | |
cc190096 | 101 | #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC) |
102 | static void simplify_unary_real PARAMS ((PTR)); | |
103 | static void simplify_binary_real PARAMS ((PTR)); | |
104 | #endif | |
105 | static void simplify_binary_is2orm1 PARAMS ((PTR)); | |
106 | ||
ef6c17ed | 107 | \f |
af21a202 | 108 | /* Make a binary operation by properly ordering the operands and |
109 | seeing if the expression folds. */ | |
110 | ||
111 | rtx | |
112 | simplify_gen_binary (code, mode, op0, op1) | |
113 | enum rtx_code code; | |
114 | enum machine_mode mode; | |
115 | rtx op0, op1; | |
116 | { | |
117 | rtx tem; | |
118 | ||
119 | /* Put complex operands first and constants second if commutative. */ | |
120 | if (GET_RTX_CLASS (code) == 'c' | |
09f800b9 | 121 | && swap_commutative_operands_p (op0, op1)) |
af21a202 | 122 | tem = op0, op0 = op1, op1 = tem; |
123 | ||
124 | /* If this simplifies, do it. */ | |
125 | tem = simplify_binary_operation (code, mode, op0, op1); | |
126 | ||
127 | if (tem) | |
128 | return tem; | |
129 | ||
130 | /* Handle addition and subtraction of CONST_INT specially. Otherwise, | |
131 | just form the operation. */ | |
132 | ||
133 | if (code == PLUS && GET_CODE (op1) == CONST_INT | |
134 | && GET_MODE (op0) != VOIDmode) | |
135 | return plus_constant (op0, INTVAL (op1)); | |
136 | else if (code == MINUS && GET_CODE (op1) == CONST_INT | |
137 | && GET_MODE (op0) != VOIDmode) | |
138 | return plus_constant (op0, - INTVAL (op1)); | |
139 | else | |
140 | return gen_rtx_fmt_ee (code, mode, op0, op1); | |
141 | } | |
142 | \f | |
6f2c2456 | 143 | /* If X is a MEM referencing the constant pool, return the real value. |
02cd84cd | 144 | Otherwise return X. */ |
c0876b6c | 145 | rtx |
02cd84cd | 146 | avoid_constant_pool_reference (x) |
147 | rtx x; | |
148 | { | |
6f2c2456 | 149 | rtx c, addr; |
150 | enum machine_mode cmode; | |
151 | ||
02cd84cd | 152 | if (GET_CODE (x) != MEM) |
153 | return x; | |
6f2c2456 | 154 | addr = XEXP (x, 0); |
155 | ||
156 | if (GET_CODE (addr) != SYMBOL_REF | |
157 | || ! CONSTANT_POOL_ADDRESS_P (addr)) | |
02cd84cd | 158 | return x; |
6f2c2456 | 159 | |
160 | c = get_pool_constant (addr); | |
161 | cmode = get_pool_mode (addr); | |
162 | ||
163 | /* If we're accessing the constant in a different mode than it was | |
164 | originally stored, attempt to fix that up via subreg simplifications. | |
165 | If that fails we have no choice but to return the original memory. */ | |
166 | if (cmode != GET_MODE (x)) | |
167 | { | |
168 | c = simplify_subreg (GET_MODE (x), c, cmode, 0); | |
169 | return c ? c : x; | |
170 | } | |
171 | ||
172 | return c; | |
02cd84cd | 173 | } |
174 | \f | |
53cb61a7 | 175 | /* Make a unary operation by first seeing if it folds and otherwise making |
176 | the specified operation. */ | |
177 | ||
178 | rtx | |
179 | simplify_gen_unary (code, mode, op, op_mode) | |
180 | enum rtx_code code; | |
181 | enum machine_mode mode; | |
182 | rtx op; | |
183 | enum machine_mode op_mode; | |
184 | { | |
185 | rtx tem; | |
186 | ||
187 | /* If this simplifies, use it. */ | |
188 | if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0) | |
189 | return tem; | |
190 | ||
191 | return gen_rtx_fmt_e (code, mode, op); | |
192 | } | |
193 | ||
194 | /* Likewise for ternary operations. */ | |
195 | ||
196 | rtx | |
197 | simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2) | |
198 | enum rtx_code code; | |
199 | enum machine_mode mode, op0_mode; | |
200 | rtx op0, op1, op2; | |
201 | { | |
202 | rtx tem; | |
203 | ||
204 | /* If this simplifies, use it. */ | |
205 | if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode, | |
206 | op0, op1, op2))) | |
207 | return tem; | |
208 | ||
209 | return gen_rtx_fmt_eee (code, mode, op0, op1, op2); | |
210 | } | |
211 | \f | |
e78c6706 | 212 | /* Likewise, for relational operations. |
213 | CMP_MODE specifies mode comparison is done in. | |
214 | */ | |
53cb61a7 | 215 | |
216 | rtx | |
e78c6706 | 217 | simplify_gen_relational (code, mode, cmp_mode, op0, op1) |
53cb61a7 | 218 | enum rtx_code code; |
219 | enum machine_mode mode; | |
e78c6706 | 220 | enum machine_mode cmp_mode; |
53cb61a7 | 221 | rtx op0, op1; |
222 | { | |
223 | rtx tem; | |
224 | ||
e78c6706 | 225 | if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0) |
53cb61a7 | 226 | return tem; |
227 | ||
228 | /* Put complex operands first and constants second. */ | |
09f800b9 | 229 | if (swap_commutative_operands_p (op0, op1)) |
53cb61a7 | 230 | tem = op0, op0 = op1, op1 = tem, code = swap_condition (code); |
231 | ||
232 | return gen_rtx_fmt_ee (code, mode, op0, op1); | |
233 | } | |
234 | \f | |
235 | /* Replace all occurrences of OLD in X with NEW and try to simplify the | |
236 | resulting RTX. Return a new RTX which is as simplified as possible. */ | |
237 | ||
rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  /* Dispatch on the rtx class so each operand can be substituted and
     the result re-simplified through the matching simplify_gen_* entry.  */
  switch (GET_RTX_CLASS (code))
    {
    case '1':
      /* Unary operation: substitute in the single operand, preserving
	 its original mode for the re-simplification.  */
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      /* Binary (and commutative binary) operation: recurse into both
	 operands and rebuild.  */
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      /* Comparison: the comparison mode is taken from whichever operand
	 has a non-VOID mode, checked again after substitution since the
	 replacement may have changed the operands' modes.  */
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	return
	  simplify_gen_relational (code, mode,
				   (op_mode != VOIDmode
				    ? op_mode
				    : GET_MODE (op0) != VOIDmode
				    ? GET_MODE (op0)
				    : GET_MODE (op1)),
				   op0, op1);
      }

    case '3':
    case 'b':
      /* Ternary (and bit-field) operation: substitute in all three
	 operands; the first operand's mode guides the rebuild.  */
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  /* Keep the original X when the subreg does not simplify.  */
	  if (exp)
	   x = exp;
	}
      return x;

    default:
      /* A MEM gets its address substituted; _nv means no validation of
	 the resulting address is done here.  Anything else is returned
	 unchanged.  */
      if (GET_CODE (x) == MEM)
	return
	  replace_equiv_address_nv (x,
				    simplify_replace_rtx (XEXP (x, 0),
							  old, new));

      return x;
    }
  /* Not reached: every switch case returns.  */
  return x;
}
331 | \f | |
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Subroutine of simplify_unary_operation, called via do_float_handler.
   Handles simplification of unary ops on floating point values.  */
struct simplify_unary_real_args
{
  rtx operand;			/* Input: the CONST_DOUBLE operand.  */
  rtx result;			/* Output: folded rtx, or 0 if none.  */
  enum machine_mode mode;	/* Mode of the desired result.  */
  enum rtx_code code;		/* The unary operation to perform.  */
  bool want_integer;		/* True for float->integer conversions.  */
};
/* Absolute value of a REAL_VALUE_TYPE, via negate-if-negative.  */
#define REAL_VALUE_ABS(d_) \
  (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))

static void
simplify_unary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE d;

  struct simplify_unary_real_args *args =
    (struct simplify_unary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);

  if (args->want_integer)
    {
      /* Float-to-integer conversion: produce a CONST_INT truncated to
	 the target mode.  */
      HOST_WIDE_INT i;

      switch (args->code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
    }
  else
    {
      /* Float-to-float operation: compute a new real value and wrap it
	 in a CONST_DOUBLE of the result mode.  */
      switch (args->code)
	{
	case SQRT:
	  /* We don't attempt to optimize this.  */
	  args->result = 0;
	  return;

	case ABS:	      d = REAL_VALUE_ABS (d);			break;
	case NEG:	      d = REAL_VALUE_NEGATE (d);		break;
	case FLOAT_TRUNCATE:  d = real_value_truncate (args->mode, d);	break;
	case FLOAT_EXTEND:    /* All this does is change the mode.  */	break;
	case FIX:	      d = REAL_VALUE_RNDZINT (d);		break;
	case UNSIGNED_FIX:    d = REAL_VALUE_UNSIGNED_RNDZINT (d);	break;
	default:
	  abort ();
	}
      args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
    }
}
#endif
392 | ||
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  /* Look through constant-pool references so a pooled constant folds
     the same way a bare constant would.  */
  rtx trueop = avoid_constant_pool_reference (op);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  /* Fold FLOAT of an integer constant: convert the (low, high) pair to
     a real value in MODE.  */
  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      /* Host-double fallback: build the value from the halves by hand,
	 taking care with negative (two's complement) inputs.  */
      if (hv < 0)
	{
	  d = (double) (~ hv);
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) (~ lv);
	  d = (- d - 1.0);
	}
      else
	{
	  d = (double) hv;
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) lv;
	}
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  /* Fold UNSIGNED_FLOAT of an integer constant similarly, interpreting
     the bits as unsigned.  */
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	/* Mask down to the operand mode's width.  */
	hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else

      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
	    * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif

  /* Fold a unary operation on a CONST_INT that fits in one wide int.  */
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      register HOST_WIDE_INT arg0 = INTVAL (trueop);
      register HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    /* Keep only the bits of the operand mode.  */
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      /* Mask to the operand mode, then propagate its sign bit.  */
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  /* Floating-point codes do not apply to a CONST_INT operand.  */
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode && width <= HOST_BITS_PER_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      /* Split the operand into its (low, high) halves.  */
      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  /* Number of the lowest set bit, or 0 if the value is zero;
	     search the high half only when the low half is all zero.  */
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      /* Mask to the operand mode, sign-propagate within the low
		 half, then extend the sign into the high half.  */
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  /* Float operand, float result: fold via simplify_unary_real under the
     FP trap handler.  */
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = false;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
	return args.result;

      /* The FP operation trapped; do not fold.  */
      return 0;
    }

  /* Float operand, integer result (FIX and friends): same helper with
     want_integer set.  */
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = true;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
	return args.result;

      /* The FP operation trapped; do not fold.  */
      return 0;
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  /* ptr_mode -> Pmode extension of a constant or known-pointer
	     subreg can be done by convert_memory_address.  */
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  /* Same pointer-extension case, for targets that zero-extend
	     pointers.  */
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
755 | \f | |
cc190096 | 756 | #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC) |
757 | /* Subroutine of simplify_binary_operation, called via do_float_handler. | |
758 | Handles simplification of binary ops on floating point values. */ | |
759 | struct simplify_binary_real_args | |
760 | { | |
761 | rtx trueop0, trueop1; | |
762 | rtx result; | |
763 | enum rtx_code code; | |
764 | enum machine_mode mode; | |
765 | }; | |
766 | ||
767 | static void | |
768 | simplify_binary_real (p) | |
769 | PTR p; | |
770 | { | |
771 | REAL_VALUE_TYPE f0, f1, value; | |
772 | struct simplify_binary_real_args *args = | |
773 | (struct simplify_binary_real_args *) p; | |
774 | ||
775 | REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0); | |
776 | REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1); | |
777 | f0 = real_value_truncate (args->mode, f0); | |
778 | f1 = real_value_truncate (args->mode, f1); | |
779 | ||
780 | #ifdef REAL_ARITHMETIC | |
781 | #ifndef REAL_INFINITY | |
782 | if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0)) | |
783 | { | |
784 | args->result = 0; | |
785 | return; | |
786 | } | |
787 | #endif | |
788 | REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1); | |
789 | #else | |
790 | switch (args->code) | |
791 | { | |
792 | case PLUS: | |
793 | value = f0 + f1; | |
794 | break; | |
795 | case MINUS: | |
796 | value = f0 - f1; | |
797 | break; | |
798 | case MULT: | |
799 | value = f0 * f1; | |
800 | break; | |
801 | case DIV: | |
802 | #ifndef REAL_INFINITY | |
803 | if (f1 == 0) | |
804 | return 0; | |
805 | #endif | |
806 | value = f0 / f1; | |
807 | break; | |
808 | case SMIN: | |
809 | value = MIN (f0, f1); | |
810 | break; | |
811 | case SMAX: | |
812 | value = MAX (f0, f1); | |
813 | break; | |
814 | default: | |
815 | abort (); | |
816 | } | |
817 | #endif | |
818 | ||
819 | value = real_value_truncate (args->mode, value); | |
820 | args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode); | |
821 | } | |
822 | #endif | |
823 | ||
/* Another subroutine called via do_float_handler.  This one tests
   the floating point value given against 2. and -1.  */
struct simplify_binary_is2orm1_args
{
  rtx value;	/* Input: the CONST_DOUBLE to test.  */
  bool is_2;	/* Output: value equals 2.0.  */
  bool is_m1;	/* Output: value equals -1.0.  */
};

static void
simplify_binary_is2orm1 (p)
     PTR p;
{
  REAL_VALUE_TYPE d;
  struct simplify_binary_is2orm1_args *args =
    (struct simplify_binary_is2orm1_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
  /* dconst2 / dconstm1 are the globally shared constants 2.0 and -1.0.  */
  args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
  args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
}
845 | ||
af21a202 | 846 | /* Simplify a binary operation CODE with result mode MODE, operating on OP0 |
847 | and OP1. Return 0 if no simplification is possible. | |
848 | ||
849 | Don't use this for relational operations such as EQ or LT. | |
850 | Use simplify_relational_operation instead. */ | |
af21a202 | 851 | rtx |
852 | simplify_binary_operation (code, mode, op0, op1) | |
853 | enum rtx_code code; | |
854 | enum machine_mode mode; | |
855 | rtx op0, op1; | |
856 | { | |
857 | register HOST_WIDE_INT arg0, arg1, arg0s, arg1s; | |
858 | HOST_WIDE_INT val; | |
02e7a332 | 859 | unsigned int width = GET_MODE_BITSIZE (mode); |
af21a202 | 860 | rtx tem; |
02cd84cd | 861 | rtx trueop0 = avoid_constant_pool_reference (op0); |
862 | rtx trueop1 = avoid_constant_pool_reference (op1); | |
af21a202 | 863 | |
864 | /* Relational operations don't work here. We must know the mode | |
865 | of the operands in order to do the comparison correctly. | |
866 | Assuming a full word can give incorrect results. | |
867 | Consider comparing 128 with -128 in QImode. */ | |
868 | ||
869 | if (GET_RTX_CLASS (code) == '<') | |
870 | abort (); | |
871 | ||
02cd84cd | 872 | /* Make sure the constant is second. */ |
873 | if (GET_RTX_CLASS (code) == 'c' | |
874 | && swap_commutative_operands_p (trueop0, trueop1)) | |
875 | { | |
876 | tem = op0, op0 = op1, op1 = tem; | |
877 | tem = trueop0, trueop0 = trueop1, trueop1 = tem; | |
878 | } | |
879 | ||
af21a202 | 880 | #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC) |
881 | if (GET_MODE_CLASS (mode) == MODE_FLOAT | |
02cd84cd | 882 | && GET_CODE (trueop0) == CONST_DOUBLE |
883 | && GET_CODE (trueop1) == CONST_DOUBLE | |
af21a202 | 884 | && mode == GET_MODE (op0) && mode == GET_MODE (op1)) |
885 | { | |
cc190096 | 886 | struct simplify_binary_real_args args; |
887 | args.trueop0 = trueop0; | |
888 | args.trueop1 = trueop1; | |
889 | args.mode = mode; | |
890 | args.code = code; | |
891 | ||
892 | if (do_float_handler (simplify_binary_real, (PTR) &args)) | |
893 | return args.result; | |
894 | return 0; | |
af21a202 | 895 | } |
896 | #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */ | |
897 | ||
898 | /* We can fold some multi-word operations. */ | |
899 | if (GET_MODE_CLASS (mode) == MODE_INT | |
900 | && width == HOST_BITS_PER_WIDE_INT * 2 | |
02cd84cd | 901 | && (GET_CODE (trueop0) == CONST_DOUBLE |
902 | || GET_CODE (trueop0) == CONST_INT) | |
903 | && (GET_CODE (trueop1) == CONST_DOUBLE | |
904 | || GET_CODE (trueop1) == CONST_INT)) | |
af21a202 | 905 | { |
a8fb076f | 906 | unsigned HOST_WIDE_INT l1, l2, lv; |
907 | HOST_WIDE_INT h1, h2, hv; | |
af21a202 | 908 | |
02cd84cd | 909 | if (GET_CODE (trueop0) == CONST_DOUBLE) |
910 | l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0); | |
af21a202 | 911 | else |
02cd84cd | 912 | l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1); |
af21a202 | 913 | |
02cd84cd | 914 | if (GET_CODE (trueop1) == CONST_DOUBLE) |
915 | l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1); | |
af21a202 | 916 | else |
02cd84cd | 917 | l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2); |
af21a202 | 918 | |
919 | switch (code) | |
920 | { | |
921 | case MINUS: | |
922 | /* A - B == A + (-B). */ | |
923 | neg_double (l2, h2, &lv, &hv); | |
924 | l2 = lv, h2 = hv; | |
925 | ||
926 | /* .. fall through ... */ | |
927 | ||
928 | case PLUS: | |
929 | add_double (l1, h1, l2, h2, &lv, &hv); | |
930 | break; | |
931 | ||
932 | case MULT: | |
933 | mul_double (l1, h1, l2, h2, &lv, &hv); | |
934 | break; | |
935 | ||
936 | case DIV: case MOD: case UDIV: case UMOD: | |
937 | /* We'd need to include tree.h to do this and it doesn't seem worth | |
938 | it. */ | |
939 | return 0; | |
940 | ||
941 | case AND: | |
942 | lv = l1 & l2, hv = h1 & h2; | |
943 | break; | |
944 | ||
945 | case IOR: | |
946 | lv = l1 | l2, hv = h1 | h2; | |
947 | break; | |
948 | ||
949 | case XOR: | |
950 | lv = l1 ^ l2, hv = h1 ^ h2; | |
951 | break; | |
952 | ||
953 | case SMIN: | |
954 | if (h1 < h2 | |
955 | || (h1 == h2 | |
956 | && ((unsigned HOST_WIDE_INT) l1 | |
957 | < (unsigned HOST_WIDE_INT) l2))) | |
958 | lv = l1, hv = h1; | |
959 | else | |
960 | lv = l2, hv = h2; | |
961 | break; | |
962 | ||
963 | case SMAX: | |
964 | if (h1 > h2 | |
965 | || (h1 == h2 | |
966 | && ((unsigned HOST_WIDE_INT) l1 | |
967 | > (unsigned HOST_WIDE_INT) l2))) | |
968 | lv = l1, hv = h1; | |
969 | else | |
970 | lv = l2, hv = h2; | |
971 | break; | |
972 | ||
973 | case UMIN: | |
974 | if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2 | |
975 | || (h1 == h2 | |
976 | && ((unsigned HOST_WIDE_INT) l1 | |
977 | < (unsigned HOST_WIDE_INT) l2))) | |
978 | lv = l1, hv = h1; | |
979 | else | |
980 | lv = l2, hv = h2; | |
981 | break; | |
982 | ||
983 | case UMAX: | |
984 | if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2 | |
985 | || (h1 == h2 | |
986 | && ((unsigned HOST_WIDE_INT) l1 | |
987 | > (unsigned HOST_WIDE_INT) l2))) | |
988 | lv = l1, hv = h1; | |
989 | else | |
990 | lv = l2, hv = h2; | |
991 | break; | |
992 | ||
993 | case LSHIFTRT: case ASHIFTRT: | |
994 | case ASHIFT: | |
995 | case ROTATE: case ROTATERT: | |
996 | #ifdef SHIFT_COUNT_TRUNCATED | |
997 | if (SHIFT_COUNT_TRUNCATED) | |
998 | l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0; | |
999 | #endif | |
1000 | ||
a8fb076f | 1001 | if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode)) |
af21a202 | 1002 | return 0; |
1003 | ||
1004 | if (code == LSHIFTRT || code == ASHIFTRT) | |
1005 | rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, | |
1006 | code == ASHIFTRT); | |
1007 | else if (code == ASHIFT) | |
1008 | lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1); | |
1009 | else if (code == ROTATE) | |
1010 | lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); | |
1011 | else /* code == ROTATERT */ | |
1012 | rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); | |
1013 | break; | |
1014 | ||
1015 | default: | |
1016 | return 0; | |
1017 | } | |
1018 | ||
1019 | return immed_double_const (lv, hv, mode); | |
1020 | } | |
1021 | ||
1022 | if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT | |
1023 | || width > HOST_BITS_PER_WIDE_INT || width == 0) | |
1024 | { | |
1025 | /* Even if we can't compute a constant result, | |
1026 | there are some cases worth simplifying. */ | |
1027 | ||
1028 | switch (code) | |
1029 | { | |
1030 | case PLUS: | |
1031 | /* In IEEE floating point, x+0 is not the same as x. Similarly | |
1032 | for the other optimizations below. */ | |
1033 | if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT | |
7f3be425 | 1034 | && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations) |
af21a202 | 1035 | break; |
1036 | ||
02cd84cd | 1037 | if (trueop1 == CONST0_RTX (mode)) |
af21a202 | 1038 | return op0; |
1039 | ||
1040 | /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */ | |
1041 | if (GET_CODE (op0) == NEG) | |
1042 | return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0)); | |
1043 | else if (GET_CODE (op1) == NEG) | |
1044 | return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0)); | |
1045 | ||
658d295f | 1046 | /* (~a) + 1 -> -a */ |
1047 | if (INTEGRAL_MODE_P (mode) | |
1048 | && GET_CODE (op0) == NOT | |
02cd84cd | 1049 | && trueop1 == const1_rtx) |
658d295f | 1050 | return gen_rtx_NEG (mode, XEXP (op0, 0)); |
1051 | ||
af21a202 | 1052 | /* Handle both-operands-constant cases. We can only add |
1053 | CONST_INTs to constants since the sum of relocatable symbols | |
1054 | can't be handled by most assemblers. Don't add CONST_INT | |
1055 | to CONST_INT since overflow won't be computed properly if wider | |
1056 | than HOST_BITS_PER_WIDE_INT. */ | |
1057 | ||
1058 | if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode | |
1059 | && GET_CODE (op1) == CONST_INT) | |
1060 | return plus_constant (op0, INTVAL (op1)); | |
1061 | else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode | |
1062 | && GET_CODE (op0) == CONST_INT) | |
1063 | return plus_constant (op1, INTVAL (op0)); | |
1064 | ||
1065 | /* See if this is something like X * C - X or vice versa or | |
1066 | if the multiplication is written as a shift. If so, we can | |
1067 | distribute and make a new multiply, shift, or maybe just | |
1068 | have X (if C is 2 in the example above). But don't make | |
1069 | real multiply if we didn't have one before. */ | |
1070 | ||
1071 | if (! FLOAT_MODE_P (mode)) | |
1072 | { | |
1073 | HOST_WIDE_INT coeff0 = 1, coeff1 = 1; | |
1074 | rtx lhs = op0, rhs = op1; | |
1075 | int had_mult = 0; | |
1076 | ||
1077 | if (GET_CODE (lhs) == NEG) | |
1078 | coeff0 = -1, lhs = XEXP (lhs, 0); | |
1079 | else if (GET_CODE (lhs) == MULT | |
1080 | && GET_CODE (XEXP (lhs, 1)) == CONST_INT) | |
1081 | { | |
1082 | coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0); | |
1083 | had_mult = 1; | |
1084 | } | |
1085 | else if (GET_CODE (lhs) == ASHIFT | |
1086 | && GET_CODE (XEXP (lhs, 1)) == CONST_INT | |
1087 | && INTVAL (XEXP (lhs, 1)) >= 0 | |
1088 | && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1089 | { | |
1090 | coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); | |
1091 | lhs = XEXP (lhs, 0); | |
1092 | } | |
1093 | ||
1094 | if (GET_CODE (rhs) == NEG) | |
1095 | coeff1 = -1, rhs = XEXP (rhs, 0); | |
1096 | else if (GET_CODE (rhs) == MULT | |
1097 | && GET_CODE (XEXP (rhs, 1)) == CONST_INT) | |
1098 | { | |
1099 | coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0); | |
1100 | had_mult = 1; | |
1101 | } | |
1102 | else if (GET_CODE (rhs) == ASHIFT | |
1103 | && GET_CODE (XEXP (rhs, 1)) == CONST_INT | |
1104 | && INTVAL (XEXP (rhs, 1)) >= 0 | |
1105 | && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1106 | { | |
1107 | coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)); | |
1108 | rhs = XEXP (rhs, 0); | |
1109 | } | |
1110 | ||
1111 | if (rtx_equal_p (lhs, rhs)) | |
1112 | { | |
1113 | tem = simplify_gen_binary (MULT, mode, lhs, | |
1114 | GEN_INT (coeff0 + coeff1)); | |
1115 | return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem; | |
1116 | } | |
1117 | } | |
1118 | ||
1119 | /* If one of the operands is a PLUS or a MINUS, see if we can | |
1120 | simplify this by the associative law. | |
1121 | Don't use the associative law for floating point. | |
1122 | The inaccuracy makes it nonassociative, | |
1123 | and subtle programs can break if operations are associated. */ | |
1124 | ||
1125 | if (INTEGRAL_MODE_P (mode) | |
1126 | && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS | |
1127 | || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS) | |
1128 | && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0) | |
1129 | return tem; | |
1130 | break; | |
1131 | ||
1132 | case COMPARE: | |
1133 | #ifdef HAVE_cc0 | |
1134 | /* Convert (compare FOO (const_int 0)) to FOO unless we aren't | |
1135 | using cc0, in which case we want to leave it as a COMPARE | |
1136 | so we can distinguish it from a register-register-copy. | |
1137 | ||
1138 | In IEEE floating point, x-0 is not the same as x. */ | |
1139 | ||
1140 | if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT | |
7f3be425 | 1141 | || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) |
02cd84cd | 1142 | && trueop1 == CONST0_RTX (mode)) |
af21a202 | 1143 | return op0; |
e2496245 | 1144 | #endif |
1145 | ||
1146 | /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */ | |
1147 | if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT) | |
1148 | || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU)) | |
1149 | && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx) | |
1150 | { | |
1151 | rtx xop00 = XEXP (op0, 0); | |
1152 | rtx xop10 = XEXP (op1, 0); | |
1153 | ||
1154 | #ifdef HAVE_cc0 | |
1155 | if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0) | |
af21a202 | 1156 | #else |
e2496245 | 1157 | if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG |
1158 | && GET_MODE (xop00) == GET_MODE (xop10) | |
1159 | && REGNO (xop00) == REGNO (xop10) | |
1160 | && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC | |
1161 | && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC) | |
af21a202 | 1162 | #endif |
e2496245 | 1163 | return xop00; |
1164 | } | |
1165 | ||
1166 | break; | |
af21a202 | 1167 | case MINUS: |
1168 | /* None of these optimizations can be done for IEEE | |
1169 | floating point. */ | |
1170 | if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT | |
7f3be425 | 1171 | && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations) |
af21a202 | 1172 | break; |
1173 | ||
1174 | /* We can't assume x-x is 0 even with non-IEEE floating point, | |
1175 | but since it is zero except in very strange circumstances, we | |
7f3be425 | 1176 | will treat it as zero with -funsafe-math-optimizations. */ |
02cd84cd | 1177 | if (rtx_equal_p (trueop0, trueop1) |
af21a202 | 1178 | && ! side_effects_p (op0) |
7f3be425 | 1179 | && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)) |
af21a202 | 1180 | return CONST0_RTX (mode); |
1181 | ||
1182 | /* Change subtraction from zero into negation. */ | |
02cd84cd | 1183 | if (trueop0 == CONST0_RTX (mode)) |
af21a202 | 1184 | return gen_rtx_NEG (mode, op1); |
1185 | ||
1186 | /* (-1 - a) is ~a. */ | |
02cd84cd | 1187 | if (trueop0 == constm1_rtx) |
af21a202 | 1188 | return gen_rtx_NOT (mode, op1); |
1189 | ||
1190 | /* Subtracting 0 has no effect. */ | |
02cd84cd | 1191 | if (trueop1 == CONST0_RTX (mode)) |
af21a202 | 1192 | return op0; |
1193 | ||
1194 | /* See if this is something like X * C - X or vice versa or | |
1195 | if the multiplication is written as a shift. If so, we can | |
1196 | distribute and make a new multiply, shift, or maybe just | |
1197 | have X (if C is 2 in the example above). But don't make | |
1198 | real multiply if we didn't have one before. */ | |
1199 | ||
1200 | if (! FLOAT_MODE_P (mode)) | |
1201 | { | |
1202 | HOST_WIDE_INT coeff0 = 1, coeff1 = 1; | |
1203 | rtx lhs = op0, rhs = op1; | |
1204 | int had_mult = 0; | |
1205 | ||
1206 | if (GET_CODE (lhs) == NEG) | |
1207 | coeff0 = -1, lhs = XEXP (lhs, 0); | |
1208 | else if (GET_CODE (lhs) == MULT | |
1209 | && GET_CODE (XEXP (lhs, 1)) == CONST_INT) | |
1210 | { | |
1211 | coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0); | |
1212 | had_mult = 1; | |
1213 | } | |
1214 | else if (GET_CODE (lhs) == ASHIFT | |
1215 | && GET_CODE (XEXP (lhs, 1)) == CONST_INT | |
1216 | && INTVAL (XEXP (lhs, 1)) >= 0 | |
1217 | && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1218 | { | |
1219 | coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); | |
1220 | lhs = XEXP (lhs, 0); | |
1221 | } | |
1222 | ||
1223 | if (GET_CODE (rhs) == NEG) | |
1224 | coeff1 = - 1, rhs = XEXP (rhs, 0); | |
1225 | else if (GET_CODE (rhs) == MULT | |
1226 | && GET_CODE (XEXP (rhs, 1)) == CONST_INT) | |
1227 | { | |
1228 | coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0); | |
1229 | had_mult = 1; | |
1230 | } | |
1231 | else if (GET_CODE (rhs) == ASHIFT | |
1232 | && GET_CODE (XEXP (rhs, 1)) == CONST_INT | |
1233 | && INTVAL (XEXP (rhs, 1)) >= 0 | |
1234 | && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1235 | { | |
1236 | coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)); | |
1237 | rhs = XEXP (rhs, 0); | |
1238 | } | |
1239 | ||
1240 | if (rtx_equal_p (lhs, rhs)) | |
1241 | { | |
1242 | tem = simplify_gen_binary (MULT, mode, lhs, | |
1243 | GEN_INT (coeff0 - coeff1)); | |
1244 | return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem; | |
1245 | } | |
1246 | } | |
1247 | ||
1248 | /* (a - (-b)) -> (a + b). */ | |
1249 | if (GET_CODE (op1) == NEG) | |
1250 | return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0)); | |
1251 | ||
1252 | /* If one of the operands is a PLUS or a MINUS, see if we can | |
1253 | simplify this by the associative law. | |
1254 | Don't use the associative law for floating point. | |
1255 | The inaccuracy makes it nonassociative, | |
1256 | and subtle programs can break if operations are associated. */ | |
1257 | ||
1258 | if (INTEGRAL_MODE_P (mode) | |
1259 | && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS | |
1260 | || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS) | |
1261 | && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0) | |
1262 | return tem; | |
1263 | ||
1264 | /* Don't let a relocatable value get a negative coeff. */ | |
1265 | if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode) | |
1266 | return plus_constant (op0, - INTVAL (op1)); | |
1267 | ||
1268 | /* (x - (x & y)) -> (x & ~y) */ | |
1269 | if (GET_CODE (op1) == AND) | |
1270 | { | |
1271 | if (rtx_equal_p (op0, XEXP (op1, 0))) | |
1272 | return simplify_gen_binary (AND, mode, op0, | |
1273 | gen_rtx_NOT (mode, XEXP (op1, 1))); | |
1274 | if (rtx_equal_p (op0, XEXP (op1, 1))) | |
1275 | return simplify_gen_binary (AND, mode, op0, | |
1276 | gen_rtx_NOT (mode, XEXP (op1, 0))); | |
1277 | } | |
1278 | break; | |
1279 | ||
1280 | case MULT: | |
02cd84cd | 1281 | if (trueop1 == constm1_rtx) |
af21a202 | 1282 | { |
1283 | tem = simplify_unary_operation (NEG, mode, op0, mode); | |
1284 | ||
1285 | return tem ? tem : gen_rtx_NEG (mode, op0); | |
1286 | } | |
1287 | ||
1288 | /* In IEEE floating point, x*0 is not always 0. */ | |
1289 | if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT | |
7f3be425 | 1290 | || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) |
02cd84cd | 1291 | && trueop1 == CONST0_RTX (mode) |
af21a202 | 1292 | && ! side_effects_p (op0)) |
1293 | return op1; | |
1294 | ||
1295 | /* In IEEE floating point, x*1 is not equivalent to x for nans. | |
1296 | However, ANSI says we can drop signals, | |
1297 | so we can do this anyway. */ | |
02cd84cd | 1298 | if (trueop1 == CONST1_RTX (mode)) |
af21a202 | 1299 | return op0; |
1300 | ||
1301 | /* Convert multiply by constant power of two into shift unless | |
1302 | we are still generating RTL. This test is a kludge. */ | |
02cd84cd | 1303 | if (GET_CODE (trueop1) == CONST_INT |
1304 | && (val = exact_log2 (INTVAL (trueop1))) >= 0 | |
af21a202 | 1305 | /* If the mode is larger than the host word size, and the |
1306 | uppermost bit is set, then this isn't a power of two due | |
1307 | to implicit sign extension. */ | |
1308 | && (width <= HOST_BITS_PER_WIDE_INT | |
1309 | || val != HOST_BITS_PER_WIDE_INT - 1) | |
1310 | && ! rtx_equal_function_value_matters) | |
1311 | return gen_rtx_ASHIFT (mode, op0, GEN_INT (val)); | |
1312 | ||
02cd84cd | 1313 | if (GET_CODE (trueop1) == CONST_DOUBLE |
1314 | && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT) | |
af21a202 | 1315 | { |
cc190096 | 1316 | struct simplify_binary_is2orm1_args args; |
af21a202 | 1317 | |
cc190096 | 1318 | args.value = trueop1; |
1319 | if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args)) | |
af21a202 | 1320 | return 0; |
1321 | ||
af21a202 | 1322 | /* x*2 is x+x and x*(-1) is -x */ |
cc190096 | 1323 | if (args.is_2 && GET_MODE (op0) == mode) |
af21a202 | 1324 | return gen_rtx_PLUS (mode, op0, copy_rtx (op0)); |
1325 | ||
cc190096 | 1326 | else if (args.is_m1 && GET_MODE (op0) == mode) |
af21a202 | 1327 | return gen_rtx_NEG (mode, op0); |
1328 | } | |
1329 | break; | |
1330 | ||
1331 | case IOR: | |
02cd84cd | 1332 | if (trueop1 == const0_rtx) |
af21a202 | 1333 | return op0; |
02cd84cd | 1334 | if (GET_CODE (trueop1) == CONST_INT |
1335 | && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) | |
1336 | == GET_MODE_MASK (mode))) | |
af21a202 | 1337 | return op1; |
02cd84cd | 1338 | if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
af21a202 | 1339 | return op0; |
1340 | /* A | (~A) -> -1 */ | |
1341 | if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) | |
1342 | || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) | |
1343 | && ! side_effects_p (op0) | |
1344 | && GET_MODE_CLASS (mode) != MODE_CC) | |
1345 | return constm1_rtx; | |
1346 | break; | |
1347 | ||
1348 | case XOR: | |
02cd84cd | 1349 | if (trueop1 == const0_rtx) |
af21a202 | 1350 | return op0; |
02cd84cd | 1351 | if (GET_CODE (trueop1) == CONST_INT |
1352 | && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) | |
1353 | == GET_MODE_MASK (mode))) | |
af21a202 | 1354 | return gen_rtx_NOT (mode, op0); |
02cd84cd | 1355 | if (trueop0 == trueop1 && ! side_effects_p (op0) |
af21a202 | 1356 | && GET_MODE_CLASS (mode) != MODE_CC) |
1357 | return const0_rtx; | |
1358 | break; | |
1359 | ||
1360 | case AND: | |
02cd84cd | 1361 | if (trueop1 == const0_rtx && ! side_effects_p (op0)) |
af21a202 | 1362 | return const0_rtx; |
02cd84cd | 1363 | if (GET_CODE (trueop1) == CONST_INT |
1364 | && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) | |
1365 | == GET_MODE_MASK (mode))) | |
af21a202 | 1366 | return op0; |
02cd84cd | 1367 | if (trueop0 == trueop1 && ! side_effects_p (op0) |
af21a202 | 1368 | && GET_MODE_CLASS (mode) != MODE_CC) |
1369 | return op0; | |
1370 | /* A & (~A) -> 0 */ | |
1371 | if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) | |
1372 | || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) | |
1373 | && ! side_effects_p (op0) | |
1374 | && GET_MODE_CLASS (mode) != MODE_CC) | |
1375 | return const0_rtx; | |
1376 | break; | |
1377 | ||
1378 | case UDIV: | |
1379 | /* Convert divide by power of two into shift (divide by 1 handled | |
1380 | below). */ | |
02cd84cd | 1381 | if (GET_CODE (trueop1) == CONST_INT |
1382 | && (arg1 = exact_log2 (INTVAL (trueop1))) > 0) | |
af21a202 | 1383 | return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1)); |
1384 | ||
1385 | /* ... fall through ... */ | |
1386 | ||
1387 | case DIV: | |
02cd84cd | 1388 | if (trueop1 == CONST1_RTX (mode)) |
af21a202 | 1389 | return op0; |
1390 | ||
1391 | /* In IEEE floating point, 0/x is not always 0. */ | |
1392 | if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT | |
7f3be425 | 1393 | || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) |
02cd84cd | 1394 | && trueop0 == CONST0_RTX (mode) |
af21a202 | 1395 | && ! side_effects_p (op1)) |
1396 | return op0; | |
1397 | ||
1398 | #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC) | |
1399 | /* Change division by a constant into multiplication. Only do | |
7f3be425 | 1400 | this with -funsafe-math-optimizations. */ |
02cd84cd | 1401 | else if (GET_CODE (trueop1) == CONST_DOUBLE |
1402 | && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT | |
1403 | && trueop1 != CONST0_RTX (mode) | |
7f3be425 | 1404 | && flag_unsafe_math_optimizations) |
af21a202 | 1405 | { |
1406 | REAL_VALUE_TYPE d; | |
02cd84cd | 1407 | REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); |
af21a202 | 1408 | |
1409 | if (! REAL_VALUES_EQUAL (d, dconst0)) | |
1410 | { | |
1411 | #if defined (REAL_ARITHMETIC) | |
1412 | REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d); | |
1413 | return gen_rtx_MULT (mode, op0, | |
1414 | CONST_DOUBLE_FROM_REAL_VALUE (d, mode)); | |
1415 | #else | |
1416 | return | |
1417 | gen_rtx_MULT (mode, op0, | |
1418 | CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode)); | |
1419 | #endif | |
1420 | } | |
1421 | } | |
1422 | #endif | |
1423 | break; | |
1424 | ||
1425 | case UMOD: | |
1426 | /* Handle modulus by power of two (mod with 1 handled below). */ | |
02cd84cd | 1427 | if (GET_CODE (trueop1) == CONST_INT |
1428 | && exact_log2 (INTVAL (trueop1)) > 0) | |
af21a202 | 1429 | return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1)); |
1430 | ||
1431 | /* ... fall through ... */ | |
1432 | ||
1433 | case MOD: | |
02cd84cd | 1434 | if ((trueop0 == const0_rtx || trueop1 == const1_rtx) |
af21a202 | 1435 | && ! side_effects_p (op0) && ! side_effects_p (op1)) |
1436 | return const0_rtx; | |
1437 | break; | |
1438 | ||
1439 | case ROTATERT: | |
1440 | case ROTATE: | |
1441 | /* Rotating ~0 always results in ~0. */ | |
02cd84cd | 1442 | if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT |
1443 | && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode) | |
af21a202 | 1444 | && ! side_effects_p (op1)) |
1445 | return op0; | |
1446 | ||
1447 | /* ... fall through ... */ | |
1448 | ||
1449 | case ASHIFT: | |
1450 | case ASHIFTRT: | |
1451 | case LSHIFTRT: | |
02cd84cd | 1452 | if (trueop1 == const0_rtx) |
af21a202 | 1453 | return op0; |
02cd84cd | 1454 | if (trueop0 == const0_rtx && ! side_effects_p (op1)) |
af21a202 | 1455 | return op0; |
1456 | break; | |
1457 | ||
1458 | case SMIN: | |
02cd84cd | 1459 | if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT |
1460 | && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1) | |
af21a202 | 1461 | && ! side_effects_p (op0)) |
1462 | return op1; | |
02cd84cd | 1463 | else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
af21a202 | 1464 | return op0; |
1465 | break; | |
1466 | ||
1467 | case SMAX: | |
02cd84cd | 1468 | if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT |
1469 | && ((unsigned HOST_WIDE_INT) INTVAL (trueop1) | |
af21a202 | 1470 | == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1) |
1471 | && ! side_effects_p (op0)) | |
1472 | return op1; | |
02cd84cd | 1473 | else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
af21a202 | 1474 | return op0; |
1475 | break; | |
1476 | ||
1477 | case UMIN: | |
02cd84cd | 1478 | if (trueop1 == const0_rtx && ! side_effects_p (op0)) |
af21a202 | 1479 | return op1; |
02cd84cd | 1480 | else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
af21a202 | 1481 | return op0; |
1482 | break; | |
1483 | ||
1484 | case UMAX: | |
02cd84cd | 1485 | if (trueop1 == constm1_rtx && ! side_effects_p (op0)) |
af21a202 | 1486 | return op1; |
02cd84cd | 1487 | else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
af21a202 | 1488 | return op0; |
1489 | break; | |
1490 | ||
1491 | default: | |
1492 | abort (); | |
1493 | } | |
1494 | ||
1495 | return 0; | |
1496 | } | |
1497 | ||
1498 | /* Get the integer argument values in two forms: | |
1499 | zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */ | |
1500 | ||
02cd84cd | 1501 | arg0 = INTVAL (trueop0); |
1502 | arg1 = INTVAL (trueop1); | |
af21a202 | 1503 | |
1504 | if (width < HOST_BITS_PER_WIDE_INT) | |
1505 | { | |
1506 | arg0 &= ((HOST_WIDE_INT) 1 << width) - 1; | |
1507 | arg1 &= ((HOST_WIDE_INT) 1 << width) - 1; | |
1508 | ||
1509 | arg0s = arg0; | |
1510 | if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1))) | |
1511 | arg0s |= ((HOST_WIDE_INT) (-1) << width); | |
1512 | ||
1513 | arg1s = arg1; | |
1514 | if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1))) | |
1515 | arg1s |= ((HOST_WIDE_INT) (-1) << width); | |
1516 | } | |
1517 | else | |
1518 | { | |
1519 | arg0s = arg0; | |
1520 | arg1s = arg1; | |
1521 | } | |
1522 | ||
1523 | /* Compute the value of the arithmetic. */ | |
1524 | ||
1525 | switch (code) | |
1526 | { | |
1527 | case PLUS: | |
1528 | val = arg0s + arg1s; | |
1529 | break; | |
1530 | ||
1531 | case MINUS: | |
1532 | val = arg0s - arg1s; | |
1533 | break; | |
1534 | ||
1535 | case MULT: | |
1536 | val = arg0s * arg1s; | |
1537 | break; | |
1538 | ||
1539 | case DIV: | |
194d4e23 | 1540 | if (arg1s == 0 |
1541 | || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
1542 | && arg1s == -1)) | |
af21a202 | 1543 | return 0; |
1544 | val = arg0s / arg1s; | |
1545 | break; | |
1546 | ||
1547 | case MOD: | |
194d4e23 | 1548 | if (arg1s == 0 |
1549 | || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
1550 | && arg1s == -1)) | |
af21a202 | 1551 | return 0; |
1552 | val = arg0s % arg1s; | |
1553 | break; | |
1554 | ||
1555 | case UDIV: | |
194d4e23 | 1556 | if (arg1 == 0 |
1557 | || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
1558 | && arg1s == -1)) | |
af21a202 | 1559 | return 0; |
1560 | val = (unsigned HOST_WIDE_INT) arg0 / arg1; | |
1561 | break; | |
1562 | ||
1563 | case UMOD: | |
194d4e23 | 1564 | if (arg1 == 0 |
1565 | || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
1566 | && arg1s == -1)) | |
af21a202 | 1567 | return 0; |
1568 | val = (unsigned HOST_WIDE_INT) arg0 % arg1; | |
1569 | break; | |
1570 | ||
1571 | case AND: | |
1572 | val = arg0 & arg1; | |
1573 | break; | |
1574 | ||
1575 | case IOR: | |
1576 | val = arg0 | arg1; | |
1577 | break; | |
1578 | ||
1579 | case XOR: | |
1580 | val = arg0 ^ arg1; | |
1581 | break; | |
1582 | ||
1583 | case LSHIFTRT: | |
1584 | /* If shift count is undefined, don't fold it; let the machine do | |
1585 | what it wants. But truncate it if the machine will do that. */ | |
1586 | if (arg1 < 0) | |
1587 | return 0; | |
1588 | ||
1589 | #ifdef SHIFT_COUNT_TRUNCATED | |
1590 | if (SHIFT_COUNT_TRUNCATED) | |
1591 | arg1 %= width; | |
1592 | #endif | |
1593 | ||
1594 | val = ((unsigned HOST_WIDE_INT) arg0) >> arg1; | |
1595 | break; | |
1596 | ||
1597 | case ASHIFT: | |
1598 | if (arg1 < 0) | |
1599 | return 0; | |
1600 | ||
1601 | #ifdef SHIFT_COUNT_TRUNCATED | |
1602 | if (SHIFT_COUNT_TRUNCATED) | |
1603 | arg1 %= width; | |
1604 | #endif | |
1605 | ||
1606 | val = ((unsigned HOST_WIDE_INT) arg0) << arg1; | |
1607 | break; | |
1608 | ||
1609 | case ASHIFTRT: | |
1610 | if (arg1 < 0) | |
1611 | return 0; | |
1612 | ||
1613 | #ifdef SHIFT_COUNT_TRUNCATED | |
1614 | if (SHIFT_COUNT_TRUNCATED) | |
1615 | arg1 %= width; | |
1616 | #endif | |
1617 | ||
1618 | val = arg0s >> arg1; | |
1619 | ||
1620 | /* Bootstrap compiler may not have sign extended the right shift. | |
1621 | Manually extend the sign to insure bootstrap cc matches gcc. */ | |
1622 | if (arg0s < 0 && arg1 > 0) | |
1623 | val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1); | |
1624 | ||
1625 | break; | |
1626 | ||
1627 | case ROTATERT: | |
1628 | if (arg1 < 0) | |
1629 | return 0; | |
1630 | ||
1631 | arg1 %= width; | |
1632 | val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1)) | |
1633 | | (((unsigned HOST_WIDE_INT) arg0) >> arg1)); | |
1634 | break; | |
1635 | ||
1636 | case ROTATE: | |
1637 | if (arg1 < 0) | |
1638 | return 0; | |
1639 | ||
1640 | arg1 %= width; | |
1641 | val = ((((unsigned HOST_WIDE_INT) arg0) << arg1) | |
1642 | | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1))); | |
1643 | break; | |
1644 | ||
1645 | case COMPARE: | |
1646 | /* Do nothing here. */ | |
1647 | return 0; | |
1648 | ||
1649 | case SMIN: | |
1650 | val = arg0s <= arg1s ? arg0s : arg1s; | |
1651 | break; | |
1652 | ||
1653 | case UMIN: | |
1654 | val = ((unsigned HOST_WIDE_INT) arg0 | |
1655 | <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); | |
1656 | break; | |
1657 | ||
1658 | case SMAX: | |
1659 | val = arg0s > arg1s ? arg0s : arg1s; | |
1660 | break; | |
1661 | ||
1662 | case UMAX: | |
1663 | val = ((unsigned HOST_WIDE_INT) arg0 | |
1664 | > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); | |
1665 | break; | |
1666 | ||
1667 | default: | |
1668 | abort (); | |
1669 | } | |
1670 | ||
1671 | val = trunc_int_for_mode (val, mode); | |
1672 | ||
1673 | return GEN_INT (val); | |
1674 | } | |
1675 | \f | |
1676 | /* Simplify a PLUS or MINUS, at least one of whose operands may be another | |
1677 | PLUS or MINUS. | |
1678 | ||
1679 | Rather than test for specific case, we do this by a brute-force method | |
1680 | and do all possible simplifications until no more changes occur. Then | |
1681 | we rebuild the operation. */ | |
1682 | ||
1683 | static rtx | |
1684 | simplify_plus_minus (code, mode, op0, op1) | |
1685 | enum rtx_code code; | |
1686 | enum machine_mode mode; | |
1687 | rtx op0, op1; | |
1688 | { | |
1689 | rtx ops[8]; | |
1690 | int negs[8]; | |
1691 | rtx result, tem; | |
1692 | int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0; | |
1693 | int first = 1, negate = 0, changed; | |
1694 | int i, j; | |
1695 | ||
93d3b7de | 1696 | memset ((char *) ops, 0, sizeof ops); |
af21a202 | 1697 | |
1698 | /* Set up the two operands and then expand them until nothing has been | |
1699 | changed. If we run out of room in our array, give up; this should | |
1700 | almost never happen. */ | |
1701 | ||
1702 | ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS); | |
1703 | ||
1704 | changed = 1; | |
1705 | while (changed) | |
1706 | { | |
1707 | changed = 0; | |
1708 | ||
1709 | for (i = 0; i < n_ops; i++) | |
1710 | switch (GET_CODE (ops[i])) | |
1711 | { | |
1712 | case PLUS: | |
1713 | case MINUS: | |
1714 | if (n_ops == 7) | |
1715 | return 0; | |
1716 | ||
1717 | ops[n_ops] = XEXP (ops[i], 1); | |
1718 | negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i]; | |
1719 | ops[i] = XEXP (ops[i], 0); | |
1720 | input_ops++; | |
1721 | changed = 1; | |
1722 | break; | |
1723 | ||
1724 | case NEG: | |
1725 | ops[i] = XEXP (ops[i], 0); | |
1726 | negs[i] = ! negs[i]; | |
1727 | changed = 1; | |
1728 | break; | |
1729 | ||
1730 | case CONST: | |
1731 | ops[i] = XEXP (ops[i], 0); | |
1732 | input_consts++; | |
1733 | changed = 1; | |
1734 | break; | |
1735 | ||
1736 | case NOT: | |
1737 | /* ~a -> (-a - 1) */ | |
1738 | if (n_ops != 7) | |
1739 | { | |
1740 | ops[n_ops] = constm1_rtx; | |
1741 | negs[n_ops++] = negs[i]; | |
1742 | ops[i] = XEXP (ops[i], 0); | |
1743 | negs[i] = ! negs[i]; | |
1744 | changed = 1; | |
1745 | } | |
1746 | break; | |
1747 | ||
1748 | case CONST_INT: | |
1749 | if (negs[i]) | |
1750 | ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1; | |
1751 | break; | |
1752 | ||
1753 | default: | |
1754 | break; | |
1755 | } | |
1756 | } | |
1757 | ||
1758 | /* If we only have two operands, we can't do anything. */ | |
1759 | if (n_ops <= 2) | |
1760 | return 0; | |
1761 | ||
1762 | /* Now simplify each pair of operands until nothing changes. The first | |
1763 | time through just simplify constants against each other. */ | |
1764 | ||
1765 | changed = 1; | |
1766 | while (changed) | |
1767 | { | |
1768 | changed = first; | |
1769 | ||
1770 | for (i = 0; i < n_ops - 1; i++) | |
1771 | for (j = i + 1; j < n_ops; j++) | |
1772 | if (ops[i] != 0 && ops[j] != 0 | |
1773 | && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j])))) | |
1774 | { | |
1775 | rtx lhs = ops[i], rhs = ops[j]; | |
1776 | enum rtx_code ncode = PLUS; | |
1777 | ||
1778 | if (negs[i] && ! negs[j]) | |
1779 | lhs = ops[j], rhs = ops[i], ncode = MINUS; | |
1780 | else if (! negs[i] && negs[j]) | |
1781 | ncode = MINUS; | |
1782 | ||
1783 | tem = simplify_binary_operation (ncode, mode, lhs, rhs); | |
1784 | if (tem) | |
1785 | { | |
1786 | ops[i] = tem, ops[j] = 0; | |
1787 | negs[i] = negs[i] && negs[j]; | |
1788 | if (GET_CODE (tem) == NEG) | |
1789 | ops[i] = XEXP (tem, 0), negs[i] = ! negs[i]; | |
1790 | ||
1791 | if (GET_CODE (ops[i]) == CONST_INT && negs[i]) | |
1792 | ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0; | |
1793 | changed = 1; | |
1794 | } | |
1795 | } | |
1796 | ||
1797 | first = 0; | |
1798 | } | |
1799 | ||
1800 | /* Pack all the operands to the lower-numbered entries and give up if | |
1801 | we didn't reduce the number of operands we had. Make sure we | |
1802 | count a CONST as two operands. If we have the same number of | |
1803 | operands, but have made more CONSTs than we had, this is also | |
1804 | an improvement, so accept it. */ | |
1805 | ||
1806 | for (i = 0, j = 0; j < n_ops; j++) | |
1807 | if (ops[j] != 0) | |
1808 | { | |
1809 | ops[i] = ops[j], negs[i++] = negs[j]; | |
1810 | if (GET_CODE (ops[j]) == CONST) | |
1811 | n_consts++; | |
1812 | } | |
1813 | ||
1814 | if (i + n_consts > input_ops | |
1815 | || (i + n_consts == input_ops && n_consts <= input_consts)) | |
1816 | return 0; | |
1817 | ||
1818 | n_ops = i; | |
1819 | ||
1820 | /* If we have a CONST_INT, put it last. */ | |
1821 | for (i = 0; i < n_ops - 1; i++) | |
1822 | if (GET_CODE (ops[i]) == CONST_INT) | |
1823 | { | |
1824 | tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem; | |
1825 | j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j; | |
1826 | } | |
1827 | ||
1828 | /* Put a non-negated operand first. If there aren't any, make all | |
1829 | operands positive and negate the whole thing later. */ | |
1830 | for (i = 0; i < n_ops && negs[i]; i++) | |
1831 | ; | |
1832 | ||
1833 | if (i == n_ops) | |
1834 | { | |
1835 | for (i = 0; i < n_ops; i++) | |
1836 | negs[i] = 0; | |
1837 | negate = 1; | |
1838 | } | |
1839 | else if (i != 0) | |
1840 | { | |
1841 | tem = ops[0], ops[0] = ops[i], ops[i] = tem; | |
1842 | j = negs[0], negs[0] = negs[i], negs[i] = j; | |
1843 | } | |
1844 | ||
1845 | /* Now make the result by performing the requested operations. */ | |
1846 | result = ops[0]; | |
1847 | for (i = 1; i < n_ops; i++) | |
1848 | result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]); | |
1849 | ||
1850 | return negate ? gen_rtx_NEG (mode, result) : result; | |
1851 | } | |
1852 | ||
/* Communication block for check_fold_consts, passed as an opaque
   pointer through do_float_handler so the comparison can survive a
   floating point trap while reading the constants.  */
struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;		/* Output: nonzero if an operand is a NaN
				   or extracting the values trapped.  */
};
1859 | ||
1860 | static void | |
1861 | check_fold_consts (data) | |
1862 | PTR data; | |
1863 | { | |
155b05dc | 1864 | struct cfc_args *args = (struct cfc_args *) data; |
af21a202 | 1865 | REAL_VALUE_TYPE d0, d1; |
1866 | ||
5377f687 | 1867 | /* We may possibly raise an exception while reading the value. */ |
1868 | args->unordered = 1; | |
af21a202 | 1869 | REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0); |
1870 | REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1); | |
5377f687 | 1871 | |
1872 | /* Comparisons of Inf versus Inf are ordered. */ | |
1873 | if (REAL_VALUE_ISNAN (d0) | |
1874 | || REAL_VALUE_ISNAN (d1)) | |
1875 | return; | |
af21a202 | 1876 | args->equal = REAL_VALUES_EQUAL (d0, d1); |
1877 | args->op0lt = REAL_VALUES_LESS (d0, d1); | |
1878 | args->op1lt = REAL_VALUES_LESS (d1, d0); | |
5377f687 | 1879 | args->unordered = 0; |
af21a202 | 1880 | } |
1881 | ||
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* TRUEOP0/TRUEOP1 see through constant pool references so we can
     fold against the underlying constants; OP0/OP1 keep the original
     forms for any rtx we hand back or recurse on.  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  /* With -funsafe-math-optimizations we assume no operand is a NaN,
     so ORDERED/UNORDERED collapse to constants.  */
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (trueop0, trueop1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (trueop0))
	  || flag_unsafe_math_optimizations))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Setup input for check_fold_consts() */
      args.op0 = trueop0;
      args.op1 = trueop1;

      /* If the comparison trapped, treat it as unordered.  */
      if (!do_float_handler (check_fold_consts, (PTR) &args))
	args.unordered = 1;

      /* An unordered comparison has a known answer for every
	 comparison code except the "compute" codes below.  */
      if (args.unordered)
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      /* Receive output from check_fold_consts() */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      /* Each constant is held as two HOST_WIDE_INT words, in both a
	 signed (l*s/h*s) and unsigned (l*u/h*u) view.  */
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      /* The value fits in one word, so the high words are just the
	 extension of the low words.  (WIDTH == 0 means VOIDmode,
	 i.e. "infinite precision", and keeps the original words.)  */
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      /* Double-word compare: high words decide, low words (always
	 compared unsigned) break ties.  */
      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
2166 | \f | |
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      /* OP1 is the field width in bits, OP2 the starting bit position;
	 only fold when everything is constant and fits in a word.  */
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  /* Shift the desired field down to bit 0; the bit position in
	     OP2 counts from the opposite end when BITS_BIG_ENDIAN.  */
	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;

    case IF_THEN_ELSE:
      /* A constant condition selects one arm outright.  */
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a != b ? a : b  and  a == b ? b : a  to "a".  Not valid
	 for IEEE floats unless -funsafe-math-optimizations, since
	 a == b does not imply the values are interchangeable
	 (e.g. -0.0 == +0.0).  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  /* Try to fold the comparison itself; pick the mode of
	     whichever comparison operand has one.  */
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2: when the arms are
	     STORE_FLAG_VALUE and 0, the IF_THEN_ELSE is just the
	     comparison itself (or its reversal).  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
	        code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  /* The comparison may not be safely reversible
		     (e.g. IEEE float orderings).  */
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
2289 | ||
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */
rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  /* The trivial subreg is the operand itself.  */
  if (outermode == innermode && !byte)
    return op;

  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte)
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* Similar comment as above applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = constant_subword (op,
				      (byte / UNITS_PER_WORD),
				      innermode);
	  if (new)
	    return new;
	}

      /* General case: extract the requested bits by hand.  OFFSET is
	 the bit offset of the subreg within the constant.  */
      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  /* Only VOIDmode CONST_DOUBLEs are two-word integers; others
	     are floats and handled (if at all) above.  */
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  /* Select the high or low word of the CONST_DOUBLE, honoring
	     the target's word/byte endianness.  */
	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = !part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	  /* FALLTHROUGH */
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  /* Convert the memory-layout byte offset into a shift count
	     within VAL on big endian targets.  */
	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

	  /* Bits beyond the word we hold are just the sign extension.  */
	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
	  else
	    {
	      val >>= offset;
	      if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
		val = trunc_int_for_mode (val, outermode);
	      return GEN_INT (val);
	    }
	default:
	  break;
	}
    }

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* Same adjustment for the inner subreg being paradoxical.  */
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
	    && (TEST_HARD_REG_BIT
		(reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
		 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit Sparc and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	return gen_rtx_REG (outermode, final_regno);
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
f760707b | 2545 | /* Make a SUBREG operation or equivalent if it folds. */ |
2546 | ||
2547 | rtx | |
2548 | simplify_gen_subreg (outermode, op, innermode, byte) | |
2549 | rtx op; | |
2550 | unsigned int byte; | |
2551 | enum machine_mode outermode, innermode; | |
2552 | { | |
2553 | rtx new; | |
2554 | /* Little bit of sanity checking. */ | |
2555 | if (innermode == VOIDmode || outermode == VOIDmode | |
2556 | || innermode == BLKmode || outermode == BLKmode) | |
2557 | abort (); | |
2558 | ||
2559 | if (GET_MODE (op) != innermode | |
2560 | && GET_MODE (op) != VOIDmode) | |
2561 | abort (); | |
2562 | ||
2563 | if (byte % GET_MODE_SIZE (outermode) | |
2564 | || byte >= GET_MODE_SIZE (innermode)) | |
2565 | abort (); | |
2566 | ||
3c25a678 | 2567 | if (GET_CODE (op) == QUEUED) |
2568 | return NULL_RTX; | |
2569 | ||
f760707b | 2570 | new = simplify_subreg (outermode, op, innermode, byte); |
2571 | if (new) | |
2572 | return new; | |
2573 | ||
2574 | if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode) | |
2575 | return NULL_RTX; | |
2576 | ||
2577 | return gen_rtx_SUBREG (outermode, op, byte); | |
2578 | } | |
af21a202 | 2579 | /* Simplify X, an rtx expression. |
2580 | ||
2581 | Return the simplified expression or NULL if no simplifications | |
2582 | were possible. | |
2583 | ||
2584 | This is the preferred entry point into the simplification routines; | |
2585 | however, we still allow passes to call the more specific routines. | |
2586 | ||
2587 | Right now GCC has three (yes, three) major bodies of RTL simplification | |
2588 | code that need to be unified. | |
2589 | ||
2590 | 1. fold_rtx in cse.c. This code uses various CSE specific | |
2591 | information to aid in RTL simplification. | |
2592 | ||
2593 | 2. simplify_rtx in combine.c. Similar to fold_rtx, except that | |
2594 | it uses combine specific information to aid in RTL | |
2595 | simplification. | |
2596 | ||
2597 | 3. The routines in this file. | |
2598 | ||
2599 | ||
2600 | Long term we want to only have one body of simplification code; to | |
2601 | get to that state I recommend the following steps: | |
2602 | ||
2603 | 1. Pore over fold_rtx & simplify_rtx and move any simplifications |
2604 | which are not pass dependent state into these routines. | |
2605 | ||
2606 | 2. As code is moved by #1, change fold_rtx & simplify_rtx to | |
2607 | use this routine whenever possible. | |
2608 | ||
2609 | 3. Allow for pass dependent state to be provided to these | |
2610 | routines and add simplifications based on the pass dependent | |
2611 | state. Remove code from cse.c & combine.c that becomes | |
2612 | redundant/dead. | |
2613 | ||
2614 | It will take time, but ultimately the compiler will be easier to | |
2615 | maintain and improve. It's totally silly that when we add a | |
2616 | simplification that it needs to be added to 4 places (3 for RTL |
2617 | simplification and 1 for tree simplification). */ |
2618 | ||
rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* Dispatch on the rtx class of CODE and delegate to the matching
     specific simplifier.  Each simplifier returns NULL when it finds
     nothing to do.  */
  switch (GET_RTX_CLASS (code))
    {
    case '1':
      /* Unary operation: the operand's own mode is passed separately
	 since it may differ from MODE (e.g. for conversions).  */
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      /* Commutative binary operation: put the operands into canonical
	 order first.  NOTE(review): the swap modifies X in place, so X
	 is left with reordered operands even when the call below
	 returns NULL -- presumably intentional canonicalization, but
	 callers sharing X should be aware.  */
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	{
	  rtx tem;

	  tem = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tem;
	  return simplify_binary_operation (code, mode,
					    XEXP (x, 0), XEXP (x, 1));
	}
      /* Operands already canonical -- fall through to the plain
	 binary case.  */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      /* Ternary and bit-field operations; the extra mode argument is
	 the mode of the first operand.  */
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      /* Comparison: use the mode of whichever operand has one
	 (a CONST_INT operand has VOIDmode).  */
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      return NULL;
    default:
      /* Other rtx classes (autoinc, insn, etc.) are not simplified.  */
      return NULL;
    }
}