]>
Commit | Line | Data |
---|---|---|
749a2da1 | 1 | /* RTL simplification functions for GNU compiler. |
af841dbd | 2 | Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, |
90a74703 | 3 | 1999, 2000, 2001 Free Software Foundation, Inc. |
0cedb36c | 4 | |
1322177d | 5 | This file is part of GCC. |
0cedb36c | 6 | |
1322177d LB |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free | |
9 | Software Foundation; either version 2, or (at your option) any later | |
10 | version. | |
0cedb36c | 11 | |
1322177d LB |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | for more details. | |
0cedb36c JL |
16 | |
17 | You should have received a copy of the GNU General Public License | |
1322177d LB |
18 | along with GCC; see the file COPYING. If not, write to the Free |
19 | Software Foundation, 59 Temple Place - Suite 330, Boston, MA | |
20 | 02111-1307, USA. */ | |
0cedb36c JL |
21 | |
22 | ||
23 | #include "config.h" | |
0cedb36c | 24 | #include "system.h" |
0cedb36c JL |
25 | |
26 | #include "rtl.h" | |
27 | #include "tm_p.h" | |
28 | #include "regs.h" | |
29 | #include "hard-reg-set.h" | |
30 | #include "flags.h" | |
31 | #include "real.h" | |
32 | #include "insn-config.h" | |
33 | #include "recog.h" | |
34 | #include "function.h" | |
35 | #include "expr.h" | |
36 | #include "toplev.h" | |
37 | #include "output.h" | |
eab5c70a | 38 | #include "ggc.h" |
0cedb36c JL |
39 | |
40 | /* Simplification and canonicalization of RTL. */ | |
41 | ||
42 | /* Nonzero if X has the form (PLUS frame-pointer integer). We check for | |
43 | virtual regs here because the simplify_*_operation routines are called | |
44 | by integrate.c, which is called before virtual register instantiation. | |
45 | ||
46 | ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into | |
47 | a header file so that their definitions can be shared with the | |
48 | simplification routines in simplify-rtx.c. Until then, do not | |
49 | change these macros without also changing the copy in simplify-rtx.c. */ | |
50 | ||
/* Note: inside the PLUS branch we must test XEXP (X, 0); X itself is
   known to be a PLUS there, so a comparison of X against
   arg_pointer_rtx could never succeed (previously this term was dead
   code, causing (plus arg-pointer const_int) to be missed).  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
64 | ||
65 | /* Similar, but also allows reference to the stack pointer. | |
66 | ||
67 | This used to include FIXED_BASE_PLUS_P, however, we can't assume that | |
68 | arg_pointer_rtx by itself is nonzero, because on at least one machine, | |
69 | the i960, the arg pointer is zero when it is unused. */ | |
70 | ||
/* As in FIXED_BASE_PLUS_P, the PLUS branch must test XEXP (X, 0)
   rather than X, which is known to be a PLUS at that point.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
90 | ||
3839069b ZW |
91 | /* Much code operates on (low, high) pairs; the low value is an |
92 | unsigned wide int, the high value a signed wide int. We | |
93 | occasionally need to sign extend from low to high as if low were a | |
94 | signed wide int. */ | |
/* Parenthesize the macro argument: without the inner parentheses an
   argument such as `a + b' would bind as `((HOST_WIDE_INT) a) + b'.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
0cedb36c | 97 | |
aff8a8d5 | 98 | static rtx neg_const_int PARAMS ((enum machine_mode, rtx)); |
9b3bd424 RH |
99 | static int simplify_plus_minus_op_data_cmp PARAMS ((const void *, |
100 | const void *)); | |
749a2da1 | 101 | static rtx simplify_plus_minus PARAMS ((enum rtx_code, |
e3c8ea67 RH |
102 | enum machine_mode, rtx, |
103 | rtx, int)); | |
749a2da1 | 104 | static void check_fold_consts PARAMS ((PTR)); |
94aca342 ZW |
105 | static void simplify_unary_real PARAMS ((PTR)); |
106 | static void simplify_binary_real PARAMS ((PTR)); | |
94aca342 ZW |
107 | static void simplify_binary_is2orm1 PARAMS ((PTR)); |
108 | ||
aff8a8d5 CM |
109 | \f |
110 | /* Negate a CONST_INT rtx, truncating (because a conversion from a | |
23d1aac4 | 111 | maximally negative number can overflow). */ |
aff8a8d5 CM |
112 | static rtx |
113 | neg_const_int (mode, i) | |
114 | enum machine_mode mode; | |
115 | rtx i; | |
116 | { | |
117 | return GEN_INT (trunc_int_for_mode (- INTVAL (i), mode)); | |
118 | } | |
119 | ||
749a2da1 | 120 | \f |
0cedb36c JL |
121 | /* Make a binary operation by properly ordering the operands and |
122 | seeing if the expression folds. */ | |
123 | ||
124 | rtx | |
125 | simplify_gen_binary (code, mode, op0, op1) | |
126 | enum rtx_code code; | |
127 | enum machine_mode mode; | |
128 | rtx op0, op1; | |
129 | { | |
130 | rtx tem; | |
131 | ||
132 | /* Put complex operands first and constants second if commutative. */ | |
133 | if (GET_RTX_CLASS (code) == 'c' | |
e5c56fd9 | 134 | && swap_commutative_operands_p (op0, op1)) |
0cedb36c JL |
135 | tem = op0, op0 = op1, op1 = tem; |
136 | ||
137 | /* If this simplifies, do it. */ | |
138 | tem = simplify_binary_operation (code, mode, op0, op1); | |
0cedb36c JL |
139 | if (tem) |
140 | return tem; | |
141 | ||
e3c8ea67 RH |
142 | /* Handle addition and subtraction specially. Otherwise, just form |
143 | the operation. */ | |
0cedb36c | 144 | |
e3c8ea67 | 145 | if (code == PLUS || code == MINUS) |
e16e3291 UW |
146 | { |
147 | tem = simplify_plus_minus (code, mode, op0, op1, 1); | |
148 | if (tem) | |
149 | return tem; | |
150 | } | |
151 | ||
152 | return gen_rtx_fmt_ee (code, mode, op0, op1); | |
0cedb36c JL |
153 | } |
154 | \f | |
5a2aa3bd | 155 | /* If X is a MEM referencing the constant pool, return the real value. |
4ba5f925 | 156 | Otherwise return X. */ |
732910b9 | 157 | rtx |
4ba5f925 JH |
158 | avoid_constant_pool_reference (x) |
159 | rtx x; | |
160 | { | |
5a2aa3bd RH |
161 | rtx c, addr; |
162 | enum machine_mode cmode; | |
163 | ||
4ba5f925 JH |
164 | if (GET_CODE (x) != MEM) |
165 | return x; | |
5a2aa3bd RH |
166 | addr = XEXP (x, 0); |
167 | ||
168 | if (GET_CODE (addr) != SYMBOL_REF | |
169 | || ! CONSTANT_POOL_ADDRESS_P (addr)) | |
4ba5f925 | 170 | return x; |
5a2aa3bd RH |
171 | |
172 | c = get_pool_constant (addr); | |
173 | cmode = get_pool_mode (addr); | |
174 | ||
175 | /* If we're accessing the constant in a different mode than it was | |
176 | originally stored, attempt to fix that up via subreg simplifications. | |
177 | If that fails we have no choice but to return the original memory. */ | |
178 | if (cmode != GET_MODE (x)) | |
179 | { | |
180 | c = simplify_subreg (GET_MODE (x), c, cmode, 0); | |
181 | return c ? c : x; | |
182 | } | |
183 | ||
184 | return c; | |
4ba5f925 JH |
185 | } |
186 | \f | |
d9c695ff RK |
187 | /* Make a unary operation by first seeing if it folds and otherwise making |
188 | the specified operation. */ | |
189 | ||
190 | rtx | |
191 | simplify_gen_unary (code, mode, op, op_mode) | |
192 | enum rtx_code code; | |
193 | enum machine_mode mode; | |
194 | rtx op; | |
195 | enum machine_mode op_mode; | |
196 | { | |
197 | rtx tem; | |
198 | ||
199 | /* If this simplifies, use it. */ | |
200 | if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0) | |
201 | return tem; | |
202 | ||
203 | return gen_rtx_fmt_e (code, mode, op); | |
204 | } | |
205 | ||
206 | /* Likewise for ternary operations. */ | |
207 | ||
208 | rtx | |
209 | simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2) | |
210 | enum rtx_code code; | |
211 | enum machine_mode mode, op0_mode; | |
212 | rtx op0, op1, op2; | |
213 | { | |
214 | rtx tem; | |
215 | ||
216 | /* If this simplifies, use it. */ | |
217 | if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode, | |
218 | op0, op1, op2))) | |
219 | return tem; | |
220 | ||
221 | return gen_rtx_fmt_eee (code, mode, op0, op1, op2); | |
222 | } | |
223 | \f | |
141e454b JH |
224 | /* Likewise, for relational operations. |
225 | CMP_MODE specifies mode comparison is done in. | |
226 | */ | |
d9c695ff RK |
227 | |
228 | rtx | |
141e454b | 229 | simplify_gen_relational (code, mode, cmp_mode, op0, op1) |
d9c695ff RK |
230 | enum rtx_code code; |
231 | enum machine_mode mode; | |
141e454b | 232 | enum machine_mode cmp_mode; |
d9c695ff RK |
233 | rtx op0, op1; |
234 | { | |
235 | rtx tem; | |
236 | ||
141e454b | 237 | if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0) |
d9c695ff RK |
238 | return tem; |
239 | ||
240 | /* Put complex operands first and constants second. */ | |
e5c56fd9 | 241 | if (swap_commutative_operands_p (op0, op1)) |
d9c695ff RK |
242 | tem = op0, op0 = op1, op1 = tem, code = swap_condition (code); |
243 | ||
244 | return gen_rtx_fmt_ee (code, mode, op0, op1); | |
245 | } | |
246 | \f | |
247 | /* Replace all occurrences of OLD in X with NEW and try to simplify the | |
248 | resulting RTX. Return a new RTX which is as simplified as possible. */ | |
249 | ||
rtx
simplify_replace_rtx (x, old, new)
     rtx x;			/* Expression to rewrite.  */
     rtx old;			/* Subexpression to be replaced.  */
     rtx new;			/* Replacement expression.  */
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  /* Dispatch on the RTX class so each arm can rebuild X through the
     matching simplify_gen_* routine, which re-folds the result.  */
  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	/* Unary: record the operand's mode before substituting, since
	   the replacement may have a different (or VOID) mode.  */
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      /* Binary and commutative binary: substitute in both operands.  */
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
	/* Comparison: the comparison mode comes from whichever original
	   operand has a non-VOID mode, falling back to the substituted
	   operands when both originals were VOIDmode constants.  */
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	return
	  simplify_gen_relational (code, mode,
				   (op_mode != VOIDmode
				    ? op_mode
				    : GET_MODE (op0) != VOIDmode
				    ? GET_MODE (op0)
				    : GET_MODE (op1)),
				   op0, op1);
      }

    case '3':
    case 'b':
      {
	/* Ternary (and bitfield): operand 0's mode, taken before
	   substitution when available.  */
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  /* Keep the original X when the subreg cannot be rebuilt.  */
	  if (exp)
	    x = exp;
	}
      return x;

    default:
      /* A MEM gets its address substituted; the _nv variant never
	 validates, so this is safe pre-reload.  Anything else is
	 returned unchanged.  */
      if (GET_CODE (x) == MEM)
	return
	  replace_equiv_address_nv (x,
				    simplify_replace_rtx (XEXP (x, 0),
							  old, new));

      return x;
    }
  /* Not reached; all switch arms return.  */
  return x;
}
343 | \f | |
94aca342 ZW |
/* Subroutine of simplify_unary_operation, called via do_float_handler.
   Handles simplification of unary ops on floating point values.
   All inputs and the result travel through this struct because
   do_float_handler forwards only a single PTR argument.  */
struct simplify_unary_real_args
{
  rtx operand;			/* Input CONST_DOUBLE.  */
  rtx result;			/* Folded rtx, or 0 when no folding.  */
  enum machine_mode mode;	/* Mode of the result.  */
  enum rtx_code code;		/* Unary operation to perform.  */
  bool want_integer;		/* Nonzero: produce a CONST_INT (FIX).  */
};
/* Absolute value of a REAL_VALUE_TYPE, via negate-if-negative.  */
#define REAL_VALUE_ABS(d_) \
  (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))

static void
simplify_unary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE d;

  struct simplify_unary_real_args *args =
    (struct simplify_unary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);

  if (args->want_integer)
    {
      /* Float-to-integer conversions: truncate toward zero and wrap
	 the value as a CONST_INT in the requested mode.  */
      HOST_WIDE_INT i;

      switch (args->code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
    }
  else
    {
      /* Float-to-float operations: compute in REAL_VALUE_TYPE, then
	 re-wrap as a CONST_DOUBLE in the result mode.  */
      switch (args->code)
	{
	case SQRT:
	  /* We don't attempt to optimize this.  */
	  args->result = 0;
	  return;

	case ABS:		d = REAL_VALUE_ABS (d);			break;
	case NEG:		d = REAL_VALUE_NEGATE (d);		break;
	case FLOAT_TRUNCATE:	d = real_value_truncate (args->mode, d); break;
	case FLOAT_EXTEND:	/* All this does is change the mode.  */ break;
	case FIX:		d = REAL_VALUE_RNDZINT (d);		break;
	case UNSIGNED_FIX:	d = REAL_VALUE_UNSIGNED_RNDZINT (d);	break;
	default:
	  abort ();
	}
      args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
    }
}
94aca342 | 402 | |
0cedb36c JL |
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  /* Look through constant-pool MEMs so pooled constants fold too.  */
  rtx trueop = avoid_constant_pool_reference (op);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  /* (float (const_int/const_double)): build the REAL_VALUE from the
     integer pair and return a CONST_DOUBLE.  */
  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  /* (unsigned_float (const_int/const_double)): as above, but the
     integer pair is interpreted as unsigned.  */
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	/* Mask the value down to the width of the operand's mode.  */
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  /* Integer operand that fits in one HOST_WIDE_INT: fold in host
     arithmetic and truncate the result to MODE.  */
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case TRUNCATE:
	  /* Narrowing: trunc_int_for_mode below does the masking.  */
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      /* Mask to OP_MODE's width, then subtract 2^width if the
		 sign bit of the narrow value was set.  */
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  /* Not foldable on an integer operand.  */
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      /* Split the operand into a (low, high) pair; see HWI_SIGN_EXTEND.  */
      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  /* Bit number of the lowest set bit, searching the high word
	     only when the low word is all zero.  */
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  /* Floating-point operand, floating-point result: fold via the
     trap-protected float handler.  */
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = false;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
	return args.result;

      return 0;
    }

  /* Floating-point operand, integer result (FIX/UNSIGNED_FIX): same
     handler, but asking for a CONST_INT.  */
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = true;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
	return args.result;

      return 0;
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  /* NOTE(review): the reversed comparison is built in OP_MODE,
	     which callers may pass as VOIDmode -- confirm this is the
	     intended mode rather than GET_MODE (op).  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  /* ptr_mode -> Pmode extension of a constant or pointer
	     subreg can be done by address conversion.  */
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  /* Mirror of the SIGN_EXTEND pointer case for targets whose
	     pointers extend unsigned.  */
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
742 | \f | |
94aca342 ZW |
/* Subroutine of simplify_binary_operation, called via do_float_handler.
   Handles simplification of binary ops on floating point values.
   Inputs and the result travel through this struct because
   do_float_handler forwards only a single PTR argument.  */
struct simplify_binary_real_args
{
  rtx trueop0, trueop1;		/* Input CONST_DOUBLE operands.  */
  rtx result;			/* Folded CONST_DOUBLE, or 0 on failure.  */
  enum rtx_code code;		/* Binary operation to perform.  */
  enum machine_mode mode;	/* Mode of operands and result.  */
};

static void
simplify_binary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE f0, f1, value;
  struct simplify_binary_real_args *args =
    (struct simplify_binary_real_args *) p;

  /* Extract both operand values and truncate them to the target mode.  */
  REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
  REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
  f0 = real_value_truncate (args->mode, f0);
  f1 = real_value_truncate (args->mode, f1);

#ifndef REAL_INFINITY
  /* Without an infinity representation, division by zero cannot be
     folded; report failure.  */
  if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
    {
      args->result = 0;
      return;
    }
#endif
  REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);

  /* Truncate the result to the target mode and re-wrap it.  */
  value = real_value_truncate (args->mode, value);
  args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
}
94aca342 ZW |
778 | |
/* Another subroutine called via do_float_handler.  This one tests
   the floating point value given against 2. and -1.
   Communicates through a struct since do_float_handler forwards
   only a single PTR argument.  */
struct simplify_binary_is2orm1_args
{
  rtx value;	/* Input: CONST_DOUBLE to examine.  */
  bool is_2;	/* Output: nonzero when value == 2.0.  */
  bool is_m1;	/* Output: nonzero when value == -1.0.  */
};

static void
simplify_binary_is2orm1 (p)
     PTR p;
{
  REAL_VALUE_TYPE d;
  struct simplify_binary_is2orm1_args *args =
    (struct simplify_binary_is2orm1_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
  args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
  args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
}
800 | ||
0cedb36c JL |
801 | /* Simplify a binary operation CODE with result mode MODE, operating on OP0 |
802 | and OP1. Return 0 if no simplification is possible. | |
803 | ||
804 | Don't use this for relational operations such as EQ or LT. | |
805 | Use simplify_relational_operation instead. */ | |
0cedb36c JL |
806 | rtx |
807 | simplify_binary_operation (code, mode, op0, op1) | |
808 | enum rtx_code code; | |
809 | enum machine_mode mode; | |
810 | rtx op0, op1; | |
811 | { | |
b3694847 | 812 | HOST_WIDE_INT arg0, arg1, arg0s, arg1s; |
0cedb36c | 813 | HOST_WIDE_INT val; |
770ae6cc | 814 | unsigned int width = GET_MODE_BITSIZE (mode); |
0cedb36c | 815 | rtx tem; |
4ba5f925 JH |
816 | rtx trueop0 = avoid_constant_pool_reference (op0); |
817 | rtx trueop1 = avoid_constant_pool_reference (op1); | |
0cedb36c JL |
818 | |
819 | /* Relational operations don't work here. We must know the mode | |
820 | of the operands in order to do the comparison correctly. | |
821 | Assuming a full word can give incorrect results. | |
822 | Consider comparing 128 with -128 in QImode. */ | |
823 | ||
824 | if (GET_RTX_CLASS (code) == '<') | |
825 | abort (); | |
826 | ||
4ba5f925 JH |
827 | /* Make sure the constant is second. */ |
828 | if (GET_RTX_CLASS (code) == 'c' | |
829 | && swap_commutative_operands_p (trueop0, trueop1)) | |
830 | { | |
831 | tem = op0, op0 = op1, op1 = tem; | |
832 | tem = trueop0, trueop0 = trueop1, trueop1 = tem; | |
833 | } | |
834 | ||
0cedb36c | 835 | if (GET_MODE_CLASS (mode) == MODE_FLOAT |
4ba5f925 JH |
836 | && GET_CODE (trueop0) == CONST_DOUBLE |
837 | && GET_CODE (trueop1) == CONST_DOUBLE | |
0cedb36c JL |
838 | && mode == GET_MODE (op0) && mode == GET_MODE (op1)) |
839 | { | |
94aca342 ZW |
840 | struct simplify_binary_real_args args; |
841 | args.trueop0 = trueop0; | |
842 | args.trueop1 = trueop1; | |
843 | args.mode = mode; | |
844 | args.code = code; | |
845 | ||
846 | if (do_float_handler (simplify_binary_real, (PTR) &args)) | |
847 | return args.result; | |
848 | return 0; | |
0cedb36c | 849 | } |
0cedb36c JL |
850 | |
851 | /* We can fold some multi-word operations. */ | |
852 | if (GET_MODE_CLASS (mode) == MODE_INT | |
853 | && width == HOST_BITS_PER_WIDE_INT * 2 | |
4ba5f925 JH |
854 | && (GET_CODE (trueop0) == CONST_DOUBLE |
855 | || GET_CODE (trueop0) == CONST_INT) | |
856 | && (GET_CODE (trueop1) == CONST_DOUBLE | |
857 | || GET_CODE (trueop1) == CONST_INT)) | |
0cedb36c | 858 | { |
3839069b ZW |
859 | unsigned HOST_WIDE_INT l1, l2, lv; |
860 | HOST_WIDE_INT h1, h2, hv; | |
0cedb36c | 861 | |
4ba5f925 JH |
862 | if (GET_CODE (trueop0) == CONST_DOUBLE) |
863 | l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0); | |
0cedb36c | 864 | else |
4ba5f925 | 865 | l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1); |
0cedb36c | 866 | |
4ba5f925 JH |
867 | if (GET_CODE (trueop1) == CONST_DOUBLE) |
868 | l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1); | |
0cedb36c | 869 | else |
4ba5f925 | 870 | l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2); |
0cedb36c JL |
871 | |
872 | switch (code) | |
873 | { | |
874 | case MINUS: | |
875 | /* A - B == A + (-B). */ | |
876 | neg_double (l2, h2, &lv, &hv); | |
877 | l2 = lv, h2 = hv; | |
878 | ||
879 | /* .. fall through ... */ | |
880 | ||
881 | case PLUS: | |
882 | add_double (l1, h1, l2, h2, &lv, &hv); | |
883 | break; | |
884 | ||
885 | case MULT: | |
886 | mul_double (l1, h1, l2, h2, &lv, &hv); | |
887 | break; | |
888 | ||
889 | case DIV: case MOD: case UDIV: case UMOD: | |
890 | /* We'd need to include tree.h to do this and it doesn't seem worth | |
891 | it. */ | |
892 | return 0; | |
893 | ||
894 | case AND: | |
895 | lv = l1 & l2, hv = h1 & h2; | |
896 | break; | |
897 | ||
898 | case IOR: | |
899 | lv = l1 | l2, hv = h1 | h2; | |
900 | break; | |
901 | ||
902 | case XOR: | |
903 | lv = l1 ^ l2, hv = h1 ^ h2; | |
904 | break; | |
905 | ||
906 | case SMIN: | |
907 | if (h1 < h2 | |
908 | || (h1 == h2 | |
909 | && ((unsigned HOST_WIDE_INT) l1 | |
910 | < (unsigned HOST_WIDE_INT) l2))) | |
911 | lv = l1, hv = h1; | |
912 | else | |
913 | lv = l2, hv = h2; | |
914 | break; | |
915 | ||
916 | case SMAX: | |
917 | if (h1 > h2 | |
918 | || (h1 == h2 | |
919 | && ((unsigned HOST_WIDE_INT) l1 | |
920 | > (unsigned HOST_WIDE_INT) l2))) | |
921 | lv = l1, hv = h1; | |
922 | else | |
923 | lv = l2, hv = h2; | |
924 | break; | |
925 | ||
926 | case UMIN: | |
927 | if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2 | |
928 | || (h1 == h2 | |
929 | && ((unsigned HOST_WIDE_INT) l1 | |
930 | < (unsigned HOST_WIDE_INT) l2))) | |
931 | lv = l1, hv = h1; | |
932 | else | |
933 | lv = l2, hv = h2; | |
934 | break; | |
935 | ||
936 | case UMAX: | |
937 | if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2 | |
938 | || (h1 == h2 | |
939 | && ((unsigned HOST_WIDE_INT) l1 | |
940 | > (unsigned HOST_WIDE_INT) l2))) | |
941 | lv = l1, hv = h1; | |
942 | else | |
943 | lv = l2, hv = h2; | |
944 | break; | |
945 | ||
946 | case LSHIFTRT: case ASHIFTRT: | |
947 | case ASHIFT: | |
948 | case ROTATE: case ROTATERT: | |
949 | #ifdef SHIFT_COUNT_TRUNCATED | |
950 | if (SHIFT_COUNT_TRUNCATED) | |
951 | l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0; | |
952 | #endif | |
953 | ||
3839069b | 954 | if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode)) |
0cedb36c JL |
955 | return 0; |
956 | ||
957 | if (code == LSHIFTRT || code == ASHIFTRT) | |
958 | rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, | |
959 | code == ASHIFTRT); | |
960 | else if (code == ASHIFT) | |
961 | lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1); | |
962 | else if (code == ROTATE) | |
963 | lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); | |
964 | else /* code == ROTATERT */ | |
965 | rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); | |
966 | break; | |
967 | ||
968 | default: | |
969 | return 0; | |
970 | } | |
971 | ||
972 | return immed_double_const (lv, hv, mode); | |
973 | } | |
974 | ||
975 | if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT | |
976 | || width > HOST_BITS_PER_WIDE_INT || width == 0) | |
977 | { | |
978 | /* Even if we can't compute a constant result, | |
979 | there are some cases worth simplifying. */ | |
980 | ||
981 | switch (code) | |
982 | { | |
983 | case PLUS: | |
71925bc0 RS |
984 | /* Maybe simplify x + 0 to x. The two expressions are equivalent |
985 | when x is NaN, infinite, or finite and non-zero. They aren't | |
986 | when x is -0 and the rounding mode is not towards -infinity, | |
987 | since (-0) + 0 is then 0. */ | |
988 | if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode)) | |
0cedb36c JL |
989 | return op0; |
990 | ||
71925bc0 RS |
991 | /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These |
992 | transformations are safe even for IEEE. */ | |
0cedb36c JL |
993 | if (GET_CODE (op0) == NEG) |
994 | return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0)); | |
995 | else if (GET_CODE (op1) == NEG) | |
996 | return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0)); | |
997 | ||
c52c9a9c LB |
998 | /* (~a) + 1 -> -a */ |
999 | if (INTEGRAL_MODE_P (mode) | |
1000 | && GET_CODE (op0) == NOT | |
4ba5f925 | 1001 | && trueop1 == const1_rtx) |
c52c9a9c LB |
1002 | return gen_rtx_NEG (mode, XEXP (op0, 0)); |
1003 | ||
0cedb36c JL |
1004 | /* Handle both-operands-constant cases. We can only add |
1005 | CONST_INTs to constants since the sum of relocatable symbols | |
1006 | can't be handled by most assemblers. Don't add CONST_INT | |
1007 | to CONST_INT since overflow won't be computed properly if wider | |
1008 | than HOST_BITS_PER_WIDE_INT. */ | |
1009 | ||
1010 | if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode | |
1011 | && GET_CODE (op1) == CONST_INT) | |
1012 | return plus_constant (op0, INTVAL (op1)); | |
1013 | else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode | |
1014 | && GET_CODE (op0) == CONST_INT) | |
1015 | return plus_constant (op1, INTVAL (op0)); | |
1016 | ||
1017 | /* See if this is something like X * C - X or vice versa or | |
1018 | if the multiplication is written as a shift. If so, we can | |
1019 | distribute and make a new multiply, shift, or maybe just | |
1020 | have X (if C is 2 in the example above). But don't make | |
1021 | real multiply if we didn't have one before. */ | |
1022 | ||
1023 | if (! FLOAT_MODE_P (mode)) | |
1024 | { | |
1025 | HOST_WIDE_INT coeff0 = 1, coeff1 = 1; | |
1026 | rtx lhs = op0, rhs = op1; | |
1027 | int had_mult = 0; | |
1028 | ||
1029 | if (GET_CODE (lhs) == NEG) | |
1030 | coeff0 = -1, lhs = XEXP (lhs, 0); | |
1031 | else if (GET_CODE (lhs) == MULT | |
1032 | && GET_CODE (XEXP (lhs, 1)) == CONST_INT) | |
1033 | { | |
1034 | coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0); | |
1035 | had_mult = 1; | |
1036 | } | |
1037 | else if (GET_CODE (lhs) == ASHIFT | |
1038 | && GET_CODE (XEXP (lhs, 1)) == CONST_INT | |
1039 | && INTVAL (XEXP (lhs, 1)) >= 0 | |
1040 | && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1041 | { | |
1042 | coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); | |
1043 | lhs = XEXP (lhs, 0); | |
1044 | } | |
1045 | ||
1046 | if (GET_CODE (rhs) == NEG) | |
1047 | coeff1 = -1, rhs = XEXP (rhs, 0); | |
1048 | else if (GET_CODE (rhs) == MULT | |
1049 | && GET_CODE (XEXP (rhs, 1)) == CONST_INT) | |
1050 | { | |
1051 | coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0); | |
1052 | had_mult = 1; | |
1053 | } | |
1054 | else if (GET_CODE (rhs) == ASHIFT | |
1055 | && GET_CODE (XEXP (rhs, 1)) == CONST_INT | |
1056 | && INTVAL (XEXP (rhs, 1)) >= 0 | |
1057 | && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1058 | { | |
1059 | coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)); | |
1060 | rhs = XEXP (rhs, 0); | |
1061 | } | |
1062 | ||
1063 | if (rtx_equal_p (lhs, rhs)) | |
1064 | { | |
1065 | tem = simplify_gen_binary (MULT, mode, lhs, | |
1066 | GEN_INT (coeff0 + coeff1)); | |
1067 | return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem; | |
1068 | } | |
1069 | } | |
1070 | ||
1071 | /* If one of the operands is a PLUS or a MINUS, see if we can | |
1072 | simplify this by the associative law. | |
1073 | Don't use the associative law for floating point. | |
1074 | The inaccuracy makes it nonassociative, | |
1075 | and subtle programs can break if operations are associated. */ | |
1076 | ||
1077 | if (INTEGRAL_MODE_P (mode) | |
1078 | && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS | |
9b3bd424 RH |
1079 | || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS |
1080 | || (GET_CODE (op0) == CONST | |
1081 | && GET_CODE (XEXP (op0, 0)) == PLUS) | |
1082 | || (GET_CODE (op1) == CONST | |
1083 | && GET_CODE (XEXP (op1, 0)) == PLUS)) | |
e3c8ea67 | 1084 | && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0) |
0cedb36c JL |
1085 | return tem; |
1086 | break; | |
1087 | ||
1088 | case COMPARE: | |
1089 | #ifdef HAVE_cc0 | |
1090 | /* Convert (compare FOO (const_int 0)) to FOO unless we aren't | |
1091 | using cc0, in which case we want to leave it as a COMPARE | |
1092 | so we can distinguish it from a register-register-copy. | |
1093 | ||
1094 | In IEEE floating point, x-0 is not the same as x. */ | |
1095 | ||
1096 | if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT | |
de6c5979 | 1097 | || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) |
4ba5f925 | 1098 | && trueop1 == CONST0_RTX (mode)) |
0cedb36c | 1099 | return op0; |
d882fe51 ZW |
1100 | #endif |
1101 | ||
1102 | /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */ | |
1103 | if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT) | |
1104 | || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU)) | |
1105 | && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx) | |
1106 | { | |
1107 | rtx xop00 = XEXP (op0, 0); | |
1108 | rtx xop10 = XEXP (op1, 0); | |
1109 | ||
1110 | #ifdef HAVE_cc0 | |
1111 | if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0) | |
0cedb36c | 1112 | #else |
d882fe51 ZW |
1113 | if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG |
1114 | && GET_MODE (xop00) == GET_MODE (xop10) | |
1115 | && REGNO (xop00) == REGNO (xop10) | |
1116 | && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC | |
1117 | && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC) | |
0cedb36c | 1118 | #endif |
d882fe51 ZW |
1119 | return xop00; |
1120 | } | |
d882fe51 | 1121 | break; |
9b3bd424 | 1122 | |
0cedb36c | 1123 | case MINUS: |
0cedb36c JL |
1124 | /* We can't assume x-x is 0 even with non-IEEE floating point, |
1125 | but since it is zero except in very strange circumstances, we | |
de6c5979 | 1126 | will treat it as zero with -funsafe-math-optimizations. */ |
4ba5f925 | 1127 | if (rtx_equal_p (trueop0, trueop1) |
0cedb36c | 1128 | && ! side_effects_p (op0) |
de6c5979 | 1129 | && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)) |
0cedb36c JL |
1130 | return CONST0_RTX (mode); |
1131 | ||
71925bc0 RS |
1132 | /* Change subtraction from zero into negation. (0 - x) is the |
1133 | same as -x when x is NaN, infinite, or finite and non-zero. | |
1134 | But if the mode has signed zeros, and does not round towards | |
1135 | -infinity, then 0 - 0 is 0, not -0. */ | |
1136 | if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode)) | |
0cedb36c JL |
1137 | return gen_rtx_NEG (mode, op1); |
1138 | ||
1139 | /* (-1 - a) is ~a. */ | |
4ba5f925 | 1140 | if (trueop0 == constm1_rtx) |
0cedb36c JL |
1141 | return gen_rtx_NOT (mode, op1); |
1142 | ||
71925bc0 RS |
1143 | /* Subtracting 0 has no effect unless the mode has signed zeros |
1144 | and supports rounding towards -infinity. In such a case, | |
1145 | 0 - 0 is -0. */ | |
1146 | if (!(HONOR_SIGNED_ZEROS (mode) | |
1147 | && HONOR_SIGN_DEPENDENT_ROUNDING (mode)) | |
1148 | && trueop1 == CONST0_RTX (mode)) | |
0cedb36c JL |
1149 | return op0; |
1150 | ||
1151 | /* See if this is something like X * C - X or vice versa or | |
1152 | if the multiplication is written as a shift. If so, we can | |
1153 | distribute and make a new multiply, shift, or maybe just | |
1154 | have X (if C is 2 in the example above). But don't make | |
1155 | real multiply if we didn't have one before. */ | |
1156 | ||
1157 | if (! FLOAT_MODE_P (mode)) | |
1158 | { | |
1159 | HOST_WIDE_INT coeff0 = 1, coeff1 = 1; | |
1160 | rtx lhs = op0, rhs = op1; | |
1161 | int had_mult = 0; | |
1162 | ||
1163 | if (GET_CODE (lhs) == NEG) | |
1164 | coeff0 = -1, lhs = XEXP (lhs, 0); | |
1165 | else if (GET_CODE (lhs) == MULT | |
1166 | && GET_CODE (XEXP (lhs, 1)) == CONST_INT) | |
1167 | { | |
1168 | coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0); | |
1169 | had_mult = 1; | |
1170 | } | |
1171 | else if (GET_CODE (lhs) == ASHIFT | |
1172 | && GET_CODE (XEXP (lhs, 1)) == CONST_INT | |
1173 | && INTVAL (XEXP (lhs, 1)) >= 0 | |
1174 | && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1175 | { | |
1176 | coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); | |
1177 | lhs = XEXP (lhs, 0); | |
1178 | } | |
1179 | ||
1180 | if (GET_CODE (rhs) == NEG) | |
1181 | coeff1 = - 1, rhs = XEXP (rhs, 0); | |
1182 | else if (GET_CODE (rhs) == MULT | |
1183 | && GET_CODE (XEXP (rhs, 1)) == CONST_INT) | |
1184 | { | |
1185 | coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0); | |
1186 | had_mult = 1; | |
1187 | } | |
1188 | else if (GET_CODE (rhs) == ASHIFT | |
1189 | && GET_CODE (XEXP (rhs, 1)) == CONST_INT | |
1190 | && INTVAL (XEXP (rhs, 1)) >= 0 | |
1191 | && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1192 | { | |
1193 | coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)); | |
1194 | rhs = XEXP (rhs, 0); | |
1195 | } | |
1196 | ||
1197 | if (rtx_equal_p (lhs, rhs)) | |
1198 | { | |
1199 | tem = simplify_gen_binary (MULT, mode, lhs, | |
1200 | GEN_INT (coeff0 - coeff1)); | |
1201 | return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem; | |
1202 | } | |
1203 | } | |
1204 | ||
71925bc0 | 1205 | /* (a - (-b)) -> (a + b). True even for IEEE. */ |
0cedb36c JL |
1206 | if (GET_CODE (op1) == NEG) |
1207 | return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0)); | |
1208 | ||
1209 | /* If one of the operands is a PLUS or a MINUS, see if we can | |
1210 | simplify this by the associative law. | |
1211 | Don't use the associative law for floating point. | |
1212 | The inaccuracy makes it nonassociative, | |
1213 | and subtle programs can break if operations are associated. */ | |
1214 | ||
1215 | if (INTEGRAL_MODE_P (mode) | |
1216 | && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS | |
9b3bd424 RH |
1217 | || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS |
1218 | || (GET_CODE (op0) == CONST | |
1219 | && GET_CODE (XEXP (op0, 0)) == PLUS) | |
1220 | || (GET_CODE (op1) == CONST | |
1221 | && GET_CODE (XEXP (op1, 0)) == PLUS)) | |
e3c8ea67 | 1222 | && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0) |
0cedb36c JL |
1223 | return tem; |
1224 | ||
1225 | /* Don't let a relocatable value get a negative coeff. */ | |
1226 | if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode) | |
aff8a8d5 CM |
1227 | return simplify_gen_binary (PLUS, mode, |
1228 | op0, | |
1229 | neg_const_int (mode, op1)); | |
0cedb36c JL |
1230 | |
1231 | /* (x - (x & y)) -> (x & ~y) */ | |
1232 | if (GET_CODE (op1) == AND) | |
1233 | { | |
1234 | if (rtx_equal_p (op0, XEXP (op1, 0))) | |
1235 | return simplify_gen_binary (AND, mode, op0, | |
1236 | gen_rtx_NOT (mode, XEXP (op1, 1))); | |
1237 | if (rtx_equal_p (op0, XEXP (op1, 1))) | |
1238 | return simplify_gen_binary (AND, mode, op0, | |
1239 | gen_rtx_NOT (mode, XEXP (op1, 0))); | |
1240 | } | |
1241 | break; | |
1242 | ||
1243 | case MULT: | |
4ba5f925 | 1244 | if (trueop1 == constm1_rtx) |
0cedb36c JL |
1245 | { |
1246 | tem = simplify_unary_operation (NEG, mode, op0, mode); | |
1247 | ||
1248 | return tem ? tem : gen_rtx_NEG (mode, op0); | |
1249 | } | |
1250 | ||
71925bc0 RS |
1251 | /* Maybe simplify x * 0 to 0. The reduction is not valid if |
1252 | x is NaN, since x * 0 is then also NaN. Nor is it valid | |
1253 | when the mode has signed zeros, since multiplying a negative | |
1254 | number by 0 will give -0, not 0. */ | |
1255 | if (!HONOR_NANS (mode) | |
1256 | && !HONOR_SIGNED_ZEROS (mode) | |
4ba5f925 | 1257 | && trueop1 == CONST0_RTX (mode) |
0cedb36c JL |
1258 | && ! side_effects_p (op0)) |
1259 | return op1; | |
1260 | ||
1261 | /* In IEEE floating point, x*1 is not equivalent to x for nans. | |
1262 | However, ANSI says we can drop signals, | |
1263 | so we can do this anyway. */ | |
4ba5f925 | 1264 | if (trueop1 == CONST1_RTX (mode)) |
0cedb36c JL |
1265 | return op0; |
1266 | ||
1267 | /* Convert multiply by constant power of two into shift unless | |
1268 | we are still generating RTL. This test is a kludge. */ | |
4ba5f925 JH |
1269 | if (GET_CODE (trueop1) == CONST_INT |
1270 | && (val = exact_log2 (INTVAL (trueop1))) >= 0 | |
0cedb36c JL |
1271 | /* If the mode is larger than the host word size, and the |
1272 | uppermost bit is set, then this isn't a power of two due | |
1273 | to implicit sign extension. */ | |
1274 | && (width <= HOST_BITS_PER_WIDE_INT | |
1275 | || val != HOST_BITS_PER_WIDE_INT - 1) | |
1276 | && ! rtx_equal_function_value_matters) | |
1277 | return gen_rtx_ASHIFT (mode, op0, GEN_INT (val)); | |
1278 | ||
4ba5f925 JH |
1279 | if (GET_CODE (trueop1) == CONST_DOUBLE |
1280 | && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT) | |
0cedb36c | 1281 | { |
94aca342 | 1282 | struct simplify_binary_is2orm1_args args; |
0cedb36c | 1283 | |
94aca342 ZW |
1284 | args.value = trueop1; |
1285 | if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args)) | |
0cedb36c JL |
1286 | return 0; |
1287 | ||
0cedb36c | 1288 | /* x*2 is x+x and x*(-1) is -x */ |
94aca342 | 1289 | if (args.is_2 && GET_MODE (op0) == mode) |
0cedb36c JL |
1290 | return gen_rtx_PLUS (mode, op0, copy_rtx (op0)); |
1291 | ||
94aca342 | 1292 | else if (args.is_m1 && GET_MODE (op0) == mode) |
0cedb36c JL |
1293 | return gen_rtx_NEG (mode, op0); |
1294 | } | |
1295 | break; | |
1296 | ||
1297 | case IOR: | |
4ba5f925 | 1298 | if (trueop1 == const0_rtx) |
0cedb36c | 1299 | return op0; |
4ba5f925 JH |
1300 | if (GET_CODE (trueop1) == CONST_INT |
1301 | && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) | |
1302 | == GET_MODE_MASK (mode))) | |
0cedb36c | 1303 | return op1; |
4ba5f925 | 1304 | if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
0cedb36c JL |
1305 | return op0; |
1306 | /* A | (~A) -> -1 */ | |
1307 | if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) | |
1308 | || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) | |
1309 | && ! side_effects_p (op0) | |
1310 | && GET_MODE_CLASS (mode) != MODE_CC) | |
1311 | return constm1_rtx; | |
1312 | break; | |
1313 | ||
1314 | case XOR: | |
4ba5f925 | 1315 | if (trueop1 == const0_rtx) |
0cedb36c | 1316 | return op0; |
4ba5f925 JH |
1317 | if (GET_CODE (trueop1) == CONST_INT |
1318 | && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) | |
1319 | == GET_MODE_MASK (mode))) | |
0cedb36c | 1320 | return gen_rtx_NOT (mode, op0); |
4ba5f925 | 1321 | if (trueop0 == trueop1 && ! side_effects_p (op0) |
0cedb36c JL |
1322 | && GET_MODE_CLASS (mode) != MODE_CC) |
1323 | return const0_rtx; | |
1324 | break; | |
1325 | ||
1326 | case AND: | |
4ba5f925 | 1327 | if (trueop1 == const0_rtx && ! side_effects_p (op0)) |
0cedb36c | 1328 | return const0_rtx; |
4ba5f925 JH |
1329 | if (GET_CODE (trueop1) == CONST_INT |
1330 | && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) | |
1331 | == GET_MODE_MASK (mode))) | |
0cedb36c | 1332 | return op0; |
4ba5f925 | 1333 | if (trueop0 == trueop1 && ! side_effects_p (op0) |
0cedb36c JL |
1334 | && GET_MODE_CLASS (mode) != MODE_CC) |
1335 | return op0; | |
1336 | /* A & (~A) -> 0 */ | |
1337 | if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) | |
1338 | || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) | |
1339 | && ! side_effects_p (op0) | |
1340 | && GET_MODE_CLASS (mode) != MODE_CC) | |
1341 | return const0_rtx; | |
1342 | break; | |
1343 | ||
1344 | case UDIV: | |
1345 | /* Convert divide by power of two into shift (divide by 1 handled | |
1346 | below). */ | |
4ba5f925 JH |
1347 | if (GET_CODE (trueop1) == CONST_INT |
1348 | && (arg1 = exact_log2 (INTVAL (trueop1))) > 0) | |
0cedb36c JL |
1349 | return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1)); |
1350 | ||
1351 | /* ... fall through ... */ | |
1352 | ||
1353 | case DIV: | |
4ba5f925 | 1354 | if (trueop1 == CONST1_RTX (mode)) |
a9dc868f | 1355 | { |
cb1ac742 JJ |
1356 | /* On some platforms DIV uses narrower mode than its |
1357 | operands. */ | |
a9dc868f | 1358 | rtx x = gen_lowpart_common (mode, op0); |
cb1ac742 JJ |
1359 | if (x) |
1360 | return x; | |
1361 | else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode) | |
1362 | return gen_lowpart_SUBREG (mode, op0); | |
1363 | else | |
1364 | return op0; | |
a9dc868f | 1365 | } |
0cedb36c | 1366 | |
71925bc0 RS |
1367 | /* Maybe change 0 / x to 0. This transformation isn't safe for |
1368 | modes with NaNs, since 0 / 0 will then be NaN rather than 0. | |
1369 | Nor is it safe for modes with signed zeros, since dividing | |
1370 | 0 by a negative number gives -0, not 0. */ | |
1371 | if (!HONOR_NANS (mode) | |
1372 | && !HONOR_SIGNED_ZEROS (mode) | |
4ba5f925 | 1373 | && trueop0 == CONST0_RTX (mode) |
0cedb36c JL |
1374 | && ! side_effects_p (op1)) |
1375 | return op0; | |
1376 | ||
0cedb36c | 1377 | /* Change division by a constant into multiplication. Only do |
de6c5979 | 1378 | this with -funsafe-math-optimizations. */ |
4ba5f925 JH |
1379 | else if (GET_CODE (trueop1) == CONST_DOUBLE |
1380 | && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT | |
1381 | && trueop1 != CONST0_RTX (mode) | |
de6c5979 | 1382 | && flag_unsafe_math_optimizations) |
0cedb36c JL |
1383 | { |
1384 | REAL_VALUE_TYPE d; | |
4ba5f925 | 1385 | REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); |
0cedb36c JL |
1386 | |
1387 | if (! REAL_VALUES_EQUAL (d, dconst0)) | |
1388 | { | |
0cedb36c JL |
1389 | REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d); |
1390 | return gen_rtx_MULT (mode, op0, | |
1391 | CONST_DOUBLE_FROM_REAL_VALUE (d, mode)); | |
0cedb36c JL |
1392 | } |
1393 | } | |
0cedb36c JL |
1394 | break; |
1395 | ||
1396 | case UMOD: | |
1397 | /* Handle modulus by power of two (mod with 1 handled below). */ | |
4ba5f925 JH |
1398 | if (GET_CODE (trueop1) == CONST_INT |
1399 | && exact_log2 (INTVAL (trueop1)) > 0) | |
0cedb36c JL |
1400 | return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1)); |
1401 | ||
1402 | /* ... fall through ... */ | |
1403 | ||
1404 | case MOD: | |
4ba5f925 | 1405 | if ((trueop0 == const0_rtx || trueop1 == const1_rtx) |
0cedb36c JL |
1406 | && ! side_effects_p (op0) && ! side_effects_p (op1)) |
1407 | return const0_rtx; | |
1408 | break; | |
1409 | ||
1410 | case ROTATERT: | |
1411 | case ROTATE: | |
1412 | /* Rotating ~0 always results in ~0. */ | |
4ba5f925 JH |
1413 | if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT |
1414 | && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode) | |
0cedb36c JL |
1415 | && ! side_effects_p (op1)) |
1416 | return op0; | |
1417 | ||
1418 | /* ... fall through ... */ | |
1419 | ||
1420 | case ASHIFT: | |
1421 | case ASHIFTRT: | |
1422 | case LSHIFTRT: | |
4ba5f925 | 1423 | if (trueop1 == const0_rtx) |
0cedb36c | 1424 | return op0; |
4ba5f925 | 1425 | if (trueop0 == const0_rtx && ! side_effects_p (op1)) |
0cedb36c JL |
1426 | return op0; |
1427 | break; | |
1428 | ||
1429 | case SMIN: | |
4ba5f925 JH |
1430 | if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT |
1431 | && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1) | |
0cedb36c JL |
1432 | && ! side_effects_p (op0)) |
1433 | return op1; | |
4ba5f925 | 1434 | else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
0cedb36c JL |
1435 | return op0; |
1436 | break; | |
1437 | ||
1438 | case SMAX: | |
4ba5f925 JH |
1439 | if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT |
1440 | && ((unsigned HOST_WIDE_INT) INTVAL (trueop1) | |
0cedb36c JL |
1441 | == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1) |
1442 | && ! side_effects_p (op0)) | |
1443 | return op1; | |
4ba5f925 | 1444 | else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
0cedb36c JL |
1445 | return op0; |
1446 | break; | |
1447 | ||
1448 | case UMIN: | |
4ba5f925 | 1449 | if (trueop1 == const0_rtx && ! side_effects_p (op0)) |
0cedb36c | 1450 | return op1; |
4ba5f925 | 1451 | else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
0cedb36c JL |
1452 | return op0; |
1453 | break; | |
1454 | ||
1455 | case UMAX: | |
4ba5f925 | 1456 | if (trueop1 == constm1_rtx && ! side_effects_p (op0)) |
0cedb36c | 1457 | return op1; |
4ba5f925 | 1458 | else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) |
0cedb36c JL |
1459 | return op0; |
1460 | break; | |
1461 | ||
6f1a6c5b RH |
1462 | case SS_PLUS: |
1463 | case US_PLUS: | |
1464 | case SS_MINUS: | |
1465 | case US_MINUS: | |
1466 | /* ??? There are simplifications that can be done. */ | |
1467 | return 0; | |
1468 | ||
0cedb36c JL |
1469 | default: |
1470 | abort (); | |
1471 | } | |
1472 | ||
1473 | return 0; | |
1474 | } | |
1475 | ||
1476 | /* Get the integer argument values in two forms: | |
1477 | zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */ | |
1478 | ||
4ba5f925 JH |
1479 | arg0 = INTVAL (trueop0); |
1480 | arg1 = INTVAL (trueop1); | |
0cedb36c JL |
1481 | |
1482 | if (width < HOST_BITS_PER_WIDE_INT) | |
1483 | { | |
1484 | arg0 &= ((HOST_WIDE_INT) 1 << width) - 1; | |
1485 | arg1 &= ((HOST_WIDE_INT) 1 << width) - 1; | |
1486 | ||
1487 | arg0s = arg0; | |
1488 | if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1))) | |
1489 | arg0s |= ((HOST_WIDE_INT) (-1) << width); | |
1490 | ||
1491 | arg1s = arg1; | |
1492 | if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1))) | |
1493 | arg1s |= ((HOST_WIDE_INT) (-1) << width); | |
1494 | } | |
1495 | else | |
1496 | { | |
1497 | arg0s = arg0; | |
1498 | arg1s = arg1; | |
1499 | } | |
1500 | ||
1501 | /* Compute the value of the arithmetic. */ | |
1502 | ||
1503 | switch (code) | |
1504 | { | |
1505 | case PLUS: | |
1506 | val = arg0s + arg1s; | |
1507 | break; | |
1508 | ||
1509 | case MINUS: | |
1510 | val = arg0s - arg1s; | |
1511 | break; | |
1512 | ||
1513 | case MULT: | |
1514 | val = arg0s * arg1s; | |
1515 | break; | |
1516 | ||
1517 | case DIV: | |
b0835578 DN |
1518 | if (arg1s == 0 |
1519 | || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
1520 | && arg1s == -1)) | |
0cedb36c JL |
1521 | return 0; |
1522 | val = arg0s / arg1s; | |
1523 | break; | |
1524 | ||
1525 | case MOD: | |
b0835578 DN |
1526 | if (arg1s == 0 |
1527 | || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
1528 | && arg1s == -1)) | |
0cedb36c JL |
1529 | return 0; |
1530 | val = arg0s % arg1s; | |
1531 | break; | |
1532 | ||
1533 | case UDIV: | |
b0835578 DN |
1534 | if (arg1 == 0 |
1535 | || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
1536 | && arg1s == -1)) | |
0cedb36c JL |
1537 | return 0; |
1538 | val = (unsigned HOST_WIDE_INT) arg0 / arg1; | |
1539 | break; | |
1540 | ||
1541 | case UMOD: | |
b0835578 DN |
1542 | if (arg1 == 0 |
1543 | || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
1544 | && arg1s == -1)) | |
0cedb36c JL |
1545 | return 0; |
1546 | val = (unsigned HOST_WIDE_INT) arg0 % arg1; | |
1547 | break; | |
1548 | ||
1549 | case AND: | |
1550 | val = arg0 & arg1; | |
1551 | break; | |
1552 | ||
1553 | case IOR: | |
1554 | val = arg0 | arg1; | |
1555 | break; | |
1556 | ||
1557 | case XOR: | |
1558 | val = arg0 ^ arg1; | |
1559 | break; | |
1560 | ||
1561 | case LSHIFTRT: | |
1562 | /* If shift count is undefined, don't fold it; let the machine do | |
1563 | what it wants. But truncate it if the machine will do that. */ | |
1564 | if (arg1 < 0) | |
1565 | return 0; | |
1566 | ||
1567 | #ifdef SHIFT_COUNT_TRUNCATED | |
1568 | if (SHIFT_COUNT_TRUNCATED) | |
1569 | arg1 %= width; | |
1570 | #endif | |
1571 | ||
1572 | val = ((unsigned HOST_WIDE_INT) arg0) >> arg1; | |
1573 | break; | |
1574 | ||
1575 | case ASHIFT: | |
1576 | if (arg1 < 0) | |
1577 | return 0; | |
1578 | ||
1579 | #ifdef SHIFT_COUNT_TRUNCATED | |
1580 | if (SHIFT_COUNT_TRUNCATED) | |
1581 | arg1 %= width; | |
1582 | #endif | |
1583 | ||
1584 | val = ((unsigned HOST_WIDE_INT) arg0) << arg1; | |
1585 | break; | |
1586 | ||
1587 | case ASHIFTRT: | |
1588 | if (arg1 < 0) | |
1589 | return 0; | |
1590 | ||
1591 | #ifdef SHIFT_COUNT_TRUNCATED | |
1592 | if (SHIFT_COUNT_TRUNCATED) | |
1593 | arg1 %= width; | |
1594 | #endif | |
1595 | ||
1596 | val = arg0s >> arg1; | |
1597 | ||
1598 | /* Bootstrap compiler may not have sign extended the right shift. | |
1599 | Manually extend the sign to insure bootstrap cc matches gcc. */ | |
1600 | if (arg0s < 0 && arg1 > 0) | |
1601 | val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1); | |
1602 | ||
1603 | break; | |
1604 | ||
1605 | case ROTATERT: | |
1606 | if (arg1 < 0) | |
1607 | return 0; | |
1608 | ||
1609 | arg1 %= width; | |
1610 | val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1)) | |
1611 | | (((unsigned HOST_WIDE_INT) arg0) >> arg1)); | |
1612 | break; | |
1613 | ||
1614 | case ROTATE: | |
1615 | if (arg1 < 0) | |
1616 | return 0; | |
1617 | ||
1618 | arg1 %= width; | |
1619 | val = ((((unsigned HOST_WIDE_INT) arg0) << arg1) | |
1620 | | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1))); | |
1621 | break; | |
1622 | ||
1623 | case COMPARE: | |
1624 | /* Do nothing here. */ | |
1625 | return 0; | |
1626 | ||
1627 | case SMIN: | |
1628 | val = arg0s <= arg1s ? arg0s : arg1s; | |
1629 | break; | |
1630 | ||
1631 | case UMIN: | |
1632 | val = ((unsigned HOST_WIDE_INT) arg0 | |
1633 | <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); | |
1634 | break; | |
1635 | ||
1636 | case SMAX: | |
1637 | val = arg0s > arg1s ? arg0s : arg1s; | |
1638 | break; | |
1639 | ||
1640 | case UMAX: | |
1641 | val = ((unsigned HOST_WIDE_INT) arg0 | |
1642 | > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); | |
1643 | break; | |
1644 | ||
1645 | default: | |
1646 | abort (); | |
1647 | } | |
1648 | ||
1649 | val = trunc_int_for_mode (val, mode); | |
1650 | ||
1651 | return GEN_INT (val); | |
1652 | } | |
1653 | \f | |
1654 | /* Simplify a PLUS or MINUS, at least one of whose operands may be another | |
1655 | PLUS or MINUS. | |
1656 | ||
1657 | Rather than test for specific case, we do this by a brute-force method | |
1658 | and do all possible simplifications until no more changes occur. Then | |
e3c8ea67 RH |
1659 | we rebuild the operation. |
1660 | ||
1661 | If FORCE is true, then always generate the rtx. This is used to | |
e16e3291 UW |
1662 | canonicalize stuff emitted from simplify_gen_binary. Note that this |
1663 | can still fail if the rtx is too complex. It won't fail just because | |
1664 | the result is not 'simpler' than the input, however. */ | |
0cedb36c | 1665 | |
9b3bd424 RH |
/* One addend collected by simplify_plus_minus while flattening a nest
   of PLUS/MINUS/NEG expressions into a flat sum of operands.  */
struct simplify_plus_minus_op_data
{
  rtx op;	/* The operand itself.  */
  int neg;	/* Nonzero if OP enters the sum negated (subtracted).  */
};
1671 | ||
1672 | static int | |
1673 | simplify_plus_minus_op_data_cmp (p1, p2) | |
1674 | const void *p1; | |
1675 | const void *p2; | |
1676 | { | |
1677 | const struct simplify_plus_minus_op_data *d1 = p1; | |
1678 | const struct simplify_plus_minus_op_data *d2 = p2; | |
1679 | ||
1680 | return (commutative_operand_precedence (d2->op) | |
1681 | - commutative_operand_precedence (d1->op)); | |
1682 | } | |
1683 | ||
/* Try to simplify (CODE:MODE OP0 OP1), where CODE is PLUS or MINUS, by
   flattening the expression into an array of up to 8 addends (each
   carrying a negation flag), combining addends pairwise, and rebuilding
   a simpler sum.  Returns the simplified rtx, or NULL_RTX when no
   improvement was found.  If FORCE is nonzero, the "did we improve
   anything" bailouts are skipped and a rebuilt expression is returned
   regardless.  */
static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      /* Split a nested sum into two separate addends; the second
		 operand of a MINUS enters with its negation flipped.  */
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      /* Absorb a negation into the addend's flag.  */
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      /* Unwrap (const (plus X Y)) into its two constant parts,
		 provided there is room in the array.  */
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      /* Fold the negation flag into the integer constant itself.  */
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		/* Opposite signs combine via MINUS; put the positive
		   addend on the left.  */
		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    /* The combined term replaces slot I; slot J is
		       vacated and packed away later.  */
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      /* Differing negation flags mean the integer must be subtracted
	 from the other constant, so negate it before folding.  */
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
1925 | ||
/* Communication block for check_fold_consts, passed through
   do_float_handler so a floating-point trap while decoding the
   constants can be intercepted by the caller.  */
struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;		/* Output: nonzero when either operand is a
				   NaN or the values could not be read.  */
};
1932 | ||
1933 | static void | |
1934 | check_fold_consts (data) | |
1935 | PTR data; | |
1936 | { | |
14a774a9 | 1937 | struct cfc_args *args = (struct cfc_args *) data; |
0cedb36c JL |
1938 | REAL_VALUE_TYPE d0, d1; |
1939 | ||
90a74703 JH |
1940 | /* We may possibly raise an exception while reading the value. */ |
1941 | args->unordered = 1; | |
0cedb36c JL |
1942 | REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0); |
1943 | REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1); | |
90a74703 JH |
1944 | |
1945 | /* Comparisons of Inf versus Inf are ordered. */ | |
1946 | if (REAL_VALUE_ISNAN (d0) | |
1947 | || REAL_VALUE_ISNAN (d1)) | |
1948 | return; | |
0cedb36c JL |
1949 | args->equal = REAL_VALUES_EQUAL (d0, d1); |
1950 | args->op0lt = REAL_VALUES_LESS (d0, d1); | |
1951 | args->op1lt = REAL_VALUES_LESS (d1, d0); | |
90a74703 | 1952 | args->unordered = 0; |
0cedb36c JL |
1953 | } |
1954 | ||
1955 | /* Like simplify_binary_operation except used for relational operators. | |
1956 | MODE is the mode of the operands, not that of the result. If MODE | |
1957 | is VOIDmode, both operands must also be VOIDmode and we compare the | |
1958 | operands in "infinite precision". | |
1959 | ||
1960 | If no simplification is possible, this function returns zero. Otherwise, | |
1961 | it returns either const_true_rtx or const0_rtx. */ | |
1962 | ||
rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  /* VOIDmode comparisons require both operands to be VOIDmode too
     (the "infinite precision" case from the function comment).  */
  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* Work on constant-pool contents where possible; TRUEOP0/TRUEOP1
     track the resolved values while OP0/OP1 keep the original forms.  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  /* Under -funsafe-math-optimizations, ORDERED folds to true and
     UNORDERED to false.  */
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result.  */
  if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Setup input for check_fold_consts() */
      args.op0 = trueop0;
      args.op1 = trueop1;


      /* A trap inside check_fold_consts also counts as unordered.  */
      if (!do_float_handler (check_fold_consts, (PTR) &args))
	args.unordered = 1;

      /* An unordered comparison makes every UN* test (and NE) true and
	 every ordered test false.  */
      if (args.unordered)
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      /* Receive output from check_fold_consts() */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      /* Values that fit in one host word determine the high word.  */
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      /* Double-word comparison: high words first, low words break ties
	 (low words compare unsigned in both the signed and unsigned
	 orderings).  */
      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
2234 | \f | |
2235 | /* Simplify CODE, an operation with result mode MODE and three operands, | |
2236 | OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became | |
2237 | a constant. Return 0 if no simplifications is possible. */ | |
2238 | ||
rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      /* OP1 is the field width in bits and OP2 its starting position;
	 fold the extraction when everything is a constant and the field
	 fits in a host word.  */
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  /* Shift the field down to bit 0; bit numbering depends on
	     the target's bit endianness.  */
	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;

    case IF_THEN_ELSE:
      /* A constant condition selects one arm outright.  */
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  /* The condition is a comparison; try folding it to a constant.
	     When the comparison operands are VOIDmode (e.g. both
	     constants), fall back to OP0_MODE.  */
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      /* (cond ? STORE_FLAG_VALUE : 0) is just the comparison;
		 the swapped pair is the reversed comparison, when one
		 exists.  */
	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
2357 | ||
eea50aa0 JH |
2358 | /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE) |
2359 | Return 0 if no simplifications is possible. */ | |
2360 | rtx | |
2361 | simplify_subreg (outermode, op, innermode, byte) | |
2362 | rtx op; | |
2363 | unsigned int byte; | |
2364 | enum machine_mode outermode, innermode; | |
2365 | { | |
2366 | /* Little bit of sanity checking. */ | |
2367 | if (innermode == VOIDmode || outermode == VOIDmode | |
2368 | || innermode == BLKmode || outermode == BLKmode) | |
2369 | abort (); | |
2370 | ||
2371 | if (GET_MODE (op) != innermode | |
2372 | && GET_MODE (op) != VOIDmode) | |
2373 | abort (); | |
2374 | ||
2375 | if (byte % GET_MODE_SIZE (outermode) | |
2376 | || byte >= GET_MODE_SIZE (innermode)) | |
2377 | abort (); | |
2378 | ||
e5c56fd9 JH |
2379 | if (outermode == innermode && !byte) |
2380 | return op; | |
2381 | ||
eea50aa0 JH |
2382 | /* Attempt to simplify constant to non-SUBREG expression. */ |
2383 | if (CONSTANT_P (op)) | |
2384 | { | |
2385 | int offset, part; | |
ae0ed63a | 2386 | unsigned HOST_WIDE_INT val = 0; |
eea50aa0 | 2387 | |
a1b2ebc0 | 2388 | /* ??? This code is partly redundant with code below, but can handle |
eea50aa0 JH |
2389 | the subregs of floats and similar corner cases. |
2390 | Later it we should move all simplification code here and rewrite | |
2391 | GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends | |
2392 | using SIMPLIFY_SUBREG. */ | |
e0e08ac2 | 2393 | if (subreg_lowpart_offset (outermode, innermode) == byte) |
eea50aa0 JH |
2394 | { |
2395 | rtx new = gen_lowpart_if_possible (outermode, op); | |
2396 | if (new) | |
2397 | return new; | |
2398 | } | |
2399 | ||
2400 | /* Similar comment as above apply here. */ | |
2401 | if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD | |
2402 | && GET_MODE_SIZE (innermode) > UNITS_PER_WORD | |
2403 | && GET_MODE_CLASS (outermode) == MODE_INT) | |
2404 | { | |
0631e0bf JH |
2405 | rtx new = constant_subword (op, |
2406 | (byte / UNITS_PER_WORD), | |
2407 | innermode); | |
eea50aa0 JH |
2408 | if (new) |
2409 | return new; | |
2410 | } | |
2411 | ||
2412 | offset = byte * BITS_PER_UNIT; | |
2413 | switch (GET_CODE (op)) | |
2414 | { | |
2415 | case CONST_DOUBLE: | |
2416 | if (GET_MODE (op) != VOIDmode) | |
2417 | break; | |
2418 | ||
2419 | /* We can't handle this case yet. */ | |
2420 | if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT) | |
ae0ed63a | 2421 | return NULL_RTX; |
eea50aa0 JH |
2422 | |
2423 | part = offset >= HOST_BITS_PER_WIDE_INT; | |
2424 | if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT | |
2425 | && BYTES_BIG_ENDIAN) | |
2426 | || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT | |
2427 | && WORDS_BIG_ENDIAN)) | |
2428 | part = !part; | |
2429 | val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op); | |
2430 | offset %= HOST_BITS_PER_WIDE_INT; | |
2431 | ||
2432 | /* We've already picked the word we want from a double, so | |
2433 | pretend this is actually an integer. */ | |
2434 | innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0); | |
2435 | ||
2436 | /* FALLTHROUGH */ | |
2437 | case CONST_INT: | |
2438 | if (GET_CODE (op) == CONST_INT) | |
2439 | val = INTVAL (op); | |
2440 | ||
2441 | /* We don't handle synthetizing of non-integral constants yet. */ | |
2442 | if (GET_MODE_CLASS (outermode) != MODE_INT) | |
ae0ed63a | 2443 | return NULL_RTX; |
eea50aa0 JH |
2444 | |
2445 | if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN) | |
2446 | { | |
2447 | if (WORDS_BIG_ENDIAN) | |
2448 | offset = (GET_MODE_BITSIZE (innermode) | |
2449 | - GET_MODE_BITSIZE (outermode) - offset); | |
2450 | if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN | |
2451 | && GET_MODE_SIZE (outermode) < UNITS_PER_WORD) | |
2452 | offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode) | |
2453 | - 2 * (offset % BITS_PER_WORD)); | |
2454 | } | |
2455 | ||
2456 | if (offset >= HOST_BITS_PER_WIDE_INT) | |
2457 | return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx; | |
2458 | else | |
2459 | { | |
2460 | val >>= offset; | |
2461 | if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT) | |
2462 | val = trunc_int_for_mode (val, outermode); | |
2463 | return GEN_INT (val); | |
2464 | } | |
2465 | default: | |
2466 | break; | |
2467 | } | |
2468 | } | |
2469 | ||
2470 | /* Changing mode twice with SUBREG => just change it once, | |
2471 | or not at all if changing back op starting mode. */ | |
2472 | if (GET_CODE (op) == SUBREG) | |
2473 | { | |
2474 | enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op)); | |
1ffb3f9a | 2475 | int final_offset = byte + SUBREG_BYTE (op); |
eea50aa0 JH |
2476 | rtx new; |
2477 | ||
2478 | if (outermode == innermostmode | |
2479 | && byte == 0 && SUBREG_BYTE (op) == 0) | |
2480 | return SUBREG_REG (op); | |
2481 | ||
1ffb3f9a JH |
2482 | /* The SUBREG_BYTE represents offset, as if the value were stored |
2483 | in memory. Irritating exception is paradoxical subreg, where | |
2484 | we define SUBREG_BYTE to be 0. On big endian machines, this | |
2d76cb1a | 2485 | value should be negative. For a moment, undo this exception. */ |
1ffb3f9a | 2486 | if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode)) |
eea50aa0 | 2487 | { |
1ffb3f9a JH |
2488 | int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)); |
2489 | if (WORDS_BIG_ENDIAN) | |
2490 | final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; | |
2491 | if (BYTES_BIG_ENDIAN) | |
2492 | final_offset += difference % UNITS_PER_WORD; | |
2493 | } | |
2494 | if (SUBREG_BYTE (op) == 0 | |
2495 | && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode)) | |
2496 | { | |
2497 | int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode)); | |
2498 | if (WORDS_BIG_ENDIAN) | |
2499 | final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; | |
2500 | if (BYTES_BIG_ENDIAN) | |
2501 | final_offset += difference % UNITS_PER_WORD; | |
2502 | } | |
2503 | ||
2504 | /* See whether resulting subreg will be paradoxical. */ | |
2fe7bb35 | 2505 | if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode)) |
1ffb3f9a JH |
2506 | { |
2507 | /* In nonparadoxical subregs we can't handle negative offsets. */ | |
2508 | if (final_offset < 0) | |
2509 | return NULL_RTX; | |
2510 | /* Bail out in case resulting subreg would be incorrect. */ | |
2511 | if (final_offset % GET_MODE_SIZE (outermode) | |
ae0ed63a JM |
2512 | || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode)) |
2513 | return NULL_RTX; | |
1ffb3f9a JH |
2514 | } |
2515 | else | |
2516 | { | |
2517 | int offset = 0; | |
2518 | int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode)); | |
2519 | ||
2520 | /* In paradoxical subreg, see if we are still looking on lower part. | |
2521 | If so, our SUBREG_BYTE will be 0. */ | |
2522 | if (WORDS_BIG_ENDIAN) | |
2523 | offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; | |
2524 | if (BYTES_BIG_ENDIAN) | |
2525 | offset += difference % UNITS_PER_WORD; | |
2526 | if (offset == final_offset) | |
2527 | final_offset = 0; | |
eea50aa0 | 2528 | else |
ae0ed63a | 2529 | return NULL_RTX; |
eea50aa0 JH |
2530 | } |
2531 | ||
2532 | /* Recurse for futher possible simplifications. */ | |
07b53149 JH |
2533 | new = simplify_subreg (outermode, SUBREG_REG (op), |
2534 | GET_MODE (SUBREG_REG (op)), | |
eea50aa0 JH |
2535 | final_offset); |
2536 | if (new) | |
2537 | return new; | |
07b53149 | 2538 | return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset); |
eea50aa0 JH |
2539 | } |
2540 | ||
2541 | /* SUBREG of a hard register => just change the register number | |
2542 | and/or mode. If the hard register is not valid in that mode, | |
2543 | suppress this simplification. If the hard register is the stack, | |
2544 | frame, or argument pointer, leave this as a SUBREG. */ | |
2545 | ||
0fff4222 | 2546 | if (REG_P (op) |
e0e08ac2 JH |
2547 | && (! REG_FUNCTION_VALUE_P (op) |
2548 | || ! rtx_equal_function_value_matters) | |
2549 | #ifdef CLASS_CANNOT_CHANGE_MODE | |
2550 | && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode) | |
2551 | && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT | |
2552 | && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT | |
2553 | && (TEST_HARD_REG_BIT | |
2554 | (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE], | |
2555 | REGNO (op)))) | |
2556 | #endif | |
eea50aa0 | 2557 | && REGNO (op) < FIRST_PSEUDO_REGISTER |
e0e08ac2 JH |
2558 | && ((reload_completed && !frame_pointer_needed) |
2559 | || (REGNO (op) != FRAME_POINTER_REGNUM | |
eea50aa0 | 2560 | #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM |
e0e08ac2 | 2561 | && REGNO (op) != HARD_FRAME_POINTER_REGNUM |
eea50aa0 | 2562 | #endif |
e0e08ac2 | 2563 | )) |
eea50aa0 JH |
2564 | #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM |
2565 | && REGNO (op) != ARG_POINTER_REGNUM | |
2566 | #endif | |
2567 | && REGNO (op) != STACK_POINTER_REGNUM) | |
2568 | { | |
2569 | int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte), | |
2570 | 0); | |
2571 | ||
e0e08ac2 JH |
2572 | /* ??? We do allow it if the current REG is not valid for |
2573 | its mode. This is a kludge to work around how float/complex | |
2574 | arguments are passed on 32-bit Sparc and should be fixed. */ | |
2575 | if (HARD_REGNO_MODE_OK (final_regno, outermode) | |
2576 | || ! HARD_REGNO_MODE_OK (REGNO (op), innermode)) | |
49d801d3 JH |
2577 | { |
2578 | rtx x = gen_rtx_REG (outermode, final_regno); | |
2579 | ||
2580 | /* Propagate original regno. We don't have any way to specify | |
2581 | the offset inside original regno, so do so only for lowpart. | |
2582 | The information is used only by alias analysis that can not | |
2583 | grok partial register anyway. */ | |
2584 | ||
2585 | if (subreg_lowpart_offset (outermode, innermode) == byte) | |
2586 | ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op); | |
2587 | return x; | |
2588 | } | |
eea50aa0 JH |
2589 | } |
2590 | ||
2591 | /* If we have a SUBREG of a register that we are replacing and we are | |
2592 | replacing it with a MEM, make a new MEM and try replacing the | |
2593 | SUBREG with it. Don't do this if the MEM has a mode-dependent address | |
2594 | or if we would be widening it. */ | |
2595 | ||
2596 | if (GET_CODE (op) == MEM | |
2597 | && ! mode_dependent_address_p (XEXP (op, 0)) | |
04864a46 JH |
2598 | /* Allow splitting of volatile memory references in case we don't |
2599 | have instruction to move the whole thing. */ | |
2600 | && (! MEM_VOLATILE_P (op) | |
ef89d648 | 2601 | || ! have_insn_for (SET, innermode)) |
eea50aa0 | 2602 | && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op))) |
f1ec5147 | 2603 | return adjust_address_nv (op, outermode, byte); |
e5c56fd9 JH |
2604 | |
2605 | /* Handle complex values represented as CONCAT | |
2606 | of real and imaginary part. */ | |
2607 | if (GET_CODE (op) == CONCAT) | |
2608 | { | |
5fd83d4e | 2609 | int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode); |
e5c56fd9 JH |
2610 | rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1); |
2611 | unsigned int final_offset; | |
9199d62b | 2612 | rtx res; |
e5c56fd9 | 2613 | |
5fd83d4e | 2614 | final_offset = byte % (GET_MODE_UNIT_SIZE (innermode)); |
9199d62b DD |
2615 | res = simplify_subreg (outermode, part, GET_MODE (part), final_offset); |
2616 | if (res) | |
2617 | return res; | |
684d9f3b | 2618 | /* We can at least simplify it by referring directly to the relevant part. */ |
9199d62b | 2619 | return gen_rtx_SUBREG (outermode, part, final_offset); |
e5c56fd9 JH |
2620 | } |
2621 | ||
eea50aa0 JH |
2622 | return NULL_RTX; |
2623 | } | |
949c5d62 JH |
2624 | /* Make a SUBREG operation or equivalent if it folds. */ |
2625 | ||
2626 | rtx | |
2627 | simplify_gen_subreg (outermode, op, innermode, byte) | |
2628 | rtx op; | |
2629 | unsigned int byte; | |
2630 | enum machine_mode outermode, innermode; | |
2631 | { | |
2632 | rtx new; | |
2633 | /* Little bit of sanity checking. */ | |
2634 | if (innermode == VOIDmode || outermode == VOIDmode | |
2635 | || innermode == BLKmode || outermode == BLKmode) | |
2636 | abort (); | |
2637 | ||
2638 | if (GET_MODE (op) != innermode | |
2639 | && GET_MODE (op) != VOIDmode) | |
2640 | abort (); | |
2641 | ||
2642 | if (byte % GET_MODE_SIZE (outermode) | |
2643 | || byte >= GET_MODE_SIZE (innermode)) | |
2644 | abort (); | |
2645 | ||
bd77ba51 RS |
2646 | if (GET_CODE (op) == QUEUED) |
2647 | return NULL_RTX; | |
2648 | ||
949c5d62 JH |
2649 | new = simplify_subreg (outermode, op, innermode, byte); |
2650 | if (new) | |
2651 | return new; | |
2652 | ||
2653 | if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode) | |
2654 | return NULL_RTX; | |
2655 | ||
2656 | return gen_rtx_SUBREG (outermode, op, byte); | |
2657 | } | |
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL
   simplification code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* Dispatch on the rtx class character and delegate to the
     appropriate simplify_*_operation routine.  */
  switch (GET_RTX_CLASS (code))
    {
    case '1':
      /* Unary operation: the operand's mode is passed separately since
	 it may differ from MODE (e.g. for conversions).  */
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      /* Commutative binary operation: first canonicalize operand order.
	 NOTE: the swap mutates X in place before simplifying.  */
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	{
	  rtx tem;

	  tem = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tem;
	  return simplify_binary_operation (code, mode,
					    XEXP (x, 0), XEXP (x, 1));
	}
      /* FALLTHRU -- operands already canonical, handle as plain binary.  */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      /* Ternary and bitfield operations.  */
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      /* Comparison: use the first operand's mode unless it is VOIDmode
	 (e.g. a constant), in which case fall back to the second's.  */
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      return NULL;
    default:
      return NULL;
    }
}