]>
Commit | Line | Data |
---|---|---|
749a2da1 | 1 | /* RTL simplification functions for GNU compiler. |
d1e082c2 | 2 | Copyright (C) 1987-2013 Free Software Foundation, Inc. |
0cedb36c | 3 | |
1322177d | 4 | This file is part of GCC. |
0cedb36c | 5 | |
1322177d LB |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free | |
9dcd6f09 | 8 | Software Foundation; either version 3, or (at your option) any later |
1322177d | 9 | version. |
0cedb36c | 10 | |
1322177d LB |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 | for more details. | |
0cedb36c JL |
15 | |
16 | You should have received a copy of the GNU General Public License | |
9dcd6f09 NC |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ | |
0cedb36c JL |
19 | |
20 | ||
21 | #include "config.h" | |
0cedb36c | 22 | #include "system.h" |
4977bab6 ZW |
23 | #include "coretypes.h" |
24 | #include "tm.h" | |
0cedb36c | 25 | #include "rtl.h" |
efdc7e19 | 26 | #include "tree.h" |
0cedb36c JL |
27 | #include "tm_p.h" |
28 | #include "regs.h" | |
29 | #include "hard-reg-set.h" | |
30 | #include "flags.h" | |
0cedb36c JL |
31 | #include "insn-config.h" |
32 | #include "recog.h" | |
33 | #include "function.h" | |
34 | #include "expr.h" | |
718f9c0f | 35 | #include "diagnostic-core.h" |
eab5c70a | 36 | #include "ggc.h" |
7daebb7a | 37 | #include "target.h" |
0cedb36c JL |
38 | |
39 | /* Simplification and canonicalization of RTL. */ | |
40 | ||
3839069b ZW |
41 | /* Much code operates on (low, high) pairs; the low value is an |
42 | unsigned wide int, the high value a signed wide int. We | |
43 | occasionally need to sign extend from low to high as if low were a | |
44 | signed wide int. */ | |
ba34d877 | 45 | #define HWI_SIGN_EXTEND(low) \ |
3839069b | 46 | ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0)) |
0cedb36c | 47 | |
f7d504c2 KG |
48 | static rtx neg_const_int (enum machine_mode, const_rtx); |
49 | static bool plus_minus_operand_p (const_rtx); | |
7e0b4eae | 50 | static bool simplify_plus_minus_op_data_cmp (rtx, rtx); |
1941069a | 51 | static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx); |
550d1387 GK |
52 | static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode, |
53 | unsigned int); | |
dd61aa98 RS |
54 | static rtx simplify_associative_operation (enum rtx_code, enum machine_mode, |
55 | rtx, rtx); | |
c6fb08ad PB |
56 | static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode, |
57 | enum machine_mode, rtx, rtx); | |
0a67e02c PB |
58 | static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx); |
59 | static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode, | |
60 | rtx, rtx, rtx, rtx); | |
aff8a8d5 CM |
61 | \f |
62 | /* Negate a CONST_INT rtx, truncating (because a conversion from a | |
23d1aac4 | 63 | maximally negative number can overflow). */ |
aff8a8d5 | 64 | static rtx |
f7d504c2 | 65 | neg_const_int (enum machine_mode mode, const_rtx i) |
aff8a8d5 | 66 | { |
eb87c7c4 | 67 | return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode); |
aff8a8d5 CM |
68 | } |
69 | ||
0b24db88 RS |
70 | /* Test whether expression, X, is an immediate constant that represents |
71 | the most significant bit of machine mode MODE. */ | |
72 | ||
b757b9f8 | 73 | bool |
f7d504c2 | 74 | mode_signbit_p (enum machine_mode mode, const_rtx x) |
0b24db88 RS |
75 | { |
76 | unsigned HOST_WIDE_INT val; | |
77 | unsigned int width; | |
78 | ||
79 | if (GET_MODE_CLASS (mode) != MODE_INT) | |
80 | return false; | |
81 | ||
2d0c270f | 82 | width = GET_MODE_PRECISION (mode); |
0b24db88 RS |
83 | if (width == 0) |
84 | return false; | |
b8698a0f | 85 | |
0b24db88 | 86 | if (width <= HOST_BITS_PER_WIDE_INT |
481683e1 | 87 | && CONST_INT_P (x)) |
0b24db88 | 88 | val = INTVAL (x); |
49ab6098 | 89 | else if (width <= HOST_BITS_PER_DOUBLE_INT |
48175537 | 90 | && CONST_DOUBLE_AS_INT_P (x) |
0b24db88 RS |
91 | && CONST_DOUBLE_LOW (x) == 0) |
92 | { | |
93 | val = CONST_DOUBLE_HIGH (x); | |
94 | width -= HOST_BITS_PER_WIDE_INT; | |
95 | } | |
96 | else | |
929e10f4 | 97 | /* FIXME: We don't yet have a representation for wider modes. */ |
0b24db88 RS |
98 | return false; |
99 | ||
100 | if (width < HOST_BITS_PER_WIDE_INT) | |
101 | val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; | |
102 | return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1)); | |
103 | } | |
2d0c270f BS |
104 | |
105 | /* Test whether VAL is equal to the most significant bit of mode MODE | |
106 | (after masking with the mode mask of MODE). Returns false if the | |
107 | precision of MODE is too large to handle. */ | |
108 | ||
109 | bool | |
110 | val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val) | |
111 | { | |
112 | unsigned int width; | |
113 | ||
114 | if (GET_MODE_CLASS (mode) != MODE_INT) | |
115 | return false; | |
116 | ||
117 | width = GET_MODE_PRECISION (mode); | |
118 | if (width == 0 || width > HOST_BITS_PER_WIDE_INT) | |
119 | return false; | |
120 | ||
121 | val &= GET_MODE_MASK (mode); | |
122 | return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1)); | |
123 | } | |
124 | ||
125 | /* Test whether the most significant bit of mode MODE is set in VAL. | |
126 | Returns false if the precision of MODE is too large to handle. */ | |
127 | bool | |
128 | val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val) | |
129 | { | |
130 | unsigned int width; | |
131 | ||
132 | if (GET_MODE_CLASS (mode) != MODE_INT) | |
133 | return false; | |
134 | ||
135 | width = GET_MODE_PRECISION (mode); | |
136 | if (width == 0 || width > HOST_BITS_PER_WIDE_INT) | |
137 | return false; | |
138 | ||
139 | val &= (unsigned HOST_WIDE_INT) 1 << (width - 1); | |
140 | return val != 0; | |
141 | } | |
142 | ||
143 | /* Test whether the most significant bit of mode MODE is clear in VAL. | |
144 | Returns false if the precision of MODE is too large to handle. */ | |
145 | bool | |
146 | val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val) | |
147 | { | |
148 | unsigned int width; | |
149 | ||
150 | if (GET_MODE_CLASS (mode) != MODE_INT) | |
151 | return false; | |
152 | ||
153 | width = GET_MODE_PRECISION (mode); | |
154 | if (width == 0 || width > HOST_BITS_PER_WIDE_INT) | |
155 | return false; | |
156 | ||
157 | val &= (unsigned HOST_WIDE_INT) 1 << (width - 1); | |
158 | return val == 0; | |
159 | } | |
749a2da1 | 160 | \f |
786de7eb | 161 | /* Make a binary operation by properly ordering the operands and |
0cedb36c JL |
162 | seeing if the expression folds. */ |
163 | ||
164 | rtx | |
46c5ad27 AJ |
165 | simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0, |
166 | rtx op1) | |
0cedb36c JL |
167 | { |
168 | rtx tem; | |
169 | ||
0cedb36c JL |
170 | /* If this simplifies, do it. */ |
171 | tem = simplify_binary_operation (code, mode, op0, op1); | |
0cedb36c JL |
172 | if (tem) |
173 | return tem; | |
174 | ||
68162a97 ILT |
175 | /* Put complex operands first and constants second if commutative. */ |
176 | if (GET_RTX_CLASS (code) == RTX_COMM_ARITH | |
177 | && swap_commutative_operands_p (op0, op1)) | |
178 | tem = op0, op0 = op1, op1 = tem; | |
179 | ||
e16e3291 | 180 | return gen_rtx_fmt_ee (code, mode, op0, op1); |
0cedb36c JL |
181 | } |
182 | \f | |
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references: recurse
	 on the operand and, if it resolved to a floating constant,
	 re-extend that constant to X's mode.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  /* Don't handle BLKmode references.  */
  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  /* For a LO_SUM, the second operand carries the full symbolic
     address; look through to it.  */
  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
258 | \f | |
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.

   If X is a MEM whose MEM_EXPR/MEM_OFFSET attributes identify a
   static or thread-local VAR_DECL (possibly through a component
   reference), rewrite X in terms of the variable's DECL_RTL so the
   underlying object is visible again.  Returns X (possibly unchanged)
   in every case.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      /* Reduce DECL to a bare VAR_DECL, accumulating any constant
	 byte offset implied by a component reference into OFFSET.
	 DECL is set to NULL whenever the reference can't be handled.  */
      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    /* Give up unless the reference is a whole-mode,
	       byte-aligned piece with a host-representable offset.  */
	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).
		 Note the embedded assignments to N and O: they strip
		 the PLUS wrappers before the final rtx_equal_p test,
		 so the evaluation order of this condition matters.  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
358 | \f | |
d9c695ff RK |
359 | /* Make a unary operation by first seeing if it folds and otherwise making |
360 | the specified operation. */ | |
361 | ||
362 | rtx | |
46c5ad27 AJ |
363 | simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op, |
364 | enum machine_mode op_mode) | |
d9c695ff RK |
365 | { |
366 | rtx tem; | |
367 | ||
368 | /* If this simplifies, use it. */ | |
369 | if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0) | |
370 | return tem; | |
371 | ||
372 | return gen_rtx_fmt_e (code, mode, op); | |
373 | } | |
374 | ||
375 | /* Likewise for ternary operations. */ | |
376 | ||
377 | rtx | |
46c5ad27 AJ |
378 | simplify_gen_ternary (enum rtx_code code, enum machine_mode mode, |
379 | enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2) | |
d9c695ff RK |
380 | { |
381 | rtx tem; | |
382 | ||
383 | /* If this simplifies, use it. */ | |
384 | if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode, | |
385 | op0, op1, op2))) | |
386 | return tem; | |
387 | ||
388 | return gen_rtx_fmt_eee (code, mode, op0, op1, op2); | |
389 | } | |
c6fb08ad | 390 | |
141e454b | 391 | /* Likewise, for relational operations. |
c6fb08ad | 392 | CMP_MODE specifies mode comparison is done in. */ |
d9c695ff RK |
393 | |
394 | rtx | |
46c5ad27 AJ |
395 | simplify_gen_relational (enum rtx_code code, enum machine_mode mode, |
396 | enum machine_mode cmp_mode, rtx op0, rtx op1) | |
d9c695ff RK |
397 | { |
398 | rtx tem; | |
399 | ||
c6fb08ad PB |
400 | if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode, |
401 | op0, op1))) | |
402 | return tem; | |
bc9c18c3 | 403 | |
d9c695ff RK |
404 | return gen_rtx_fmt_ee (code, mode, op0, op1); |
405 | } | |
406 | \f | |
457eeaae JJ |
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  /* First see whether X itself should be replaced.  The callback path
     is marked unlikely: most callers use the plain OLD_RTX/DATA form.  */
  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  /* Otherwise recurse on X's operands, re-simplifying via the
     simplify_gen_* routines.  Each case returns X itself (pointer
     identity) when no operand changed, so callers can cheaply detect
     "nothing replaced".  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      /* The comparison mode comes from whichever operand has one;
	 the other may be VOIDmode (e.g. a const_int).  Capture it
	 before replacement, which may change the operands' modes.  */
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      /* If the original first operand had no mode, take the mode of
	 its replacement.  */
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  /* simplify_gen_subreg may fail; fall back to X then.  */
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  /* Generic fallback: walk X's format string, recursing into 'e'
     (rtx) and 'E' (rtvec) operands.  X and any vector are copied
     lazily, only once a replacement actually changes something.  */
  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
3af4ba41 RS |
556 | |
557 | /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the | |
558 | resulting RTX. Return a new RTX which is as simplified as possible. */ | |
559 | ||
560 | rtx | |
561 | simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx) | |
562 | { | |
563 | return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx); | |
564 | } | |
d9c695ff | 565 | \f |
40c5ed5b RS |
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (enum machine_mode mode, rtx op,
		     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  /* A truncation must narrow (or at worst preserve) the precision.  */
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
     to (op:SI (truncate:SI (x:DI)) (truncate:SI (x:DI))).
     This is valid because PLUS/MINUS/MULT low bits do not depend on
     the discarded high bits.  */
  if (GET_CODE (op) == PLUS
      || GET_CODE (op) == MINUS
      || GET_CODE (op) == MULT)
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      /* The shift count selects which word; convert it to a byte
	 offset, adjusting direction for word endianness.  */
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  /* No simplification found.  */
  return NULL_RTX;
}
779 | \f | |
0cedb36c JL |
780 | /* Try to simplify a unary operation CODE whose output mode is to be |
781 | MODE with input operand OP whose mode was originally OP_MODE. | |
782 | Return zero if no simplification can be made. */ | |
0cedb36c | 783 | rtx |
46c5ad27 AJ |
784 | simplify_unary_operation (enum rtx_code code, enum machine_mode mode, |
785 | rtx op, enum machine_mode op_mode) | |
0a67e02c PB |
786 | { |
787 | rtx trueop, tem; | |
788 | ||
0a67e02c PB |
789 | trueop = avoid_constant_pool_reference (op); |
790 | ||
791 | tem = simplify_const_unary_operation (code, mode, trueop, op_mode); | |
792 | if (tem) | |
793 | return tem; | |
794 | ||
795 | return simplify_unary_operation_1 (code, mode, op); | |
796 | } | |
797 | ||
798 | /* Perform some simplifications we can do even if the operands | |
799 | aren't constant. */ | |
800 | static rtx | |
801 | simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) | |
802 | { | |
803 | enum rtx_code reversed; | |
804 | rtx temp; | |
805 | ||
806 | switch (code) | |
807 | { | |
808 | case NOT: | |
809 | /* (not (not X)) == X. */ | |
810 | if (GET_CODE (op) == NOT) | |
811 | return XEXP (op, 0); | |
812 | ||
bd1ef757 PB |
813 | /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the |
814 | comparison is all ones. */ | |
0a67e02c PB |
815 | if (COMPARISON_P (op) |
816 | && (mode == BImode || STORE_FLAG_VALUE == -1) | |
817 | && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)) | |
818 | return simplify_gen_relational (reversed, mode, VOIDmode, | |
819 | XEXP (op, 0), XEXP (op, 1)); | |
820 | ||
821 | /* (not (plus X -1)) can become (neg X). */ | |
822 | if (GET_CODE (op) == PLUS | |
823 | && XEXP (op, 1) == constm1_rtx) | |
824 | return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); | |
825 | ||
826 | /* Similarly, (not (neg X)) is (plus X -1). */ | |
827 | if (GET_CODE (op) == NEG) | |
0a81f074 | 828 | return plus_constant (mode, XEXP (op, 0), -1); |
0a67e02c PB |
829 | |
830 | /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */ | |
831 | if (GET_CODE (op) == XOR | |
481683e1 | 832 | && CONST_INT_P (XEXP (op, 1)) |
0a67e02c PB |
833 | && (temp = simplify_unary_operation (NOT, mode, |
834 | XEXP (op, 1), mode)) != 0) | |
835 | return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp); | |
836 | ||
837 | /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */ | |
838 | if (GET_CODE (op) == PLUS | |
481683e1 | 839 | && CONST_INT_P (XEXP (op, 1)) |
0a67e02c PB |
840 | && mode_signbit_p (mode, XEXP (op, 1)) |
841 | && (temp = simplify_unary_operation (NOT, mode, | |
842 | XEXP (op, 1), mode)) != 0) | |
843 | return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp); | |
844 | ||
845 | ||
846 | /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for | |
847 | operands other than 1, but that is not valid. We could do a | |
848 | similar simplification for (not (lshiftrt C X)) where C is | |
849 | just the sign bit, but this doesn't seem common enough to | |
850 | bother with. */ | |
851 | if (GET_CODE (op) == ASHIFT | |
852 | && XEXP (op, 0) == const1_rtx) | |
853 | { | |
854 | temp = simplify_gen_unary (NOT, mode, const1_rtx, mode); | |
855 | return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1)); | |
856 | } | |
857 | ||
0a67e02c PB |
858 | /* (not (ashiftrt foo C)) where C is the number of bits in FOO |
859 | minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1, | |
860 | so we can perform the above simplification. */ | |
0a67e02c PB |
861 | if (STORE_FLAG_VALUE == -1 |
862 | && GET_CODE (op) == ASHIFTRT | |
481683e1 | 863 | && GET_CODE (XEXP (op, 1)) |
5511bc5a | 864 | && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1) |
0a67e02c PB |
865 | return simplify_gen_relational (GE, mode, VOIDmode, |
866 | XEXP (op, 0), const0_rtx); | |
867 | ||
bd1ef757 PB |
868 | |
869 | if (GET_CODE (op) == SUBREG | |
870 | && subreg_lowpart_p (op) | |
871 | && (GET_MODE_SIZE (GET_MODE (op)) | |
872 | < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op)))) | |
873 | && GET_CODE (SUBREG_REG (op)) == ASHIFT | |
874 | && XEXP (SUBREG_REG (op), 0) == const1_rtx) | |
875 | { | |
876 | enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op)); | |
877 | rtx x; | |
878 | ||
879 | x = gen_rtx_ROTATE (inner_mode, | |
880 | simplify_gen_unary (NOT, inner_mode, const1_rtx, | |
881 | inner_mode), | |
882 | XEXP (SUBREG_REG (op), 1)); | |
76bd29f6 JJ |
883 | temp = rtl_hooks.gen_lowpart_no_emit (mode, x); |
884 | if (temp) | |
885 | return temp; | |
bd1ef757 PB |
886 | } |
887 | ||
888 | /* Apply De Morgan's laws to reduce number of patterns for machines | |
889 | with negating logical insns (and-not, nand, etc.). If result has | |
890 | only one NOT, put it first, since that is how the patterns are | |
891 | coded. */ | |
bd1ef757 PB |
892 | if (GET_CODE (op) == IOR || GET_CODE (op) == AND) |
893 | { | |
894 | rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1); | |
895 | enum machine_mode op_mode; | |
896 | ||
897 | op_mode = GET_MODE (in1); | |
898 | in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode); | |
899 | ||
900 | op_mode = GET_MODE (in2); | |
901 | if (op_mode == VOIDmode) | |
902 | op_mode = mode; | |
903 | in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode); | |
904 | ||
905 | if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT) | |
906 | { | |
907 | rtx tem = in2; | |
908 | in2 = in1; in1 = tem; | |
909 | } | |
910 | ||
911 | return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR, | |
912 | mode, in1, in2); | |
913 | } | |
b17c024f EB |
914 | |
915 | /* (not (bswap x)) -> (bswap (not x)). */ | |
916 | if (GET_CODE (op) == BSWAP) | |
917 | { | |
918 | rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode); | |
919 | return simplify_gen_unary (BSWAP, mode, x, mode); | |
920 | } | |
0a67e02c PB |
921 | break; |
922 | ||
923 | case NEG: | |
924 | /* (neg (neg X)) == X. */ | |
925 | if (GET_CODE (op) == NEG) | |
926 | return XEXP (op, 0); | |
927 | ||
928 | /* (neg (plus X 1)) can become (not X). */ | |
929 | if (GET_CODE (op) == PLUS | |
930 | && XEXP (op, 1) == const1_rtx) | |
931 | return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode); | |
b8698a0f | 932 | |
0a67e02c PB |
933 | /* Similarly, (neg (not X)) is (plus X 1). */ |
934 | if (GET_CODE (op) == NOT) | |
0a81f074 | 935 | return plus_constant (mode, XEXP (op, 0), 1); |
b8698a0f | 936 | |
0a67e02c PB |
937 | /* (neg (minus X Y)) can become (minus Y X). This transformation |
938 | isn't safe for modes with signed zeros, since if X and Y are | |
939 | both +0, (minus Y X) is the same as (minus X Y). If the | |
940 | rounding mode is towards +infinity (or -infinity) then the two | |
941 | expressions will be rounded differently. */ | |
942 | if (GET_CODE (op) == MINUS | |
943 | && !HONOR_SIGNED_ZEROS (mode) | |
944 | && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) | |
945 | return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0)); | |
b8698a0f | 946 | |
0a67e02c PB |
947 | if (GET_CODE (op) == PLUS |
948 | && !HONOR_SIGNED_ZEROS (mode) | |
949 | && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) | |
950 | { | |
951 | /* (neg (plus A C)) is simplified to (minus -C A). */ | |
33ffb5c5 KZ |
952 | if (CONST_SCALAR_INT_P (XEXP (op, 1)) |
953 | || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1))) | |
0a67e02c PB |
954 | { |
955 | temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode); | |
956 | if (temp) | |
957 | return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0)); | |
958 | } | |
959 | ||
960 | /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */ | |
961 | temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); | |
962 | return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1)); | |
963 | } | |
964 | ||
707f9919 | 965 | /* (neg (mult A B)) becomes (mult A (neg B)). |
0a67e02c PB |
966 | This works even for floating-point values. */ |
967 | if (GET_CODE (op) == MULT | |
968 | && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) | |
969 | { | |
707f9919 JJ |
970 | temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode); |
971 | return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp); | |
0a67e02c PB |
972 | } |
973 | ||
974 | /* NEG commutes with ASHIFT since it is multiplication. Only do | |
975 | this if we can then eliminate the NEG (e.g., if the operand | |
976 | is a constant). */ | |
977 | if (GET_CODE (op) == ASHIFT) | |
978 | { | |
979 | temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode); | |
980 | if (temp) | |
981 | return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1)); | |
982 | } | |
983 | ||
984 | /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when | |
985 | C is equal to the width of MODE minus 1. */ | |
986 | if (GET_CODE (op) == ASHIFTRT | |
481683e1 | 987 | && CONST_INT_P (XEXP (op, 1)) |
5511bc5a | 988 | && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1) |
0a67e02c PB |
989 | return simplify_gen_binary (LSHIFTRT, mode, |
990 | XEXP (op, 0), XEXP (op, 1)); | |
991 | ||
992 | /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when | |
993 | C is equal to the width of MODE minus 1. */ | |
994 | if (GET_CODE (op) == LSHIFTRT | |
481683e1 | 995 | && CONST_INT_P (XEXP (op, 1)) |
5511bc5a | 996 | && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1) |
0a67e02c PB |
997 | return simplify_gen_binary (ASHIFTRT, mode, |
998 | XEXP (op, 0), XEXP (op, 1)); | |
b8698a0f | 999 | |
bd1ef757 PB |
1000 | /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */ |
1001 | if (GET_CODE (op) == XOR | |
1002 | && XEXP (op, 1) == const1_rtx | |
1003 | && nonzero_bits (XEXP (op, 0), mode) == 1) | |
0a81f074 | 1004 | return plus_constant (mode, XEXP (op, 0), -1); |
8305d786 RS |
1005 | |
1006 | /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */ | |
1007 | /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */ | |
1008 | if (GET_CODE (op) == LT | |
71cca289 JJ |
1009 | && XEXP (op, 1) == const0_rtx |
1010 | && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0)))) | |
8305d786 | 1011 | { |
0f2f71b5 | 1012 | enum machine_mode inner = GET_MODE (XEXP (op, 0)); |
5511bc5a | 1013 | int isize = GET_MODE_PRECISION (inner); |
8305d786 | 1014 | if (STORE_FLAG_VALUE == 1) |
0f2f71b5 RS |
1015 | { |
1016 | temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0), | |
1017 | GEN_INT (isize - 1)); | |
1018 | if (mode == inner) | |
1019 | return temp; | |
5511bc5a | 1020 | if (GET_MODE_PRECISION (mode) > isize) |
0f2f71b5 RS |
1021 | return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner); |
1022 | return simplify_gen_unary (TRUNCATE, mode, temp, inner); | |
1023 | } | |
8305d786 | 1024 | else if (STORE_FLAG_VALUE == -1) |
0f2f71b5 RS |
1025 | { |
1026 | temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0), | |
1027 | GEN_INT (isize - 1)); | |
1028 | if (mode == inner) | |
1029 | return temp; | |
5511bc5a | 1030 | if (GET_MODE_PRECISION (mode) > isize) |
0f2f71b5 RS |
1031 | return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner); |
1032 | return simplify_gen_unary (TRUNCATE, mode, temp, inner); | |
1033 | } | |
8305d786 | 1034 | } |
bd1ef757 PB |
1035 | break; |
1036 | ||
1037 | case TRUNCATE: | |
40c5ed5b RS |
1038 | /* Don't optimize (lshiftrt (mult ...)) as it would interfere |
1039 | with the umulXi3_highpart patterns. */ | |
1040 | if (GET_CODE (op) == LSHIFTRT | |
1041 | && GET_CODE (XEXP (op, 0)) == MULT) | |
1042 | break; | |
bd1ef757 | 1043 | |
40c5ed5b RS |
1044 | if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT) |
1045 | { | |
1046 | if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))) | |
76bd29f6 JJ |
1047 | { |
1048 | temp = rtl_hooks.gen_lowpart_no_emit (mode, op); | |
1049 | if (temp) | |
1050 | return temp; | |
1051 | } | |
40c5ed5b RS |
1052 | /* We can't handle truncation to a partial integer mode here |
1053 | because we don't know the real bitsize of the partial | |
1054 | integer mode. */ | |
1055 | break; | |
1056 | } | |
bd1ef757 | 1057 | |
40c5ed5b RS |
1058 | if (GET_MODE (op) != VOIDmode) |
1059 | { | |
1060 | temp = simplify_truncation (mode, op, GET_MODE (op)); | |
1061 | if (temp) | |
1062 | return temp; | |
1063 | } | |
bd1ef757 PB |
1064 | |
1065 | /* If we know that the value is already truncated, we can | |
40c5ed5b RS |
1066 | replace the TRUNCATE with a SUBREG. */ |
1067 | if (GET_MODE_NUNITS (mode) == 1 | |
1068 | && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)) | |
1069 | || truncated_to_mode (mode, op))) | |
76bd29f6 JJ |
1070 | { |
1071 | temp = rtl_hooks.gen_lowpart_no_emit (mode, op); | |
1072 | if (temp) | |
1073 | return temp; | |
1074 | } | |
bd1ef757 PB |
1075 | |
1076 | /* A truncate of a comparison can be replaced with a subreg if | |
1077 | STORE_FLAG_VALUE permits. This is like the previous test, | |
1078 | but it works even if the comparison is done in a mode larger | |
1079 | than HOST_BITS_PER_WIDE_INT. */ | |
46c9550f | 1080 | if (HWI_COMPUTABLE_MODE_P (mode) |
bd1ef757 | 1081 | && COMPARISON_P (op) |
43c36287 | 1082 | && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0) |
76bd29f6 JJ |
1083 | { |
1084 | temp = rtl_hooks.gen_lowpart_no_emit (mode, op); | |
1085 | if (temp) | |
1086 | return temp; | |
1087 | } | |
dcf8468f AP |
1088 | |
1089 | /* A truncate of a memory is just loading the low part of the memory | |
1090 | if we are not changing the meaning of the address. */ | |
1091 | if (GET_CODE (op) == MEM | |
fa607dda | 1092 | && !VECTOR_MODE_P (mode) |
dcf8468f | 1093 | && !MEM_VOLATILE_P (op) |
5bfed9a9 | 1094 | && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))) |
76bd29f6 JJ |
1095 | { |
1096 | temp = rtl_hooks.gen_lowpart_no_emit (mode, op); | |
1097 | if (temp) | |
1098 | return temp; | |
1099 | } | |
dcf8468f | 1100 | |
bd1ef757 PB |
1101 | break; |
1102 | ||
1103 | case FLOAT_TRUNCATE: | |
15ed7b52 JG |
1104 | if (DECIMAL_FLOAT_MODE_P (mode)) |
1105 | break; | |
1106 | ||
bd1ef757 PB |
1107 | /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */ |
1108 | if (GET_CODE (op) == FLOAT_EXTEND | |
1109 | && GET_MODE (XEXP (op, 0)) == mode) | |
1110 | return XEXP (op, 0); | |
1111 | ||
1112 | /* (float_truncate:SF (float_truncate:DF foo:XF)) | |
1113 | = (float_truncate:SF foo:XF). | |
1114 | This may eliminate double rounding, so it is unsafe. | |
1115 | ||
1116 | (float_truncate:SF (float_extend:XF foo:DF)) | |
1117 | = (float_truncate:SF foo:DF). | |
1118 | ||
1119 | (float_truncate:DF (float_extend:XF foo:SF)) | |
1120 | = (float_extend:SF foo:DF). */ | |
1121 | if ((GET_CODE (op) == FLOAT_TRUNCATE | |
1122 | && flag_unsafe_math_optimizations) | |
1123 | || GET_CODE (op) == FLOAT_EXTEND) | |
1124 | return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, | |
1125 | 0))) | |
1126 | > GET_MODE_SIZE (mode) | |
1127 | ? FLOAT_TRUNCATE : FLOAT_EXTEND, | |
1128 | mode, | |
1129 | XEXP (op, 0), mode); | |
1130 | ||
1131 | /* (float_truncate (float x)) is (float x) */ | |
1132 | if (GET_CODE (op) == FLOAT | |
1133 | && (flag_unsafe_math_optimizations | |
a0c64295 UB |
1134 | || (SCALAR_FLOAT_MODE_P (GET_MODE (op)) |
1135 | && ((unsigned)significand_size (GET_MODE (op)) | |
5511bc5a | 1136 | >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))) |
a0c64295 UB |
1137 | - num_sign_bit_copies (XEXP (op, 0), |
1138 | GET_MODE (XEXP (op, 0)))))))) | |
bd1ef757 PB |
1139 | return simplify_gen_unary (FLOAT, mode, |
1140 | XEXP (op, 0), | |
1141 | GET_MODE (XEXP (op, 0))); | |
1142 | ||
1143 | /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is | |
1144 | (OP:SF foo:SF) if OP is NEG or ABS. */ | |
1145 | if ((GET_CODE (op) == ABS | |
1146 | || GET_CODE (op) == NEG) | |
1147 | && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND | |
1148 | && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode) | |
1149 | return simplify_gen_unary (GET_CODE (op), mode, | |
1150 | XEXP (XEXP (op, 0), 0), mode); | |
1151 | ||
1152 | /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0)) | |
1153 | is (float_truncate:SF x). */ | |
1154 | if (GET_CODE (op) == SUBREG | |
1155 | && subreg_lowpart_p (op) | |
1156 | && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE) | |
1157 | return SUBREG_REG (op); | |
1158 | break; | |
1159 | ||
1160 | case FLOAT_EXTEND: | |
15ed7b52 JG |
1161 | if (DECIMAL_FLOAT_MODE_P (mode)) |
1162 | break; | |
1163 | ||
bd1ef757 PB |
1164 | /* (float_extend (float_extend x)) is (float_extend x) |
1165 | ||
1166 | (float_extend (float x)) is (float x) assuming that double | |
1167 | rounding can't happen. | |
1168 | */ | |
1169 | if (GET_CODE (op) == FLOAT_EXTEND | |
1170 | || (GET_CODE (op) == FLOAT | |
a0c64295 | 1171 | && SCALAR_FLOAT_MODE_P (GET_MODE (op)) |
bd1ef757 | 1172 | && ((unsigned)significand_size (GET_MODE (op)) |
5511bc5a | 1173 | >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))) |
bd1ef757 PB |
1174 | - num_sign_bit_copies (XEXP (op, 0), |
1175 | GET_MODE (XEXP (op, 0))))))) | |
1176 | return simplify_gen_unary (GET_CODE (op), mode, | |
1177 | XEXP (op, 0), | |
1178 | GET_MODE (XEXP (op, 0))); | |
1179 | ||
1180 | break; | |
1181 | ||
1182 | case ABS: | |
1183 | /* (abs (neg <foo>)) -> (abs <foo>) */ | |
1184 | if (GET_CODE (op) == NEG) | |
1185 | return simplify_gen_unary (ABS, mode, XEXP (op, 0), | |
1186 | GET_MODE (XEXP (op, 0))); | |
1187 | ||
1188 | /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS), | |
1189 | do nothing. */ | |
1190 | if (GET_MODE (op) == VOIDmode) | |
1191 | break; | |
1192 | ||
1193 | /* If operand is something known to be positive, ignore the ABS. */ | |
1194 | if (GET_CODE (op) == FFS || GET_CODE (op) == ABS | |
2d0c270f BS |
1195 | || val_signbit_known_clear_p (GET_MODE (op), |
1196 | nonzero_bits (op, GET_MODE (op)))) | |
bd1ef757 PB |
1197 | return op; |
1198 | ||
1199 | /* If operand is known to be only -1 or 0, convert ABS to NEG. */ | |
5511bc5a | 1200 | if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode)) |
bd1ef757 PB |
1201 | return gen_rtx_NEG (mode, op); |
1202 | ||
1203 | break; | |
1204 | ||
1205 | case FFS: | |
1206 | /* (ffs (*_extend <X>)) = (ffs <X>) */ | |
1207 | if (GET_CODE (op) == SIGN_EXTEND | |
1208 | || GET_CODE (op) == ZERO_EXTEND) | |
1209 | return simplify_gen_unary (FFS, mode, XEXP (op, 0), | |
1210 | GET_MODE (XEXP (op, 0))); | |
1211 | break; | |
1212 | ||
1213 | case POPCOUNT: | |
9f05adb0 RS |
1214 | switch (GET_CODE (op)) |
1215 | { | |
1216 | case BSWAP: | |
1217 | case ZERO_EXTEND: | |
1218 | /* (popcount (zero_extend <X>)) = (popcount <X>) */ | |
1219 | return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0), | |
1220 | GET_MODE (XEXP (op, 0))); | |
1221 | ||
1222 | case ROTATE: | |
1223 | case ROTATERT: | |
1224 | /* Rotations don't affect popcount. */ | |
1225 | if (!side_effects_p (XEXP (op, 1))) | |
1226 | return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0), | |
1227 | GET_MODE (XEXP (op, 0))); | |
1228 | break; | |
1229 | ||
1230 | default: | |
1231 | break; | |
1232 | } | |
1233 | break; | |
1234 | ||
bd1ef757 | 1235 | case PARITY: |
9f05adb0 RS |
1236 | switch (GET_CODE (op)) |
1237 | { | |
1238 | case NOT: | |
1239 | case BSWAP: | |
1240 | case ZERO_EXTEND: | |
1241 | case SIGN_EXTEND: | |
1242 | return simplify_gen_unary (PARITY, mode, XEXP (op, 0), | |
1243 | GET_MODE (XEXP (op, 0))); | |
1244 | ||
1245 | case ROTATE: | |
1246 | case ROTATERT: | |
1247 | /* Rotations don't affect parity. */ | |
1248 | if (!side_effects_p (XEXP (op, 1))) | |
1249 | return simplify_gen_unary (PARITY, mode, XEXP (op, 0), | |
1250 | GET_MODE (XEXP (op, 0))); | |
1251 | break; | |
1252 | ||
1253 | default: | |
1254 | break; | |
1255 | } | |
1256 | break; | |
1257 | ||
1258 | case BSWAP: | |
1259 | /* (bswap (bswap x)) -> x. */ | |
1260 | if (GET_CODE (op) == BSWAP) | |
1261 | return XEXP (op, 0); | |
bd1ef757 PB |
1262 | break; |
1263 | ||
1264 | case FLOAT: | |
1265 | /* (float (sign_extend <X>)) = (float <X>). */ | |
1266 | if (GET_CODE (op) == SIGN_EXTEND) | |
1267 | return simplify_gen_unary (FLOAT, mode, XEXP (op, 0), | |
1268 | GET_MODE (XEXP (op, 0))); | |
0a67e02c PB |
1269 | break; |
1270 | ||
1271 | case SIGN_EXTEND: | |
1272 | /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2)))) | |
1273 | becomes just the MINUS if its mode is MODE. This allows | |
1274 | folding switch statements on machines using casesi (such as | |
1275 | the VAX). */ | |
1276 | if (GET_CODE (op) == TRUNCATE | |
1277 | && GET_MODE (XEXP (op, 0)) == mode | |
1278 | && GET_CODE (XEXP (op, 0)) == MINUS | |
1279 | && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF | |
1280 | && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF) | |
1281 | return XEXP (op, 0); | |
1282 | ||
c536876e AS |
1283 | /* Extending a widening multiplication should be canonicalized to |
1284 | a wider widening multiplication. */ | |
1285 | if (GET_CODE (op) == MULT) | |
1286 | { | |
1287 | rtx lhs = XEXP (op, 0); | |
1288 | rtx rhs = XEXP (op, 1); | |
1289 | enum rtx_code lcode = GET_CODE (lhs); | |
1290 | enum rtx_code rcode = GET_CODE (rhs); | |
1291 | ||
1292 | /* Widening multiplies usually extend both operands, but sometimes | |
1293 | they use a shift to extract a portion of a register. */ | |
1294 | if ((lcode == SIGN_EXTEND | |
1295 | || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1)))) | |
1296 | && (rcode == SIGN_EXTEND | |
1297 | || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1))))) | |
1298 | { | |
1299 | enum machine_mode lmode = GET_MODE (lhs); | |
1300 | enum machine_mode rmode = GET_MODE (rhs); | |
1301 | int bits; | |
1302 | ||
1303 | if (lcode == ASHIFTRT) | |
1304 | /* Number of bits not shifted off the end. */ | |
1305 | bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1)); | |
1306 | else /* lcode == SIGN_EXTEND */ | |
1307 | /* Size of inner mode. */ | |
1308 | bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0))); | |
1309 | ||
1310 | if (rcode == ASHIFTRT) | |
1311 | bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1)); | |
1312 | else /* rcode == SIGN_EXTEND */ | |
1313 | bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0))); | |
1314 | ||
1315 | /* We can only widen multiplies if the result is mathematiclly | |
1316 | equivalent. I.e. if overflow was impossible. */ | |
1317 | if (bits <= GET_MODE_PRECISION (GET_MODE (op))) | |
1318 | return simplify_gen_binary | |
1319 | (MULT, mode, | |
1320 | simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode), | |
1321 | simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode)); | |
1322 | } | |
1323 | } | |
1324 | ||
0a67e02c PB |
1325 | /* Check for a sign extension of a subreg of a promoted |
1326 | variable, where the promotion is sign-extended, and the | |
1327 | target mode is the same as the variable's promotion. */ | |
1328 | if (GET_CODE (op) == SUBREG | |
1329 | && SUBREG_PROMOTED_VAR_P (op) | |
1330 | && ! SUBREG_PROMOTED_UNSIGNED_P (op) | |
4613543f | 1331 | && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))) |
76bd29f6 JJ |
1332 | { |
1333 | temp = rtl_hooks.gen_lowpart_no_emit (mode, op); | |
1334 | if (temp) | |
1335 | return temp; | |
1336 | } | |
0a67e02c | 1337 | |
561da6bc JJ |
1338 | /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>). |
1339 | (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */ | |
1340 | if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND) | |
1341 | { | |
1342 | gcc_assert (GET_MODE_BITSIZE (mode) | |
1343 | > GET_MODE_BITSIZE (GET_MODE (op))); | |
1344 | return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0), | |
1345 | GET_MODE (XEXP (op, 0))); | |
1346 | } | |
a5d8253f JJ |
1347 | |
1348 | /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I))) | |
1349 | is (sign_extend:M (subreg:O <X>)) if there is mode with | |
561da6bc JJ |
1350 | GET_MODE_BITSIZE (N) - I bits. |
1351 | (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I))) | |
1352 | is similarly (zero_extend:M (subreg:O <X>)). */ | |
1353 | if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT) | |
a5d8253f JJ |
1354 | && GET_CODE (XEXP (op, 0)) == ASHIFT |
1355 | && CONST_INT_P (XEXP (op, 1)) | |
1356 | && XEXP (XEXP (op, 0), 1) == XEXP (op, 1) | |
1357 | && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1))) | |
1358 | { | |
1359 | enum machine_mode tmode | |
1360 | = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op)) | |
1361 | - INTVAL (XEXP (op, 1)), MODE_INT, 1); | |
561da6bc JJ |
1362 | gcc_assert (GET_MODE_BITSIZE (mode) |
1363 | > GET_MODE_BITSIZE (GET_MODE (op))); | |
a5d8253f JJ |
1364 | if (tmode != BLKmode) |
1365 | { | |
1366 | rtx inner = | |
1367 | rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0)); | |
76bd29f6 JJ |
1368 | if (inner) |
1369 | return simplify_gen_unary (GET_CODE (op) == ASHIFTRT | |
1370 | ? SIGN_EXTEND : ZERO_EXTEND, | |
1371 | mode, inner, tmode); | |
a5d8253f JJ |
1372 | } |
1373 | } | |
1374 | ||
0a67e02c | 1375 | #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) |
5932a4d4 | 1376 | /* As we do not know which address space the pointer is referring to, |
d4ebfa65 BE |
1377 | we can do this only if the target does not support different pointer |
1378 | or address modes depending on the address space. */ | |
1379 | if (target_default_pointer_address_modes_p () | |
1380 | && ! POINTERS_EXTEND_UNSIGNED | |
0a67e02c PB |
1381 | && mode == Pmode && GET_MODE (op) == ptr_mode |
1382 | && (CONSTANT_P (op) | |
1383 | || (GET_CODE (op) == SUBREG | |
1384 | && REG_P (SUBREG_REG (op)) | |
1385 | && REG_POINTER (SUBREG_REG (op)) | |
1386 | && GET_MODE (SUBREG_REG (op)) == Pmode))) | |
1387 | return convert_memory_address (Pmode, op); | |
1388 | #endif | |
1389 | break; | |
1390 | ||
1391 | case ZERO_EXTEND: | |
1392 | /* Check for a zero extension of a subreg of a promoted | |
1393 | variable, where the promotion is zero-extended, and the | |
1394 | target mode is the same as the variable's promotion. */ | |
1395 | if (GET_CODE (op) == SUBREG | |
1396 | && SUBREG_PROMOTED_VAR_P (op) | |
7443a71d | 1397 | && SUBREG_PROMOTED_UNSIGNED_P (op) > 0 |
4613543f | 1398 | && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))) |
76bd29f6 JJ |
1399 | { |
1400 | temp = rtl_hooks.gen_lowpart_no_emit (mode, op); | |
1401 | if (temp) | |
1402 | return temp; | |
1403 | } | |
0a67e02c | 1404 | |
c536876e AS |
1405 | /* Extending a widening multiplication should be canonicalized to |
1406 | a wider widening multiplication. */ | |
1407 | if (GET_CODE (op) == MULT) | |
1408 | { | |
1409 | rtx lhs = XEXP (op, 0); | |
1410 | rtx rhs = XEXP (op, 1); | |
1411 | enum rtx_code lcode = GET_CODE (lhs); | |
1412 | enum rtx_code rcode = GET_CODE (rhs); | |
1413 | ||
1414 | /* Widening multiplies usually extend both operands, but sometimes | |
1415 | they use a shift to extract a portion of a register. */ | |
1416 | if ((lcode == ZERO_EXTEND | |
1417 | || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1)))) | |
1418 | && (rcode == ZERO_EXTEND | |
1419 | || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1))))) | |
1420 | { | |
1421 | enum machine_mode lmode = GET_MODE (lhs); | |
1422 | enum machine_mode rmode = GET_MODE (rhs); | |
1423 | int bits; | |
1424 | ||
1425 | if (lcode == LSHIFTRT) | |
1426 | /* Number of bits not shifted off the end. */ | |
1427 | bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1)); | |
1428 | else /* lcode == ZERO_EXTEND */ | |
1429 | /* Size of inner mode. */ | |
1430 | bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0))); | |
1431 | ||
1432 | if (rcode == LSHIFTRT) | |
1433 | bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1)); | |
1434 | else /* rcode == ZERO_EXTEND */ | |
1435 | bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0))); | |
1436 | ||
1437 | /* We can only widen multiplies if the result is mathematiclly | |
1438 | equivalent. I.e. if overflow was impossible. */ | |
1439 | if (bits <= GET_MODE_PRECISION (GET_MODE (op))) | |
1440 | return simplify_gen_binary | |
1441 | (MULT, mode, | |
1442 | simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode), | |
1443 | simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode)); | |
1444 | } | |
1445 | } | |
1446 | ||
a5d8253f JJ |
1447 | /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */ |
1448 | if (GET_CODE (op) == ZERO_EXTEND) | |
1449 | return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0), | |
1450 | GET_MODE (XEXP (op, 0))); | |
1451 | ||
561da6bc JJ |
1452 | /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I))) |
1453 | is (zero_extend:M (subreg:O <X>)) if there is mode with | |
1454 | GET_MODE_BITSIZE (N) - I bits. */ | |
1455 | if (GET_CODE (op) == LSHIFTRT | |
1456 | && GET_CODE (XEXP (op, 0)) == ASHIFT | |
1457 | && CONST_INT_P (XEXP (op, 1)) | |
1458 | && XEXP (XEXP (op, 0), 1) == XEXP (op, 1) | |
1459 | && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1))) | |
1460 | { | |
1461 | enum machine_mode tmode | |
1462 | = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op)) | |
1463 | - INTVAL (XEXP (op, 1)), MODE_INT, 1); | |
1464 | if (tmode != BLKmode) | |
1465 | { | |
1466 | rtx inner = | |
1467 | rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0)); | |
76bd29f6 JJ |
1468 | if (inner) |
1469 | return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode); | |
561da6bc JJ |
1470 | } |
1471 | } | |
1472 | ||
8140c065 JJ |
1473 | /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or |
1474 | (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside | |
1475 | of mode N. E.g. | |
1476 | (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is | |
1477 | (and:SI (reg:SI) (const_int 63)). */ | |
1478 | if (GET_CODE (op) == SUBREG | |
1479 | && GET_MODE_PRECISION (GET_MODE (op)) | |
1480 | < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))) | |
1481 | && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))) | |
1482 | <= HOST_BITS_PER_WIDE_INT | |
1483 | && GET_MODE_PRECISION (mode) | |
1484 | >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))) | |
1485 | && subreg_lowpart_p (op) | |
1486 | && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op))) | |
1487 | & ~GET_MODE_MASK (GET_MODE (op))) == 0) | |
1488 | { | |
1489 | if (GET_MODE_PRECISION (mode) | |
1490 | == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))) | |
1491 | return SUBREG_REG (op); | |
1492 | return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op), | |
1493 | GET_MODE (SUBREG_REG (op))); | |
1494 | } | |
1495 | ||
0a67e02c | 1496 | #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) |
5932a4d4 | 1497 | /* As we do not know which address space the pointer is referring to, |
d4ebfa65 BE |
1498 | we can do this only if the target does not support different pointer |
1499 | or address modes depending on the address space. */ | |
1500 | if (target_default_pointer_address_modes_p () | |
1501 | && POINTERS_EXTEND_UNSIGNED > 0 | |
0a67e02c PB |
1502 | && mode == Pmode && GET_MODE (op) == ptr_mode |
1503 | && (CONSTANT_P (op) | |
1504 | || (GET_CODE (op) == SUBREG | |
1505 | && REG_P (SUBREG_REG (op)) | |
1506 | && REG_POINTER (SUBREG_REG (op)) | |
1507 | && GET_MODE (SUBREG_REG (op)) == Pmode))) | |
1508 | return convert_memory_address (Pmode, op); | |
1509 | #endif | |
1510 | break; | |
1511 | ||
1512 | default: | |
1513 | break; | |
1514 | } | |
b8698a0f | 1515 | |
0a67e02c PB |
1516 | return 0; |
1517 | } | |
1518 | ||
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  /* VEC_DUPLICATE of a constant: build a CONST_VECTOR that repeats the
     scalar (or tiles a narrower vector) across all elements of MODE.  */
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    /* Scalar constant: every element is OP itself.  */
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      /* Vector constant: tile its elements cyclically; the input
		 element count must evenly divide the output count.  */
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  /* Element-wise folding: apply the unary operation to each element of a
     CONST_VECTOR; fail if any element fails to fold.  */
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  /* Integer -> float conversion, signed.  */
  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  /* Integer -> float conversion, unsigned.  */
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
	/* We should never get a negative number.  */
	gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
	/* Mask to the operand's width so the value is treated as unsigned.  */
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  /* Fold operations on a CONST_INT whose result fits in one
     HOST_WIDE_INT.  */
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    /* The target defines CLZ at zero; VAL was set by the macro.  */
	    ;
	  else
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
	  break;

	case CLRSB:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    val = GET_MODE_PRECISION (mode) - 1;
	  else if (arg0 >= 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
	  else if (arg0 < 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_PRECISION (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  /* Kernighan's trick: each iteration clears the lowest set bit.  */
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    /* Reassemble the value with its bytes in reverse order.  */
	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & GET_MODE_MASK (op_mode);
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  op_width = GET_MODE_PRECISION (op_mode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (op_width < HOST_BITS_PER_WIDE_INT)
	    {
	      /* Mask to the narrow width, then replicate the sign bit.  */
	      val = arg0 & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, val))
		val |= ~GET_MODE_MASK (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	case SS_ABS:
	  /* These do not apply to (or cannot be folded for) integer
	     constants.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      double_int first, value;

      if (CONST_DOUBLE_AS_INT_P (op))
	first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
				       CONST_DOUBLE_LOW (op));
      else
	first = double_int::from_shwi (INTVAL (op));

      switch (code)
	{
	case NOT:
	  value = ~first;
	  break;

	case NEG:
	  value = -first;
	  break;

	case ABS:
	  if (first.is_negative ())
	    value = -first;
	  else
	    value = first;
	  break;

	case FFS:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ffs_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
	  else
	    value.low = 0;
	  break;

	case CLZ:
	  value.high = 0;
	  if (first.high != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
		      - HOST_BITS_PER_WIDE_INT;
	  else if (first.low != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case CTZ:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ctz_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case POPCOUNT:
	  /* Count set bits in both halves with Kernighan's trick.  */
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  break;

	case PARITY:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  value.low &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    /* Reverse the byte order, reading/writing whichever half of
	       the double_int each source/destination byte falls in.  */
	    value = double_int_zero;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (first.low >> s) & 0xff;
		else
		  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  value.low |= byte << d;
		else
		  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  value = first;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      value.low = first.low & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, value.low))
		value.low |= ~GET_MODE_MASK (op_mode);

	      value.high = HWI_SIGN_EXTEND (value.low);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_int_const (value, mode);
    }

  /* Float -> float operations on a floating-point CONST_DOUBLE.  */
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    /* Bitwise NOT of the target representation of the float.  */
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  /* Float -> integer conversions (FIX / UNSIGNED_FIX) with saturation
     at the destination mode's bounds.  */
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (unsigned HOST_WIDE_INT) (-1)
		   << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == HOST_BITS_PER_DOUBLE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}
2080 | \f | |
b17c024f EB |
2081 | /* Subroutine of simplify_binary_operation to simplify a binary operation |
2082 | CODE that can commute with byte swapping, with result mode MODE and | |
2083 | operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR. | |
2084 | Return zero if no simplification or canonicalization is possible. */ | |
2085 | ||
2086 | static rtx | |
2087 | simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode, | |
2088 | rtx op0, rtx op1) | |
2089 | { | |
2090 | rtx tem; | |
2091 | ||
2092 | /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */ | |
a8c50132 | 2093 | if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1)) |
b17c024f EB |
2094 | { |
2095 | tem = simplify_gen_binary (code, mode, XEXP (op0, 0), | |
2096 | simplify_gen_unary (BSWAP, mode, op1, mode)); | |
2097 | return simplify_gen_unary (BSWAP, mode, tem, mode); | |
2098 | } | |
2099 | ||
2100 | /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */ | |
2101 | if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP) | |
2102 | { | |
2103 | tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0)); | |
2104 | return simplify_gen_unary (BSWAP, mode, tem, mode); | |
2105 | } | |
2106 | ||
2107 | return NULL_RTX; | |
2108 | } | |
2109 | ||
9ce79a7a RS |
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      /* Otherwise swap the operands ourselves and fall through to the
	 left-nested canonicalizations below.  */
      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
2163 | ||
0a67e02c | 2164 | |
0cedb36c JL |
2165 | /* Simplify a binary operation CODE with result mode MODE, operating on OP0 |
2166 | and OP1. Return 0 if no simplification is possible. | |
2167 | ||
2168 | Don't use this for relational operations such as EQ or LT. | |
2169 | Use simplify_relational_operation instead. */ | |
0cedb36c | 2170 | rtx |
46c5ad27 AJ |
2171 | simplify_binary_operation (enum rtx_code code, enum machine_mode mode, |
2172 | rtx op0, rtx op1) | |
0cedb36c | 2173 | { |
9ce79a7a | 2174 | rtx trueop0, trueop1; |
0cedb36c JL |
2175 | rtx tem; |
2176 | ||
2177 | /* Relational operations don't work here. We must know the mode | |
2178 | of the operands in order to do the comparison correctly. | |
2179 | Assuming a full word can give incorrect results. | |
2180 | Consider comparing 128 with -128 in QImode. */ | |
41374e13 NS |
2181 | gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE); |
2182 | gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE); | |
0cedb36c | 2183 | |
4ba5f925 | 2184 | /* Make sure the constant is second. */ |
ec8e098d | 2185 | if (GET_RTX_CLASS (code) == RTX_COMM_ARITH |
9ce79a7a | 2186 | && swap_commutative_operands_p (op0, op1)) |
4ba5f925 JH |
2187 | { |
2188 | tem = op0, op0 = op1, op1 = tem; | |
4ba5f925 JH |
2189 | } |
2190 | ||
9ce79a7a RS |
2191 | trueop0 = avoid_constant_pool_reference (op0); |
2192 | trueop1 = avoid_constant_pool_reference (op1); | |
2193 | ||
0a67e02c PB |
2194 | tem = simplify_const_binary_operation (code, mode, trueop0, trueop1); |
2195 | if (tem) | |
2196 | return tem; | |
2197 | return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1); | |
2198 | } | |
2199 | ||
1753331b RS |
2200 | /* Subroutine of simplify_binary_operation. Simplify a binary operation |
2201 | CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or | |
2202 | OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the | |
2203 | actual constants. */ | |
2204 | ||
0a67e02c PB |
2205 | static rtx |
2206 | simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, | |
2207 | rtx op0, rtx op1, rtx trueop0, rtx trueop1) | |
2208 | { | |
bd1ef757 | 2209 | rtx tem, reversed, opleft, opright; |
0a67e02c | 2210 | HOST_WIDE_INT val; |
5511bc5a | 2211 | unsigned int width = GET_MODE_PRECISION (mode); |
0a67e02c PB |
2212 | |
2213 | /* Even if we can't compute a constant result, | |
2214 | there are some cases worth simplifying. */ | |
2215 | ||
2216 | switch (code) | |
852c8ba1 | 2217 | { |
0a67e02c PB |
2218 | case PLUS: |
2219 | /* Maybe simplify x + 0 to x. The two expressions are equivalent | |
2220 | when x is NaN, infinite, or finite and nonzero. They aren't | |
2221 | when x is -0 and the rounding mode is not towards -infinity, | |
2222 | since (-0) + 0 is then 0. */ | |
2223 | if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode)) | |
2224 | return op0; | |
2225 | ||
2226 | /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These | |
2227 | transformations are safe even for IEEE. */ | |
2228 | if (GET_CODE (op0) == NEG) | |
2229 | return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0)); | |
2230 | else if (GET_CODE (op1) == NEG) | |
2231 | return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0)); | |
2232 | ||
2233 | /* (~a) + 1 -> -a */ | |
2234 | if (INTEGRAL_MODE_P (mode) | |
2235 | && GET_CODE (op0) == NOT | |
2236 | && trueop1 == const1_rtx) | |
2237 | return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode); | |
2238 | ||
2239 | /* Handle both-operands-constant cases. We can only add | |
2240 | CONST_INTs to constants since the sum of relocatable symbols | |
2241 | can't be handled by most assemblers. Don't add CONST_INT | |
2242 | to CONST_INT since overflow won't be computed properly if wider | |
2243 | than HOST_BITS_PER_WIDE_INT. */ | |
2244 | ||
dd59ef13 RS |
2245 | if ((GET_CODE (op0) == CONST |
2246 | || GET_CODE (op0) == SYMBOL_REF | |
2247 | || GET_CODE (op0) == LABEL_REF) | |
481683e1 | 2248 | && CONST_INT_P (op1)) |
0a81f074 | 2249 | return plus_constant (mode, op0, INTVAL (op1)); |
dd59ef13 RS |
2250 | else if ((GET_CODE (op1) == CONST |
2251 | || GET_CODE (op1) == SYMBOL_REF | |
2252 | || GET_CODE (op1) == LABEL_REF) | |
481683e1 | 2253 | && CONST_INT_P (op0)) |
0a81f074 | 2254 | return plus_constant (mode, op1, INTVAL (op0)); |
0a67e02c PB |
2255 | |
2256 | /* See if this is something like X * C - X or vice versa or | |
2257 | if the multiplication is written as a shift. If so, we can | |
2258 | distribute and make a new multiply, shift, or maybe just | |
2259 | have X (if C is 2 in the example above). But don't make | |
2260 | something more expensive than we had before. */ | |
2261 | ||
6800ea5c | 2262 | if (SCALAR_INT_MODE_P (mode)) |
0a67e02c | 2263 | { |
54fb1ae0 | 2264 | double_int coeff0, coeff1; |
0a67e02c PB |
2265 | rtx lhs = op0, rhs = op1; |
2266 | ||
54fb1ae0 AS |
2267 | coeff0 = double_int_one; |
2268 | coeff1 = double_int_one; | |
2269 | ||
0a67e02c | 2270 | if (GET_CODE (lhs) == NEG) |
fab2f52c | 2271 | { |
54fb1ae0 | 2272 | coeff0 = double_int_minus_one; |
fab2f52c AO |
2273 | lhs = XEXP (lhs, 0); |
2274 | } | |
0a67e02c | 2275 | else if (GET_CODE (lhs) == MULT |
481683e1 | 2276 | && CONST_INT_P (XEXP (lhs, 1))) |
fab2f52c | 2277 | { |
27bcd47c | 2278 | coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1))); |
fab2f52c AO |
2279 | lhs = XEXP (lhs, 0); |
2280 | } | |
0a67e02c | 2281 | else if (GET_CODE (lhs) == ASHIFT |
481683e1 | 2282 | && CONST_INT_P (XEXP (lhs, 1)) |
54fb1ae0 | 2283 | && INTVAL (XEXP (lhs, 1)) >= 0 |
0a67e02c PB |
2284 | && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) |
2285 | { | |
27bcd47c | 2286 | coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1))); |
0a67e02c PB |
2287 | lhs = XEXP (lhs, 0); |
2288 | } | |
852c8ba1 | 2289 | |
0a67e02c | 2290 | if (GET_CODE (rhs) == NEG) |
fab2f52c | 2291 | { |
54fb1ae0 | 2292 | coeff1 = double_int_minus_one; |
fab2f52c AO |
2293 | rhs = XEXP (rhs, 0); |
2294 | } | |
0a67e02c | 2295 | else if (GET_CODE (rhs) == MULT |
481683e1 | 2296 | && CONST_INT_P (XEXP (rhs, 1))) |
0a67e02c | 2297 | { |
27bcd47c | 2298 | coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1))); |
fab2f52c | 2299 | rhs = XEXP (rhs, 0); |
0a67e02c PB |
2300 | } |
2301 | else if (GET_CODE (rhs) == ASHIFT | |
481683e1 | 2302 | && CONST_INT_P (XEXP (rhs, 1)) |
0a67e02c PB |
2303 | && INTVAL (XEXP (rhs, 1)) >= 0 |
2304 | && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
2305 | { | |
27bcd47c | 2306 | coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1))); |
0a67e02c PB |
2307 | rhs = XEXP (rhs, 0); |
2308 | } | |
2309 | ||
2310 | if (rtx_equal_p (lhs, rhs)) | |
2311 | { | |
2312 | rtx orig = gen_rtx_PLUS (mode, op0, op1); | |
fab2f52c | 2313 | rtx coeff; |
54fb1ae0 | 2314 | double_int val; |
f40751dd | 2315 | bool speed = optimize_function_for_speed_p (cfun); |
fab2f52c | 2316 | |
27bcd47c | 2317 | val = coeff0 + coeff1; |
54fb1ae0 | 2318 | coeff = immed_double_int_const (val, mode); |
fab2f52c AO |
2319 | |
2320 | tem = simplify_gen_binary (MULT, mode, lhs, coeff); | |
5e8f01f4 | 2321 | return set_src_cost (tem, speed) <= set_src_cost (orig, speed) |
0a67e02c PB |
2322 | ? tem : 0; |
2323 | } | |
2324 | } | |
2325 | ||
2326 | /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */ | |
33ffb5c5 | 2327 | if (CONST_SCALAR_INT_P (op1) |
0a67e02c | 2328 | && GET_CODE (op0) == XOR |
33ffb5c5 | 2329 | && CONST_SCALAR_INT_P (XEXP (op0, 1)) |
0a67e02c PB |
2330 | && mode_signbit_p (mode, op1)) |
2331 | return simplify_gen_binary (XOR, mode, XEXP (op0, 0), | |
2332 | simplify_gen_binary (XOR, mode, op1, | |
2333 | XEXP (op0, 1))); | |
2334 | ||
bd1ef757 | 2335 | /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */ |
4bf371ea RG |
2336 | if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) |
2337 | && GET_CODE (op0) == MULT | |
bd1ef757 PB |
2338 | && GET_CODE (XEXP (op0, 0)) == NEG) |
2339 | { | |
2340 | rtx in1, in2; | |
2341 | ||
2342 | in1 = XEXP (XEXP (op0, 0), 0); | |
2343 | in2 = XEXP (op0, 1); | |
2344 | return simplify_gen_binary (MINUS, mode, op1, | |
2345 | simplify_gen_binary (MULT, mode, | |
2346 | in1, in2)); | |
2347 | } | |
2348 | ||
2349 | /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if | |
2350 | C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE | |
2351 | is 1. */ | |
2352 | if (COMPARISON_P (op0) | |
2353 | && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx) | |
2354 | || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx)) | |
2355 | && (reversed = reversed_comparison (op0, mode))) | |
2356 | return | |
2357 | simplify_gen_unary (NEG, mode, reversed, mode); | |
2358 | ||
0a67e02c PB |
2359 | /* If one of the operands is a PLUS or a MINUS, see if we can |
2360 | simplify this by the associative law. | |
2361 | Don't use the associative law for floating point. | |
2362 | The inaccuracy makes it nonassociative, | |
2363 | and subtle programs can break if operations are associated. */ | |
2364 | ||
2365 | if (INTEGRAL_MODE_P (mode) | |
2366 | && (plus_minus_operand_p (op0) | |
2367 | || plus_minus_operand_p (op1)) | |
1941069a | 2368 | && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0) |
0a67e02c PB |
2369 | return tem; |
2370 | ||
2371 | /* Reassociate floating point addition only when the user | |
a1a82611 | 2372 | specifies associative math operations. */ |
0a67e02c | 2373 | if (FLOAT_MODE_P (mode) |
a1a82611 | 2374 | && flag_associative_math) |
852c8ba1 | 2375 | { |
0a67e02c PB |
2376 | tem = simplify_associative_operation (code, mode, op0, op1); |
2377 | if (tem) | |
2378 | return tem; | |
852c8ba1 | 2379 | } |
0a67e02c | 2380 | break; |
852c8ba1 | 2381 | |
0a67e02c | 2382 | case COMPARE: |
0a67e02c PB |
2383 | /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */ |
2384 | if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT) | |
2385 | || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU)) | |
2386 | && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx) | |
3198b947 | 2387 | { |
0a67e02c PB |
2388 | rtx xop00 = XEXP (op0, 0); |
2389 | rtx xop10 = XEXP (op1, 0); | |
3198b947 | 2390 | |
0a67e02c PB |
2391 | #ifdef HAVE_cc0 |
2392 | if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0) | |
2393 | #else | |
2394 | if (REG_P (xop00) && REG_P (xop10) | |
2395 | && GET_MODE (xop00) == GET_MODE (xop10) | |
2396 | && REGNO (xop00) == REGNO (xop10) | |
2397 | && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC | |
2398 | && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC) | |
2399 | #endif | |
2400 | return xop00; | |
3198b947 | 2401 | } |
0a67e02c PB |
2402 | break; |
2403 | ||
2404 | case MINUS: | |
2405 | /* We can't assume x-x is 0 even with non-IEEE floating point, | |
2406 | but since it is zero except in very strange circumstances, we | |
81d2fb02 | 2407 | will treat it as zero with -ffinite-math-only. */ |
0a67e02c PB |
2408 | if (rtx_equal_p (trueop0, trueop1) |
2409 | && ! side_effects_p (op0) | |
81d2fb02 | 2410 | && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode))) |
0a67e02c PB |
2411 | return CONST0_RTX (mode); |
2412 | ||
2413 | /* Change subtraction from zero into negation. (0 - x) is the | |
2414 | same as -x when x is NaN, infinite, or finite and nonzero. | |
2415 | But if the mode has signed zeros, and does not round towards | |
2416 | -infinity, then 0 - 0 is 0, not -0. */ | |
2417 | if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode)) | |
2418 | return simplify_gen_unary (NEG, mode, op1, mode); | |
2419 | ||
2420 | /* (-1 - a) is ~a. */ | |
2421 | if (trueop0 == constm1_rtx) | |
2422 | return simplify_gen_unary (NOT, mode, op1, mode); | |
2423 | ||
2424 | /* Subtracting 0 has no effect unless the mode has signed zeros | |
2425 | and supports rounding towards -infinity. In such a case, | |
2426 | 0 - 0 is -0. */ | |
2427 | if (!(HONOR_SIGNED_ZEROS (mode) | |
2428 | && HONOR_SIGN_DEPENDENT_ROUNDING (mode)) | |
2429 | && trueop1 == CONST0_RTX (mode)) | |
2430 | return op0; | |
2431 | ||
2432 | /* See if this is something like X * C - X or vice versa or | |
2433 | if the multiplication is written as a shift. If so, we can | |
2434 | distribute and make a new multiply, shift, or maybe just | |
2435 | have X (if C is 2 in the example above). But don't make | |
2436 | something more expensive than we had before. */ | |
2437 | ||
6800ea5c | 2438 | if (SCALAR_INT_MODE_P (mode)) |
3198b947 | 2439 | { |
54fb1ae0 | 2440 | double_int coeff0, negcoeff1; |
0a67e02c | 2441 | rtx lhs = op0, rhs = op1; |
3198b947 | 2442 | |
54fb1ae0 AS |
2443 | coeff0 = double_int_one; |
2444 | negcoeff1 = double_int_minus_one; | |
2445 | ||
0a67e02c | 2446 | if (GET_CODE (lhs) == NEG) |
fab2f52c | 2447 | { |
54fb1ae0 | 2448 | coeff0 = double_int_minus_one; |
fab2f52c AO |
2449 | lhs = XEXP (lhs, 0); |
2450 | } | |
0a67e02c | 2451 | else if (GET_CODE (lhs) == MULT |
481683e1 | 2452 | && CONST_INT_P (XEXP (lhs, 1))) |
0a67e02c | 2453 | { |
27bcd47c | 2454 | coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1))); |
fab2f52c | 2455 | lhs = XEXP (lhs, 0); |
0a67e02c PB |
2456 | } |
2457 | else if (GET_CODE (lhs) == ASHIFT | |
481683e1 | 2458 | && CONST_INT_P (XEXP (lhs, 1)) |
0a67e02c PB |
2459 | && INTVAL (XEXP (lhs, 1)) >= 0 |
2460 | && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
2461 | { | |
27bcd47c | 2462 | coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1))); |
0a67e02c PB |
2463 | lhs = XEXP (lhs, 0); |
2464 | } | |
3198b947 | 2465 | |
0a67e02c | 2466 | if (GET_CODE (rhs) == NEG) |
fab2f52c | 2467 | { |
54fb1ae0 | 2468 | negcoeff1 = double_int_one; |
fab2f52c AO |
2469 | rhs = XEXP (rhs, 0); |
2470 | } | |
0a67e02c | 2471 | else if (GET_CODE (rhs) == MULT |
481683e1 | 2472 | && CONST_INT_P (XEXP (rhs, 1))) |
0a67e02c | 2473 | { |
27bcd47c | 2474 | negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1))); |
fab2f52c | 2475 | rhs = XEXP (rhs, 0); |
0a67e02c PB |
2476 | } |
2477 | else if (GET_CODE (rhs) == ASHIFT | |
481683e1 | 2478 | && CONST_INT_P (XEXP (rhs, 1)) |
0a67e02c PB |
2479 | && INTVAL (XEXP (rhs, 1)) >= 0 |
2480 | && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
2481 | { | |
27bcd47c LC |
2482 | negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1))); |
2483 | negcoeff1 = -negcoeff1; | |
0a67e02c PB |
2484 | rhs = XEXP (rhs, 0); |
2485 | } | |
2486 | ||
2487 | if (rtx_equal_p (lhs, rhs)) | |
2488 | { | |
2489 | rtx orig = gen_rtx_MINUS (mode, op0, op1); | |
fab2f52c | 2490 | rtx coeff; |
54fb1ae0 | 2491 | double_int val; |
f40751dd | 2492 | bool speed = optimize_function_for_speed_p (cfun); |
fab2f52c | 2493 | |
27bcd47c | 2494 | val = coeff0 + negcoeff1; |
54fb1ae0 | 2495 | coeff = immed_double_int_const (val, mode); |
fab2f52c AO |
2496 | |
2497 | tem = simplify_gen_binary (MULT, mode, lhs, coeff); | |
5e8f01f4 | 2498 | return set_src_cost (tem, speed) <= set_src_cost (orig, speed) |
0a67e02c PB |
2499 | ? tem : 0; |
2500 | } | |
3198b947 RH |
2501 | } |
2502 | ||
0a67e02c PB |
2503 | /* (a - (-b)) -> (a + b). True even for IEEE. */ |
2504 | if (GET_CODE (op1) == NEG) | |
2505 | return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0)); | |
3198b947 | 2506 | |
0a67e02c PB |
2507 | /* (-x - c) may be simplified as (-c - x). */ |
2508 | if (GET_CODE (op0) == NEG | |
33ffb5c5 | 2509 | && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1))) |
79ae63b1 | 2510 | { |
0a67e02c PB |
2511 | tem = simplify_unary_operation (NEG, mode, op1, mode); |
2512 | if (tem) | |
2513 | return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0)); | |
2514 | } | |
79ae63b1 | 2515 | |
0a67e02c | 2516 | /* Don't let a relocatable value get a negative coeff. */ |
481683e1 | 2517 | if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode) |
0a67e02c PB |
2518 | return simplify_gen_binary (PLUS, mode, |
2519 | op0, | |
2520 | neg_const_int (mode, op1)); | |
2521 | ||
2522 | /* (x - (x & y)) -> (x & ~y) */ | |
6b74529d | 2523 | if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND) |
0a67e02c PB |
2524 | { |
2525 | if (rtx_equal_p (op0, XEXP (op1, 0))) | |
79ae63b1 | 2526 | { |
0a67e02c PB |
2527 | tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1), |
2528 | GET_MODE (XEXP (op1, 1))); | |
2529 | return simplify_gen_binary (AND, mode, op0, tem); | |
2530 | } | |
2531 | if (rtx_equal_p (op0, XEXP (op1, 1))) | |
2532 | { | |
2533 | tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0), | |
2534 | GET_MODE (XEXP (op1, 0))); | |
2535 | return simplify_gen_binary (AND, mode, op0, tem); | |
79ae63b1 | 2536 | } |
79ae63b1 | 2537 | } |
1941069a | 2538 | |
bd1ef757 PB |
2539 | /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done |
2540 | by reversing the comparison code if valid. */ | |
2541 | if (STORE_FLAG_VALUE == 1 | |
2542 | && trueop0 == const1_rtx | |
2543 | && COMPARISON_P (op1) | |
2544 | && (reversed = reversed_comparison (op1, mode))) | |
2545 | return reversed; | |
2546 | ||
2547 | /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */ | |
4bf371ea RG |
2548 | if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) |
2549 | && GET_CODE (op1) == MULT | |
bd1ef757 PB |
2550 | && GET_CODE (XEXP (op1, 0)) == NEG) |
2551 | { | |
2552 | rtx in1, in2; | |
2553 | ||
2554 | in1 = XEXP (XEXP (op1, 0), 0); | |
2555 | in2 = XEXP (op1, 1); | |
2556 | return simplify_gen_binary (PLUS, mode, | |
2557 | simplify_gen_binary (MULT, mode, | |
2558 | in1, in2), | |
2559 | op0); | |
2560 | } | |
2561 | ||
2562 | /* Canonicalize (minus (neg A) (mult B C)) to | |
2563 | (minus (mult (neg B) C) A). */ | |
4bf371ea RG |
2564 | if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) |
2565 | && GET_CODE (op1) == MULT | |
bd1ef757 PB |
2566 | && GET_CODE (op0) == NEG) |
2567 | { | |
2568 | rtx in1, in2; | |
2569 | ||
2570 | in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode); | |
2571 | in2 = XEXP (op1, 1); | |
2572 | return simplify_gen_binary (MINUS, mode, | |
2573 | simplify_gen_binary (MULT, mode, | |
2574 | in1, in2), | |
2575 | XEXP (op0, 0)); | |
2576 | } | |
2577 | ||
1941069a PB |
2578 | /* If one of the operands is a PLUS or a MINUS, see if we can |
2579 | simplify this by the associative law. This will, for example, | |
2580 | canonicalize (minus A (plus B C)) to (minus (minus A B) C). | |
2581 | Don't use the associative law for floating point. | |
2582 | The inaccuracy makes it nonassociative, | |
2583 | and subtle programs can break if operations are associated. */ | |
2584 | ||
2585 | if (INTEGRAL_MODE_P (mode) | |
2586 | && (plus_minus_operand_p (op0) | |
2587 | || plus_minus_operand_p (op1)) | |
2588 | && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0) | |
2589 | return tem; | |
0a67e02c | 2590 | break; |
15e5ad76 | 2591 | |
0a67e02c PB |
2592 | case MULT: |
2593 | if (trueop1 == constm1_rtx) | |
2594 | return simplify_gen_unary (NEG, mode, op0, mode); | |
2595 | ||
29b40d79 BS |
2596 | if (GET_CODE (op0) == NEG) |
2597 | { | |
2598 | rtx temp = simplify_unary_operation (NEG, mode, op1, mode); | |
707f9919 JJ |
2599 | /* If op1 is a MULT as well and simplify_unary_operation |
2600 | just moved the NEG to the second operand, simplify_gen_binary | |
2601 | below could through simplify_associative_operation move | |
2602 | the NEG around again and recurse endlessly. */ | |
2603 | if (temp | |
2604 | && GET_CODE (op1) == MULT | |
2605 | && GET_CODE (temp) == MULT | |
2606 | && XEXP (op1, 0) == XEXP (temp, 0) | |
2607 | && GET_CODE (XEXP (temp, 1)) == NEG | |
2608 | && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0)) | |
2609 | temp = NULL_RTX; | |
29b40d79 BS |
2610 | if (temp) |
2611 | return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp); | |
2612 | } | |
2613 | if (GET_CODE (op1) == NEG) | |
2614 | { | |
2615 | rtx temp = simplify_unary_operation (NEG, mode, op0, mode); | |
707f9919 JJ |
2616 | /* If op0 is a MULT as well and simplify_unary_operation |
2617 | just moved the NEG to the second operand, simplify_gen_binary | |
2618 | below could through simplify_associative_operation move | |
2619 | the NEG around again and recurse endlessly. */ | |
2620 | if (temp | |
2621 | && GET_CODE (op0) == MULT | |
2622 | && GET_CODE (temp) == MULT | |
2623 | && XEXP (op0, 0) == XEXP (temp, 0) | |
2624 | && GET_CODE (XEXP (temp, 1)) == NEG | |
2625 | && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0)) | |
2626 | temp = NULL_RTX; | |
29b40d79 BS |
2627 | if (temp) |
2628 | return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0)); | |
2629 | } | |
2630 | ||
0a67e02c PB |
2631 | /* Maybe simplify x * 0 to 0. The reduction is not valid if |
2632 | x is NaN, since x * 0 is then also NaN. Nor is it valid | |
2633 | when the mode has signed zeros, since multiplying a negative | |
2634 | number by 0 will give -0, not 0. */ | |
2635 | if (!HONOR_NANS (mode) | |
2636 | && !HONOR_SIGNED_ZEROS (mode) | |
2637 | && trueop1 == CONST0_RTX (mode) | |
2638 | && ! side_effects_p (op0)) | |
2639 | return op1; | |
2640 | ||
2641 | /* In IEEE floating point, x*1 is not equivalent to x for | |
2642 | signalling NaNs. */ | |
2643 | if (!HONOR_SNANS (mode) | |
2644 | && trueop1 == CONST1_RTX (mode)) | |
2645 | return op0; | |
2646 | ||
2647 | /* Convert multiply by constant power of two into shift unless | |
2648 | we are still generating RTL. This test is a kludge. */ | |
481683e1 | 2649 | if (CONST_INT_P (trueop1) |
43c36287 | 2650 | && (val = exact_log2 (UINTVAL (trueop1))) >= 0 |
0a67e02c PB |
2651 | /* If the mode is larger than the host word size, and the |
2652 | uppermost bit is set, then this isn't a power of two due | |
2653 | to implicit sign extension. */ | |
2654 | && (width <= HOST_BITS_PER_WIDE_INT | |
2655 | || val != HOST_BITS_PER_WIDE_INT - 1)) | |
2656 | return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val)); | |
2657 | ||
fab2f52c | 2658 | /* Likewise for multipliers wider than a word. */ |
48175537 | 2659 | if (CONST_DOUBLE_AS_INT_P (trueop1) |
1753331b RS |
2660 | && GET_MODE (op0) == mode |
2661 | && CONST_DOUBLE_LOW (trueop1) == 0 | |
929e10f4 | 2662 | && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0 |
49ab6098 KZ |
2663 | && (val < HOST_BITS_PER_DOUBLE_INT - 1 |
2664 | || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT)) | |
fab2f52c AO |
2665 | return simplify_gen_binary (ASHIFT, mode, op0, |
2666 | GEN_INT (val + HOST_BITS_PER_WIDE_INT)); | |
2667 | ||
0a67e02c | 2668 | /* x*2 is x+x and x*(-1) is -x */ |
48175537 | 2669 | if (CONST_DOUBLE_AS_FLOAT_P (trueop1) |
3d8bf70f | 2670 | && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1)) |
50cd60be | 2671 | && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1)) |
0a67e02c PB |
2672 | && GET_MODE (op0) == mode) |
2673 | { | |
2674 | REAL_VALUE_TYPE d; | |
2675 | REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); | |
15e5ad76 | 2676 | |
0a67e02c PB |
2677 | if (REAL_VALUES_EQUAL (d, dconst2)) |
2678 | return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0)); | |
3e4093b6 | 2679 | |
1753331b RS |
2680 | if (!HONOR_SNANS (mode) |
2681 | && REAL_VALUES_EQUAL (d, dconstm1)) | |
0a67e02c PB |
2682 | return simplify_gen_unary (NEG, mode, op0, mode); |
2683 | } | |
15e5ad76 | 2684 | |
1753331b RS |
2685 | /* Optimize -x * -x as x * x. */ |
2686 | if (FLOAT_MODE_P (mode) | |
2687 | && GET_CODE (op0) == NEG | |
2688 | && GET_CODE (op1) == NEG | |
2689 | && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)) | |
2690 | && !side_effects_p (XEXP (op0, 0))) | |
2691 | return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0)); | |
2692 | ||
2693 | /* Likewise, optimize abs(x) * abs(x) as x * x. */ | |
2694 | if (SCALAR_FLOAT_MODE_P (mode) | |
2695 | && GET_CODE (op0) == ABS | |
2696 | && GET_CODE (op1) == ABS | |
2697 | && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)) | |
2698 | && !side_effects_p (XEXP (op0, 0))) | |
2699 | return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0)); | |
2700 | ||
0a67e02c PB |
2701 | /* Reassociate multiplication, but for floating point MULTs |
2702 | only when the user specifies unsafe math optimizations. */ | |
2703 | if (! FLOAT_MODE_P (mode) | |
2704 | || flag_unsafe_math_optimizations) | |
2705 | { | |
2706 | tem = simplify_associative_operation (code, mode, op0, op1); | |
2707 | if (tem) | |
2708 | return tem; | |
2709 | } | |
2710 | break; | |
6355b2d5 | 2711 | |
0a67e02c | 2712 | case IOR: |
a82e045d | 2713 | if (trueop1 == CONST0_RTX (mode)) |
0a67e02c | 2714 | return op0; |
e7160b27 JM |
2715 | if (INTEGRAL_MODE_P (mode) |
2716 | && trueop1 == CONSTM1_RTX (mode) | |
2717 | && !side_effects_p (op0)) | |
0a67e02c PB |
2718 | return op1; |
2719 | if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
2720 | return op0; | |
2721 | /* A | (~A) -> -1 */ | |
2722 | if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) | |
2723 | || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) | |
2724 | && ! side_effects_p (op0) | |
3f2960d5 | 2725 | && SCALAR_INT_MODE_P (mode)) |
0a67e02c | 2726 | return constm1_rtx; |
bd1ef757 PB |
2727 | |
2728 | /* (ior A C) is C if all bits of A that might be nonzero are on in C. */ | |
481683e1 | 2729 | if (CONST_INT_P (op1) |
46c9550f | 2730 | && HWI_COMPUTABLE_MODE_P (mode) |
e7160b27 JM |
2731 | && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0 |
2732 | && !side_effects_p (op0)) | |
bd1ef757 | 2733 | return op1; |
b8698a0f | 2734 | |
49e7a9d4 RS |
2735 | /* Canonicalize (X & C1) | C2. */ |
2736 | if (GET_CODE (op0) == AND | |
481683e1 SZ |
2737 | && CONST_INT_P (trueop1) |
2738 | && CONST_INT_P (XEXP (op0, 1))) | |
49e7a9d4 RS |
2739 | { |
2740 | HOST_WIDE_INT mask = GET_MODE_MASK (mode); | |
2741 | HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1)); | |
2742 | HOST_WIDE_INT c2 = INTVAL (trueop1); | |
2743 | ||
2744 | /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */ | |
2745 | if ((c1 & c2) == c1 | |
2746 | && !side_effects_p (XEXP (op0, 0))) | |
2747 | return trueop1; | |
2748 | ||
2749 | /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */ | |
2750 | if (((c1|c2) & mask) == mask) | |
2751 | return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1); | |
2752 | ||
2753 | /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */ | |
2754 | if (((c1 & ~c2) & mask) != (c1 & mask)) | |
2755 | { | |
2756 | tem = simplify_gen_binary (AND, mode, XEXP (op0, 0), | |
2757 | gen_int_mode (c1 & ~c2, mode)); | |
2758 | return simplify_gen_binary (IOR, mode, tem, op1); | |
2759 | } | |
2760 | } | |
2761 | ||
bd1ef757 PB |
2762 | /* Convert (A & B) | A to A. */ |
2763 | if (GET_CODE (op0) == AND | |
2764 | && (rtx_equal_p (XEXP (op0, 0), op1) | |
2765 | || rtx_equal_p (XEXP (op0, 1), op1)) | |
2766 | && ! side_effects_p (XEXP (op0, 0)) | |
2767 | && ! side_effects_p (XEXP (op0, 1))) | |
2768 | return op1; | |
2769 | ||
2770 | /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the | |
2771 | mode size to (rotate A CX). */ | |
2772 | ||
2773 | if (GET_CODE (op1) == ASHIFT | |
2774 | || GET_CODE (op1) == SUBREG) | |
2775 | { | |
2776 | opleft = op1; | |
2777 | opright = op0; | |
2778 | } | |
2779 | else | |
2780 | { | |
2781 | opright = op1; | |
2782 | opleft = op0; | |
2783 | } | |
2784 | ||
2785 | if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT | |
2786 | && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0)) | |
481683e1 SZ |
2787 | && CONST_INT_P (XEXP (opleft, 1)) |
2788 | && CONST_INT_P (XEXP (opright, 1)) | |
bd1ef757 | 2789 | && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1)) |
5511bc5a | 2790 | == GET_MODE_PRECISION (mode))) |
bd1ef757 PB |
2791 | return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1)); |
2792 | ||
2793 | /* Same, but for ashift that has been "simplified" to a wider mode | |
2794 | by simplify_shift_const. */ | |
2795 | ||
2796 | if (GET_CODE (opleft) == SUBREG | |
2797 | && GET_CODE (SUBREG_REG (opleft)) == ASHIFT | |
2798 | && GET_CODE (opright) == LSHIFTRT | |
2799 | && GET_CODE (XEXP (opright, 0)) == SUBREG | |
2800 | && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0)) | |
2801 | && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0)) | |
2802 | && (GET_MODE_SIZE (GET_MODE (opleft)) | |
2803 | < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft)))) | |
2804 | && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0), | |
2805 | SUBREG_REG (XEXP (opright, 0))) | |
481683e1 SZ |
2806 | && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1)) |
2807 | && CONST_INT_P (XEXP (opright, 1)) | |
bd1ef757 | 2808 | && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1)) |
5511bc5a | 2809 | == GET_MODE_PRECISION (mode))) |
bd1ef757 | 2810 | return gen_rtx_ROTATE (mode, XEXP (opright, 0), |
01578564 | 2811 | XEXP (SUBREG_REG (opleft), 1)); |
bd1ef757 PB |
2812 | |
2813 | /* If we have (ior (and (X C1) C2)), simplify this by making | |
2814 | C1 as small as possible if C1 actually changes. */ | |
481683e1 | 2815 | if (CONST_INT_P (op1) |
46c9550f | 2816 | && (HWI_COMPUTABLE_MODE_P (mode) |
bd1ef757 PB |
2817 | || INTVAL (op1) > 0) |
2818 | && GET_CODE (op0) == AND | |
481683e1 SZ |
2819 | && CONST_INT_P (XEXP (op0, 1)) |
2820 | && CONST_INT_P (op1) | |
43c36287 | 2821 | && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0) |
bd1ef757 PB |
2822 | return simplify_gen_binary (IOR, mode, |
2823 | simplify_gen_binary | |
2824 | (AND, mode, XEXP (op0, 0), | |
43c36287 EB |
2825 | GEN_INT (UINTVAL (XEXP (op0, 1)) |
2826 | & ~UINTVAL (op1))), | |
bd1ef757 PB |
2827 | op1); |
2828 | ||
2829 | /* If OP0 is (ashiftrt (plus ...) C), it might actually be | |
2830 | a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and | |
2831 | the PLUS does not affect any of the bits in OP1: then we can do | |
2832 | the IOR as a PLUS and we can associate. This is valid if OP1 | |
2833 | can be safely shifted left C bits. */ | |
481683e1 | 2834 | if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT |
bd1ef757 | 2835 | && GET_CODE (XEXP (op0, 0)) == PLUS |
481683e1 SZ |
2836 | && CONST_INT_P (XEXP (XEXP (op0, 0), 1)) |
2837 | && CONST_INT_P (XEXP (op0, 1)) | |
bd1ef757 PB |
2838 | && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT) |
2839 | { | |
2840 | int count = INTVAL (XEXP (op0, 1)); | |
2841 | HOST_WIDE_INT mask = INTVAL (trueop1) << count; | |
2842 | ||
2843 | if (mask >> count == INTVAL (trueop1) | |
046f1eee | 2844 | && trunc_int_for_mode (mask, mode) == mask |
bd1ef757 PB |
2845 | && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0) |
2846 | return simplify_gen_binary (ASHIFTRT, mode, | |
0a81f074 RS |
2847 | plus_constant (mode, XEXP (op0, 0), |
2848 | mask), | |
bd1ef757 PB |
2849 | XEXP (op0, 1)); |
2850 | } | |
2851 | ||
b17c024f EB |
2852 | tem = simplify_byte_swapping_operation (code, mode, op0, op1); |
2853 | if (tem) | |
2854 | return tem; | |
2855 | ||
0a67e02c PB |
2856 | tem = simplify_associative_operation (code, mode, op0, op1); |
2857 | if (tem) | |
2858 | return tem; | |
2859 | break; | |
2860 | ||
2861 | case XOR: | |
a82e045d | 2862 | if (trueop1 == CONST0_RTX (mode)) |
0a67e02c | 2863 | return op0; |
e7c82a99 | 2864 | if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode)) |
0a67e02c | 2865 | return simplify_gen_unary (NOT, mode, op0, mode); |
f5d1572a | 2866 | if (rtx_equal_p (trueop0, trueop1) |
0a67e02c PB |
2867 | && ! side_effects_p (op0) |
2868 | && GET_MODE_CLASS (mode) != MODE_CC) | |
6bd13540 | 2869 | return CONST0_RTX (mode); |
0a67e02c PB |
2870 | |
2871 | /* Canonicalize XOR of the most significant bit to PLUS. */ | |
33ffb5c5 | 2872 | if (CONST_SCALAR_INT_P (op1) |
0a67e02c PB |
2873 | && mode_signbit_p (mode, op1)) |
2874 | return simplify_gen_binary (PLUS, mode, op0, op1); | |
2875 | /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */ | |
33ffb5c5 | 2876 | if (CONST_SCALAR_INT_P (op1) |
0a67e02c | 2877 | && GET_CODE (op0) == PLUS |
33ffb5c5 | 2878 | && CONST_SCALAR_INT_P (XEXP (op0, 1)) |
0a67e02c PB |
2879 | && mode_signbit_p (mode, XEXP (op0, 1))) |
2880 | return simplify_gen_binary (XOR, mode, XEXP (op0, 0), | |
2881 | simplify_gen_binary (XOR, mode, op1, | |
2882 | XEXP (op0, 1))); | |
bd1ef757 PB |
2883 | |
2884 | /* If we are XORing two things that have no bits in common, | |
2885 | convert them into an IOR. This helps to detect rotation encoded | |
2886 | using those methods and possibly other simplifications. */ | |
2887 | ||
46c9550f | 2888 | if (HWI_COMPUTABLE_MODE_P (mode) |
bd1ef757 PB |
2889 | && (nonzero_bits (op0, mode) |
2890 | & nonzero_bits (op1, mode)) == 0) | |
2891 | return (simplify_gen_binary (IOR, mode, op0, op1)); | |
2892 | ||
2893 | /* Convert (XOR (NOT x) (NOT y)) to (XOR x y). | |
2894 | Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for | |
2895 | (NOT y). */ | |
2896 | { | |
2897 | int num_negated = 0; | |
2898 | ||
2899 | if (GET_CODE (op0) == NOT) | |
2900 | num_negated++, op0 = XEXP (op0, 0); | |
2901 | if (GET_CODE (op1) == NOT) | |
2902 | num_negated++, op1 = XEXP (op1, 0); | |
2903 | ||
2904 | if (num_negated == 2) | |
2905 | return simplify_gen_binary (XOR, mode, op0, op1); | |
2906 | else if (num_negated == 1) | |
2907 | return simplify_gen_unary (NOT, mode, | |
2908 | simplify_gen_binary (XOR, mode, op0, op1), | |
2909 | mode); | |
2910 | } | |
2911 | ||
2912 | /* Convert (xor (and A B) B) to (and (not A) B). The latter may | |
2913 | correspond to a machine insn or result in further simplifications | |
2914 | if B is a constant. */ | |
2915 | ||
2916 | if (GET_CODE (op0) == AND | |
2917 | && rtx_equal_p (XEXP (op0, 1), op1) | |
2918 | && ! side_effects_p (op1)) | |
2919 | return simplify_gen_binary (AND, mode, | |
2920 | simplify_gen_unary (NOT, mode, | |
2921 | XEXP (op0, 0), mode), | |
2922 | op1); | |
2923 | ||
2924 | else if (GET_CODE (op0) == AND | |
2925 | && rtx_equal_p (XEXP (op0, 0), op1) | |
2926 | && ! side_effects_p (op1)) | |
2927 | return simplify_gen_binary (AND, mode, | |
2928 | simplify_gen_unary (NOT, mode, | |
2929 | XEXP (op0, 1), mode), | |
2930 | op1); | |
2931 | ||
54833ec0 CLT |
2932 | /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P), |
2933 | we can transform like this: | |
2934 | (A&B)^C == ~(A&B)&C | ~C&(A&B) | |
2935 | == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law | |
2936 | == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order | |
2937 | Attempt a few simplifications when B and C are both constants. */ | |
2938 | if (GET_CODE (op0) == AND | |
2939 | && CONST_INT_P (op1) | |
2940 | && CONST_INT_P (XEXP (op0, 1))) | |
2941 | { | |
2942 | rtx a = XEXP (op0, 0); | |
2943 | rtx b = XEXP (op0, 1); | |
2944 | rtx c = op1; | |
2945 | HOST_WIDE_INT bval = INTVAL (b); | |
2946 | HOST_WIDE_INT cval = INTVAL (c); | |
2947 | ||
2948 | rtx na_c | |
2949 | = simplify_binary_operation (AND, mode, | |
2950 | simplify_gen_unary (NOT, mode, a, mode), | |
2951 | c); | |
2952 | if ((~cval & bval) == 0) | |
2953 | { | |
2954 | /* Try to simplify ~A&C | ~B&C. */ | |
2955 | if (na_c != NULL_RTX) | |
2956 | return simplify_gen_binary (IOR, mode, na_c, | |
2957 | GEN_INT (~bval & cval)); | |
2958 | } | |
2959 | else | |
2960 | { | |
2961 | /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */ | |
2962 | if (na_c == const0_rtx) | |
2963 | { | |
2964 | rtx a_nc_b = simplify_gen_binary (AND, mode, a, | |
2965 | GEN_INT (~cval & bval)); | |
2966 | return simplify_gen_binary (IOR, mode, a_nc_b, | |
2967 | GEN_INT (~bval & cval)); | |
2968 | } | |
2969 | } | |
2970 | } | |
2971 | ||
bd1ef757 PB |
2972 | /* (xor (comparison foo bar) (const_int 1)) can become the reversed |
2973 | comparison if STORE_FLAG_VALUE is 1. */ | |
2974 | if (STORE_FLAG_VALUE == 1 | |
2975 | && trueop1 == const1_rtx | |
2976 | && COMPARISON_P (op0) | |
2977 | && (reversed = reversed_comparison (op0, mode))) | |
2978 | return reversed; | |
2979 | ||
2980 | /* (lshiftrt foo C) where C is the number of bits in FOO minus 1 | |
2981 | is (lt foo (const_int 0)), so we can perform the above | |
2982 | simplification if STORE_FLAG_VALUE is 1. */ | |
2983 | ||
2984 | if (STORE_FLAG_VALUE == 1 | |
2985 | && trueop1 == const1_rtx | |
2986 | && GET_CODE (op0) == LSHIFTRT | |
481683e1 | 2987 | && CONST_INT_P (XEXP (op0, 1)) |
5511bc5a | 2988 | && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1) |
bd1ef757 PB |
2989 | return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx); |
2990 | ||
2991 | /* (xor (comparison foo bar) (const_int sign-bit)) | |
2992 | when STORE_FLAG_VALUE is the sign bit. */ | |
2d0c270f | 2993 | if (val_signbit_p (mode, STORE_FLAG_VALUE) |
bd1ef757 PB |
2994 | && trueop1 == const_true_rtx |
2995 | && COMPARISON_P (op0) | |
2996 | && (reversed = reversed_comparison (op0, mode))) | |
2997 | return reversed; | |
2998 | ||
b17c024f EB |
2999 | tem = simplify_byte_swapping_operation (code, mode, op0, op1); |
3000 | if (tem) | |
3001 | return tem; | |
3002 | ||
0a67e02c PB |
3003 | tem = simplify_associative_operation (code, mode, op0, op1); |
3004 | if (tem) | |
3005 | return tem; | |
3006 | break; | |
3007 | ||
3008 | case AND: | |
3f2960d5 RH |
3009 | if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0)) |
3010 | return trueop1; | |
e7c82a99 JJ |
3011 | if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode)) |
3012 | return op0; | |
46c9550f | 3013 | if (HWI_COMPUTABLE_MODE_P (mode)) |
dc5b3407 ZD |
3014 | { |
3015 | HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode); | |
f5a17c43 | 3016 | HOST_WIDE_INT nzop1; |
481683e1 | 3017 | if (CONST_INT_P (trueop1)) |
f5a17c43 BS |
3018 | { |
3019 | HOST_WIDE_INT val1 = INTVAL (trueop1); | |
3020 | /* If we are turning off bits already known off in OP0, we need | |
3021 | not do an AND. */ | |
3022 | if ((nzop0 & ~val1) == 0) | |
3023 | return op0; | |
3024 | } | |
3025 | nzop1 = nonzero_bits (trueop1, mode); | |
dc5b3407 | 3026 | /* If we are clearing all the nonzero bits, the result is zero. */ |
f5a17c43 BS |
3027 | if ((nzop1 & nzop0) == 0 |
3028 | && !side_effects_p (op0) && !side_effects_p (op1)) | |
dc5b3407 ZD |
3029 | return CONST0_RTX (mode); |
3030 | } | |
f5d1572a | 3031 | if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0) |
0a67e02c PB |
3032 | && GET_MODE_CLASS (mode) != MODE_CC) |
3033 | return op0; | |
3034 | /* A & (~A) -> 0 */ | |
3035 | if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) | |
3036 | || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) | |
3037 | && ! side_effects_p (op0) | |
3038 | && GET_MODE_CLASS (mode) != MODE_CC) | |
3f2960d5 | 3039 | return CONST0_RTX (mode); |
0a67e02c PB |
3040 | |
3041 | /* Transform (and (extend X) C) into (zero_extend (and X C)) if | |
3042 | there are no nonzero bits of C outside of X's mode. */ | |
3043 | if ((GET_CODE (op0) == SIGN_EXTEND | |
3044 | || GET_CODE (op0) == ZERO_EXTEND) | |
481683e1 | 3045 | && CONST_INT_P (trueop1) |
46c9550f | 3046 | && HWI_COMPUTABLE_MODE_P (mode) |
0a67e02c | 3047 | && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0))) |
43c36287 | 3048 | & UINTVAL (trueop1)) == 0) |
0a67e02c PB |
3049 | { |
3050 | enum machine_mode imode = GET_MODE (XEXP (op0, 0)); | |
3051 | tem = simplify_gen_binary (AND, imode, XEXP (op0, 0), | |
3052 | gen_int_mode (INTVAL (trueop1), | |
3053 | imode)); | |
3054 | return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode); | |
3055 | } | |
3056 | ||
fcaf7e12 AN |
3057 | /* Transform (and (truncate X) C) into (truncate (and X C)). This way |
3058 | we might be able to further simplify the AND with X and potentially | |
3059 | remove the truncation altogether. */ | |
3060 | if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1)) | |
3061 | { | |
3062 | rtx x = XEXP (op0, 0); | |
3063 | enum machine_mode xmode = GET_MODE (x); | |
3064 | tem = simplify_gen_binary (AND, xmode, x, | |
3065 | gen_int_mode (INTVAL (trueop1), xmode)); | |
3066 | return simplify_gen_unary (TRUNCATE, mode, tem, xmode); | |
3067 | } | |
3068 | ||
49e7a9d4 RS |
3069 | /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */ |
3070 | if (GET_CODE (op0) == IOR | |
481683e1 SZ |
3071 | && CONST_INT_P (trueop1) |
3072 | && CONST_INT_P (XEXP (op0, 1))) | |
49e7a9d4 RS |
3073 | { |
3074 | HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1)); | |
3075 | return simplify_gen_binary (IOR, mode, | |
3076 | simplify_gen_binary (AND, mode, | |
3077 | XEXP (op0, 0), op1), | |
3078 | gen_int_mode (tmp, mode)); | |
3079 | } | |
3080 | ||
bd1ef757 PB |
3081 | /* Convert (A ^ B) & A to A & (~B) since the latter is often a single |
3082 | insn (and may simplify more). */ | |
3083 | if (GET_CODE (op0) == XOR | |
3084 | && rtx_equal_p (XEXP (op0, 0), op1) | |
3085 | && ! side_effects_p (op1)) | |
3086 | return simplify_gen_binary (AND, mode, | |
3087 | simplify_gen_unary (NOT, mode, | |
3088 | XEXP (op0, 1), mode), | |
3089 | op1); | |
3090 | ||
3091 | if (GET_CODE (op0) == XOR | |
3092 | && rtx_equal_p (XEXP (op0, 1), op1) | |
3093 | && ! side_effects_p (op1)) | |
3094 | return simplify_gen_binary (AND, mode, | |
3095 | simplify_gen_unary (NOT, mode, | |
3096 | XEXP (op0, 0), mode), | |
3097 | op1); | |
3098 | ||
3099 | /* Similarly for (~(A ^ B)) & A. */ | |
3100 | if (GET_CODE (op0) == NOT | |
3101 | && GET_CODE (XEXP (op0, 0)) == XOR | |
3102 | && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1) | |
3103 | && ! side_effects_p (op1)) | |
3104 | return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1); | |
3105 | ||
3106 | if (GET_CODE (op0) == NOT | |
3107 | && GET_CODE (XEXP (op0, 0)) == XOR | |
3108 | && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1) | |
3109 | && ! side_effects_p (op1)) | |
3110 | return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1); | |
3111 | ||
3112 | /* Convert (A | B) & A to A. */ | |
3113 | if (GET_CODE (op0) == IOR | |
3114 | && (rtx_equal_p (XEXP (op0, 0), op1) | |
3115 | || rtx_equal_p (XEXP (op0, 1), op1)) | |
3116 | && ! side_effects_p (XEXP (op0, 0)) | |
3117 | && ! side_effects_p (XEXP (op0, 1))) | |
3118 | return op1; | |
3119 | ||
0a67e02c PB |
3120 | /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M, |
3121 | ((A & N) + B) & M -> (A + B) & M | |
3122 | Similarly if (N & M) == 0, | |
3123 | ((A | N) + B) & M -> (A + B) & M | |
dc5b3407 ZD |
3124 | and for - instead of + and/or ^ instead of |. |
3125 | Also, if (N & M) == 0, then | |
3126 | (A +- N) & M -> A & M. */ | |
481683e1 | 3127 | if (CONST_INT_P (trueop1) |
46c9550f | 3128 | && HWI_COMPUTABLE_MODE_P (mode) |
43c36287 EB |
3129 | && ~UINTVAL (trueop1) |
3130 | && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0 | |
0a67e02c PB |
3131 | && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS)) |
3132 | { | |
3133 | rtx pmop[2]; | |
3134 | int which; | |
3135 | ||
3136 | pmop[0] = XEXP (op0, 0); | |
3137 | pmop[1] = XEXP (op0, 1); | |
3138 | ||
481683e1 | 3139 | if (CONST_INT_P (pmop[1]) |
43c36287 | 3140 | && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0) |
dc5b3407 ZD |
3141 | return simplify_gen_binary (AND, mode, pmop[0], op1); |
3142 | ||
0a67e02c PB |
3143 | for (which = 0; which < 2; which++) |
3144 | { | |
3145 | tem = pmop[which]; | |
3146 | switch (GET_CODE (tem)) | |
6355b2d5 | 3147 | { |
0a67e02c | 3148 | case AND: |
481683e1 | 3149 | if (CONST_INT_P (XEXP (tem, 1)) |
43c36287 EB |
3150 | && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) |
3151 | == UINTVAL (trueop1)) | |
0a67e02c | 3152 | pmop[which] = XEXP (tem, 0); |
6355b2d5 | 3153 | break; |
0a67e02c PB |
3154 | case IOR: |
3155 | case XOR: | |
481683e1 | 3156 | if (CONST_INT_P (XEXP (tem, 1)) |
43c36287 | 3157 | && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0) |
0a67e02c | 3158 | pmop[which] = XEXP (tem, 0); |
6355b2d5 | 3159 | break; |
6355b2d5 JJ |
3160 | default: |
3161 | break; | |
3162 | } | |
3163 | } | |
3164 | ||
0a67e02c PB |
3165 | if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1)) |
3166 | { | |
3167 | tem = simplify_gen_binary (GET_CODE (op0), mode, | |
3168 | pmop[0], pmop[1]); | |
3169 | return simplify_gen_binary (code, mode, tem, op1); | |
3170 | } | |
3171 | } | |
f79db4f6 AP |
3172 | |
3173 | /* (and X (ior (not X) Y) -> (and X Y) */ | |
3174 | if (GET_CODE (op1) == IOR | |
3175 | && GET_CODE (XEXP (op1, 0)) == NOT | |
3176 | && op0 == XEXP (XEXP (op1, 0), 0)) | |
3177 | return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1)); | |
3178 | ||
3179 | /* (and (ior (not X) Y) X) -> (and X Y) */ | |
3180 | if (GET_CODE (op0) == IOR | |
3181 | && GET_CODE (XEXP (op0, 0)) == NOT | |
3182 | && op1 == XEXP (XEXP (op0, 0), 0)) | |
3183 | return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1)); | |
3184 | ||
b17c024f EB |
3185 | tem = simplify_byte_swapping_operation (code, mode, op0, op1); |
3186 | if (tem) | |
3187 | return tem; | |
3188 | ||
0a67e02c PB |
3189 | tem = simplify_associative_operation (code, mode, op0, op1); |
3190 | if (tem) | |
3191 | return tem; | |
3192 | break; | |
762297d9 | 3193 | |
0a67e02c PB |
3194 | case UDIV: |
3195 | /* 0/x is 0 (or x&0 if x has side-effects). */ | |
3f2960d5 RH |
3196 | if (trueop0 == CONST0_RTX (mode)) |
3197 | { | |
3198 | if (side_effects_p (op1)) | |
3199 | return simplify_gen_binary (AND, mode, op1, trueop0); | |
3200 | return trueop0; | |
3201 | } | |
3202 | /* x/1 is x. */ | |
3203 | if (trueop1 == CONST1_RTX (mode)) | |
76bd29f6 JJ |
3204 | { |
3205 | tem = rtl_hooks.gen_lowpart_no_emit (mode, op0); | |
3206 | if (tem) | |
3207 | return tem; | |
3208 | } | |
3f2960d5 | 3209 | /* Convert divide by power of two into shift. */ |
481683e1 | 3210 | if (CONST_INT_P (trueop1) |
43c36287 | 3211 | && (val = exact_log2 (UINTVAL (trueop1))) > 0) |
3f2960d5 RH |
3212 | return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val)); |
3213 | break; | |
d284eb28 | 3214 | |
0a67e02c PB |
3215 | case DIV: |
3216 | /* Handle floating point and integers separately. */ | |
3d8bf70f | 3217 | if (SCALAR_FLOAT_MODE_P (mode)) |
0a67e02c PB |
3218 | { |
3219 | /* Maybe change 0.0 / x to 0.0. This transformation isn't | |
3220 | safe for modes with NaNs, since 0.0 / 0.0 will then be | |
3221 | NaN rather than 0.0. Nor is it safe for modes with signed | |
3222 | zeros, since dividing 0 by a negative number gives -0.0 */ | |
3223 | if (trueop0 == CONST0_RTX (mode) | |
3224 | && !HONOR_NANS (mode) | |
3225 | && !HONOR_SIGNED_ZEROS (mode) | |
3226 | && ! side_effects_p (op1)) | |
3227 | return op0; | |
3228 | /* x/1.0 is x. */ | |
3229 | if (trueop1 == CONST1_RTX (mode) | |
3230 | && !HONOR_SNANS (mode)) | |
3231 | return op0; | |
0cedb36c | 3232 | |
48175537 | 3233 | if (CONST_DOUBLE_AS_FLOAT_P (trueop1) |
0a67e02c PB |
3234 | && trueop1 != CONST0_RTX (mode)) |
3235 | { | |
3236 | REAL_VALUE_TYPE d; | |
3237 | REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); | |
0cedb36c | 3238 | |
0a67e02c PB |
3239 | /* x/-1.0 is -x. */ |
3240 | if (REAL_VALUES_EQUAL (d, dconstm1) | |
3241 | && !HONOR_SNANS (mode)) | |
3242 | return simplify_gen_unary (NEG, mode, op0, mode); | |
0cedb36c | 3243 | |
0a67e02c | 3244 | /* Change FP division by a constant into multiplication. |
a1a82611 RE |
3245 | Only do this with -freciprocal-math. */ |
3246 | if (flag_reciprocal_math | |
0a67e02c PB |
3247 | && !REAL_VALUES_EQUAL (d, dconst0)) |
3248 | { | |
3249 | REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d); | |
3250 | tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode); | |
3251 | return simplify_gen_binary (MULT, mode, op0, tem); | |
3252 | } | |
3253 | } | |
3254 | } | |
e46bf5d6 | 3255 | else if (SCALAR_INT_MODE_P (mode)) |
0cedb36c | 3256 | { |
0a67e02c | 3257 | /* 0/x is 0 (or x&0 if x has side-effects). */ |
0e1b8b10 ILT |
3258 | if (trueop0 == CONST0_RTX (mode) |
3259 | && !cfun->can_throw_non_call_exceptions) | |
3f2960d5 RH |
3260 | { |
3261 | if (side_effects_p (op1)) | |
3262 | return simplify_gen_binary (AND, mode, op1, trueop0); | |
3263 | return trueop0; | |
3264 | } | |
0a67e02c | 3265 | /* x/1 is x. */ |
3f2960d5 | 3266 | if (trueop1 == CONST1_RTX (mode)) |
76bd29f6 JJ |
3267 | { |
3268 | tem = rtl_hooks.gen_lowpart_no_emit (mode, op0); | |
3269 | if (tem) | |
3270 | return tem; | |
3271 | } | |
0a67e02c PB |
3272 | /* x/-1 is -x. */ |
3273 | if (trueop1 == constm1_rtx) | |
3274 | { | |
9ce921ab | 3275 | rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0); |
76bd29f6 JJ |
3276 | if (x) |
3277 | return simplify_gen_unary (NEG, mode, x, mode); | |
0a67e02c PB |
3278 | } |
3279 | } | |
3280 | break; | |
0cedb36c | 3281 | |
0a67e02c PB |
3282 | case UMOD: |
3283 | /* 0%x is 0 (or x&0 if x has side-effects). */ | |
3f2960d5 RH |
3284 | if (trueop0 == CONST0_RTX (mode)) |
3285 | { | |
3286 | if (side_effects_p (op1)) | |
3287 | return simplify_gen_binary (AND, mode, op1, trueop0); | |
3288 | return trueop0; | |
3289 | } | |
3290 | /* x%1 is 0 (of x&0 if x has side-effects). */ | |
3291 | if (trueop1 == CONST1_RTX (mode)) | |
3292 | { | |
3293 | if (side_effects_p (op0)) | |
3294 | return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode)); | |
3295 | return CONST0_RTX (mode); | |
3296 | } | |
3297 | /* Implement modulus by power of two as AND. */ | |
481683e1 | 3298 | if (CONST_INT_P (trueop1) |
43c36287 | 3299 | && exact_log2 (UINTVAL (trueop1)) > 0) |
3f2960d5 RH |
3300 | return simplify_gen_binary (AND, mode, op0, |
3301 | GEN_INT (INTVAL (op1) - 1)); | |
3302 | break; | |
0cedb36c | 3303 | |
0a67e02c PB |
3304 | case MOD: |
3305 | /* 0%x is 0 (or x&0 if x has side-effects). */ | |
3f2960d5 RH |
3306 | if (trueop0 == CONST0_RTX (mode)) |
3307 | { | |
3308 | if (side_effects_p (op1)) | |
3309 | return simplify_gen_binary (AND, mode, op1, trueop0); | |
3310 | return trueop0; | |
3311 | } | |
3312 | /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */ | |
3313 | if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx) | |
3314 | { | |
3315 | if (side_effects_p (op0)) | |
3316 | return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode)); | |
3317 | return CONST0_RTX (mode); | |
3318 | } | |
3319 | break; | |
0cedb36c | 3320 | |
0a67e02c PB |
3321 | case ROTATERT: |
3322 | case ROTATE: | |
75776c6d JJ |
3323 | /* Canonicalize rotates by constant amount. If op1 is bitsize / 2, |
3324 | prefer left rotation, if op1 is from bitsize / 2 + 1 to | |
3325 | bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1 | |
3326 | amount instead. */ | |
3327 | if (CONST_INT_P (trueop1) | |
3328 | && IN_RANGE (INTVAL (trueop1), | |
3329 | GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE), | |
3330 | GET_MODE_BITSIZE (mode) - 1)) | |
3331 | return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE, | |
3332 | mode, op0, GEN_INT (GET_MODE_BITSIZE (mode) | |
3333 | - INTVAL (trueop1))); | |
3334 | /* FALLTHRU */ | |
0a67e02c | 3335 | case ASHIFTRT: |
70233f37 RS |
3336 | if (trueop1 == CONST0_RTX (mode)) |
3337 | return op0; | |
3338 | if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) | |
3339 | return op0; | |
0a67e02c | 3340 | /* Rotating ~0 always results in ~0. */ |
481683e1 | 3341 | if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT |
43c36287 | 3342 | && UINTVAL (trueop0) == GET_MODE_MASK (mode) |
0a67e02c PB |
3343 | && ! side_effects_p (op1)) |
3344 | return op0; | |
cbc9503d | 3345 | canonicalize_shift: |
481683e1 | 3346 | if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1)) |
cbc9503d RS |
3347 | { |
3348 | val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1); | |
3349 | if (val != INTVAL (op1)) | |
3350 | return simplify_gen_binary (code, mode, op0, GEN_INT (val)); | |
3351 | } | |
70233f37 | 3352 | break; |
9d317251 | 3353 | |
0a67e02c | 3354 | case ASHIFT: |
e551ad26 | 3355 | case SS_ASHIFT: |
14c931f1 | 3356 | case US_ASHIFT: |
70233f37 RS |
3357 | if (trueop1 == CONST0_RTX (mode)) |
3358 | return op0; | |
3359 | if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) | |
3360 | return op0; | |
cbc9503d | 3361 | goto canonicalize_shift; |
70233f37 | 3362 | |
0a67e02c | 3363 | case LSHIFTRT: |
3f2960d5 | 3364 | if (trueop1 == CONST0_RTX (mode)) |
0a67e02c | 3365 | return op0; |
3f2960d5 | 3366 | if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) |
0a67e02c | 3367 | return op0; |
70233f37 RS |
3368 | /* Optimize (lshiftrt (clz X) C) as (eq X 0). */ |
3369 | if (GET_CODE (op0) == CLZ | |
481683e1 | 3370 | && CONST_INT_P (trueop1) |
70233f37 | 3371 | && STORE_FLAG_VALUE == 1 |
e40122f0 | 3372 | && INTVAL (trueop1) < (HOST_WIDE_INT)width) |
70233f37 RS |
3373 | { |
3374 | enum machine_mode imode = GET_MODE (XEXP (op0, 0)); | |
3375 | unsigned HOST_WIDE_INT zero_val = 0; | |
3376 | ||
3377 | if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val) | |
5511bc5a | 3378 | && zero_val == GET_MODE_PRECISION (imode) |
70233f37 RS |
3379 | && INTVAL (trueop1) == exact_log2 (zero_val)) |
3380 | return simplify_gen_relational (EQ, mode, imode, | |
3381 | XEXP (op0, 0), const0_rtx); | |
3382 | } | |
cbc9503d | 3383 | goto canonicalize_shift; |
9d317251 | 3384 | |
0a67e02c PB |
3385 | case SMIN: |
3386 | if (width <= HOST_BITS_PER_WIDE_INT | |
2d0c270f | 3387 | && mode_signbit_p (mode, trueop1) |
0a67e02c PB |
3388 | && ! side_effects_p (op0)) |
3389 | return op1; | |
3390 | if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
3391 | return op0; | |
3392 | tem = simplify_associative_operation (code, mode, op0, op1); | |
3393 | if (tem) | |
3394 | return tem; | |
3395 | break; | |
0cedb36c | 3396 | |
0a67e02c PB |
3397 | case SMAX: |
3398 | if (width <= HOST_BITS_PER_WIDE_INT | |
481683e1 | 3399 | && CONST_INT_P (trueop1) |
43c36287 | 3400 | && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1) |
0a67e02c PB |
3401 | && ! side_effects_p (op0)) |
3402 | return op1; | |
3403 | if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
3404 | return op0; | |
3405 | tem = simplify_associative_operation (code, mode, op0, op1); | |
3406 | if (tem) | |
3407 | return tem; | |
3408 | break; | |
0cedb36c | 3409 | |
0a67e02c | 3410 | case UMIN: |
3f2960d5 | 3411 | if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0)) |
0a67e02c PB |
3412 | return op1; |
3413 | if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
3414 | return op0; | |
3415 | tem = simplify_associative_operation (code, mode, op0, op1); | |
3416 | if (tem) | |
3417 | return tem; | |
3418 | break; | |
0cedb36c | 3419 | |
0a67e02c PB |
3420 | case UMAX: |
3421 | if (trueop1 == constm1_rtx && ! side_effects_p (op0)) | |
3422 | return op1; | |
3423 | if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
3424 | return op0; | |
3425 | tem = simplify_associative_operation (code, mode, op0, op1); | |
3426 | if (tem) | |
3427 | return tem; | |
3428 | break; | |
0cedb36c | 3429 | |
0a67e02c PB |
3430 | case SS_PLUS: |
3431 | case US_PLUS: | |
3432 | case SS_MINUS: | |
3433 | case US_MINUS: | |
14c931f1 CF |
3434 | case SS_MULT: |
3435 | case US_MULT: | |
3436 | case SS_DIV: | |
3437 | case US_DIV: | |
0a67e02c PB |
3438 | /* ??? There are simplifications that can be done. */ |
3439 | return 0; | |
0cedb36c | 3440 | |
0a67e02c PB |
3441 | case VEC_SELECT: |
3442 | if (!VECTOR_MODE_P (mode)) | |
3443 | { | |
3444 | gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0))); | |
3445 | gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0))); | |
3446 | gcc_assert (GET_CODE (trueop1) == PARALLEL); | |
3447 | gcc_assert (XVECLEN (trueop1, 0) == 1); | |
481683e1 | 3448 | gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0))); |
0a67e02c PB |
3449 | |
3450 | if (GET_CODE (trueop0) == CONST_VECTOR) | |
3451 | return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP | |
3452 | (trueop1, 0, 0))); | |
7f97f938 UB |
3453 | |
3454 | /* Extract a scalar element from a nested VEC_SELECT expression | |
3455 | (with optional nested VEC_CONCAT expression). Some targets | |
3456 | (i386) extract scalar element from a vector using chain of | |
3457 | nested VEC_SELECT expressions. When input operand is a memory | |
3458 | operand, this operation can be simplified to a simple scalar | |
3459 | load from an offseted memory address. */ | |
3460 | if (GET_CODE (trueop0) == VEC_SELECT) | |
3461 | { | |
3462 | rtx op0 = XEXP (trueop0, 0); | |
3463 | rtx op1 = XEXP (trueop0, 1); | |
3464 | ||
3465 | enum machine_mode opmode = GET_MODE (op0); | |
3466 | int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode)); | |
3467 | int n_elts = GET_MODE_SIZE (opmode) / elt_size; | |
3468 | ||
3469 | int i = INTVAL (XVECEXP (trueop1, 0, 0)); | |
3470 | int elem; | |
3471 | ||
3472 | rtvec vec; | |
3473 | rtx tmp_op, tmp; | |
3474 | ||
3475 | gcc_assert (GET_CODE (op1) == PARALLEL); | |
3476 | gcc_assert (i < n_elts); | |
3477 | ||
3478 | /* Select element, pointed by nested selector. */ | |
3743c639 | 3479 | elem = INTVAL (XVECEXP (op1, 0, i)); |
7f97f938 UB |
3480 | |
3481 | /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */ | |
3482 | if (GET_CODE (op0) == VEC_CONCAT) | |
3483 | { | |
3484 | rtx op00 = XEXP (op0, 0); | |
3485 | rtx op01 = XEXP (op0, 1); | |
3486 | ||
3487 | enum machine_mode mode00, mode01; | |
3488 | int n_elts00, n_elts01; | |
3489 | ||
3490 | mode00 = GET_MODE (op00); | |
3491 | mode01 = GET_MODE (op01); | |
3492 | ||
3493 | /* Find out number of elements of each operand. */ | |
3494 | if (VECTOR_MODE_P (mode00)) | |
3495 | { | |
3496 | elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00)); | |
3497 | n_elts00 = GET_MODE_SIZE (mode00) / elt_size; | |
3498 | } | |
3499 | else | |
3500 | n_elts00 = 1; | |
3501 | ||
3502 | if (VECTOR_MODE_P (mode01)) | |
3503 | { | |
3504 | elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01)); | |
3505 | n_elts01 = GET_MODE_SIZE (mode01) / elt_size; | |
3506 | } | |
3507 | else | |
3508 | n_elts01 = 1; | |
3509 | ||
3510 | gcc_assert (n_elts == n_elts00 + n_elts01); | |
3511 | ||
3512 | /* Select correct operand of VEC_CONCAT | |
3513 | and adjust selector. */ | |
3514 | if (elem < n_elts01) | |
3515 | tmp_op = op00; | |
3516 | else | |
3517 | { | |
3518 | tmp_op = op01; | |
3519 | elem -= n_elts00; | |
3520 | } | |
3521 | } | |
3522 | else | |
3523 | tmp_op = op0; | |
3524 | ||
3525 | vec = rtvec_alloc (1); | |
3526 | RTVEC_ELT (vec, 0) = GEN_INT (elem); | |
3527 | ||
3528 | tmp = gen_rtx_fmt_ee (code, mode, | |
3529 | tmp_op, gen_rtx_PARALLEL (VOIDmode, vec)); | |
3530 | return tmp; | |
3531 | } | |
0e159e0f AP |
3532 | if (GET_CODE (trueop0) == VEC_DUPLICATE |
3533 | && GET_MODE (XEXP (trueop0, 0)) == mode) | |
3534 | return XEXP (trueop0, 0); | |
0a67e02c PB |
3535 | } |
3536 | else | |
3537 | { | |
3538 | gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0))); | |
3539 | gcc_assert (GET_MODE_INNER (mode) | |
3540 | == GET_MODE_INNER (GET_MODE (trueop0))); | |
3541 | gcc_assert (GET_CODE (trueop1) == PARALLEL); | |
0cedb36c | 3542 | |
0a67e02c PB |
3543 | if (GET_CODE (trueop0) == CONST_VECTOR) |
3544 | { | |
3545 | int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); | |
3546 | unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); | |
3547 | rtvec v = rtvec_alloc (n_elts); | |
3548 | unsigned int i; | |
0cedb36c | 3549 | |
0a67e02c PB |
3550 | gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts); |
3551 | for (i = 0; i < n_elts; i++) | |
3552 | { | |
3553 | rtx x = XVECEXP (trueop1, 0, i); | |
0cedb36c | 3554 | |
481683e1 | 3555 | gcc_assert (CONST_INT_P (x)); |
0a67e02c PB |
3556 | RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, |
3557 | INTVAL (x)); | |
0cedb36c JL |
3558 | } |
3559 | ||
0a67e02c | 3560 | return gen_rtx_CONST_VECTOR (mode, v); |
dd61aa98 | 3561 | } |
66c540d2 | 3562 | |
5f6e1c55 MG |
3563 | /* Recognize the identity. */ |
3564 | if (GET_MODE (trueop0) == mode) | |
3565 | { | |
3566 | bool maybe_ident = true; | |
3567 | for (int i = 0; i < XVECLEN (trueop1, 0); i++) | |
3568 | { | |
3569 | rtx j = XVECEXP (trueop1, 0, i); | |
3570 | if (!CONST_INT_P (j) || INTVAL (j) != i) | |
3571 | { | |
3572 | maybe_ident = false; | |
3573 | break; | |
3574 | } | |
3575 | } | |
3576 | if (maybe_ident) | |
3577 | return trueop0; | |
3578 | } | |
3579 | ||
66c540d2 MG |
3580 | /* If we build {a,b} then permute it, build the result directly. */ |
3581 | if (XVECLEN (trueop1, 0) == 2 | |
3582 | && CONST_INT_P (XVECEXP (trueop1, 0, 0)) | |
3583 | && CONST_INT_P (XVECEXP (trueop1, 0, 1)) | |
3584 | && GET_CODE (trueop0) == VEC_CONCAT | |
3585 | && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT | |
3586 | && GET_MODE (XEXP (trueop0, 0)) == mode | |
3587 | && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT | |
3588 | && GET_MODE (XEXP (trueop0, 1)) == mode) | |
3589 | { | |
3590 | unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0)); | |
3591 | unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1)); | |
3592 | rtx subop0, subop1; | |
3593 | ||
3594 | gcc_assert (i0 < 4 && i1 < 4); | |
3595 | subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2); | |
3596 | subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2); | |
3597 | ||
fd9da2c8 MG |
3598 | return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1); |
3599 | } | |
3600 | ||
3601 | if (XVECLEN (trueop1, 0) == 2 | |
3602 | && CONST_INT_P (XVECEXP (trueop1, 0, 0)) | |
3603 | && CONST_INT_P (XVECEXP (trueop1, 0, 1)) | |
3604 | && GET_CODE (trueop0) == VEC_CONCAT | |
3605 | && GET_MODE (trueop0) == mode) | |
3606 | { | |
3607 | unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0)); | |
3608 | unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1)); | |
3609 | rtx subop0, subop1; | |
3610 | ||
3611 | gcc_assert (i0 < 2 && i1 < 2); | |
3612 | subop0 = XEXP (trueop0, i0); | |
3613 | subop1 = XEXP (trueop0, i1); | |
3614 | ||
66c540d2 MG |
3615 | return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1); |
3616 | } | |
0a67e02c | 3617 | } |
bd1ef757 PB |
3618 | |
3619 | if (XVECLEN (trueop1, 0) == 1 | |
481683e1 | 3620 | && CONST_INT_P (XVECEXP (trueop1, 0, 0)) |
bd1ef757 PB |
3621 | && GET_CODE (trueop0) == VEC_CONCAT) |
3622 | { | |
3623 | rtx vec = trueop0; | |
3624 | int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode); | |
3625 | ||
3626 | /* Try to find the element in the VEC_CONCAT. */ | |
3627 | while (GET_MODE (vec) != mode | |
3628 | && GET_CODE (vec) == VEC_CONCAT) | |
3629 | { | |
3630 | HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0))); | |
3631 | if (offset < vec_size) | |
3632 | vec = XEXP (vec, 0); | |
3633 | else | |
3634 | { | |
3635 | offset -= vec_size; | |
3636 | vec = XEXP (vec, 1); | |
3637 | } | |
3638 | vec = avoid_constant_pool_reference (vec); | |
3639 | } | |
3640 | ||
3641 | if (GET_MODE (vec) == mode) | |
3642 | return vec; | |
3643 | } | |
3644 | ||
da694a77 MG |
3645 | /* If we select elements in a vec_merge that all come from the same |
3646 | operand, select from that operand directly. */ | |
3647 | if (GET_CODE (op0) == VEC_MERGE) | |
3648 | { | |
3649 | rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2)); | |
3650 | if (CONST_INT_P (trueop02)) | |
3651 | { | |
3652 | unsigned HOST_WIDE_INT sel = UINTVAL (trueop02); | |
3653 | bool all_operand0 = true; | |
3654 | bool all_operand1 = true; | |
3655 | for (int i = 0; i < XVECLEN (trueop1, 0); i++) | |
3656 | { | |
3657 | rtx j = XVECEXP (trueop1, 0, i); | |
3658 | if (sel & (1 << UINTVAL (j))) | |
3659 | all_operand1 = false; | |
3660 | else | |
3661 | all_operand0 = false; | |
3662 | } | |
3663 | if (all_operand0 && !side_effects_p (XEXP (op0, 1))) | |
3664 | return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1); | |
3665 | if (all_operand1 && !side_effects_p (XEXP (op0, 0))) | |
3666 | return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1); | |
3667 | } | |
3668 | } | |
3669 | ||
0a67e02c PB |
3670 | return 0; |
3671 | case VEC_CONCAT: | |
3672 | { | |
3673 | enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode | |
3674 | ? GET_MODE (trueop0) | |
3675 | : GET_MODE_INNER (mode)); | |
3676 | enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode | |
3677 | ? GET_MODE (trueop1) | |
3678 | : GET_MODE_INNER (mode)); | |
3679 | ||
3680 | gcc_assert (VECTOR_MODE_P (mode)); | |
3681 | gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode) | |
3682 | == GET_MODE_SIZE (mode)); | |
3683 | ||
3684 | if (VECTOR_MODE_P (op0_mode)) | |
3685 | gcc_assert (GET_MODE_INNER (mode) | |
3686 | == GET_MODE_INNER (op0_mode)); | |
3687 | else | |
3688 | gcc_assert (GET_MODE_INNER (mode) == op0_mode); | |
0cedb36c | 3689 | |
0a67e02c PB |
3690 | if (VECTOR_MODE_P (op1_mode)) |
3691 | gcc_assert (GET_MODE_INNER (mode) | |
3692 | == GET_MODE_INNER (op1_mode)); | |
3693 | else | |
3694 | gcc_assert (GET_MODE_INNER (mode) == op1_mode); | |
3695 | ||
3696 | if ((GET_CODE (trueop0) == CONST_VECTOR | |
33ffb5c5 KZ |
3697 | || CONST_SCALAR_INT_P (trueop0) |
3698 | || CONST_DOUBLE_AS_FLOAT_P (trueop0)) | |
0a67e02c | 3699 | && (GET_CODE (trueop1) == CONST_VECTOR |
33ffb5c5 KZ |
3700 | || CONST_SCALAR_INT_P (trueop1) |
3701 | || CONST_DOUBLE_AS_FLOAT_P (trueop1))) | |
0a67e02c PB |
3702 | { |
3703 | int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); | |
3704 | unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); | |
3705 | rtvec v = rtvec_alloc (n_elts); | |
3706 | unsigned int i; | |
3707 | unsigned in_n_elts = 1; | |
c877353c | 3708 | |
0a67e02c PB |
3709 | if (VECTOR_MODE_P (op0_mode)) |
3710 | in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size); | |
3711 | for (i = 0; i < n_elts; i++) | |
3712 | { | |
3713 | if (i < in_n_elts) | |
3714 | { | |
3715 | if (!VECTOR_MODE_P (op0_mode)) | |
3716 | RTVEC_ELT (v, i) = trueop0; | |
3717 | else | |
3718 | RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i); | |
3719 | } | |
3720 | else | |
3721 | { | |
3722 | if (!VECTOR_MODE_P (op1_mode)) | |
3723 | RTVEC_ELT (v, i) = trueop1; | |
3724 | else | |
3725 | RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1, | |
3726 | i - in_n_elts); | |
3727 | } | |
3728 | } | |
0cedb36c | 3729 | |
0a67e02c PB |
3730 | return gen_rtx_CONST_VECTOR (mode, v); |
3731 | } | |
5f6e1c55 | 3732 | |
d08633b4 MG |
3733 | /* Try to merge two VEC_SELECTs from the same vector into a single one. |
3734 | Restrict the transformation to avoid generating a VEC_SELECT with a | |
3735 | mode unrelated to its operand. */ | |
5f6e1c55 MG |
3736 | if (GET_CODE (trueop0) == VEC_SELECT |
3737 | && GET_CODE (trueop1) == VEC_SELECT | |
d08633b4 MG |
3738 | && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)) |
3739 | && GET_MODE (XEXP (trueop0, 0)) == mode) | |
5f6e1c55 MG |
3740 | { |
3741 | rtx par0 = XEXP (trueop0, 1); | |
3742 | rtx par1 = XEXP (trueop1, 1); | |
3743 | int len0 = XVECLEN (par0, 0); | |
3744 | int len1 = XVECLEN (par1, 0); | |
3745 | rtvec vec = rtvec_alloc (len0 + len1); | |
3746 | for (int i = 0; i < len0; i++) | |
3747 | RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i); | |
3748 | for (int i = 0; i < len1; i++) | |
3749 | RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i); | |
3750 | return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0), | |
3751 | gen_rtx_PARALLEL (VOIDmode, vec)); | |
3752 | } | |
0a67e02c PB |
3753 | } |
3754 | return 0; | |
0cedb36c | 3755 | |
0a67e02c PB |
3756 | default: |
3757 | gcc_unreachable (); | |
3758 | } | |
0cedb36c | 3759 | |
0a67e02c PB |
3760 | return 0; |
3761 | } | |
0cedb36c | 3762 | |
0a67e02c PB |
3763 | rtx |
3764 | simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, | |
3765 | rtx op0, rtx op1) | |
3766 | { | |
3767 | HOST_WIDE_INT arg0, arg1, arg0s, arg1s; | |
3768 | HOST_WIDE_INT val; | |
5511bc5a | 3769 | unsigned int width = GET_MODE_PRECISION (mode); |
0cedb36c | 3770 | |
0a67e02c PB |
3771 | if (VECTOR_MODE_P (mode) |
3772 | && code != VEC_CONCAT | |
3773 | && GET_CODE (op0) == CONST_VECTOR | |
3774 | && GET_CODE (op1) == CONST_VECTOR) | |
3775 | { | |
3776 | unsigned n_elts = GET_MODE_NUNITS (mode); | |
3777 | enum machine_mode op0mode = GET_MODE (op0); | |
3778 | unsigned op0_n_elts = GET_MODE_NUNITS (op0mode); | |
3779 | enum machine_mode op1mode = GET_MODE (op1); | |
3780 | unsigned op1_n_elts = GET_MODE_NUNITS (op1mode); | |
3781 | rtvec v = rtvec_alloc (n_elts); | |
3782 | unsigned int i; | |
0cedb36c | 3783 | |
0a67e02c PB |
3784 | gcc_assert (op0_n_elts == n_elts); |
3785 | gcc_assert (op1_n_elts == n_elts); | |
3786 | for (i = 0; i < n_elts; i++) | |
3787 | { | |
3788 | rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode), | |
3789 | CONST_VECTOR_ELT (op0, i), | |
3790 | CONST_VECTOR_ELT (op1, i)); | |
3791 | if (!x) | |
3792 | return 0; | |
3793 | RTVEC_ELT (v, i) = x; | |
3794 | } | |
0cedb36c | 3795 | |
0a67e02c PB |
3796 | return gen_rtx_CONST_VECTOR (mode, v); |
3797 | } | |
0cedb36c | 3798 | |
0a67e02c PB |
3799 | if (VECTOR_MODE_P (mode) |
3800 | && code == VEC_CONCAT | |
33ffb5c5 | 3801 | && (CONST_SCALAR_INT_P (op0) |
48175537 | 3802 | || GET_CODE (op0) == CONST_FIXED |
33ffb5c5 KZ |
3803 | || CONST_DOUBLE_AS_FLOAT_P (op0)) |
3804 | && (CONST_SCALAR_INT_P (op1) | |
3805 | || CONST_DOUBLE_AS_FLOAT_P (op1) | |
d1f0728e | 3806 | || GET_CODE (op1) == CONST_FIXED)) |
0a67e02c PB |
3807 | { |
3808 | unsigned n_elts = GET_MODE_NUNITS (mode); | |
3809 | rtvec v = rtvec_alloc (n_elts); | |
0cedb36c | 3810 | |
0a67e02c PB |
3811 | gcc_assert (n_elts >= 2); |
3812 | if (n_elts == 2) | |
3813 | { | |
3814 | gcc_assert (GET_CODE (op0) != CONST_VECTOR); | |
3815 | gcc_assert (GET_CODE (op1) != CONST_VECTOR); | |
dd61aa98 | 3816 | |
0a67e02c PB |
3817 | RTVEC_ELT (v, 0) = op0; |
3818 | RTVEC_ELT (v, 1) = op1; | |
3819 | } | |
3820 | else | |
3821 | { | |
3822 | unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)); | |
3823 | unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)); | |
3824 | unsigned i; | |
0cedb36c | 3825 | |
0a67e02c PB |
3826 | gcc_assert (GET_CODE (op0) == CONST_VECTOR); |
3827 | gcc_assert (GET_CODE (op1) == CONST_VECTOR); | |
3828 | gcc_assert (op0_n_elts + op1_n_elts == n_elts); | |
0cedb36c | 3829 | |
0a67e02c PB |
3830 | for (i = 0; i < op0_n_elts; ++i) |
3831 | RTVEC_ELT (v, i) = XVECEXP (op0, 0, i); | |
3832 | for (i = 0; i < op1_n_elts; ++i) | |
3833 | RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i); | |
3834 | } | |
0b24db88 | 3835 | |
0a67e02c PB |
3836 | return gen_rtx_CONST_VECTOR (mode, v); |
3837 | } | |
0cedb36c | 3838 | |
3d8bf70f | 3839 | if (SCALAR_FLOAT_MODE_P (mode) |
48175537 KZ |
3840 | && CONST_DOUBLE_AS_FLOAT_P (op0) |
3841 | && CONST_DOUBLE_AS_FLOAT_P (op1) | |
0a67e02c PB |
3842 | && mode == GET_MODE (op0) && mode == GET_MODE (op1)) |
3843 | { | |
3844 | if (code == AND | |
3845 | || code == IOR | |
3846 | || code == XOR) | |
3847 | { | |
3848 | long tmp0[4]; | |
3849 | long tmp1[4]; | |
3850 | REAL_VALUE_TYPE r; | |
3851 | int i; | |
a0ee8b5f | 3852 | |
0a67e02c PB |
3853 | real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0), |
3854 | GET_MODE (op0)); | |
3855 | real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1), | |
3856 | GET_MODE (op1)); | |
3857 | for (i = 0; i < 4; i++) | |
a0ee8b5f | 3858 | { |
0a67e02c PB |
3859 | switch (code) |
3860 | { | |
3861 | case AND: | |
3862 | tmp0[i] &= tmp1[i]; | |
3863 | break; | |
3864 | case IOR: | |
3865 | tmp0[i] |= tmp1[i]; | |
3866 | break; | |
3867 | case XOR: | |
3868 | tmp0[i] ^= tmp1[i]; | |
3869 | break; | |
3870 | default: | |
3871 | gcc_unreachable (); | |
3872 | } | |
a0ee8b5f | 3873 | } |
0a67e02c PB |
3874 | real_from_target (&r, tmp0, mode); |
3875 | return CONST_DOUBLE_FROM_REAL_VALUE (r, mode); | |
3876 | } | |
3877 | else | |
3878 | { | |
3879 | REAL_VALUE_TYPE f0, f1, value, result; | |
3880 | bool inexact; | |
a0ee8b5f | 3881 | |
0a67e02c PB |
3882 | REAL_VALUE_FROM_CONST_DOUBLE (f0, op0); |
3883 | REAL_VALUE_FROM_CONST_DOUBLE (f1, op1); | |
3884 | real_convert (&f0, mode, &f0); | |
3885 | real_convert (&f1, mode, &f1); | |
df62f18a | 3886 | |
0a67e02c PB |
3887 | if (HONOR_SNANS (mode) |
3888 | && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1))) | |
3889 | return 0; | |
0cedb36c | 3890 | |
0a67e02c PB |
3891 | if (code == DIV |
3892 | && REAL_VALUES_EQUAL (f1, dconst0) | |
3893 | && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode))) | |
3894 | return 0; | |
0cedb36c | 3895 | |
0a67e02c PB |
3896 | if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode) |
3897 | && flag_trapping_math | |
3898 | && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1)) | |
0cedb36c | 3899 | { |
0a67e02c PB |
3900 | int s0 = REAL_VALUE_NEGATIVE (f0); |
3901 | int s1 = REAL_VALUE_NEGATIVE (f1); | |
0cedb36c | 3902 | |
0a67e02c | 3903 | switch (code) |
1e9b78b0 | 3904 | { |
0a67e02c PB |
3905 | case PLUS: |
3906 | /* Inf + -Inf = NaN plus exception. */ | |
3907 | if (s0 != s1) | |
3908 | return 0; | |
3909 | break; | |
3910 | case MINUS: | |
3911 | /* Inf - Inf = NaN plus exception. */ | |
3912 | if (s0 == s1) | |
3913 | return 0; | |
3914 | break; | |
3915 | case DIV: | |
3916 | /* Inf / Inf = NaN plus exception. */ | |
3917 | return 0; | |
3918 | default: | |
3919 | break; | |
0cedb36c JL |
3920 | } |
3921 | } | |
0cedb36c | 3922 | |
0a67e02c PB |
3923 | if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode) |
3924 | && flag_trapping_math | |
3925 | && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0)) | |
3926 | || (REAL_VALUE_ISINF (f1) | |
3927 | && REAL_VALUES_EQUAL (f0, dconst0)))) | |
3928 | /* Inf * 0 = NaN plus exception. */ | |
3929 | return 0; | |
852c8ba1 | 3930 | |
0a67e02c PB |
3931 | inexact = real_arithmetic (&value, rtx_to_tree_code (code), |
3932 | &f0, &f1); | |
3933 | real_convert (&result, mode, &value); | |
41374e13 | 3934 | |
68328cda EB |
3935 | /* Don't constant fold this floating point operation if |
3936 | the result has overflowed and flag_trapping_math. */ | |
3937 | ||
3938 | if (flag_trapping_math | |
3939 | && MODE_HAS_INFINITIES (mode) | |
3940 | && REAL_VALUE_ISINF (result) | |
3941 | && !REAL_VALUE_ISINF (f0) | |
3942 | && !REAL_VALUE_ISINF (f1)) | |
3943 | /* Overflow plus exception. */ | |
3944 | return 0; | |
3945 | ||
0a67e02c PB |
3946 | /* Don't constant fold this floating point operation if the |
3947 | result may dependent upon the run-time rounding mode and | |
3948 | flag_rounding_math is set, or if GCC's software emulation | |
3949 | is unable to accurately represent the result. */ | |
852c8ba1 | 3950 | |
0a67e02c | 3951 | if ((flag_rounding_math |
4099e2c2 | 3952 | || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations)) |
0a67e02c PB |
3953 | && (inexact || !real_identical (&result, &value))) |
3954 | return NULL_RTX; | |
d9deed68 | 3955 | |
0a67e02c | 3956 | return CONST_DOUBLE_FROM_REAL_VALUE (result, mode); |
0cedb36c | 3957 | } |
0cedb36c JL |
3958 | } |
3959 | ||
0a67e02c PB |
3960 | /* We can fold some multi-word operations. */ |
3961 | if (GET_MODE_CLASS (mode) == MODE_INT | |
fd7de64c | 3962 | && width == HOST_BITS_PER_DOUBLE_INT |
48175537 KZ |
3963 | && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0)) |
3964 | && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1))) | |
0a67e02c | 3965 | { |
fd7de64c | 3966 | double_int o0, o1, res, tmp; |
9be0ac8c | 3967 | bool overflow; |
0cedb36c | 3968 | |
fd7de64c AS |
3969 | o0 = rtx_to_double_int (op0); |
3970 | o1 = rtx_to_double_int (op1); | |
0cedb36c | 3971 | |
0a67e02c PB |
3972 | switch (code) |
3973 | { | |
3974 | case MINUS: | |
3975 | /* A - B == A + (-B). */ | |
27bcd47c | 3976 | o1 = -o1; |
0cedb36c | 3977 | |
0a67e02c | 3978 | /* Fall through.... */ |
0cedb36c | 3979 | |
0a67e02c | 3980 | case PLUS: |
27bcd47c | 3981 | res = o0 + o1; |
0a67e02c | 3982 | break; |
0cedb36c | 3983 | |
0a67e02c | 3984 | case MULT: |
27bcd47c | 3985 | res = o0 * o1; |
0a67e02c | 3986 | break; |
0cedb36c | 3987 | |
0a67e02c | 3988 | case DIV: |
9be0ac8c LC |
3989 | res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR, |
3990 | &tmp, &overflow); | |
3991 | if (overflow) | |
0a67e02c PB |
3992 | return 0; |
3993 | break; | |
0cedb36c | 3994 | |
0a67e02c | 3995 | case MOD: |
9be0ac8c LC |
3996 | tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR, |
3997 | &res, &overflow); | |
3998 | if (overflow) | |
0a67e02c PB |
3999 | return 0; |
4000 | break; | |
0cedb36c | 4001 | |
0a67e02c | 4002 | case UDIV: |
9be0ac8c LC |
4003 | res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR, |
4004 | &tmp, &overflow); | |
4005 | if (overflow) | |
0a67e02c PB |
4006 | return 0; |
4007 | break; | |
0cedb36c | 4008 | |
0a67e02c | 4009 | case UMOD: |
9be0ac8c LC |
4010 | tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR, |
4011 | &res, &overflow); | |
4012 | if (overflow) | |
0a67e02c PB |
4013 | return 0; |
4014 | break; | |
0cedb36c | 4015 | |
0a67e02c | 4016 | case AND: |
27bcd47c | 4017 | res = o0 & o1; |
0a67e02c | 4018 | break; |
0cedb36c | 4019 | |
0a67e02c | 4020 | case IOR: |
27bcd47c | 4021 | res = o0 | o1; |
0a67e02c | 4022 | break; |
0cedb36c | 4023 | |
0a67e02c | 4024 | case XOR: |
27bcd47c | 4025 | res = o0 ^ o1; |
0a67e02c | 4026 | break; |
0cedb36c | 4027 | |
0a67e02c | 4028 | case SMIN: |
27bcd47c | 4029 | res = o0.smin (o1); |
0a67e02c | 4030 | break; |
0cedb36c | 4031 | |
0a67e02c | 4032 | case SMAX: |
27bcd47c | 4033 | res = o0.smax (o1); |
0a67e02c | 4034 | break; |
0cedb36c | 4035 | |
0a67e02c | 4036 | case UMIN: |
27bcd47c | 4037 | res = o0.umin (o1); |
0a67e02c | 4038 | break; |
0cedb36c | 4039 | |
0a67e02c | 4040 | case UMAX: |
27bcd47c | 4041 | res = o0.umax (o1); |
0a67e02c | 4042 | break; |
0cedb36c | 4043 | |
0a67e02c PB |
4044 | case LSHIFTRT: case ASHIFTRT: |
4045 | case ASHIFT: | |
4046 | case ROTATE: case ROTATERT: | |
fd7de64c AS |
4047 | { |
4048 | unsigned HOST_WIDE_INT cnt; | |
4049 | ||
4050 | if (SHIFT_COUNT_TRUNCATED) | |
aef2b1d1 KZ |
4051 | { |
4052 | o1.high = 0; | |
4053 | o1.low &= GET_MODE_PRECISION (mode) - 1; | |
4054 | } | |
fd7de64c | 4055 | |
27bcd47c LC |
4056 | if (!o1.fits_uhwi () |
4057 | || o1.to_uhwi () >= GET_MODE_PRECISION (mode)) | |
fd7de64c AS |
4058 | return 0; |
4059 | ||
27bcd47c LC |
4060 | cnt = o1.to_uhwi (); |
4061 | unsigned short prec = GET_MODE_PRECISION (mode); | |
fd7de64c AS |
4062 | |
4063 | if (code == LSHIFTRT || code == ASHIFTRT) | |
27bcd47c | 4064 | res = o0.rshift (cnt, prec, code == ASHIFTRT); |
fd7de64c | 4065 | else if (code == ASHIFT) |
27bcd47c | 4066 | res = o0.alshift (cnt, prec); |
fd7de64c | 4067 | else if (code == ROTATE) |
27bcd47c | 4068 | res = o0.lrotate (cnt, prec); |
fd7de64c | 4069 | else /* code == ROTATERT */ |
27bcd47c | 4070 | res = o0.rrotate (cnt, prec); |
fd7de64c | 4071 | } |
0a67e02c | 4072 | break; |
0cedb36c | 4073 | |
0a67e02c PB |
4074 | default: |
4075 | return 0; | |
4076 | } | |
0cedb36c | 4077 | |
fd7de64c | 4078 | return immed_double_int_const (res, mode); |
0a67e02c | 4079 | } |
0cedb36c | 4080 | |
481683e1 | 4081 | if (CONST_INT_P (op0) && CONST_INT_P (op1) |
0a67e02c PB |
4082 | && width <= HOST_BITS_PER_WIDE_INT && width != 0) |
4083 | { | |
4084 | /* Get the integer argument values in two forms: | |
4085 | zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */ | |
0cedb36c | 4086 | |
0a67e02c PB |
4087 | arg0 = INTVAL (op0); |
4088 | arg1 = INTVAL (op1); | |
0cedb36c | 4089 | |
0a67e02c PB |
4090 | if (width < HOST_BITS_PER_WIDE_INT) |
4091 | { | |
2d0c270f BS |
4092 | arg0 &= GET_MODE_MASK (mode); |
4093 | arg1 &= GET_MODE_MASK (mode); | |
0cedb36c | 4094 | |
0a67e02c | 4095 | arg0s = arg0; |
2d0c270f BS |
4096 | if (val_signbit_known_set_p (mode, arg0s)) |
4097 | arg0s |= ~GET_MODE_MASK (mode); | |
4f5c0f7e | 4098 | |
2d0c270f BS |
4099 | arg1s = arg1; |
4100 | if (val_signbit_known_set_p (mode, arg1s)) | |
4101 | arg1s |= ~GET_MODE_MASK (mode); | |
0a67e02c PB |
4102 | } |
4103 | else | |
4104 | { | |
4105 | arg0s = arg0; | |
4106 | arg1s = arg1; | |
4107 | } | |
b8698a0f | 4108 | |
0a67e02c | 4109 | /* Compute the value of the arithmetic. */ |
b8698a0f | 4110 | |
0a67e02c PB |
4111 | switch (code) |
4112 | { | |
4113 | case PLUS: | |
4114 | val = arg0s + arg1s; | |
4115 | break; | |
b8698a0f | 4116 | |
0a67e02c PB |
4117 | case MINUS: |
4118 | val = arg0s - arg1s; | |
4119 | break; | |
b8698a0f | 4120 | |
0a67e02c PB |
4121 | case MULT: |
4122 | val = arg0s * arg1s; | |
4123 | break; | |
b8698a0f | 4124 | |
0a67e02c PB |
4125 | case DIV: |
4126 | if (arg1s == 0 | |
43c36287 EB |
4127 | || ((unsigned HOST_WIDE_INT) arg0s |
4128 | == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
0a67e02c PB |
4129 | && arg1s == -1)) |
4130 | return 0; | |
4131 | val = arg0s / arg1s; | |
4132 | break; | |
b8698a0f | 4133 | |
0a67e02c PB |
4134 | case MOD: |
4135 | if (arg1s == 0 | |
43c36287 EB |
4136 | || ((unsigned HOST_WIDE_INT) arg0s |
4137 | == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
0a67e02c PB |
4138 | && arg1s == -1)) |
4139 | return 0; | |
4140 | val = arg0s % arg1s; | |
4141 | break; | |
b8698a0f | 4142 | |
0a67e02c PB |
4143 | case UDIV: |
4144 | if (arg1 == 0 | |
43c36287 EB |
4145 | || ((unsigned HOST_WIDE_INT) arg0s |
4146 | == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
0a67e02c PB |
4147 | && arg1s == -1)) |
4148 | return 0; | |
4149 | val = (unsigned HOST_WIDE_INT) arg0 / arg1; | |
4150 | break; | |
b8698a0f | 4151 | |
0a67e02c PB |
4152 | case UMOD: |
4153 | if (arg1 == 0 | |
43c36287 EB |
4154 | || ((unsigned HOST_WIDE_INT) arg0s |
4155 | == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
0a67e02c PB |
4156 | && arg1s == -1)) |
4157 | return 0; | |
4158 | val = (unsigned HOST_WIDE_INT) arg0 % arg1; | |
4159 | break; | |
b8698a0f | 4160 | |
0a67e02c PB |
4161 | case AND: |
4162 | val = arg0 & arg1; | |
4163 | break; | |
b8698a0f | 4164 | |
0a67e02c PB |
4165 | case IOR: |
4166 | val = arg0 | arg1; | |
4167 | break; | |
b8698a0f | 4168 | |
0a67e02c PB |
4169 | case XOR: |
4170 | val = arg0 ^ arg1; | |
4171 | break; | |
b8698a0f | 4172 | |
0a67e02c PB |
4173 | case LSHIFTRT: |
4174 | case ASHIFT: | |
4175 | case ASHIFTRT: | |
4176 | /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure | |
4177 | the value is in range. We can't return any old value for | |
4178 | out-of-range arguments because either the middle-end (via | |
4179 | shift_truncation_mask) or the back-end might be relying on | |
4180 | target-specific knowledge. Nor can we rely on | |
4181 | shift_truncation_mask, since the shift might not be part of an | |
4182 | ashlM3, lshrM3 or ashrM3 instruction. */ | |
4183 | if (SHIFT_COUNT_TRUNCATED) | |
d58c1a38 BS |
4184 | arg1 = (unsigned HOST_WIDE_INT) arg1 % width; |
4185 | else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode)) | |
0a67e02c | 4186 | return 0; |
b8698a0f | 4187 | |
0a67e02c PB |
4188 | val = (code == ASHIFT |
4189 | ? ((unsigned HOST_WIDE_INT) arg0) << arg1 | |
4190 | : ((unsigned HOST_WIDE_INT) arg0) >> arg1); | |
b8698a0f | 4191 | |
0a67e02c PB |
4192 | /* Sign-extend the result for arithmetic right shifts. */ |
4193 | if (code == ASHIFTRT && arg0s < 0 && arg1 > 0) | |
43c36287 | 4194 | val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1); |
0a67e02c | 4195 | break; |
b8698a0f | 4196 | |
0a67e02c PB |
4197 | case ROTATERT: |
4198 | if (arg1 < 0) | |
4199 | return 0; | |
b8698a0f | 4200 | |
0a67e02c PB |
4201 | arg1 %= width; |
4202 | val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1)) | |
4203 | | (((unsigned HOST_WIDE_INT) arg0) >> arg1)); | |
4204 | break; | |
b8698a0f | 4205 | |
0a67e02c PB |
4206 | case ROTATE: |
4207 | if (arg1 < 0) | |
4208 | return 0; | |
b8698a0f | 4209 | |
0a67e02c PB |
4210 | arg1 %= width; |
4211 | val = ((((unsigned HOST_WIDE_INT) arg0) << arg1) | |
4212 | | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1))); | |
4213 | break; | |
b8698a0f | 4214 | |
0a67e02c PB |
4215 | case COMPARE: |
4216 | /* Do nothing here. */ | |
4217 | return 0; | |
b8698a0f | 4218 | |
0a67e02c PB |
4219 | case SMIN: |
4220 | val = arg0s <= arg1s ? arg0s : arg1s; | |
4221 | break; | |
b8698a0f | 4222 | |
0a67e02c PB |
4223 | case UMIN: |
4224 | val = ((unsigned HOST_WIDE_INT) arg0 | |
4225 | <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); | |
4226 | break; | |
b8698a0f | 4227 | |
0a67e02c PB |
4228 | case SMAX: |
4229 | val = arg0s > arg1s ? arg0s : arg1s; | |
4230 | break; | |
b8698a0f | 4231 | |
0a67e02c PB |
4232 | case UMAX: |
4233 | val = ((unsigned HOST_WIDE_INT) arg0 | |
4234 | > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); | |
4235 | break; | |
b8698a0f | 4236 | |
0a67e02c PB |
4237 | case SS_PLUS: |
4238 | case US_PLUS: | |
4239 | case SS_MINUS: | |
4240 | case US_MINUS: | |
14c931f1 CF |
4241 | case SS_MULT: |
4242 | case US_MULT: | |
4243 | case SS_DIV: | |
4244 | case US_DIV: | |
e551ad26 | 4245 | case SS_ASHIFT: |
14c931f1 | 4246 | case US_ASHIFT: |
0a67e02c PB |
4247 | /* ??? There are simplifications that can be done. */ |
4248 | return 0; | |
b8698a0f | 4249 | |
0a67e02c PB |
4250 | default: |
4251 | gcc_unreachable (); | |
4252 | } | |
0cedb36c | 4253 | |
bb80db7b | 4254 | return gen_int_mode (val, mode); |
0a67e02c | 4255 | } |
0cedb36c | 4256 | |
0a67e02c | 4257 | return NULL_RTX; |
0cedb36c | 4258 | } |
0a67e02c PB |
4259 | |
4260 | ||
0cedb36c JL |
4261 | \f |
4262 | /* Simplify a PLUS or MINUS, at least one of whose operands may be another | |
4263 | PLUS or MINUS. | |
4264 | ||
4265 | Rather than test for specific case, we do this by a brute-force method | |
4266 | and do all possible simplifications until no more changes occur. Then | |
1941069a | 4267 | we rebuild the operation. */ |
0cedb36c | 4268 | |
9b3bd424 RH |
/* One entry in the operand array built by simplify_plus_minus: the rtx
   for a single addend, plus a flag recording whether that addend enters
   the overall sum negated (i.e. as a subtrahend).  */
struct simplify_plus_minus_op_data
{
  rtx op;    /* The operand itself.  */
  short neg; /* Nonzero if OP is to be subtracted rather than added.  */
};
4274 | ||
7e0b4eae PB |
4275 | static bool |
4276 | simplify_plus_minus_op_data_cmp (rtx x, rtx y) | |
9b3bd424 | 4277 | { |
f805670f | 4278 | int result; |
9b3bd424 | 4279 | |
7e0b4eae PB |
4280 | result = (commutative_operand_precedence (y) |
4281 | - commutative_operand_precedence (x)); | |
f805670f | 4282 | if (result) |
7e0b4eae | 4283 | return result > 0; |
d26cef13 PB |
4284 | |
4285 | /* Group together equal REGs to do more simplification. */ | |
7e0b4eae PB |
4286 | if (REG_P (x) && REG_P (y)) |
4287 | return REGNO (x) > REGNO (y); | |
d26cef13 | 4288 | else |
7e0b4eae | 4289 | return false; |
9b3bd424 RH |
4290 | } |
4291 | ||
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.  CODE is PLUS or MINUS, MODE the mode of the result, and
   OP0/OP1 the two operands.  Returns the simplified rtx, or NULL_RTX when
   no worthwhile simplification was found.

   Rather than testing for specific cases, we flatten the expression into
   an array of (operand, negated) pairs, repeatedly combine pairs, and
   finally rebuild the operation.  */
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      /* Split a nested PLUS/MINUS into two array entries.  The
		 hard limit of 7 leaves one slot free for the NOT case
		 below, which needs to append a constant.  */
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      /* Fold a negation into the entry's NEG flag.  */
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      /* Unwrap (const (plus X Y)) into two constant entries.  */
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      /* Absorb the NEG flag into the constant's value.  */
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* Two or more constants are guaranteed to combine below, so that
     alone counts as a canonicalization.  */
  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      /* Try to combine every pair of live entries.  A combined pair is
	 stored in slot I; slot J is marked dead with NULL_RTX.  */
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    /* Strip CONST wrappers so constant folding can see
		       the underlying expressions.  */
		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    /* Everything is negated; fold the NEG into the first operand.  */
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
4566 | ||
5ac20c1a RS |
4567 | /* Check whether an operand is suitable for calling simplify_plus_minus. */ |
4568 | static bool | |
f7d504c2 | 4569 | plus_minus_operand_p (const_rtx x) |
5ac20c1a RS |
4570 | { |
4571 | return GET_CODE (x) == PLUS | |
4572 | || GET_CODE (x) == MINUS | |
4573 | || (GET_CODE (x) == CONST | |
4574 | && GET_CODE (XEXP (x, 0)) == PLUS | |
4575 | && CONSTANT_P (XEXP (XEXP (x, 0), 0)) | |
4576 | && CONSTANT_P (XEXP (XEXP (x, 0), 1))); | |
4577 | } | |
4578 | ||
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  /* First try to fold the comparison to a constant outcome.  */
  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  /* No target-defined "true" value for a float result.  */
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    /* Build a vector with every element set to the target's
	       "true" flag value.  */
	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* CC-mode and cc0 comparisons cannot be analyzed further here.  */
  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  /* Look through constant-pool references before the real work.  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
4663 | ||
4664 | /* This part of simplify_relational_operation is only used when CMP_MODE | |
4665 | is not in class MODE_CC (i.e. it is a real comparison). | |
4666 | ||
4667 | MODE is the mode of the result, while CMP_MODE specifies in which | |
4668 | mode the comparison is done in, so it is the mode of the operands. */ | |
bc4ad38c ZD |
4669 | |
4670 | static rtx | |
c6fb08ad PB |
4671 | simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode, |
4672 | enum machine_mode cmp_mode, rtx op0, rtx op1) | |
4673 | { | |
bc4ad38c ZD |
4674 | enum rtx_code op0code = GET_CODE (op0); |
4675 | ||
3fa0cacd | 4676 | if (op1 == const0_rtx && COMPARISON_P (op0)) |
c6fb08ad | 4677 | { |
3fa0cacd RS |
4678 | /* If op0 is a comparison, extract the comparison arguments |
4679 | from it. */ | |
4680 | if (code == NE) | |
c6fb08ad | 4681 | { |
3fa0cacd RS |
4682 | if (GET_MODE (op0) == mode) |
4683 | return simplify_rtx (op0); | |
4684 | else | |
4685 | return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode, | |
4686 | XEXP (op0, 0), XEXP (op0, 1)); | |
4687 | } | |
4688 | else if (code == EQ) | |
4689 | { | |
4690 | enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX); | |
4691 | if (new_code != UNKNOWN) | |
4692 | return simplify_gen_relational (new_code, mode, VOIDmode, | |
4693 | XEXP (op0, 0), XEXP (op0, 1)); | |
4694 | } | |
4695 | } | |
4696 | ||
1d1eb80c BS |
4697 | /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to |
4698 | (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */ | |
4699 | if ((code == LTU || code == GEU) | |
4700 | && GET_CODE (op0) == PLUS | |
481683e1 | 4701 | && CONST_INT_P (XEXP (op0, 1)) |
1d1eb80c | 4702 | && (rtx_equal_p (op1, XEXP (op0, 0)) |
5352ea68 AP |
4703 | || rtx_equal_p (op1, XEXP (op0, 1))) |
4704 | /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */ | |
4705 | && XEXP (op0, 1) != const0_rtx) | |
1d1eb80c BS |
4706 | { |
4707 | rtx new_cmp | |
4708 | = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode); | |
4709 | return simplify_gen_relational ((code == LTU ? GEU : LTU), mode, | |
4710 | cmp_mode, XEXP (op0, 0), new_cmp); | |
4711 | } | |
4712 | ||
921c4418 RIL |
4713 | /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */ |
4714 | if ((code == LTU || code == GEU) | |
4715 | && GET_CODE (op0) == PLUS | |
cf369845 HPN |
4716 | && rtx_equal_p (op1, XEXP (op0, 1)) |
4717 | /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */ | |
4718 | && !rtx_equal_p (op1, XEXP (op0, 0))) | |
4942b76b JJ |
4719 | return simplify_gen_relational (code, mode, cmp_mode, op0, |
4720 | copy_rtx (XEXP (op0, 0))); | |
921c4418 | 4721 | |
3fa0cacd RS |
4722 | if (op1 == const0_rtx) |
4723 | { | |
4724 | /* Canonicalize (GTU x 0) as (NE x 0). */ | |
4725 | if (code == GTU) | |
4726 | return simplify_gen_relational (NE, mode, cmp_mode, op0, op1); | |
4727 | /* Canonicalize (LEU x 0) as (EQ x 0). */ | |
4728 | if (code == LEU) | |
4729 | return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1); | |
4730 | } | |
4731 | else if (op1 == const1_rtx) | |
4732 | { | |
4733 | switch (code) | |
4734 | { | |
4735 | case GE: | |
4736 | /* Canonicalize (GE x 1) as (GT x 0). */ | |
4737 | return simplify_gen_relational (GT, mode, cmp_mode, | |
4738 | op0, const0_rtx); | |
4739 | case GEU: | |
4740 | /* Canonicalize (GEU x 1) as (NE x 0). */ | |
4741 | return simplify_gen_relational (NE, mode, cmp_mode, | |
4742 | op0, const0_rtx); | |
4743 | case LT: | |
4744 | /* Canonicalize (LT x 1) as (LE x 0). */ | |
4745 | return simplify_gen_relational (LE, mode, cmp_mode, | |
4746 | op0, const0_rtx); | |
4747 | case LTU: | |
4748 | /* Canonicalize (LTU x 1) as (EQ x 0). */ | |
4749 | return simplify_gen_relational (EQ, mode, cmp_mode, | |
4750 | op0, const0_rtx); | |
4751 | default: | |
4752 | break; | |
c6fb08ad PB |
4753 | } |
4754 | } | |
3fa0cacd RS |
4755 | else if (op1 == constm1_rtx) |
4756 | { | |
4757 | /* Canonicalize (LE x -1) as (LT x 0). */ | |
4758 | if (code == LE) | |
4759 | return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx); | |
4760 | /* Canonicalize (GT x -1) as (GE x 0). */ | |
4761 | if (code == GT) | |
4762 | return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx); | |
4763 | } | |
0cedb36c | 4764 | |
bc4ad38c ZD |
4765 | /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */ |
4766 | if ((code == EQ || code == NE) | |
4767 | && (op0code == PLUS || op0code == MINUS) | |
4768 | && CONSTANT_P (op1) | |
551a3297 RH |
4769 | && CONSTANT_P (XEXP (op0, 1)) |
4770 | && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations)) | |
bc4ad38c ZD |
4771 | { |
4772 | rtx x = XEXP (op0, 0); | |
4773 | rtx c = XEXP (op0, 1); | |
d303c992 CLT |
4774 | enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS; |
4775 | rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c); | |
4776 | ||
4777 | /* Detect an infinite recursive condition, where we oscillate at this | |
4778 | simplification case between: | |
4779 | A + B == C <---> C - B == A, | |
4780 | where A, B, and C are all constants with non-simplifiable expressions, | |
4781 | usually SYMBOL_REFs. */ | |
4782 | if (GET_CODE (tem) == invcode | |
4783 | && CONSTANT_P (x) | |
4784 | && rtx_equal_p (c, XEXP (tem, 1))) | |
4785 | return NULL_RTX; | |
4786 | ||
4787 | return simplify_gen_relational (code, mode, cmp_mode, x, tem); | |
bc4ad38c ZD |
4788 | } |
4789 | ||
1419a885 RS |
4790 | /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is |
4791 | the same as (zero_extract:SI FOO (const_int 1) BAR). */ | |
4792 | if (code == NE | |
4793 | && op1 == const0_rtx | |
4794 | && GET_MODE_CLASS (mode) == MODE_INT | |
4795 | && cmp_mode != VOIDmode | |
61961eff RS |
4796 | /* ??? Work-around BImode bugs in the ia64 backend. */ |
4797 | && mode != BImode | |
f8eacd97 | 4798 | && cmp_mode != BImode |
1419a885 RS |
4799 | && nonzero_bits (op0, cmp_mode) == 1 |
4800 | && STORE_FLAG_VALUE == 1) | |
f8eacd97 RS |
4801 | return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode) |
4802 | ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode) | |
4803 | : lowpart_subreg (mode, op0, cmp_mode); | |
1419a885 | 4804 | |
5484a3c3 RS |
4805 | /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */ |
4806 | if ((code == EQ || code == NE) | |
4807 | && op1 == const0_rtx | |
4808 | && op0code == XOR) | |
4809 | return simplify_gen_relational (code, mode, cmp_mode, | |
4810 | XEXP (op0, 0), XEXP (op0, 1)); | |
4811 | ||
4d49d44d | 4812 | /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */ |
5484a3c3 RS |
4813 | if ((code == EQ || code == NE) |
4814 | && op0code == XOR | |
4815 | && rtx_equal_p (XEXP (op0, 0), op1) | |
4d49d44d KH |
4816 | && !side_effects_p (XEXP (op0, 0))) |
4817 | return simplify_gen_relational (code, mode, cmp_mode, | |
4818 | XEXP (op0, 1), const0_rtx); | |
4819 | ||
4820 | /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */ | |
5484a3c3 RS |
4821 | if ((code == EQ || code == NE) |
4822 | && op0code == XOR | |
4823 | && rtx_equal_p (XEXP (op0, 1), op1) | |
4d49d44d KH |
4824 | && !side_effects_p (XEXP (op0, 1))) |
4825 | return simplify_gen_relational (code, mode, cmp_mode, | |
4826 | XEXP (op0, 0), const0_rtx); | |
5484a3c3 RS |
4827 | |
4828 | /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */ | |
4829 | if ((code == EQ || code == NE) | |
4830 | && op0code == XOR | |
33ffb5c5 KZ |
4831 | && CONST_SCALAR_INT_P (op1) |
4832 | && CONST_SCALAR_INT_P (XEXP (op0, 1))) | |
5484a3c3 RS |
4833 | return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0), |
4834 | simplify_gen_binary (XOR, cmp_mode, | |
4835 | XEXP (op0, 1), op1)); | |
4836 | ||
b17c024f EB |
4837 | /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */ |
4838 | if ((code == EQ || code == NE) | |
4839 | && GET_CODE (op0) == BSWAP | |
a8c50132 | 4840 | && CONST_SCALAR_INT_P (op1)) |
b17c024f EB |
4841 | return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0), |
4842 | simplify_gen_unary (BSWAP, cmp_mode, | |
4843 | op1, cmp_mode)); | |
4844 | ||
4845 | /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */ | |
4846 | if ((code == EQ || code == NE) | |
4847 | && GET_CODE (op0) == BSWAP | |
4848 | && GET_CODE (op1) == BSWAP) | |
4849 | return simplify_gen_relational (code, mode, cmp_mode, | |
4850 | XEXP (op0, 0), XEXP (op1, 0)); | |
4851 | ||
69fce32f RS |
4852 | if (op0code == POPCOUNT && op1 == const0_rtx) |
4853 | switch (code) | |
4854 | { | |
4855 | case EQ: | |
4856 | case LE: | |
4857 | case LEU: | |
4858 | /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */ | |
4859 | return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)), | |
4860 | XEXP (op0, 0), const0_rtx); | |
4861 | ||
4862 | case NE: | |
4863 | case GT: | |
4864 | case GTU: | |
4865 | /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */ | |
2376c58f | 4866 | return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)), |
69fce32f RS |
4867 | XEXP (op0, 0), const0_rtx); |
4868 | ||
4869 | default: | |
4870 | break; | |
4871 | } | |
4872 | ||
c6fb08ad PB |
4873 | return NULL_RTX; |
4874 | } | |
4875 | ||
b8698a0f | 4876 | enum |
39641489 | 4877 | { |
a567207e PB |
4878 | CMP_EQ = 1, |
4879 | CMP_LT = 2, | |
4880 | CMP_GT = 4, | |
4881 | CMP_LTU = 8, | |
4882 | CMP_GTU = 16 | |
39641489 PB |
4883 | }; |
4884 | ||
4885 | ||
4886 | /* Convert the known results for EQ, LT, GT, LTU, GTU contained in | |
4887 | KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE | |
b8698a0f | 4888 | For KNOWN_RESULT to make sense it should be either CMP_EQ, or the |
a567207e | 4889 | logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU). |
39641489 PB |
4890 | For floating-point comparisons, assume that the operands were ordered. */ |
4891 | ||
4892 | static rtx | |
4893 | comparison_result (enum rtx_code code, int known_results) | |
4894 | { | |
39641489 PB |
4895 | switch (code) |
4896 | { | |
4897 | case EQ: | |
4898 | case UNEQ: | |
a567207e | 4899 | return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx; |
39641489 PB |
4900 | case NE: |
4901 | case LTGT: | |
a567207e | 4902 | return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx; |
39641489 PB |
4903 | |
4904 | case LT: | |
4905 | case UNLT: | |
a567207e | 4906 | return (known_results & CMP_LT) ? const_true_rtx : const0_rtx; |
39641489 PB |
4907 | case GE: |
4908 | case UNGE: | |
a567207e | 4909 | return (known_results & CMP_LT) ? const0_rtx : const_true_rtx; |
39641489 PB |
4910 | |
4911 | case GT: | |
4912 | case UNGT: | |
a567207e | 4913 | return (known_results & CMP_GT) ? const_true_rtx : const0_rtx; |
39641489 PB |
4914 | case LE: |
4915 | case UNLE: | |
a567207e | 4916 | return (known_results & CMP_GT) ? const0_rtx : const_true_rtx; |
39641489 PB |
4917 | |
4918 | case LTU: | |
a567207e | 4919 | return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx; |
39641489 | 4920 | case GEU: |
a567207e | 4921 | return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx; |
39641489 PB |
4922 | |
4923 | case GTU: | |
a567207e | 4924 | return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx; |
39641489 | 4925 | case LEU: |
a567207e | 4926 | return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx; |
39641489 PB |
4927 | |
4928 | case ORDERED: | |
4929 | return const_true_rtx; | |
4930 | case UNORDERED: | |
4931 | return const0_rtx; | |
4932 | default: | |
4933 | gcc_unreachable (); | |
4934 | } | |
4935 | } | |
4936 | ||
c6fb08ad PB |
4937 | /* Check if the given comparison (done in the given MODE) is actually a |
4938 | tautology or a contradiction. | |
7ce3e360 RS |
4939 | If no simplification is possible, this function returns zero. |
4940 | Otherwise, it returns either const_true_rtx or const0_rtx. */ | |
0cedb36c JL |
4941 | |
4942 | rtx | |
7ce3e360 RS |
4943 | simplify_const_relational_operation (enum rtx_code code, |
4944 | enum machine_mode mode, | |
4945 | rtx op0, rtx op1) | |
0cedb36c | 4946 | { |
0cedb36c | 4947 | rtx tem; |
4ba5f925 JH |
4948 | rtx trueop0; |
4949 | rtx trueop1; | |
0cedb36c | 4950 | |
41374e13 NS |
4951 | gcc_assert (mode != VOIDmode |
4952 | || (GET_MODE (op0) == VOIDmode | |
4953 | && GET_MODE (op1) == VOIDmode)); | |
47b1e19b | 4954 | |
0cedb36c JL |
4955 | /* If op0 is a compare, extract the comparison arguments from it. */ |
4956 | if (GET_CODE (op0) == COMPARE && op1 == const0_rtx) | |
5b5dc475 UW |
4957 | { |
4958 | op1 = XEXP (op0, 1); | |
4959 | op0 = XEXP (op0, 0); | |
4960 | ||
4961 | if (GET_MODE (op0) != VOIDmode) | |
4962 | mode = GET_MODE (op0); | |
4963 | else if (GET_MODE (op1) != VOIDmode) | |
4964 | mode = GET_MODE (op1); | |
4965 | else | |
4966 | return 0; | |
4967 | } | |
0cedb36c JL |
4968 | |
4969 | /* We can't simplify MODE_CC values since we don't know what the | |
4970 | actual comparison is. */ | |
8beccec8 | 4971 | if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0)) |
0cedb36c JL |
4972 | return 0; |
4973 | ||
52a75c3c | 4974 | /* Make sure the constant is second. */ |
9ce79a7a | 4975 | if (swap_commutative_operands_p (op0, op1)) |
52a75c3c RH |
4976 | { |
4977 | tem = op0, op0 = op1, op1 = tem; | |
4978 | code = swap_condition (code); | |
4979 | } | |
4980 | ||
9ce79a7a RS |
4981 | trueop0 = avoid_constant_pool_reference (op0); |
4982 | trueop1 = avoid_constant_pool_reference (op1); | |
4983 | ||
0cedb36c JL |
4984 | /* For integer comparisons of A and B maybe we can simplify A - B and can |
4985 | then simplify a comparison of that with zero. If A and B are both either | |
4986 | a register or a CONST_INT, this can't help; testing for these cases will | |
4987 | prevent infinite recursion here and speed things up. | |
4988 | ||
e0d0c193 RG |
4989 | We can only do this for EQ and NE comparisons as otherwise we may |
4990 | lose or introduce overflow which we cannot disregard as undefined as | |
4991 | we do not know the signedness of the operation on either the left or | |
4992 | the right hand side of the comparison. */ | |
0cedb36c | 4993 | |
e0d0c193 RG |
4994 | if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx |
4995 | && (code == EQ || code == NE) | |
481683e1 SZ |
4996 | && ! ((REG_P (op0) || CONST_INT_P (trueop0)) |
4997 | && (REG_P (op1) || CONST_INT_P (trueop1))) | |
0cedb36c | 4998 | && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1)) |
e0d0c193 RG |
4999 | /* We cannot do this if tem is a nonzero address. */ |
5000 | && ! nonzero_address_p (tem)) | |
7ce3e360 RS |
5001 | return simplify_const_relational_operation (signed_condition (code), |
5002 | mode, tem, const0_rtx); | |
0cedb36c | 5003 | |
bdbb0460 | 5004 | if (! HONOR_NANS (mode) && code == ORDERED) |
1f36a2dd JH |
5005 | return const_true_rtx; |
5006 | ||
bdbb0460 | 5007 | if (! HONOR_NANS (mode) && code == UNORDERED) |
1f36a2dd JH |
5008 | return const0_rtx; |
5009 | ||
71925bc0 | 5010 | /* For modes without NaNs, if the two operands are equal, we know the |
39641489 PB |
5011 | result except if they have side-effects. Even with NaNs we know |
5012 | the result of unordered comparisons and, if signaling NaNs are | |
5013 | irrelevant, also the result of LT/GT/LTGT. */ | |
5014 | if ((! HONOR_NANS (GET_MODE (trueop0)) | |
5015 | || code == UNEQ || code == UNLE || code == UNGE | |
5016 | || ((code == LT || code == GT || code == LTGT) | |
5017 | && ! HONOR_SNANS (GET_MODE (trueop0)))) | |
8821d091 EB |
5018 | && rtx_equal_p (trueop0, trueop1) |
5019 | && ! side_effects_p (trueop0)) | |
a567207e | 5020 | return comparison_result (code, CMP_EQ); |
0cedb36c JL |
5021 | |
5022 | /* If the operands are floating-point constants, see if we can fold | |
5023 | the result. */ | |
48175537 KZ |
5024 | if (CONST_DOUBLE_AS_FLOAT_P (trueop0) |
5025 | && CONST_DOUBLE_AS_FLOAT_P (trueop1) | |
39641489 | 5026 | && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0))) |
0cedb36c | 5027 | { |
15e5ad76 | 5028 | REAL_VALUE_TYPE d0, d1; |
0cedb36c | 5029 | |
15e5ad76 ZW |
5030 | REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0); |
5031 | REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1); | |
90a74703 | 5032 | |
1eeeb6a4 | 5033 | /* Comparisons are unordered iff at least one of the values is NaN. */ |
15e5ad76 | 5034 | if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1)) |
90a74703 JH |
5035 | switch (code) |
5036 | { | |
5037 | case UNEQ: | |
5038 | case UNLT: | |
5039 | case UNGT: | |
5040 | case UNLE: | |
5041 | case UNGE: | |
5042 | case NE: | |
5043 | case UNORDERED: | |
5044 | return const_true_rtx; | |
5045 | case EQ: | |
5046 | case LT: | |
5047 | case GT: | |
5048 | case LE: | |
5049 | case GE: | |
5050 | case LTGT: | |
5051 | case ORDERED: | |
5052 | return const0_rtx; | |
5053 | default: | |
5054 | return 0; | |
5055 | } | |
0cedb36c | 5056 | |
39641489 | 5057 | return comparison_result (code, |
a567207e PB |
5058 | (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ : |
5059 | REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT)); | |
0cedb36c | 5060 | } |
0cedb36c JL |
5061 | |
5062 | /* Otherwise, see if the operands are both integers. */ | |
39641489 | 5063 | if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode) |
48175537 KZ |
5064 | && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0)) |
5065 | && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1))) | |
0cedb36c | 5066 | { |
5511bc5a | 5067 | int width = GET_MODE_PRECISION (mode); |
0cedb36c JL |
5068 | HOST_WIDE_INT l0s, h0s, l1s, h1s; |
5069 | unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u; | |
5070 | ||
5071 | /* Get the two words comprising each integer constant. */ | |
48175537 | 5072 | if (CONST_DOUBLE_AS_INT_P (trueop0)) |
0cedb36c | 5073 | { |
4ba5f925 JH |
5074 | l0u = l0s = CONST_DOUBLE_LOW (trueop0); |
5075 | h0u = h0s = CONST_DOUBLE_HIGH (trueop0); | |
0cedb36c JL |
5076 | } |
5077 | else | |
5078 | { | |
4ba5f925 | 5079 | l0u = l0s = INTVAL (trueop0); |
ba34d877 | 5080 | h0u = h0s = HWI_SIGN_EXTEND (l0s); |
0cedb36c | 5081 | } |
786de7eb | 5082 | |
48175537 | 5083 | if (CONST_DOUBLE_AS_INT_P (trueop1)) |
0cedb36c | 5084 | { |
4ba5f925 JH |
5085 | l1u = l1s = CONST_DOUBLE_LOW (trueop1); |
5086 | h1u = h1s = CONST_DOUBLE_HIGH (trueop1); | |
0cedb36c JL |
5087 | } |
5088 | else | |
5089 | { | |
4ba5f925 | 5090 | l1u = l1s = INTVAL (trueop1); |
ba34d877 | 5091 | h1u = h1s = HWI_SIGN_EXTEND (l1s); |
0cedb36c JL |
5092 | } |
5093 | ||
5094 | /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT, | |
5095 | we have to sign or zero-extend the values. */ | |
0cedb36c JL |
5096 | if (width != 0 && width < HOST_BITS_PER_WIDE_INT) |
5097 | { | |
2d0c270f BS |
5098 | l0u &= GET_MODE_MASK (mode); |
5099 | l1u &= GET_MODE_MASK (mode); | |
0cedb36c | 5100 | |
2d0c270f BS |
5101 | if (val_signbit_known_set_p (mode, l0s)) |
5102 | l0s |= ~GET_MODE_MASK (mode); | |
0cedb36c | 5103 | |
2d0c270f BS |
5104 | if (val_signbit_known_set_p (mode, l1s)) |
5105 | l1s |= ~GET_MODE_MASK (mode); | |
0cedb36c | 5106 | } |
d4f1c1fa RH |
5107 | if (width != 0 && width <= HOST_BITS_PER_WIDE_INT) |
5108 | h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s); | |
0cedb36c | 5109 | |
39641489 | 5110 | if (h0u == h1u && l0u == l1u) |
a567207e | 5111 | return comparison_result (code, CMP_EQ); |
39641489 PB |
5112 | else |
5113 | { | |
5114 | int cr; | |
a567207e PB |
5115 | cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT; |
5116 | cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU; | |
5117 | return comparison_result (code, cr); | |
39641489 | 5118 | } |
0cedb36c JL |
5119 | } |
5120 | ||
39641489 | 5121 | /* Optimize comparisons with upper and lower bounds. */ |
46c9550f | 5122 | if (HWI_COMPUTABLE_MODE_P (mode) |
481683e1 | 5123 | && CONST_INT_P (trueop1)) |
0cedb36c | 5124 | { |
39641489 PB |
5125 | int sign; |
5126 | unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode); | |
5127 | HOST_WIDE_INT val = INTVAL (trueop1); | |
5128 | HOST_WIDE_INT mmin, mmax; | |
5129 | ||
5130 | if (code == GEU | |
5131 | || code == LEU | |
5132 | || code == GTU | |
5133 | || code == LTU) | |
5134 | sign = 0; | |
5135 | else | |
5136 | sign = 1; | |
0aea6467 | 5137 | |
39641489 PB |
5138 | /* Get a reduced range if the sign bit is zero. */ |
5139 | if (nonzero <= (GET_MODE_MASK (mode) >> 1)) | |
5140 | { | |
5141 | mmin = 0; | |
5142 | mmax = nonzero; | |
5143 | } | |
5144 | else | |
5145 | { | |
5146 | rtx mmin_rtx, mmax_rtx; | |
a567207e | 5147 | get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx); |
39641489 | 5148 | |
dc7c279e JJ |
5149 | mmin = INTVAL (mmin_rtx); |
5150 | mmax = INTVAL (mmax_rtx); | |
5151 | if (sign) | |
5152 | { | |
5153 | unsigned int sign_copies = num_sign_bit_copies (trueop0, mode); | |
5154 | ||
5155 | mmin >>= (sign_copies - 1); | |
5156 | mmax >>= (sign_copies - 1); | |
5157 | } | |
0aea6467 ZD |
5158 | } |
5159 | ||
0cedb36c JL |
5160 | switch (code) |
5161 | { | |
39641489 PB |
5162 | /* x >= y is always true for y <= mmin, always false for y > mmax. */ |
5163 | case GEU: | |
5164 | if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin) | |
5165 | return const_true_rtx; | |
5166 | if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax) | |
5167 | return const0_rtx; | |
5168 | break; | |
5169 | case GE: | |
5170 | if (val <= mmin) | |
5171 | return const_true_rtx; | |
5172 | if (val > mmax) | |
5173 | return const0_rtx; | |
5174 | break; | |
5175 | ||
5176 | /* x <= y is always true for y >= mmax, always false for y < mmin. */ | |
5177 | case LEU: | |
5178 | if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax) | |
5179 | return const_true_rtx; | |
5180 | if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin) | |
5181 | return const0_rtx; | |
5182 | break; | |
5183 | case LE: | |
5184 | if (val >= mmax) | |
5185 | return const_true_rtx; | |
5186 | if (val < mmin) | |
5187 | return const0_rtx; | |
5188 | break; | |
5189 | ||
0cedb36c | 5190 | case EQ: |
39641489 PB |
5191 | /* x == y is always false for y out of range. */ |
5192 | if (val < mmin || val > mmax) | |
5193 | return const0_rtx; | |
5194 | break; | |
5195 | ||
5196 | /* x > y is always false for y >= mmax, always true for y < mmin. */ | |
5197 | case GTU: | |
5198 | if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax) | |
5199 | return const0_rtx; | |
5200 | if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin) | |
5201 | return const_true_rtx; | |
5202 | break; | |
5203 | case GT: | |
5204 | if (val >= mmax) | |
5205 | return const0_rtx; | |
5206 | if (val < mmin) | |
5207 | return const_true_rtx; | |
5208 | break; | |
5209 | ||
5210 | /* x < y is always false for y <= mmin, always true for y > mmax. */ | |
5211 | case LTU: | |
5212 | if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin) | |
5213 | return const0_rtx; | |
5214 | if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax) | |
5215 | return const_true_rtx; | |
5216 | break; | |
5217 | case LT: | |
5218 | if (val <= mmin) | |
0cedb36c | 5219 | return const0_rtx; |
39641489 PB |
5220 | if (val > mmax) |
5221 | return const_true_rtx; | |
0cedb36c JL |
5222 | break; |
5223 | ||
5224 | case NE: | |
39641489 PB |
5225 | /* x != y is always true for y out of range. */ |
5226 | if (val < mmin || val > mmax) | |
0cedb36c JL |
5227 | return const_true_rtx; |
5228 | break; | |
5229 | ||
39641489 PB |
5230 | default: |
5231 | break; | |
5232 | } | |
5233 | } | |
5234 | ||
5235 | /* Optimize integer comparisons with zero. */ | |
5236 | if (trueop1 == const0_rtx) | |
5237 | { | |
5238 | /* Some addresses are known to be nonzero. We don't know | |
a567207e | 5239 | their sign, but equality comparisons are known. */ |
39641489 | 5240 | if (nonzero_address_p (trueop0)) |
a567207e | 5241 | { |
39641489 PB |
5242 | if (code == EQ || code == LEU) |
5243 | return const0_rtx; | |
5244 | if (code == NE || code == GTU) | |
5245 | return const_true_rtx; | |
a567207e | 5246 | } |
39641489 PB |
5247 | |
5248 | /* See if the first operand is an IOR with a constant. If so, we | |
5249 | may be able to determine the result of this comparison. */ | |
5250 | if (GET_CODE (op0) == IOR) | |
a567207e | 5251 | { |
39641489 | 5252 | rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1)); |
481683e1 | 5253 | if (CONST_INT_P (inner_const) && inner_const != const0_rtx) |
39641489 | 5254 | { |
5511bc5a | 5255 | int sign_bitnum = GET_MODE_PRECISION (mode) - 1; |
a567207e | 5256 | int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum |
43c36287 EB |
5257 | && (UINTVAL (inner_const) |
5258 | & ((unsigned HOST_WIDE_INT) 1 | |
5259 | << sign_bitnum))); | |
a567207e PB |
5260 | |
5261 | switch (code) | |
5262 | { | |
5263 | case EQ: | |
39641489 | 5264 | case LEU: |
a567207e PB |
5265 | return const0_rtx; |
5266 | case NE: | |
39641489 | 5267 | case GTU: |
a567207e PB |
5268 | return const_true_rtx; |
5269 | case LT: | |
5270 | case LE: | |
5271 | if (has_sign) | |
5272 | return const_true_rtx; | |
5273 | break; | |
5274 | case GT: | |
39641489 | 5275 | case GE: |
a567207e PB |
5276 | if (has_sign) |
5277 | return const0_rtx; | |
5278 | break; | |
5279 | default: | |
5280 | break; | |
5281 | } | |
5282 | } | |
39641489 PB |
5283 | } |
5284 | } | |
5285 | ||
5286 | /* Optimize comparison of ABS with zero. */ | |
5287 | if (trueop1 == CONST0_RTX (mode) | |
5288 | && (GET_CODE (trueop0) == ABS | |
5289 | || (GET_CODE (trueop0) == FLOAT_EXTEND | |
5290 | && GET_CODE (XEXP (trueop0, 0)) == ABS))) | |
5291 | { | |
5292 | switch (code) | |
5293 | { | |
0da65b89 RS |
5294 | case LT: |
5295 | /* Optimize abs(x) < 0.0. */ | |
39641489 | 5296 | if (!HONOR_SNANS (mode) |
eeef0e45 ILT |
5297 | && (!INTEGRAL_MODE_P (mode) |
5298 | || (!flag_wrapv && !flag_trapv && flag_strict_overflow))) | |
0da65b89 | 5299 | { |
39641489 PB |
5300 | if (INTEGRAL_MODE_P (mode) |
5301 | && (issue_strict_overflow_warning | |
5302 | (WARN_STRICT_OVERFLOW_CONDITIONAL))) | |
5303 | warning (OPT_Wstrict_overflow, | |
5304 | ("assuming signed overflow does not occur when " | |
5305 | "assuming abs (x) < 0 is false")); | |
5306 | return const0_rtx; | |
0da65b89 RS |
5307 | } |
5308 | break; | |
5309 | ||
5310 | case GE: | |
5311 | /* Optimize abs(x) >= 0.0. */ | |
39641489 | 5312 | if (!HONOR_NANS (mode) |
eeef0e45 ILT |
5313 | && (!INTEGRAL_MODE_P (mode) |
5314 | || (!flag_wrapv && !flag_trapv && flag_strict_overflow))) | |
0da65b89 | 5315 | { |
39641489 PB |
5316 | if (INTEGRAL_MODE_P (mode) |
5317 | && (issue_strict_overflow_warning | |
5318 | (WARN_STRICT_OVERFLOW_CONDITIONAL))) | |
5319 | warning (OPT_Wstrict_overflow, | |
5320 | ("assuming signed overflow does not occur when " | |
5321 | "assuming abs (x) >= 0 is true")); | |
5322 | return const_true_rtx; | |
0da65b89 RS |
5323 | } |
5324 | break; | |
5325 | ||
8d90f9c0 GK |
5326 | case UNGE: |
5327 | /* Optimize ! (abs(x) < 0.0). */ | |
39641489 | 5328 | return const_true_rtx; |
46c5ad27 | 5329 | |
0cedb36c JL |
5330 | default: |
5331 | break; | |
5332 | } | |
0cedb36c JL |
5333 | } |
5334 | ||
39641489 | 5335 | return 0; |
0cedb36c JL |
5336 | } |
5337 | \f | |
5338 | /* Simplify CODE, an operation with result mode MODE and three operands, | |
5339 | OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became | |
5340 | a constant. Return 0 if no simplifications is possible. */ | |
5341 | ||
5342 | rtx | |
46c5ad27 AJ |
5343 | simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, |
5344 | enum machine_mode op0_mode, rtx op0, rtx op1, | |
5345 | rtx op2) | |
0cedb36c | 5346 | { |
5511bc5a | 5347 | unsigned int width = GET_MODE_PRECISION (mode); |
5c822194 | 5348 | bool any_change = false; |
da694a77 | 5349 | rtx tem, trueop2; |
0cedb36c JL |
5350 | |
5351 | /* VOIDmode means "infinite" precision. */ | |
5352 | if (width == 0) | |
5353 | width = HOST_BITS_PER_WIDE_INT; | |
5354 | ||
5355 | switch (code) | |
5356 | { | |
1b1562a5 | 5357 | case FMA: |
5c822194 RH |
5358 | /* Simplify negations around the multiplication. */ |
5359 | /* -a * -b + c => a * b + c. */ | |
5360 | if (GET_CODE (op0) == NEG) | |
5361 | { | |
5362 | tem = simplify_unary_operation (NEG, mode, op1, mode); | |
5363 | if (tem) | |
5364 | op1 = tem, op0 = XEXP (op0, 0), any_change = true; | |
5365 | } | |
5366 | else if (GET_CODE (op1) == NEG) | |
5367 | { | |
5368 | tem = simplify_unary_operation (NEG, mode, op0, mode); | |
5369 | if (tem) | |
5370 | op0 = tem, op1 = XEXP (op1, 0), any_change = true; | |
5371 | } | |
5372 | ||
5373 | /* Canonicalize the two multiplication operands. */ | |
5374 | /* a * -b + c => -b * a + c. */ | |
5375 | if (swap_commutative_operands_p (op0, op1)) | |
5376 | tem = op0, op0 = op1, op1 = tem, any_change = true; | |
5377 | ||
5378 | if (any_change) | |
5379 | return gen_rtx_FMA (mode, op0, op1, op2); | |
1b1562a5 MM |
5380 | return NULL_RTX; |
5381 | ||
0cedb36c JL |
5382 | case SIGN_EXTRACT: |
5383 | case ZERO_EXTRACT: | |
481683e1 SZ |
5384 | if (CONST_INT_P (op0) |
5385 | && CONST_INT_P (op1) | |
5386 | && CONST_INT_P (op2) | |
d882fe51 | 5387 | && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width) |
f9e158c3 | 5388 | && width <= (unsigned) HOST_BITS_PER_WIDE_INT) |
0cedb36c JL |
5389 | { |
5390 | /* Extracting a bit-field from a constant */ | |
43c36287 | 5391 | unsigned HOST_WIDE_INT val = UINTVAL (op0); |
5511bc5a BS |
5392 | HOST_WIDE_INT op1val = INTVAL (op1); |
5393 | HOST_WIDE_INT op2val = INTVAL (op2); | |
0cedb36c | 5394 | if (BITS_BIG_ENDIAN) |
5511bc5a | 5395 | val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val; |
0cedb36c | 5396 | else |
5511bc5a | 5397 | val >>= op2val; |
0cedb36c | 5398 | |
5511bc5a | 5399 | if (HOST_BITS_PER_WIDE_INT != op1val) |
0cedb36c JL |
5400 | { |
5401 | /* First zero-extend. */ | |
5511bc5a | 5402 | val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1; |
0cedb36c JL |
5403 | /* If desired, propagate sign bit. */ |
5404 | if (code == SIGN_EXTRACT | |
5511bc5a | 5405 | && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1))) |
43c36287 | 5406 | != 0) |
5511bc5a | 5407 | val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1); |
0cedb36c JL |
5408 | } |
5409 | ||
449ecb09 | 5410 | return gen_int_mode (val, mode); |
0cedb36c JL |
5411 | } |
5412 | break; | |
5413 | ||
5414 | case IF_THEN_ELSE: | |
481683e1 | 5415 | if (CONST_INT_P (op0)) |
0cedb36c JL |
5416 | return op0 != const0_rtx ? op1 : op2; |
5417 | ||
31f0f571 RS |
5418 | /* Convert c ? a : a into "a". */ |
5419 | if (rtx_equal_p (op1, op2) && ! side_effects_p (op0)) | |
0cedb36c | 5420 | return op1; |
31f0f571 RS |
5421 | |
5422 | /* Convert a != b ? a : b into "a". */ | |
5423 | if (GET_CODE (op0) == NE | |
5424 | && ! side_effects_p (op0) | |
5425 | && ! HONOR_NANS (mode) | |
5426 | && ! HONOR_SIGNED_ZEROS (mode) | |
5427 | && ((rtx_equal_p (XEXP (op0, 0), op1) | |
5428 | && rtx_equal_p (XEXP (op0, 1), op2)) | |
5429 | || (rtx_equal_p (XEXP (op0, 0), op2) | |
5430 | && rtx_equal_p (XEXP (op0, 1), op1)))) | |
5431 | return op1; | |
5432 | ||
5433 | /* Convert a == b ? a : b into "b". */ | |
5434 | if (GET_CODE (op0) == EQ | |
5435 | && ! side_effects_p (op0) | |
5436 | && ! HONOR_NANS (mode) | |
5437 | && ! HONOR_SIGNED_ZEROS (mode) | |
5438 | && ((rtx_equal_p (XEXP (op0, 0), op1) | |
5439 | && rtx_equal_p (XEXP (op0, 1), op2)) | |
5440 | || (rtx_equal_p (XEXP (op0, 0), op2) | |
5441 | && rtx_equal_p (XEXP (op0, 1), op1)))) | |
0cedb36c | 5442 | return op2; |
31f0f571 | 5443 | |
ec8e098d | 5444 | if (COMPARISON_P (op0) && ! side_effects_p (op0)) |
0cedb36c | 5445 | { |
47b1e19b JH |
5446 | enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode |
5447 | ? GET_MODE (XEXP (op0, 1)) | |
5448 | : GET_MODE (XEXP (op0, 0))); | |
3e882897 | 5449 | rtx temp; |
a774e06e RH |
5450 | |
5451 | /* Look for happy constants in op1 and op2. */ | |
481683e1 | 5452 | if (CONST_INT_P (op1) && CONST_INT_P (op2)) |
a774e06e RH |
5453 | { |
5454 | HOST_WIDE_INT t = INTVAL (op1); | |
5455 | HOST_WIDE_INT f = INTVAL (op2); | |
786de7eb | 5456 | |
a774e06e RH |
5457 | if (t == STORE_FLAG_VALUE && f == 0) |
5458 | code = GET_CODE (op0); | |
261efdef JH |
5459 | else if (t == 0 && f == STORE_FLAG_VALUE) |
5460 | { | |
5461 | enum rtx_code tmp; | |
5462 | tmp = reversed_comparison_code (op0, NULL_RTX); | |
5463 | if (tmp == UNKNOWN) | |
5464 | break; | |
5465 | code = tmp; | |
5466 | } | |
a774e06e RH |
5467 | else |
5468 | break; | |
5469 | ||
77306e3e | 5470 | return simplify_gen_relational (code, mode, cmp_mode, |
c6fb08ad PB |
5471 | XEXP (op0, 0), XEXP (op0, 1)); |
5472 | } | |
5473 | ||
5474 | if (cmp_mode == VOIDmode) | |
5475 | cmp_mode = op0_mode; | |
5476 | temp = simplify_relational_operation (GET_CODE (op0), op0_mode, | |
5477 | cmp_mode, XEXP (op0, 0), | |
5478 | XEXP (op0, 1)); | |
5479 | ||
5480 | /* See if any simplifications were possible. */ | |
5481 | if (temp) | |
5482 | { | |
481683e1 | 5483 | if (CONST_INT_P (temp)) |
c6fb08ad PB |
5484 | return temp == const0_rtx ? op2 : op1; |
5485 | else if (temp) | |
5486 | return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2); | |
a774e06e | 5487 | } |
0cedb36c JL |
5488 | } |
5489 | break; | |
31f0f571 | 5490 | |
d9deed68 | 5491 | case VEC_MERGE: |
41374e13 NS |
5492 | gcc_assert (GET_MODE (op0) == mode); |
5493 | gcc_assert (GET_MODE (op1) == mode); | |
5494 | gcc_assert (VECTOR_MODE_P (mode)); | |
da694a77 MG |
5495 | trueop2 = avoid_constant_pool_reference (op2); |
5496 | if (CONST_INT_P (trueop2)) | |
d9deed68 | 5497 | { |
da694a77 | 5498 | int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); |
d9deed68 | 5499 | unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); |
da694a77 MG |
5500 | unsigned HOST_WIDE_INT sel = UINTVAL (trueop2); |
5501 | unsigned HOST_WIDE_INT mask; | |
5502 | if (n_elts == HOST_BITS_PER_WIDE_INT) | |
5503 | mask = -1; | |
5504 | else | |
5505 | mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1; | |
d9deed68 | 5506 | |
da694a77 | 5507 | if (!(sel & mask) && !side_effects_p (op0)) |
852c8ba1 | 5508 | return op1; |
da694a77 | 5509 | if ((sel & mask) == mask && !side_effects_p (op1)) |
852c8ba1 JH |
5510 | return op0; |
5511 | ||
da694a77 MG |
5512 | rtx trueop0 = avoid_constant_pool_reference (op0); |
5513 | rtx trueop1 = avoid_constant_pool_reference (op1); | |
5514 | if (GET_CODE (trueop0) == CONST_VECTOR | |
5515 | && GET_CODE (trueop1) == CONST_VECTOR) | |
852c8ba1 JH |
5516 | { |
5517 | rtvec v = rtvec_alloc (n_elts); | |
5518 | unsigned int i; | |
5519 | ||
5520 | for (i = 0; i < n_elts; i++) | |
da694a77 MG |
5521 | RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i)) |
5522 | ? CONST_VECTOR_ELT (trueop0, i) | |
5523 | : CONST_VECTOR_ELT (trueop1, i)); | |
852c8ba1 JH |
5524 | return gen_rtx_CONST_VECTOR (mode, v); |
5525 | } | |
da694a77 MG |
5526 | |
5527 | /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n) | |
5528 | if no element from a appears in the result. */ | |
5529 | if (GET_CODE (op0) == VEC_MERGE) | |
5530 | { | |
5531 | tem = avoid_constant_pool_reference (XEXP (op0, 2)); | |
5532 | if (CONST_INT_P (tem)) | |
5533 | { | |
5534 | unsigned HOST_WIDE_INT sel0 = UINTVAL (tem); | |
5535 | if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0))) | |
5536 | return simplify_gen_ternary (code, mode, mode, | |
5537 | XEXP (op0, 1), op1, op2); | |
5538 | if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1))) | |
5539 | return simplify_gen_ternary (code, mode, mode, | |
5540 | XEXP (op0, 0), op1, op2); | |
5541 | } | |
5542 | } | |
5543 | if (GET_CODE (op1) == VEC_MERGE) | |
5544 | { | |
5545 | tem = avoid_constant_pool_reference (XEXP (op1, 2)); | |
5546 | if (CONST_INT_P (tem)) | |
5547 | { | |
5548 | unsigned HOST_WIDE_INT sel1 = UINTVAL (tem); | |
5549 | if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0))) | |
5550 | return simplify_gen_ternary (code, mode, mode, | |
5551 | op0, XEXP (op1, 1), op2); | |
5552 | if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1))) | |
5553 | return simplify_gen_ternary (code, mode, mode, | |
5554 | op0, XEXP (op1, 0), op2); | |
5555 | } | |
5556 | } | |
d9deed68 | 5557 | } |
da694a77 MG |
5558 | |
5559 | if (rtx_equal_p (op0, op1) | |
5560 | && !side_effects_p (op2) && !side_effects_p (op1)) | |
5561 | return op0; | |
5562 | ||
d9deed68 | 5563 | break; |
0cedb36c JL |
5564 | |
5565 | default: | |
41374e13 | 5566 | gcc_unreachable (); |
0cedb36c JL |
5567 | } |
5568 | ||
5569 | return 0; | |
5570 | } | |
5571 | ||
14c931f1 CF |
5572 | /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED |
5573 | or CONST_VECTOR, | |
5574 | returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR. | |
eea50aa0 | 5575 | |
550d1387 GK |
5576 | Works by unpacking OP into a collection of 8-bit values |
5577 | represented as a little-endian array of 'unsigned char', selecting by BYTE, | |
5578 | and then repacking them again for OUTERMODE. */ | |
eea50aa0 | 5579 | |
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  /* Scratch buffer holding the constant, one VALUE_BIT-sized chunk per
     byte, in target memory order.  */
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      /* A scalar constant is treated as a single element covering the
	 whole buffer.  */
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* VOIDmode CONST_DOUBLE is a double-word integer; extend
		 with the sign of the high word beyond two words.  */
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* A floating-point CONST_DOUBLE: let real_to_target
		 serialize it, then re-spread the 32-bit words.  */
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      /* NOTE(review): bits beyond the double-word value are
		 zero-filled, unlike the sign-extension done for
		 CONST_INT/CONST_DOUBLE above.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT);

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      /* Wider than two host words cannot be represented here.  */
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    /* Fixed-point modes: rebuild the double-word payload
	       directly.  */
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			     << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  /* The byte offset must be aligned to the outer mode and lie inside OP.  */
  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  /* Identity subreg.  */
  if (outermode == innermode && !byte)
    return op;

  /* Constants fold by explicit bit-level unpack/repack.  */
  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* Same correction for the inner subreg if it is itself
	 paradoxical.  */
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  /* Keep the promoted-variable markings when the new subreg is
	     still a lowpart of the (promoted) inner register.  */
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grog partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      /* Select the half of the CONCAT the byte offset falls into.  */
      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      /* Give up if the requested piece straddles both halves.  */
      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits that the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  /* A lowpart SUBREG to a narrower scalar integer mode acts as a
     truncation; try the truncation simplifier.  */
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
550d1387 | 6108 | |
949c5d62 JH |
6109 | /* Make a SUBREG operation or equivalent if it folds. */ |
6110 | ||
6111 | rtx | |
46c5ad27 AJ |
6112 | simplify_gen_subreg (enum machine_mode outermode, rtx op, |
6113 | enum machine_mode innermode, unsigned int byte) | |
949c5d62 | 6114 | { |
53ed1a12 | 6115 | rtx newx; |
949c5d62 | 6116 | |
53ed1a12 BI |
6117 | newx = simplify_subreg (outermode, op, innermode, byte); |
6118 | if (newx) | |
6119 | return newx; | |
949c5d62 | 6120 | |
4f1da2e9 RS |
6121 | if (GET_CODE (op) == SUBREG |
6122 | || GET_CODE (op) == CONCAT | |
6123 | || GET_MODE (op) == VOIDmode) | |
949c5d62 JH |
6124 | return NULL_RTX; |
6125 | ||
beb72684 RH |
6126 | if (validate_subreg (outermode, innermode, op, byte)) |
6127 | return gen_rtx_SUBREG (outermode, op, byte); | |
6128 | ||
6129 | return NULL_RTX; | |
949c5d62 | 6130 | } |
beb72684 | 6131 | |
0cedb36c JL |
6132 | /* Simplify X, an rtx expression. |
6133 | ||
6134 | Return the simplified expression or NULL if no simplifications | |
6135 | were possible. | |
6136 | ||
6137 | This is the preferred entry point into the simplification routines; | |
6138 | however, we still allow passes to call the more specific routines. | |
6139 | ||
14b493d6 | 6140 | Right now GCC has three (yes, three) major bodies of RTL simplification |
0cedb36c JL |
6141 | code that need to be unified. |
6142 | ||
6143 | 1. fold_rtx in cse.c. This code uses various CSE specific | |
6144 | information to aid in RTL simplification. | |
6145 | ||
6146 | 2. simplify_rtx in combine.c. Similar to fold_rtx, except that | |
6147 | it uses combine specific information to aid in RTL | |
6148 | simplification. | |
6149 | ||
6150 | 3. The routines in this file. | |
6151 | ||
6152 | ||
6153 | Long term we want to only have one body of simplification code; to | |
6154 | get to that state I recommend the following steps: | |
6155 | ||
6156 | 1. Pour over fold_rtx & simplify_rtx and move any simplifications | |
6157 | which are not pass dependent state into these routines. | |
6158 | ||
6159 | 2. As code is moved by #1, change fold_rtx & simplify_rtx to | |
6160 | use this routine whenever possible. | |
6161 | ||
6162 | 3. Allow for pass dependent state to be provided to these | |
6163 | routines and add simplifications based on the pass dependent | |
6164 | state. Remove code from cse.c & combine.c that becomes | |
6165 | redundant/dead. | |
6166 | ||
6167 | It will take time, but ultimately the compiler will be easier to | |
6168 | maintain and improve. It's totally silly that when we add a | |
6169 | simplification that it needs to be added to 4 places (3 for RTL | |
6170 | simplification and 1 for tree simplification. */ | |
786de7eb | 6171 | |
0cedb36c | 6172 | rtx |
58f9752a | 6173 | simplify_rtx (const_rtx x) |
0cedb36c | 6174 | { |
58f9752a KG |
6175 | const enum rtx_code code = GET_CODE (x); |
6176 | const enum machine_mode mode = GET_MODE (x); | |
0cedb36c JL |
6177 | |
6178 | switch (GET_RTX_CLASS (code)) | |
6179 | { | |
ec8e098d | 6180 | case RTX_UNARY: |
0cedb36c JL |
6181 | return simplify_unary_operation (code, mode, |
6182 | XEXP (x, 0), GET_MODE (XEXP (x, 0))); | |
ec8e098d | 6183 | case RTX_COMM_ARITH: |
df0afdbe | 6184 | if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))) |
cf6bcbd0 | 6185 | return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0)); |
b42abad8 | 6186 | |
2b72593e | 6187 | /* Fall through.... */ |
b42abad8 | 6188 | |
ec8e098d | 6189 | case RTX_BIN_ARITH: |
0cedb36c JL |
6190 | return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1)); |
6191 | ||
ec8e098d PB |
6192 | case RTX_TERNARY: |
6193 | case RTX_BITFIELD_OPS: | |
0cedb36c | 6194 | return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)), |
d9c695ff RK |
6195 | XEXP (x, 0), XEXP (x, 1), |
6196 | XEXP (x, 2)); | |
0cedb36c | 6197 | |
ec8e098d PB |
6198 | case RTX_COMPARE: |
6199 | case RTX_COMM_COMPARE: | |
c6fb08ad PB |
6200 | return simplify_relational_operation (code, mode, |
6201 | ((GET_MODE (XEXP (x, 0)) | |
6202 | != VOIDmode) | |
6203 | ? GET_MODE (XEXP (x, 0)) | |
6204 | : GET_MODE (XEXP (x, 1))), | |
6205 | XEXP (x, 0), | |
6206 | XEXP (x, 1)); | |
d41ba56f | 6207 | |
ec8e098d | 6208 | case RTX_EXTRA: |
949c5d62 | 6209 | if (code == SUBREG) |
e2561558 RS |
6210 | return simplify_subreg (mode, SUBREG_REG (x), |
6211 | GET_MODE (SUBREG_REG (x)), | |
6212 | SUBREG_BYTE (x)); | |
d41ba56f RS |
6213 | break; |
6214 | ||
ec8e098d | 6215 | case RTX_OBJ: |
d41ba56f RS |
6216 | if (code == LO_SUM) |
6217 | { | |
6218 | /* Convert (lo_sum (high FOO) FOO) to FOO. */ | |
6219 | if (GET_CODE (XEXP (x, 0)) == HIGH | |
6220 | && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1))) | |
6221 | return XEXP (x, 1); | |
6222 | } | |
6223 | break; | |
6224 | ||
0cedb36c | 6225 | default: |
d41ba56f | 6226 | break; |
0cedb36c | 6227 | } |
d41ba56f | 6228 | return NULL; |
0cedb36c | 6229 | } |