1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
38
39 /* Simplification and canonicalization of RTL. */
40
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
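/* For example, HWI_SIGN_EXTEND applied to a negative low value such as
   (HOST_WIDE_INT) -5 yields -1 (all ones), and applied to a nonnegative
   value yields 0, giving the high half of the (low, high) pair.  */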
47
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
61 \f
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
66 {
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
68 }
69
    70 /* Test whether expression X is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
72
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
75 {
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
78
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
81
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
85
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 else if (width <= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x)
91 && CONST_DOUBLE_LOW (x) == 0)
92 {
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
95 }
96 else
97 /* FIXME: We don't yet have a representation for wider modes. */
98 return false;
99
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
103 }
104
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
108
109 bool
110 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
111 {
112 unsigned int width;
113
114 if (GET_MODE_CLASS (mode) != MODE_INT)
115 return false;
116
117 width = GET_MODE_PRECISION (mode);
118 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
119 return false;
120
121 val &= GET_MODE_MASK (mode);
122 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
123 }
124
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
127 bool
128 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
129 {
130 unsigned int width;
131
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
134
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
138
139 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
140 return val != 0;
141 }
142
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
147 {
148 unsigned int width;
149
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
152
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
156
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val == 0;
159 }
160 \f
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
163
164 rtx
165 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
166 rtx op1)
167 {
168 rtx tem;
169
170 /* If this simplifies, do it. */
171 tem = simplify_binary_operation (code, mode, op0, op1);
172 if (tem)
173 return tem;
174
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0, op1))
178 tem = op0, op0 = op1, op1 = tem;
179
180 return gen_rtx_fmt_ee (code, mode, op0, op1);
181 }
182 \f
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
185 rtx
186 avoid_constant_pool_reference (rtx x)
187 {
188 rtx c, tmp, addr;
189 enum machine_mode cmode;
190 HOST_WIDE_INT offset = 0;
191
192 switch (GET_CODE (x))
193 {
194 case MEM:
195 break;
196
197 case FLOAT_EXTEND:
198 /* Handle float extensions of constant pool references. */
199 tmp = XEXP (x, 0);
200 c = avoid_constant_pool_reference (tmp);
201 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
202 {
203 REAL_VALUE_TYPE d;
204
205 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
207 }
208 return x;
209
210 default:
211 return x;
212 }
213
214 if (GET_MODE (x) == BLKmode)
215 return x;
216
217 addr = XEXP (x, 0);
218
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr = targetm.delegitimize_address (addr);
221
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr) == CONST
224 && GET_CODE (XEXP (addr, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
226 {
227 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
228 addr = XEXP (XEXP (addr, 0), 0);
229 }
230
231 if (GET_CODE (addr) == LO_SUM)
232 addr = XEXP (addr, 1);
233
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr))
238 {
239 c = get_pool_constant (addr);
240 cmode = get_pool_mode (addr);
241
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if ((offset != 0 || cmode != GET_MODE (x))
246 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
247 {
248 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
249 if (tem && CONSTANT_P (tem))
250 return tem;
251 }
252 else
253 return c;
254 }
255
256 return x;
257 }
258 \f
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
262
263 rtx
264 delegitimize_mem_from_attrs (rtx x)
265 {
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
271 {
272 tree decl = MEM_EXPR (x);
273 enum machine_mode mode = GET_MODE (x);
274 HOST_WIDE_INT offset = 0;
275
276 switch (TREE_CODE (decl))
277 {
278 default:
279 decl = NULL;
280 break;
281
282 case VAR_DECL:
283 break;
284
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
292 {
293 HOST_WIDE_INT bitsize, bitpos;
294 tree toffset;
295 int unsignedp, volatilep = 0;
296
297 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
298 &mode, &unsignedp, &volatilep, false);
299 if (bitsize != GET_MODE_BITSIZE (mode)
300 || (bitpos % BITS_PER_UNIT)
301 || (toffset && !host_integerp (toffset, 0)))
302 decl = NULL;
303 else
304 {
305 offset += bitpos / BITS_PER_UNIT;
306 if (toffset)
307 offset += TREE_INT_CST_LOW (toffset);
308 }
309 break;
310 }
311 }
312
313 if (decl
314 && mode == GET_MODE (x)
315 && TREE_CODE (decl) == VAR_DECL
316 && (TREE_STATIC (decl)
317 || DECL_THREAD_LOCAL_P (decl))
318 && DECL_RTL_SET_P (decl)
319 && MEM_P (DECL_RTL (decl)))
320 {
321 rtx newx;
322
323 offset += MEM_OFFSET (x);
324
325 newx = DECL_RTL (decl);
326
327 if (MEM_P (newx))
328 {
329 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
330
331 /* Avoid creating a new MEM needlessly if we already had
332 the same address. We do if there's no OFFSET and the
333 old address X is identical to NEWX, or if X is of the
334 form (plus NEWX OFFSET), or the NEWX is of the form
335 (plus Y (const_int Z)) and X is that with the offset
336 added: (plus Y (const_int Z+OFFSET)). */
337 if (!((offset == 0
338 || (GET_CODE (o) == PLUS
339 && GET_CODE (XEXP (o, 1)) == CONST_INT
340 && (offset == INTVAL (XEXP (o, 1))
341 || (GET_CODE (n) == PLUS
342 && GET_CODE (XEXP (n, 1)) == CONST_INT
343 && (INTVAL (XEXP (n, 1)) + offset
344 == INTVAL (XEXP (o, 1)))
345 && (n = XEXP (n, 0))))
346 && (o = XEXP (o, 0))))
347 && rtx_equal_p (o, n)))
348 x = adjust_address_nv (newx, mode, offset);
349 }
350 else if (GET_MODE (x) == GET_MODE (newx)
351 && offset == 0)
352 x = newx;
353 }
354 }
355
356 return x;
357 }
358 \f
359 /* Make a unary operation by first seeing if it folds and otherwise making
360 the specified operation. */
361
362 rtx
363 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
364 enum machine_mode op_mode)
365 {
366 rtx tem;
367
368 /* If this simplifies, use it. */
369 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
370 return tem;
371
372 return gen_rtx_fmt_e (code, mode, op);
373 }
374
375 /* Likewise for ternary operations. */
376
377 rtx
378 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
379 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
380 {
381 rtx tem;
382
383 /* If this simplifies, use it. */
384 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
385 op0, op1, op2)))
386 return tem;
387
388 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
389 }
390
391 /* Likewise, for relational operations.
392 CMP_MODE specifies mode comparison is done in. */
393
394 rtx
395 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode cmp_mode, rtx op0, rtx op1)
397 {
398 rtx tem;
399
400 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
401 op0, op1)))
402 return tem;
403
404 return gen_rtx_fmt_ee (code, mode, op0, op1);
405 }
406 \f
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408 and simplify the result. If FN is non-NULL, call this callback on each
   409    X; if it returns non-NULL, replace X with its return value and simplify the
410 result. */
411
412 rtx
413 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
414 rtx (*fn) (rtx, const_rtx, void *), void *data)
415 {
416 enum rtx_code code = GET_CODE (x);
417 enum machine_mode mode = GET_MODE (x);
418 enum machine_mode op_mode;
419 const char *fmt;
420 rtx op0, op1, op2, newx, op;
421 rtvec vec, newvec;
422 int i, j;
423
424 if (__builtin_expect (fn != NULL, 0))
425 {
426 newx = fn (x, old_rtx, data);
427 if (newx)
428 return newx;
429 }
430 else if (rtx_equal_p (x, old_rtx))
431 return copy_rtx ((rtx) data);
432
433 switch (GET_RTX_CLASS (code))
434 {
435 case RTX_UNARY:
436 op0 = XEXP (x, 0);
437 op_mode = GET_MODE (op0);
438 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
439 if (op0 == XEXP (x, 0))
440 return x;
441 return simplify_gen_unary (code, mode, op0, op_mode);
442
443 case RTX_BIN_ARITH:
444 case RTX_COMM_ARITH:
445 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
446 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
447 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
448 return x;
449 return simplify_gen_binary (code, mode, op0, op1);
450
451 case RTX_COMPARE:
452 case RTX_COMM_COMPARE:
453 op0 = XEXP (x, 0);
454 op1 = XEXP (x, 1);
455 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
459 return x;
460 return simplify_gen_relational (code, mode, op_mode, op0, op1);
461
462 case RTX_TERNARY:
463 case RTX_BITFIELD_OPS:
464 op0 = XEXP (x, 0);
465 op_mode = GET_MODE (op0);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
469 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
470 return x;
471 if (op_mode == VOIDmode)
472 op_mode = GET_MODE (op0);
473 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
474
475 case RTX_EXTRA:
476 if (code == SUBREG)
477 {
478 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
479 if (op0 == SUBREG_REG (x))
480 return x;
481 op0 = simplify_gen_subreg (GET_MODE (x), op0,
482 GET_MODE (SUBREG_REG (x)),
483 SUBREG_BYTE (x));
484 return op0 ? op0 : x;
485 }
486 break;
487
488 case RTX_OBJ:
489 if (code == MEM)
490 {
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 if (op0 == XEXP (x, 0))
493 return x;
494 return replace_equiv_address_nv (x, op0);
495 }
496 else if (code == LO_SUM)
497 {
498 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
499 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
500
501 /* (lo_sum (high x) x) -> x */
502 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
503 return op1;
504
505 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
506 return x;
507 return gen_rtx_LO_SUM (mode, op0, op1);
508 }
509 break;
510
511 default:
512 break;
513 }
514
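  /* Handle the remaining operands generically: walk each 'e' and 'E'
     slot, and lazily copy X (and any contained rtvec) the first time a
     recursive replacement actually changes something, so unchanged
     subexpressions stay shared.  */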
515 newx = x;
516 fmt = GET_RTX_FORMAT (code);
517 for (i = 0; fmt[i]; i++)
518 switch (fmt[i])
519 {
520 case 'E':
521 vec = XVEC (x, i);
522 newvec = XVEC (newx, i);
523 for (j = 0; j < GET_NUM_ELEM (vec); j++)
524 {
525 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
526 old_rtx, fn, data);
527 if (op != RTVEC_ELT (vec, j))
528 {
529 if (newvec == vec)
530 {
531 newvec = shallow_copy_rtvec (vec);
532 if (x == newx)
533 newx = shallow_copy_rtx (x);
534 XVEC (newx, i) = newvec;
535 }
536 RTVEC_ELT (newvec, j) = op;
537 }
538 }
539 break;
540
541 case 'e':
542 if (XEXP (x, i))
543 {
544 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
545 if (op != XEXP (x, i))
546 {
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XEXP (newx, i) = op;
550 }
551 }
552 break;
553 }
554 return newx;
555 }
556
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558 resulting RTX. Return a new RTX which is as simplified as possible. */
559
560 rtx
561 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
562 {
563 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
564 }
565 \f
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567 Only handle cases where the truncated value is inherently an rvalue.
568
569 RTL provides two ways of truncating a value:
570
571 1. a lowpart subreg. This form is only a truncation when both
572 the outer and inner modes (here MODE and OP_MODE respectively)
573 are scalar integers, and only then when the subreg is used as
574 an rvalue.
575
576 It is only valid to form such truncating subregs if the
577 truncation requires no action by the target. The onus for
578 proving this is on the creator of the subreg -- e.g. the
579 caller to simplify_subreg or simplify_gen_subreg -- and typically
580 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
581
582 2. a TRUNCATE. This form handles both scalar and compound integers.
583
584 The first form is preferred where valid. However, the TRUNCATE
585 handling in simplify_unary_operation turns the second form into the
586 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587 so it is generally safe to form rvalue truncations using:
588
589 simplify_gen_unary (TRUNCATE, ...)
590
591 and leave simplify_unary_operation to work out which representation
592 should be used.
593
594 Because of the proof requirements on (1), simplify_truncation must
595 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596 regardless of whether the outer truncation came from a SUBREG or a
597 TRUNCATE. For example, if the caller has proven that an SImode
598 truncation of:
599
600 (and:DI X Y)
601
602 is a no-op and can be represented as a subreg, it does not follow
603 that SImode truncations of X and Y are also no-ops. On a target
604 like 64-bit MIPS that requires SImode values to be stored in
605 sign-extended form, an SImode truncation of:
606
607 (and:DI (reg:DI X) (const_int 63))
608
609 is trivially a no-op because only the lower 6 bits can be set.
610 However, X is still an arbitrary 64-bit number and so we cannot
611 assume that truncating it too is a no-op. */
612
613 static rtx
614 simplify_truncation (enum machine_mode mode, rtx op,
615 enum machine_mode op_mode)
616 {
617 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
618 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
619 gcc_assert (precision <= op_precision);
620
621 /* Optimize truncations of zero and sign extended values. */
622 if (GET_CODE (op) == ZERO_EXTEND
623 || GET_CODE (op) == SIGN_EXTEND)
624 {
625 /* There are three possibilities. If MODE is the same as the
   626          origmode, we can omit both the extension and the truncation.
   627          If MODE is not larger than the origmode, we can apply the
   628          truncation without the extension.  Finally, if MODE
   629          is larger than the origmode, we can just extend to the appropriate
   630          mode.  */
631 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
632 if (mode == origmode)
633 return XEXP (op, 0);
634 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 return simplify_gen_unary (TRUNCATE, mode,
636 XEXP (op, 0), origmode);
637 else
638 return simplify_gen_unary (GET_CODE (op), mode,
639 XEXP (op, 0), origmode);
640 }
641
642 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
   643      to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))).  */
644 if (GET_CODE (op) == PLUS
645 || GET_CODE (op) == MINUS
646 || GET_CODE (op) == MULT)
647 {
648 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
649 if (op0)
650 {
651 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
652 if (op1)
653 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
654 }
655 }
656
   657   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
   658      to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
659 the outer subreg is effectively a truncation to the original mode. */
660 if ((GET_CODE (op) == LSHIFTRT
661 || GET_CODE (op) == ASHIFTRT)
662 /* Ensure that OP_MODE is at least twice as wide as MODE
663 to avoid the possibility that an outer LSHIFTRT shifts by more
664 than the sign extension's sign_bit_copies and introduces zeros
665 into the high bits of the result. */
666 && 2 * precision <= op_precision
667 && CONST_INT_P (XEXP (op, 1))
668 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
669 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
670 && UINTVAL (XEXP (op, 1)) < precision)
671 return simplify_gen_binary (ASHIFTRT, mode,
672 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
673
   674   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
   675      to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 && CONST_INT_P (XEXP (op, 1))
680 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
681 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
682 && UINTVAL (XEXP (op, 1)) < precision)
683 return simplify_gen_binary (LSHIFTRT, mode,
684 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
685
   686   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
   687      to (ashift:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if (GET_CODE (op) == ASHIFT
690 && CONST_INT_P (XEXP (op, 1))
691 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
692 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
697
698 /* Recognize a word extraction from a multi-word subreg. */
699 if ((GET_CODE (op) == LSHIFTRT
700 || GET_CODE (op) == ASHIFTRT)
701 && SCALAR_INT_MODE_P (mode)
702 && SCALAR_INT_MODE_P (op_mode)
703 && precision >= BITS_PER_WORD
704 && 2 * precision <= op_precision
705 && CONST_INT_P (XEXP (op, 1))
706 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
707 && UINTVAL (XEXP (op, 1)) < op_precision)
708 {
709 int byte = subreg_lowpart_offset (mode, op_mode);
710 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
711 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
712 (WORDS_BIG_ENDIAN
713 ? byte - shifted_bytes
714 : byte + shifted_bytes));
715 }
716
717 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
718 and try replacing the TRUNCATE and shift with it. Don't do this
719 if the MEM has a mode-dependent address. */
720 if ((GET_CODE (op) == LSHIFTRT
721 || GET_CODE (op) == ASHIFTRT)
722 && SCALAR_INT_MODE_P (op_mode)
723 && MEM_P (XEXP (op, 0))
724 && CONST_INT_P (XEXP (op, 1))
725 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
726 && INTVAL (XEXP (op, 1)) > 0
727 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
728 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
729 MEM_ADDR_SPACE (XEXP (op, 0)))
730 && ! MEM_VOLATILE_P (XEXP (op, 0))
731 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
732 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
733 {
734 int byte = subreg_lowpart_offset (mode, op_mode);
735 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
736 return adjust_address_nv (XEXP (op, 0), mode,
737 (WORDS_BIG_ENDIAN
738 ? byte - shifted_bytes
739 : byte + shifted_bytes));
740 }
741
742 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
743 (OP:SI foo:SI) if OP is NEG or ABS. */
744 if ((GET_CODE (op) == ABS
745 || GET_CODE (op) == NEG)
746 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
747 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
748 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (XEXP (op, 0), 0), mode);
751
752 /* (truncate:A (subreg:B (truncate:C X) 0)) is
753 (truncate:A X). */
754 if (GET_CODE (op) == SUBREG
755 && SCALAR_INT_MODE_P (mode)
756 && SCALAR_INT_MODE_P (op_mode)
757 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
758 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
759 && subreg_lowpart_p (op))
760 {
761 rtx inner = XEXP (SUBREG_REG (op), 0);
762 if (GET_MODE_PRECISION (mode)
763 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
764 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
765 else
766 /* If subreg above is paradoxical and C is narrower
767 than A, return (subreg:A (truncate:C X) 0). */
768 return simplify_gen_subreg (mode, SUBREG_REG (op),
769 GET_MODE (SUBREG_REG (op)), 0);
770 }
771
772 /* (truncate:A (truncate:B X)) is (truncate:A X). */
773 if (GET_CODE (op) == TRUNCATE)
774 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
775 GET_MODE (XEXP (op, 0)));
776
777 return NULL_RTX;
778 }
779 \f
780 /* Try to simplify a unary operation CODE whose output mode is to be
781 MODE with input operand OP whose mode was originally OP_MODE.
782 Return zero if no simplification can be made. */
783 rtx
784 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
785 rtx op, enum machine_mode op_mode)
786 {
787 rtx trueop, tem;
788
789 trueop = avoid_constant_pool_reference (op);
790
791 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
792 if (tem)
793 return tem;
794
795 return simplify_unary_operation_1 (code, mode, op);
796 }
797
798 /* Perform some simplifications we can do even if the operands
799 aren't constant. */
800 static rtx
801 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
802 {
803 enum rtx_code reversed;
804 rtx temp;
805
806 switch (code)
807 {
808 case NOT:
809 /* (not (not X)) == X. */
810 if (GET_CODE (op) == NOT)
811 return XEXP (op, 0);
812
   813       /* (not (eq X Y)) == (ne X Y), etc. if the mode is BImode or the true
   814          value of the comparison is all ones (STORE_FLAG_VALUE == -1).  */
815 if (COMPARISON_P (op)
816 && (mode == BImode || STORE_FLAG_VALUE == -1)
817 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
818 return simplify_gen_relational (reversed, mode, VOIDmode,
819 XEXP (op, 0), XEXP (op, 1));
820
821 /* (not (plus X -1)) can become (neg X). */
822 if (GET_CODE (op) == PLUS
823 && XEXP (op, 1) == constm1_rtx)
824 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
825
826 /* Similarly, (not (neg X)) is (plus X -1). */
827 if (GET_CODE (op) == NEG)
828 return plus_constant (mode, XEXP (op, 0), -1);
829
830 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
831 if (GET_CODE (op) == XOR
832 && CONST_INT_P (XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
836
837 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
838 if (GET_CODE (op) == PLUS
839 && CONST_INT_P (XEXP (op, 1))
840 && mode_signbit_p (mode, XEXP (op, 1))
841 && (temp = simplify_unary_operation (NOT, mode,
842 XEXP (op, 1), mode)) != 0)
843 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
844
845
846 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
847 operands other than 1, but that is not valid. We could do a
848 similar simplification for (not (lshiftrt C X)) where C is
849 just the sign bit, but this doesn't seem common enough to
850 bother with. */
851 if (GET_CODE (op) == ASHIFT
852 && XEXP (op, 0) == const1_rtx)
853 {
854 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
855 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
856 }
857
858 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
859 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
860 so we can perform the above simplification. */
861
862 if (STORE_FLAG_VALUE == -1
863 && GET_CODE (op) == ASHIFTRT
   864           && CONST_INT_P (XEXP (op, 1))
865 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
866 return simplify_gen_relational (GE, mode, VOIDmode,
867 XEXP (op, 0), const0_rtx);
868
869
870 if (GET_CODE (op) == SUBREG
871 && subreg_lowpart_p (op)
872 && (GET_MODE_SIZE (GET_MODE (op))
873 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
874 && GET_CODE (SUBREG_REG (op)) == ASHIFT
875 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
876 {
877 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
878 rtx x;
879
880 x = gen_rtx_ROTATE (inner_mode,
881 simplify_gen_unary (NOT, inner_mode, const1_rtx,
882 inner_mode),
883 XEXP (SUBREG_REG (op), 1));
884 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
885 if (temp)
886 return temp;
887 }
888
   889       /* Apply De Morgan's laws to reduce the number of patterns for machines
   890          with negating logical insns (and-not, nand, etc.).  If the result has
891 only one NOT, put it first, since that is how the patterns are
892 coded. */
893
894 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
895 {
896 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
897 enum machine_mode op_mode;
898
899 op_mode = GET_MODE (in1);
900 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
901
902 op_mode = GET_MODE (in2);
903 if (op_mode == VOIDmode)
904 op_mode = mode;
905 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
906
907 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
908 {
909 rtx tem = in2;
910 in2 = in1; in1 = tem;
911 }
912
913 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
914 mode, in1, in2);
915 }
916 break;
917
918 case NEG:
919 /* (neg (neg X)) == X. */
920 if (GET_CODE (op) == NEG)
921 return XEXP (op, 0);
922
923 /* (neg (plus X 1)) can become (not X). */
924 if (GET_CODE (op) == PLUS
925 && XEXP (op, 1) == const1_rtx)
926 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
927
928 /* Similarly, (neg (not X)) is (plus X 1). */
929 if (GET_CODE (op) == NOT)
930 return plus_constant (mode, XEXP (op, 0), 1);
931
932 /* (neg (minus X Y)) can become (minus Y X). This transformation
933 isn't safe for modes with signed zeros, since if X and Y are
934 both +0, (minus Y X) is the same as (minus X Y). If the
935 rounding mode is towards +infinity (or -infinity) then the two
936 expressions will be rounded differently. */
937 if (GET_CODE (op) == MINUS
938 && !HONOR_SIGNED_ZEROS (mode)
939 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
940 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
941
942 if (GET_CODE (op) == PLUS
943 && !HONOR_SIGNED_ZEROS (mode)
944 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
945 {
946 /* (neg (plus A C)) is simplified to (minus -C A). */
947 if (CONST_SCALAR_INT_P (XEXP (op, 1))
948 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
949 {
950 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
951 if (temp)
952 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
953 }
954
955 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
956 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
957 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
958 }
959
960 /* (neg (mult A B)) becomes (mult A (neg B)).
961 This works even for floating-point values. */
962 if (GET_CODE (op) == MULT
963 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
964 {
965 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
966 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
967 }
968
969 /* NEG commutes with ASHIFT since it is multiplication. Only do
970 this if we can then eliminate the NEG (e.g., if the operand
971 is a constant). */
972 if (GET_CODE (op) == ASHIFT)
973 {
974 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
975 if (temp)
976 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
977 }
978
979 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
980 C is equal to the width of MODE minus 1. */
981 if (GET_CODE (op) == ASHIFTRT
982 && CONST_INT_P (XEXP (op, 1))
983 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
984 return simplify_gen_binary (LSHIFTRT, mode,
985 XEXP (op, 0), XEXP (op, 1));
986
987 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
988 C is equal to the width of MODE minus 1. */
989 if (GET_CODE (op) == LSHIFTRT
990 && CONST_INT_P (XEXP (op, 1))
991 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
992 return simplify_gen_binary (ASHIFTRT, mode,
993 XEXP (op, 0), XEXP (op, 1));
994
995 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
996 if (GET_CODE (op) == XOR
997 && XEXP (op, 1) == const1_rtx
998 && nonzero_bits (XEXP (op, 0), mode) == 1)
999 return plus_constant (mode, XEXP (op, 0), -1);
1000
1001 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1002 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1003 if (GET_CODE (op) == LT
1004 && XEXP (op, 1) == const0_rtx
1005 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1006 {
1007 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1008 int isize = GET_MODE_PRECISION (inner);
1009 if (STORE_FLAG_VALUE == 1)
1010 {
1011 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1012 GEN_INT (isize - 1));
1013 if (mode == inner)
1014 return temp;
1015 if (GET_MODE_PRECISION (mode) > isize)
1016 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1017 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1018 }
1019 else if (STORE_FLAG_VALUE == -1)
1020 {
1021 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1022 GEN_INT (isize - 1));
1023 if (mode == inner)
1024 return temp;
1025 if (GET_MODE_PRECISION (mode) > isize)
1026 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1027 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1028 }
1029 }
1030 break;
1031
1032 case TRUNCATE:
1033 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1034 with the umulXi3_highpart patterns. */
1035 if (GET_CODE (op) == LSHIFTRT
1036 && GET_CODE (XEXP (op, 0)) == MULT)
1037 break;
1038
1039 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1040 {
1041 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1042 {
1043 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1044 if (temp)
1045 return temp;
1046 }
1047 /* We can't handle truncation to a partial integer mode here
1048 because we don't know the real bitsize of the partial
1049 integer mode. */
1050 break;
1051 }
1052
1053 if (GET_MODE (op) != VOIDmode)
1054 {
1055 temp = simplify_truncation (mode, op, GET_MODE (op));
1056 if (temp)
1057 return temp;
1058 }
1059
1060 /* If we know that the value is already truncated, we can
1061 replace the TRUNCATE with a SUBREG. */
1062 if (GET_MODE_NUNITS (mode) == 1
1063 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1064 || truncated_to_mode (mode, op)))
1065 {
1066 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1067 if (temp)
1068 return temp;
1069 }
1070
1071 /* A truncate of a comparison can be replaced with a subreg if
1072 STORE_FLAG_VALUE permits. This is like the previous test,
1073 but it works even if the comparison is done in a mode larger
1074 than HOST_BITS_PER_WIDE_INT. */
1075 if (HWI_COMPUTABLE_MODE_P (mode)
1076 && COMPARISON_P (op)
1077 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1078 {
1079 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1080 if (temp)
1081 return temp;
1082 }
1083
1084 /* A truncate of a memory is just loading the low part of the memory
1085 if we are not changing the meaning of the address. */
1086 if (GET_CODE (op) == MEM
1087 && !VECTOR_MODE_P (mode)
1088 && !MEM_VOLATILE_P (op)
1089 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1090 {
1091 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1092 if (temp)
1093 return temp;
1094 }
1095
1096 break;
1097
1098 case FLOAT_TRUNCATE:
1099 if (DECIMAL_FLOAT_MODE_P (mode))
1100 break;
1101
1102 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1103 if (GET_CODE (op) == FLOAT_EXTEND
1104 && GET_MODE (XEXP (op, 0)) == mode)
1105 return XEXP (op, 0);
1106
1107 /* (float_truncate:SF (float_truncate:DF foo:XF))
1108 = (float_truncate:SF foo:XF).
1109 This may eliminate double rounding, so it is unsafe.
1110
1111 (float_truncate:SF (float_extend:XF foo:DF))
1112 = (float_truncate:SF foo:DF).
1113
1114 (float_truncate:DF (float_extend:XF foo:SF))
  1115          = (float_extend:DF foo:SF).  */
1116 if ((GET_CODE (op) == FLOAT_TRUNCATE
1117 && flag_unsafe_math_optimizations)
1118 || GET_CODE (op) == FLOAT_EXTEND)
1119 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1120 0)))
1121 > GET_MODE_SIZE (mode)
1122 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1123 mode,
1124 XEXP (op, 0), mode);
1125
1126 /* (float_truncate (float x)) is (float x) */
1127 if (GET_CODE (op) == FLOAT
1128 && (flag_unsafe_math_optimizations
1129 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1130 && ((unsigned)significand_size (GET_MODE (op))
1131 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1132 - num_sign_bit_copies (XEXP (op, 0),
1133 GET_MODE (XEXP (op, 0))))))))
1134 return simplify_gen_unary (FLOAT, mode,
1135 XEXP (op, 0),
1136 GET_MODE (XEXP (op, 0)));
1137
  1138       /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
1139 (OP:SF foo:SF) if OP is NEG or ABS. */
1140 if ((GET_CODE (op) == ABS
1141 || GET_CODE (op) == NEG)
1142 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1143 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1144 return simplify_gen_unary (GET_CODE (op), mode,
1145 XEXP (XEXP (op, 0), 0), mode);
1146
1147 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1148 is (float_truncate:SF x). */
1149 if (GET_CODE (op) == SUBREG
1150 && subreg_lowpart_p (op)
1151 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1152 return SUBREG_REG (op);
1153 break;
1154
1155 case FLOAT_EXTEND:
1156 if (DECIMAL_FLOAT_MODE_P (mode))
1157 break;
1158
1159 /* (float_extend (float_extend x)) is (float_extend x)
1160
1161 (float_extend (float x)) is (float x) assuming that double
1162 rounding can't happen.
1163 */
1164 if (GET_CODE (op) == FLOAT_EXTEND
1165 || (GET_CODE (op) == FLOAT
1166 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1167 && ((unsigned)significand_size (GET_MODE (op))
1168 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1169 - num_sign_bit_copies (XEXP (op, 0),
1170 GET_MODE (XEXP (op, 0)))))))
1171 return simplify_gen_unary (GET_CODE (op), mode,
1172 XEXP (op, 0),
1173 GET_MODE (XEXP (op, 0)));
1174
1175 break;
1176
1177 case ABS:
1178 /* (abs (neg <foo>)) -> (abs <foo>) */
1179 if (GET_CODE (op) == NEG)
1180 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1181 GET_MODE (XEXP (op, 0)));
1182
1183 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1184 do nothing. */
1185 if (GET_MODE (op) == VOIDmode)
1186 break;
1187
1188 /* If operand is something known to be positive, ignore the ABS. */
1189 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1190 || val_signbit_known_clear_p (GET_MODE (op),
1191 nonzero_bits (op, GET_MODE (op))))
1192 return op;
1193
1194 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1195 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1196 return gen_rtx_NEG (mode, op);
1197
1198 break;
1199
1200 case FFS:
1201 /* (ffs (*_extend <X>)) = (ffs <X>) */
1202 if (GET_CODE (op) == SIGN_EXTEND
1203 || GET_CODE (op) == ZERO_EXTEND)
1204 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1205 GET_MODE (XEXP (op, 0)));
1206 break;
1207
1208 case POPCOUNT:
1209 switch (GET_CODE (op))
1210 {
1211 case BSWAP:
1212 case ZERO_EXTEND:
1213 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1214 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1215 GET_MODE (XEXP (op, 0)));
1216
1217 case ROTATE:
1218 case ROTATERT:
1219 /* Rotations don't affect popcount. */
1220 if (!side_effects_p (XEXP (op, 1)))
1221 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1222 GET_MODE (XEXP (op, 0)));
1223 break;
1224
1225 default:
1226 break;
1227 }
1228 break;
1229
1230 case PARITY:
1231 switch (GET_CODE (op))
1232 {
1233 case NOT:
1234 case BSWAP:
1235 case ZERO_EXTEND:
1236 case SIGN_EXTEND:
1237 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1238 GET_MODE (XEXP (op, 0)));
1239
1240 case ROTATE:
1241 case ROTATERT:
1242 /* Rotations don't affect parity. */
1243 if (!side_effects_p (XEXP (op, 1)))
1244 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1245 GET_MODE (XEXP (op, 0)));
1246 break;
1247
1248 default:
1249 break;
1250 }
1251 break;
1252
1253 case BSWAP:
1254 /* (bswap (bswap x)) -> x. */
1255 if (GET_CODE (op) == BSWAP)
1256 return XEXP (op, 0);
1257 break;
1258
1259 case FLOAT:
1260 /* (float (sign_extend <X>)) = (float <X>). */
1261 if (GET_CODE (op) == SIGN_EXTEND)
1262 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1263 GET_MODE (XEXP (op, 0)));
1264 break;
1265
1266 case SIGN_EXTEND:
1267 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1268 becomes just the MINUS if its mode is MODE. This allows
1269 folding switch statements on machines using casesi (such as
1270 the VAX). */
1271 if (GET_CODE (op) == TRUNCATE
1272 && GET_MODE (XEXP (op, 0)) == mode
1273 && GET_CODE (XEXP (op, 0)) == MINUS
1274 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1275 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1276 return XEXP (op, 0);
1277
1278 /* Extending a widening multiplication should be canonicalized to
1279 a wider widening multiplication. */
1280 if (GET_CODE (op) == MULT)
1281 {
1282 rtx lhs = XEXP (op, 0);
1283 rtx rhs = XEXP (op, 1);
1284 enum rtx_code lcode = GET_CODE (lhs);
1285 enum rtx_code rcode = GET_CODE (rhs);
1286
1287 /* Widening multiplies usually extend both operands, but sometimes
1288 they use a shift to extract a portion of a register. */
1289 if ((lcode == SIGN_EXTEND
1290 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1291 && (rcode == SIGN_EXTEND
1292 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1293 {
1294 enum machine_mode lmode = GET_MODE (lhs);
1295 enum machine_mode rmode = GET_MODE (rhs);
1296 int bits;
1297
1298 if (lcode == ASHIFTRT)
1299 /* Number of bits not shifted off the end. */
1300 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1301 else /* lcode == SIGN_EXTEND */
1302 /* Size of inner mode. */
1303 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1304
1305 if (rcode == ASHIFTRT)
1306 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1307 else /* rcode == SIGN_EXTEND */
1308 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1309
  1310               /* We can only widen multiplies if the result is mathematically
1311 equivalent. I.e. if overflow was impossible. */
1312 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1313 return simplify_gen_binary
1314 (MULT, mode,
1315 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1316 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1317 }
1318 }
1319
1320 /* Check for a sign extension of a subreg of a promoted
1321 variable, where the promotion is sign-extended, and the
1322 target mode is the same as the variable's promotion. */
1323 if (GET_CODE (op) == SUBREG
1324 && SUBREG_PROMOTED_VAR_P (op)
1325 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1326 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1327 {
1328 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1329 if (temp)
1330 return temp;
1331 }
1332
1333 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1334 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1335 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1336 {
1337 gcc_assert (GET_MODE_BITSIZE (mode)
1338 > GET_MODE_BITSIZE (GET_MODE (op)));
1339 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1340 GET_MODE (XEXP (op, 0)));
1341 }
1342
1343 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
  1344          is (sign_extend:M (subreg:O <X>)) if there is a mode with
1345 GET_MODE_BITSIZE (N) - I bits.
1346 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1347 is similarly (zero_extend:M (subreg:O <X>)). */
1348 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1349 && GET_CODE (XEXP (op, 0)) == ASHIFT
1350 && CONST_INT_P (XEXP (op, 1))
1351 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1352 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1353 {
1354 enum machine_mode tmode
1355 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1356 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1357 gcc_assert (GET_MODE_BITSIZE (mode)
1358 > GET_MODE_BITSIZE (GET_MODE (op)));
1359 if (tmode != BLKmode)
1360 {
1361 rtx inner =
1362 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1363 if (inner)
1364 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1365 ? SIGN_EXTEND : ZERO_EXTEND,
1366 mode, inner, tmode);
1367 }
1368 }
1369
1370 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1371 /* As we do not know which address space the pointer is referring to,
1372 we can do this only if the target does not support different pointer
1373 or address modes depending on the address space. */
1374 if (target_default_pointer_address_modes_p ()
1375 && ! POINTERS_EXTEND_UNSIGNED
1376 && mode == Pmode && GET_MODE (op) == ptr_mode
1377 && (CONSTANT_P (op)
1378 || (GET_CODE (op) == SUBREG
1379 && REG_P (SUBREG_REG (op))
1380 && REG_POINTER (SUBREG_REG (op))
1381 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1382 return convert_memory_address (Pmode, op);
1383 #endif
1384 break;
1385
1386 case ZERO_EXTEND:
1387 /* Check for a zero extension of a subreg of a promoted
1388 variable, where the promotion is zero-extended, and the
1389 target mode is the same as the variable's promotion. */
1390 if (GET_CODE (op) == SUBREG
1391 && SUBREG_PROMOTED_VAR_P (op)
1392 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1393 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1394 {
1395 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1396 if (temp)
1397 return temp;
1398 }
1399
1400 /* Extending a widening multiplication should be canonicalized to
1401 a wider widening multiplication. */
1402 if (GET_CODE (op) == MULT)
1403 {
1404 rtx lhs = XEXP (op, 0);
1405 rtx rhs = XEXP (op, 1);
1406 enum rtx_code lcode = GET_CODE (lhs);
1407 enum rtx_code rcode = GET_CODE (rhs);
1408
1409 /* Widening multiplies usually extend both operands, but sometimes
1410 they use a shift to extract a portion of a register. */
1411 if ((lcode == ZERO_EXTEND
1412 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1413 && (rcode == ZERO_EXTEND
1414 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1415 {
1416 enum machine_mode lmode = GET_MODE (lhs);
1417 enum machine_mode rmode = GET_MODE (rhs);
1418 int bits;
1419
1420 if (lcode == LSHIFTRT)
1421 /* Number of bits not shifted off the end. */
1422 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1423 else /* lcode == ZERO_EXTEND */
1424 /* Size of inner mode. */
1425 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1426
1427 if (rcode == LSHIFTRT)
1428 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1429 else /* rcode == ZERO_EXTEND */
1430 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1431
  1432               /* We can only widen multiplies if the result is mathematically
1433 equivalent. I.e. if overflow was impossible. */
1434 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1435 return simplify_gen_binary
1436 (MULT, mode,
1437 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1438 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1439 }
1440 }
1441
1442 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1443 if (GET_CODE (op) == ZERO_EXTEND)
1444 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1445 GET_MODE (XEXP (op, 0)));
1446
1447 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
  1448          is (zero_extend:M (subreg:O <X>)) if there is a mode with
1449 GET_MODE_BITSIZE (N) - I bits. */
1450 if (GET_CODE (op) == LSHIFTRT
1451 && GET_CODE (XEXP (op, 0)) == ASHIFT
1452 && CONST_INT_P (XEXP (op, 1))
1453 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1454 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1455 {
1456 enum machine_mode tmode
1457 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1458 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1459 if (tmode != BLKmode)
1460 {
1461 rtx inner =
1462 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1463 if (inner)
1464 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1465 }
1466 }
1467
1468 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1469 /* As we do not know which address space the pointer is referring to,
1470 we can do this only if the target does not support different pointer
1471 or address modes depending on the address space. */
1472 if (target_default_pointer_address_modes_p ()
1473 && POINTERS_EXTEND_UNSIGNED > 0
1474 && mode == Pmode && GET_MODE (op) == ptr_mode
1475 && (CONSTANT_P (op)
1476 || (GET_CODE (op) == SUBREG
1477 && REG_P (SUBREG_REG (op))
1478 && REG_POINTER (SUBREG_REG (op))
1479 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1480 return convert_memory_address (Pmode, op);
1481 #endif
1482 break;
1483
1484 default:
1485 break;
1486 }
1487
1488 return 0;
1489 }
1490
1491 /* Try to compute the value of a unary operation CODE whose output mode is to
1492 be MODE with input operand OP whose mode was originally OP_MODE.
1493 Return zero if the value cannot be computed. */
1494 rtx
1495 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1496 rtx op, enum machine_mode op_mode)
1497 {
1498 unsigned int width = GET_MODE_PRECISION (mode);
1499 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1500
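  /* (vec_duplicate X) of a constant folds to a CONST_VECTOR whose elements
     are all X, or, when X is itself a narrower CONST_VECTOR, to repeated
     copies of its elements.  */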
1501 if (code == VEC_DUPLICATE)
1502 {
1503 gcc_assert (VECTOR_MODE_P (mode));
1504 if (GET_MODE (op) != VOIDmode)
1505 {
1506 if (!VECTOR_MODE_P (GET_MODE (op)))
1507 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1508 else
1509 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1510 (GET_MODE (op)));
1511 }
1512 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1513 || GET_CODE (op) == CONST_VECTOR)
1514 {
1515 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1516 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1517 rtvec v = rtvec_alloc (n_elts);
1518 unsigned int i;
1519
1520 if (GET_CODE (op) != CONST_VECTOR)
1521 for (i = 0; i < n_elts; i++)
1522 RTVEC_ELT (v, i) = op;
1523 else
1524 {
1525 enum machine_mode inmode = GET_MODE (op);
1526 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1527 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1528
1529 gcc_assert (in_n_elts < n_elts);
1530 gcc_assert ((n_elts % in_n_elts) == 0);
1531 for (i = 0; i < n_elts; i++)
1532 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1533 }
1534 return gen_rtx_CONST_VECTOR (mode, v);
1535 }
1536 }
1537
1538 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1539 {
1540 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1541 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1542 enum machine_mode opmode = GET_MODE (op);
1543 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1544 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1545 rtvec v = rtvec_alloc (n_elts);
1546 unsigned int i;
1547
1548 gcc_assert (op_n_elts == n_elts);
1549 for (i = 0; i < n_elts; i++)
1550 {
1551 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1552 CONST_VECTOR_ELT (op, i),
1553 GET_MODE_INNER (opmode));
1554 if (!x)
1555 return 0;
1556 RTVEC_ELT (v, i) = x;
1557 }
1558 return gen_rtx_CONST_VECTOR (mode, v);
1559 }
1560
1561 /* The order of these tests is critical so that, for example, we don't
1562 check the wrong mode (input vs. output) for a conversion operation,
1563 such as FIX. At some point, this should be simplified. */
1564
1565 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1566 {
1567 HOST_WIDE_INT hv, lv;
1568 REAL_VALUE_TYPE d;
1569
1570 if (CONST_INT_P (op))
1571 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1572 else
1573 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1574
1575 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1576 d = real_value_truncate (mode, d);
1577 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1578 }
1579 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1580 {
1581 HOST_WIDE_INT hv, lv;
1582 REAL_VALUE_TYPE d;
1583
1584 if (CONST_INT_P (op))
1585 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1586 else
1587 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1588
1589 if (op_mode == VOIDmode
1590 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1591 /* We should never get a negative number. */
1592 gcc_assert (hv >= 0);
1593 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1594 hv = 0, lv &= GET_MODE_MASK (op_mode);
1595
1596 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1597 d = real_value_truncate (mode, d);
1598 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1599 }
1600
1601 if (CONST_INT_P (op)
1602 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1603 {
1604 HOST_WIDE_INT arg0 = INTVAL (op);
1605 HOST_WIDE_INT val;
1606
1607 switch (code)
1608 {
1609 case NOT:
1610 val = ~ arg0;
1611 break;
1612
1613 case NEG:
1614 val = - arg0;
1615 break;
1616
1617 case ABS:
1618 val = (arg0 >= 0 ? arg0 : - arg0);
1619 break;
1620
1621 case FFS:
1622 arg0 &= GET_MODE_MASK (mode);
1623 val = ffs_hwi (arg0);
1624 break;
1625
1626 case CLZ:
1627 arg0 &= GET_MODE_MASK (mode);
1628 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1629 ;
1630 else
1631 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1632 break;
1633
1634 case CLRSB:
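	  /* CLRSB is the number of leading redundant sign bits, i.e. the
	     number of copies of the sign bit minus one.  */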
1635 arg0 &= GET_MODE_MASK (mode);
1636 if (arg0 == 0)
1637 val = GET_MODE_PRECISION (mode) - 1;
1638 else if (arg0 >= 0)
1639 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1640 else if (arg0 < 0)
1641 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1642 break;
1643
1644 case CTZ:
1645 arg0 &= GET_MODE_MASK (mode);
1646 if (arg0 == 0)
1647 {
1648 /* Even if the value at zero is undefined, we have to come
1649 up with some replacement. Seems good enough. */
1650 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1651 val = GET_MODE_PRECISION (mode);
1652 }
1653 else
1654 val = ctz_hwi (arg0);
1655 break;
1656
1657 case POPCOUNT:
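	  /* Count the set bits by clearing the lowest set bit on each
	     iteration (arg0 &= arg0 - 1).  */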
1658 arg0 &= GET_MODE_MASK (mode);
1659 val = 0;
1660 while (arg0)
1661 val++, arg0 &= arg0 - 1;
1662 break;
1663
1664 case PARITY:
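	  /* Parity is the population count modulo 2: reuse the same
	     bit-clearing loop and keep only the low bit.  */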
1665 arg0 &= GET_MODE_MASK (mode);
1666 val = 0;
1667 while (arg0)
1668 val++, arg0 &= arg0 - 1;
1669 val &= 1;
1670 break;
1671
1672 case BSWAP:
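	  /* Reverse the byte order: the byte at bit offset S of ARG0 moves
	     to bit offset WIDTH - S - 8 of the result.  */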
1673 {
1674 unsigned int s;
1675
1676 val = 0;
1677 for (s = 0; s < width; s += 8)
1678 {
1679 unsigned int d = width - s - 8;
1680 unsigned HOST_WIDE_INT byte;
1681 byte = (arg0 >> s) & 0xff;
1682 val |= byte << d;
1683 }
1684 }
1685 break;
1686
1687 case TRUNCATE:
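	  /* The masking to MODE is done by gen_int_mode below, so nothing
	     more is needed here.  */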
1688 val = arg0;
1689 break;
1690
1691 case ZERO_EXTEND:
1692 /* When zero-extending a CONST_INT, we need to know its
1693 original mode. */
1694 gcc_assert (op_mode != VOIDmode);
1695 if (op_width == HOST_BITS_PER_WIDE_INT)
1696 {
1697 /* If we were really extending the mode,
1698 we would have to distinguish between zero-extension
1699 and sign-extension. */
1700 gcc_assert (width == op_width);
1701 val = arg0;
1702 }
1703 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1704 val = arg0 & GET_MODE_MASK (op_mode);
1705 else
1706 return 0;
1707 break;
1708
1709 case SIGN_EXTEND:
1710 if (op_mode == VOIDmode)
1711 op_mode = mode;
1712 op_width = GET_MODE_PRECISION (op_mode);
1713 if (op_width == HOST_BITS_PER_WIDE_INT)
1714 {
1715 /* If we were really extending the mode,
1716 we would have to distinguish between zero-extension
1717 and sign-extension. */
1718 gcc_assert (width == op_width);
1719 val = arg0;
1720 }
1721 else if (op_width < HOST_BITS_PER_WIDE_INT)
1722 {
1723 val = arg0 & GET_MODE_MASK (op_mode);
1724 if (val_signbit_known_set_p (op_mode, val))
1725 val |= ~GET_MODE_MASK (op_mode);
1726 }
1727 else
1728 return 0;
1729 break;
1730
1731 case SQRT:
1732 case FLOAT_EXTEND:
1733 case FLOAT_TRUNCATE:
1734 case SS_TRUNCATE:
1735 case US_TRUNCATE:
1736 case SS_NEG:
1737 case US_NEG:
1738 case SS_ABS:
1739 return 0;
1740
1741 default:
1742 gcc_unreachable ();
1743 }
1744
1745 return gen_int_mode (val, mode);
1746 }
1747
1748 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1749 for a DImode operation on a CONST_INT. */
1750 else if (width <= HOST_BITS_PER_DOUBLE_INT
1751 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1752 {
1753 double_int first, value;
1754
1755 if (CONST_DOUBLE_AS_INT_P (op))
1756 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1757 CONST_DOUBLE_LOW (op));
1758 else
1759 first = double_int::from_shwi (INTVAL (op));
1760
1761 switch (code)
1762 {
1763 case NOT:
1764 value = ~first;
1765 break;
1766
1767 case NEG:
1768 value = -first;
1769 break;
1770
1771 case ABS:
1772 if (first.is_negative ())
1773 value = -first;
1774 else
1775 value = first;
1776 break;
1777
1778 case FFS:
1779 value.high = 0;
1780 if (first.low != 0)
1781 value.low = ffs_hwi (first.low);
1782 else if (first.high != 0)
1783 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1784 else
1785 value.low = 0;
1786 break;
1787
1788 case CLZ:
1789 value.high = 0;
1790 if (first.high != 0)
1791 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1792 - HOST_BITS_PER_WIDE_INT;
1793 else if (first.low != 0)
1794 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1795 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1796 value.low = GET_MODE_PRECISION (mode);
1797 break;
1798
1799 case CTZ:
1800 value.high = 0;
1801 if (first.low != 0)
1802 value.low = ctz_hwi (first.low);
1803 else if (first.high != 0)
1804 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1805 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1806 value.low = GET_MODE_PRECISION (mode);
1807 break;
1808
1809 case POPCOUNT:
1810 value = double_int_zero;
1811 while (first.low)
1812 {
1813 value.low++;
1814 first.low &= first.low - 1;
1815 }
1816 while (first.high)
1817 {
1818 value.low++;
1819 first.high &= first.high - 1;
1820 }
1821 break;
1822
1823 case PARITY:
1824 value = double_int_zero;
1825 while (first.low)
1826 {
1827 value.low++;
1828 first.low &= first.low - 1;
1829 }
1830 while (first.high)
1831 {
1832 value.low++;
1833 first.high &= first.high - 1;
1834 }
1835 value.low &= 1;
1836 break;
1837
1838 case BSWAP:
1839 {
1840 unsigned int s;
1841
1842 value = double_int_zero;
1843 for (s = 0; s < width; s += 8)
1844 {
1845 unsigned int d = width - s - 8;
1846 unsigned HOST_WIDE_INT byte;
1847
1848 if (s < HOST_BITS_PER_WIDE_INT)
1849 byte = (first.low >> s) & 0xff;
1850 else
1851 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1852
1853 if (d < HOST_BITS_PER_WIDE_INT)
1854 value.low |= byte << d;
1855 else
1856 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1857 }
1858 }
1859 break;
1860
1861 case TRUNCATE:
1862 /* This is just a change-of-mode, so do nothing. */
1863 value = first;
1864 break;
1865
1866 case ZERO_EXTEND:
1867 gcc_assert (op_mode != VOIDmode);
1868
1869 if (op_width > HOST_BITS_PER_WIDE_INT)
1870 return 0;
1871
1872 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1873 break;
1874
1875 case SIGN_EXTEND:
1876 if (op_mode == VOIDmode
1877 || op_width > HOST_BITS_PER_WIDE_INT)
1878 return 0;
1879 else
1880 {
1881 value.low = first.low & GET_MODE_MASK (op_mode);
1882 if (val_signbit_known_set_p (op_mode, value.low))
1883 value.low |= ~GET_MODE_MASK (op_mode);
1884
1885 value.high = HWI_SIGN_EXTEND (value.low);
1886 }
1887 break;
1888
1889 case SQRT:
1890 return 0;
1891
1892 default:
1893 return 0;
1894 }
1895
1896 return immed_double_int_const (value, mode);
1897 }
1898
1899 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1900 && SCALAR_FLOAT_MODE_P (mode)
1901 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1902 {
1903 REAL_VALUE_TYPE d, t;
1904 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1905
1906 switch (code)
1907 {
1908 case SQRT:
1909 if (HONOR_SNANS (mode) && real_isnan (&d))
1910 return 0;
1911 real_sqrt (&t, mode, &d);
1912 d = t;
1913 break;
1914 case ABS:
1915 d = real_value_abs (&d);
1916 break;
1917 case NEG:
1918 d = real_value_negate (&d);
1919 break;
1920 case FLOAT_TRUNCATE:
1921 d = real_value_truncate (mode, d);
1922 break;
1923 case FLOAT_EXTEND:
1924 /* All this does is change the mode, unless it also
1925 changes the mode class. */
1926 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1927 real_convert (&d, mode, &d);
1928 break;
1929 case FIX:
1930 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1931 break;
1932 case NOT:
1933 {
1934 long tmp[4];
1935 int i;
1936
1937 real_to_target (tmp, &d, GET_MODE (op));
1938 for (i = 0; i < 4; i++)
1939 tmp[i] = ~tmp[i];
1940 real_from_target (&d, tmp, mode);
1941 break;
1942 }
1943 default:
1944 gcc_unreachable ();
1945 }
1946 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1947 }
1948
1949 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1950 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1951 && GET_MODE_CLASS (mode) == MODE_INT
1952 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1953 {
1954 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1955 operators are intentionally left unspecified (to ease implementation
1956 by target backends), for consistency, this routine implements the
1957 same semantics for constant folding as used by the middle-end. */
1958
1959 /* This was formerly used only for non-IEEE float.
1960 eggert@twinsun.com says it is safe for IEEE also. */
1961 HOST_WIDE_INT xh, xl, th, tl;
1962 REAL_VALUE_TYPE x, t;
1963 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1964 switch (code)
1965 {
1966 case FIX:
1967 if (REAL_VALUE_ISNAN (x))
1968 return const0_rtx;
1969
1970 /* Test against the signed upper bound. */
1971 if (width > HOST_BITS_PER_WIDE_INT)
1972 {
1973 th = ((unsigned HOST_WIDE_INT) 1
1974 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1975 tl = -1;
1976 }
1977 else
1978 {
1979 th = 0;
1980 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1981 }
1982 real_from_integer (&t, VOIDmode, tl, th, 0);
1983 if (REAL_VALUES_LESS (t, x))
1984 {
1985 xh = th;
1986 xl = tl;
1987 break;
1988 }
1989
1990 /* Test against the signed lower bound. */
1991 if (width > HOST_BITS_PER_WIDE_INT)
1992 {
1993 th = (unsigned HOST_WIDE_INT) (-1)
1994 << (width - HOST_BITS_PER_WIDE_INT - 1);
1995 tl = 0;
1996 }
1997 else
1998 {
1999 th = -1;
2000 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
2001 }
2002 real_from_integer (&t, VOIDmode, tl, th, 0);
2003 if (REAL_VALUES_LESS (x, t))
2004 {
2005 xh = th;
2006 xl = tl;
2007 break;
2008 }
2009 REAL_VALUE_TO_INT (&xl, &xh, x);
2010 break;
2011
2012 case UNSIGNED_FIX:
2013 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2014 return const0_rtx;
2015
2016 /* Test against the unsigned upper bound. */
2017 if (width == HOST_BITS_PER_DOUBLE_INT)
2018 {
2019 th = -1;
2020 tl = -1;
2021 }
2022 else if (width >= HOST_BITS_PER_WIDE_INT)
2023 {
2024 th = ((unsigned HOST_WIDE_INT) 1
2025 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2026 tl = -1;
2027 }
2028 else
2029 {
2030 th = 0;
2031 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2032 }
2033 real_from_integer (&t, VOIDmode, tl, th, 1);
2034 if (REAL_VALUES_LESS (t, x))
2035 {
2036 xh = th;
2037 xl = tl;
2038 break;
2039 }
2040
2041 REAL_VALUE_TO_INT (&xl, &xh, x);
2042 break;
2043
2044 default:
2045 gcc_unreachable ();
2046 }
2047 return immed_double_const (xl, xh, mode);
2048 }
2049
2050 return NULL_RTX;
2051 }
2052 \f
2053 /* Subroutine of simplify_binary_operation to simplify a commutative,
2054 associative binary operation CODE with result mode MODE, operating
2055 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2056 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2057 canonicalization is possible. */
2058
2059 static rtx
2060 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2061 rtx op0, rtx op1)
2062 {
2063 rtx tem;
2064
2065 /* Linearize the operator to the left. */
2066 if (GET_CODE (op1) == code)
2067 {
2068 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
2069 if (GET_CODE (op0) == code)
2070 {
2071 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2072 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2073 }
2074
2075 /* "a op (b op c)" becomes "(b op c) op a". */
2076 if (! swap_commutative_operands_p (op1, op0))
2077 return simplify_gen_binary (code, mode, op1, op0);
2078
2079 tem = op0;
2080 op0 = op1;
2081 op1 = tem;
2082 }
2083
2084 if (GET_CODE (op0) == code)
2085 {
2086 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2087 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2088 {
2089 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2090 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2091 }
2092
2093 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2094 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2095 if (tem != 0)
2096 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2097
2098 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2099 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2100 if (tem != 0)
2101 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2102 }
2103
2104 return 0;
2105 }
2106
2107
2108 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2109 and OP1. Return 0 if no simplification is possible.
2110
2111 Don't use this for relational operations such as EQ or LT.
2112 Use simplify_relational_operation instead. */
2113 rtx
2114 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2115 rtx op0, rtx op1)
2116 {
2117 rtx trueop0, trueop1;
2118 rtx tem;
2119
2120 /* Relational operations don't work here. We must know the mode
2121 of the operands in order to do the comparison correctly.
2122 Assuming a full word can give incorrect results.
2123 Consider comparing 128 with -128 in QImode. */
2124 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2125 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2126
2127 /* Make sure the constant is second. */
2128 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2129 && swap_commutative_operands_p (op0, op1))
2130 {
2131 tem = op0, op0 = op1, op1 = tem;
2132 }
2133
2134 trueop0 = avoid_constant_pool_reference (op0);
2135 trueop1 = avoid_constant_pool_reference (op1);
2136
2137 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2138 if (tem)
2139 return tem;
2140 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2141 }
2142
2143 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2144 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2145 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2146 actual constants. */
2147
2148 static rtx
2149 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2150 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2151 {
2152 rtx tem, reversed, opleft, opright;
2153 HOST_WIDE_INT val;
2154 unsigned int width = GET_MODE_PRECISION (mode);
2155
2156 /* Even if we can't compute a constant result,
2157 there are some cases worth simplifying. */
2158
2159 switch (code)
2160 {
2161 case PLUS:
2162 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2163 when x is NaN, infinite, or finite and nonzero. They aren't
2164 when x is -0 and the rounding mode is not towards -infinity,
2165 since (-0) + 0 is then 0. */
2166 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2167 return op0;
2168
2169 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2170 transformations are safe even for IEEE. */
2171 if (GET_CODE (op0) == NEG)
2172 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2173 else if (GET_CODE (op1) == NEG)
2174 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2175
2176 /* (~a) + 1 -> -a */
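/* A brief justification (two's complement, as for these integer
modes): ~a == -a - 1, so (~a) + 1 == -a.  E.g. for a == 5,
~5 == -6 and -6 + 1 == -5.  */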
2177 if (INTEGRAL_MODE_P (mode)
2178 && GET_CODE (op0) == NOT
2179 && trueop1 == const1_rtx)
2180 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2181
2182 /* Handle both-operands-constant cases. We can only add
2183 CONST_INTs to constants since the sum of relocatable symbols
2184 can't be handled by most assemblers. Don't add CONST_INT
2185 to CONST_INT since overflow won't be computed properly if wider
2186 than HOST_BITS_PER_WIDE_INT. */
2187
2188 if ((GET_CODE (op0) == CONST
2189 || GET_CODE (op0) == SYMBOL_REF
2190 || GET_CODE (op0) == LABEL_REF)
2191 && CONST_INT_P (op1))
2192 return plus_constant (mode, op0, INTVAL (op1));
2193 else if ((GET_CODE (op1) == CONST
2194 || GET_CODE (op1) == SYMBOL_REF
2195 || GET_CODE (op1) == LABEL_REF)
2196 && CONST_INT_P (op0))
2197 return plus_constant (mode, op1, INTVAL (op0));
2198
2199 /* See if this is something like X * C - X or vice versa or
2200 if the multiplication is written as a shift. If so, we can
2201 distribute and make a new multiply, shift, or maybe just
2202 have X (if C is 2 in the example above). But don't make
2203 something more expensive than we had before. */
2204
2205 if (SCALAR_INT_MODE_P (mode))
2206 {
2207 double_int coeff0, coeff1;
2208 rtx lhs = op0, rhs = op1;
2209
2210 coeff0 = double_int_one;
2211 coeff1 = double_int_one;
2212
2213 if (GET_CODE (lhs) == NEG)
2214 {
2215 coeff0 = double_int_minus_one;
2216 lhs = XEXP (lhs, 0);
2217 }
2218 else if (GET_CODE (lhs) == MULT
2219 && CONST_INT_P (XEXP (lhs, 1)))
2220 {
2221 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2222 lhs = XEXP (lhs, 0);
2223 }
2224 else if (GET_CODE (lhs) == ASHIFT
2225 && CONST_INT_P (XEXP (lhs, 1))
2226 && INTVAL (XEXP (lhs, 1)) >= 0
2227 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2228 {
2229 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2230 lhs = XEXP (lhs, 0);
2231 }
2232
2233 if (GET_CODE (rhs) == NEG)
2234 {
2235 coeff1 = double_int_minus_one;
2236 rhs = XEXP (rhs, 0);
2237 }
2238 else if (GET_CODE (rhs) == MULT
2239 && CONST_INT_P (XEXP (rhs, 1)))
2240 {
2241 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2242 rhs = XEXP (rhs, 0);
2243 }
2244 else if (GET_CODE (rhs) == ASHIFT
2245 && CONST_INT_P (XEXP (rhs, 1))
2246 && INTVAL (XEXP (rhs, 1)) >= 0
2247 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2248 {
2249 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2250 rhs = XEXP (rhs, 0);
2251 }
2252
2253 if (rtx_equal_p (lhs, rhs))
2254 {
2255 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2256 rtx coeff;
2257 double_int val;
2258 bool speed = optimize_function_for_speed_p (cfun);
2259
2260 val = coeff0 + coeff1;
2261 coeff = immed_double_int_const (val, mode);
2262
2263 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2264 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2265 ? tem : 0;
2266 }
2267 }
2268
2269 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
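/* Sketch of why this holds: when C2 is the sign bit, adding C2 only
flips the top bit, because any carry out of that bit is discarded
modulo 2^precision, and flipping the top bit is exactly XOR with C2.
E.g. in QImode with C2 == 0x80, ((x ^ C1) + 0x80) == (x ^ (C1 ^ 0x80)).  */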
2270 if (CONST_SCALAR_INT_P (op1)
2271 && GET_CODE (op0) == XOR
2272 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2273 && mode_signbit_p (mode, op1))
2274 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2275 simplify_gen_binary (XOR, mode, op1,
2276 XEXP (op0, 1)));
2277
2278 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2279 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2280 && GET_CODE (op0) == MULT
2281 && GET_CODE (XEXP (op0, 0)) == NEG)
2282 {
2283 rtx in1, in2;
2284
2285 in1 = XEXP (XEXP (op0, 0), 0);
2286 in2 = XEXP (op0, 1);
2287 return simplify_gen_binary (MINUS, mode, op1,
2288 simplify_gen_binary (MULT, mode,
2289 in1, in2));
2290 }
2291
2292 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2293 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2294 is 1. */
2295 if (COMPARISON_P (op0)
2296 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2297 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2298 && (reversed = reversed_comparison (op0, mode)))
2299 return
2300 simplify_gen_unary (NEG, mode, reversed, mode);
2301
2302 /* If one of the operands is a PLUS or a MINUS, see if we can
2303 simplify this by the associative law.
2304 Don't use the associative law for floating point.
2305 The inaccuracy makes it nonassociative,
2306 and subtle programs can break if operations are associated. */
2307
2308 if (INTEGRAL_MODE_P (mode)
2309 && (plus_minus_operand_p (op0)
2310 || plus_minus_operand_p (op1))
2311 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2312 return tem;
2313
2314 /* Reassociate floating point addition only when the user
2315 specifies associative math operations. */
2316 if (FLOAT_MODE_P (mode)
2317 && flag_associative_math)
2318 {
2319 tem = simplify_associative_operation (code, mode, op0, op1);
2320 if (tem)
2321 return tem;
2322 }
2323 break;
2324
2325 case COMPARE:
2326 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2327 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2328 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2329 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2330 {
2331 rtx xop00 = XEXP (op0, 0);
2332 rtx xop10 = XEXP (op1, 0);
2333
2334 #ifdef HAVE_cc0
2335 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2336 #else
2337 if (REG_P (xop00) && REG_P (xop10)
2338 && GET_MODE (xop00) == GET_MODE (xop10)
2339 && REGNO (xop00) == REGNO (xop10)
2340 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2341 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2342 #endif
2343 return xop00;
2344 }
2345 break;
2346
2347 case MINUS:
2348 /* We can't assume x-x is 0 even with non-IEEE floating point,
2349 but since it is zero except in very strange circumstances, we
2350 will treat it as zero with -ffinite-math-only. */
2351 if (rtx_equal_p (trueop0, trueop1)
2352 && ! side_effects_p (op0)
2353 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2354 return CONST0_RTX (mode);
2355
2356 /* Change subtraction from zero into negation. (0 - x) is the
2357 same as -x when x is NaN, infinite, or finite and nonzero.
2358 But if the mode has signed zeros, and does not round towards
2359 -infinity, then 0 - 0 is 0, not -0. */
2360 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2361 return simplify_gen_unary (NEG, mode, op1, mode);
2362
2363 /* (-1 - a) is ~a. */
2364 if (trueop0 == constm1_rtx)
2365 return simplify_gen_unary (NOT, mode, op1, mode);
2366
2367 /* Subtracting 0 has no effect unless the mode has signed zeros
2368 and supports rounding towards -infinity. In such a case,
2369 0 - 0 is -0. */
2370 if (!(HONOR_SIGNED_ZEROS (mode)
2371 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2372 && trueop1 == CONST0_RTX (mode))
2373 return op0;
2374
2375 /* See if this is something like X * C - X or vice versa or
2376 if the multiplication is written as a shift. If so, we can
2377 distribute and make a new multiply, shift, or maybe just
2378 have X (if C is 2 in the example above). But don't make
2379 something more expensive than we had before. */
2380
2381 if (SCALAR_INT_MODE_P (mode))
2382 {
2383 double_int coeff0, negcoeff1;
2384 rtx lhs = op0, rhs = op1;
2385
2386 coeff0 = double_int_one;
2387 negcoeff1 = double_int_minus_one;
2388
2389 if (GET_CODE (lhs) == NEG)
2390 {
2391 coeff0 = double_int_minus_one;
2392 lhs = XEXP (lhs, 0);
2393 }
2394 else if (GET_CODE (lhs) == MULT
2395 && CONST_INT_P (XEXP (lhs, 1)))
2396 {
2397 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2398 lhs = XEXP (lhs, 0);
2399 }
2400 else if (GET_CODE (lhs) == ASHIFT
2401 && CONST_INT_P (XEXP (lhs, 1))
2402 && INTVAL (XEXP (lhs, 1)) >= 0
2403 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2404 {
2405 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2406 lhs = XEXP (lhs, 0);
2407 }
2408
2409 if (GET_CODE (rhs) == NEG)
2410 {
2411 negcoeff1 = double_int_one;
2412 rhs = XEXP (rhs, 0);
2413 }
2414 else if (GET_CODE (rhs) == MULT
2415 && CONST_INT_P (XEXP (rhs, 1)))
2416 {
2417 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2418 rhs = XEXP (rhs, 0);
2419 }
2420 else if (GET_CODE (rhs) == ASHIFT
2421 && CONST_INT_P (XEXP (rhs, 1))
2422 && INTVAL (XEXP (rhs, 1)) >= 0
2423 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2424 {
2425 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2426 negcoeff1 = -negcoeff1;
2427 rhs = XEXP (rhs, 0);
2428 }
2429
2430 if (rtx_equal_p (lhs, rhs))
2431 {
2432 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2433 rtx coeff;
2434 double_int val;
2435 bool speed = optimize_function_for_speed_p (cfun);
2436
2437 val = coeff0 + negcoeff1;
2438 coeff = immed_double_int_const (val, mode);
2439
2440 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2441 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2442 ? tem : 0;
2443 }
2444 }
2445
2446 /* (a - (-b)) -> (a + b). True even for IEEE. */
2447 if (GET_CODE (op1) == NEG)
2448 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2449
2450 /* (-x - c) may be simplified as (-c - x). */
2451 if (GET_CODE (op0) == NEG
2452 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2453 {
2454 tem = simplify_unary_operation (NEG, mode, op1, mode);
2455 if (tem)
2456 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2457 }
2458
2459 /* Don't let a relocatable value get a negative coeff. */
2460 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2461 return simplify_gen_binary (PLUS, mode,
2462 op0,
2463 neg_const_int (mode, op1));
2464
2465 /* (x - (x & y)) -> (x & ~y) */
2466 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2467 {
2468 if (rtx_equal_p (op0, XEXP (op1, 0)))
2469 {
2470 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2471 GET_MODE (XEXP (op1, 1)));
2472 return simplify_gen_binary (AND, mode, op0, tem);
2473 }
2474 if (rtx_equal_p (op0, XEXP (op1, 1)))
2475 {
2476 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2477 GET_MODE (XEXP (op1, 0)));
2478 return simplify_gen_binary (AND, mode, op0, tem);
2479 }
2480 }
2481
2482 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2483 by reversing the comparison code if valid. */
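/* E.g. with STORE_FLAG_VALUE == 1 a comparison evaluates to 1 or 0,
so (minus (const_int 1) (lt x y)) can become (ge x y), provided
reversed_comparison accepts the reversal.  */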
2484 if (STORE_FLAG_VALUE == 1
2485 && trueop0 == const1_rtx
2486 && COMPARISON_P (op1)
2487 && (reversed = reversed_comparison (op1, mode)))
2488 return reversed;
2489
2490 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2491 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2492 && GET_CODE (op1) == MULT
2493 && GET_CODE (XEXP (op1, 0)) == NEG)
2494 {
2495 rtx in1, in2;
2496
2497 in1 = XEXP (XEXP (op1, 0), 0);
2498 in2 = XEXP (op1, 1);
2499 return simplify_gen_binary (PLUS, mode,
2500 simplify_gen_binary (MULT, mode,
2501 in1, in2),
2502 op0);
2503 }
2504
2505 /* Canonicalize (minus (neg A) (mult B C)) to
2506 (minus (mult (neg B) C) A). */
2507 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2508 && GET_CODE (op1) == MULT
2509 && GET_CODE (op0) == NEG)
2510 {
2511 rtx in1, in2;
2512
2513 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2514 in2 = XEXP (op1, 1);
2515 return simplify_gen_binary (MINUS, mode,
2516 simplify_gen_binary (MULT, mode,
2517 in1, in2),
2518 XEXP (op0, 0));
2519 }
2520
2521 /* If one of the operands is a PLUS or a MINUS, see if we can
2522 simplify this by the associative law. This will, for example,
2523 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2524 Don't use the associative law for floating point.
2525 The inaccuracy makes it nonassociative,
2526 and subtle programs can break if operations are associated. */
2527
2528 if (INTEGRAL_MODE_P (mode)
2529 && (plus_minus_operand_p (op0)
2530 || plus_minus_operand_p (op1))
2531 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2532 return tem;
2533 break;
2534
2535 case MULT:
2536 if (trueop1 == constm1_rtx)
2537 return simplify_gen_unary (NEG, mode, op0, mode);
2538
2539 if (GET_CODE (op0) == NEG)
2540 {
2541 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2542 /* If op1 is a MULT as well and simplify_unary_operation
2543 just moved the NEG to the second operand, simplify_gen_binary
2544 below could, through simplify_associative_operation, move
2545 the NEG around again and recurse endlessly. */
2546 if (temp
2547 && GET_CODE (op1) == MULT
2548 && GET_CODE (temp) == MULT
2549 && XEXP (op1, 0) == XEXP (temp, 0)
2550 && GET_CODE (XEXP (temp, 1)) == NEG
2551 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2552 temp = NULL_RTX;
2553 if (temp)
2554 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2555 }
2556 if (GET_CODE (op1) == NEG)
2557 {
2558 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2559 /* If op0 is a MULT as well and simplify_unary_operation
2560 just moved the NEG to the second operand, simplify_gen_binary
2561 below could, through simplify_associative_operation, move
2562 the NEG around again and recurse endlessly. */
2563 if (temp
2564 && GET_CODE (op0) == MULT
2565 && GET_CODE (temp) == MULT
2566 && XEXP (op0, 0) == XEXP (temp, 0)
2567 && GET_CODE (XEXP (temp, 1)) == NEG
2568 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2569 temp = NULL_RTX;
2570 if (temp)
2571 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2572 }
2573
2574 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2575 x is NaN, since x * 0 is then also NaN. Nor is it valid
2576 when the mode has signed zeros, since multiplying a negative
2577 number by 0 will give -0, not 0. */
2578 if (!HONOR_NANS (mode)
2579 && !HONOR_SIGNED_ZEROS (mode)
2580 && trueop1 == CONST0_RTX (mode)
2581 && ! side_effects_p (op0))
2582 return op1;
2583
2584 /* In IEEE floating point, x*1 is not equivalent to x for
2585 signalling NaNs. */
2586 if (!HONOR_SNANS (mode)
2587 && trueop1 == CONST1_RTX (mode))
2588 return op0;
2589
2590 /* Convert multiply by constant power of two into shift unless
2591 we are still generating RTL. This test is a kludge. */
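/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)),
since exact_log2 (8) == 3.  */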
2592 if (CONST_INT_P (trueop1)
2593 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2594 /* If the mode is larger than the host word size, and the
2595 uppermost bit is set, then this isn't a power of two due
2596 to implicit sign extension. */
2597 && (width <= HOST_BITS_PER_WIDE_INT
2598 || val != HOST_BITS_PER_WIDE_INT - 1))
2599 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2600
2601 /* Likewise for multipliers wider than a word. */
2602 if (CONST_DOUBLE_AS_INT_P (trueop1)
2603 && GET_MODE (op0) == mode
2604 && CONST_DOUBLE_LOW (trueop1) == 0
2605 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2606 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2607 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2608 return simplify_gen_binary (ASHIFT, mode, op0,
2609 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2610
2611 /* x*2 is x+x and x*(-1) is -x */
2612 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2613 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2614 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2615 && GET_MODE (op0) == mode)
2616 {
2617 REAL_VALUE_TYPE d;
2618 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2619
2620 if (REAL_VALUES_EQUAL (d, dconst2))
2621 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2622
2623 if (!HONOR_SNANS (mode)
2624 && REAL_VALUES_EQUAL (d, dconstm1))
2625 return simplify_gen_unary (NEG, mode, op0, mode);
2626 }
2627
2628 /* Optimize -x * -x as x * x. */
2629 if (FLOAT_MODE_P (mode)
2630 && GET_CODE (op0) == NEG
2631 && GET_CODE (op1) == NEG
2632 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2633 && !side_effects_p (XEXP (op0, 0)))
2634 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2635
2636 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2637 if (SCALAR_FLOAT_MODE_P (mode)
2638 && GET_CODE (op0) == ABS
2639 && GET_CODE (op1) == ABS
2640 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2641 && !side_effects_p (XEXP (op0, 0)))
2642 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2643
2644 /* Reassociate multiplication, but for floating point MULTs
2645 only when the user specifies unsafe math optimizations. */
2646 if (! FLOAT_MODE_P (mode)
2647 || flag_unsafe_math_optimizations)
2648 {
2649 tem = simplify_associative_operation (code, mode, op0, op1);
2650 if (tem)
2651 return tem;
2652 }
2653 break;
2654
2655 case IOR:
2656 if (trueop1 == CONST0_RTX (mode))
2657 return op0;
2658 if (INTEGRAL_MODE_P (mode)
2659 && trueop1 == CONSTM1_RTX (mode)
2660 && !side_effects_p (op0))
2661 return op1;
2662 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2663 return op0;
2664 /* A | (~A) -> -1 */
2665 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2666 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2667 && ! side_effects_p (op0)
2668 && SCALAR_INT_MODE_P (mode))
2669 return constm1_rtx;
2670
2671 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2672 if (CONST_INT_P (op1)
2673 && HWI_COMPUTABLE_MODE_P (mode)
2674 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2675 && !side_effects_p (op0))
2676 return op1;
2677
2678 /* Canonicalize (X & C1) | C2. */
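/* Illustrative QImode instances of the three rules below:
(ior (and x 0x0c) 0x0f) is just 0x0f, because (C1 & C2) == C1;
(ior (and x 0xf0) 0x0f) becomes (ior x 0x0f), because C1 | C2 covers
the mode mask; and (ior (and x 0x3c) 0x0f) becomes
(ior (and x 0x30) 0x0f), shrinking C1 to C1 & ~C2.  */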
2679 if (GET_CODE (op0) == AND
2680 && CONST_INT_P (trueop1)
2681 && CONST_INT_P (XEXP (op0, 1)))
2682 {
2683 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2684 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2685 HOST_WIDE_INT c2 = INTVAL (trueop1);
2686
2687 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2688 if ((c1 & c2) == c1
2689 && !side_effects_p (XEXP (op0, 0)))
2690 return trueop1;
2691
2692 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2693 if (((c1|c2) & mask) == mask)
2694 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2695
2696 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2697 if (((c1 & ~c2) & mask) != (c1 & mask))
2698 {
2699 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2700 gen_int_mode (c1 & ~c2, mode));
2701 return simplify_gen_binary (IOR, mode, tem, op1);
2702 }
2703 }
2704
2705 /* Convert (A & B) | A to A. */
2706 if (GET_CODE (op0) == AND
2707 && (rtx_equal_p (XEXP (op0, 0), op1)
2708 || rtx_equal_p (XEXP (op0, 1), op1))
2709 && ! side_effects_p (XEXP (op0, 0))
2710 && ! side_effects_p (XEXP (op0, 1)))
2711 return op1;
2712
2713 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2714 mode size to (rotate A CX). */
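/* E.g. assuming a 32-bit mode, (ior (ashift a 8) (lshiftrt a 24))
becomes (rotate a 8), since 8 + 24 equals the mode precision.  */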
2715
2716 if (GET_CODE (op1) == ASHIFT
2717 || GET_CODE (op1) == SUBREG)
2718 {
2719 opleft = op1;
2720 opright = op0;
2721 }
2722 else
2723 {
2724 opright = op1;
2725 opleft = op0;
2726 }
2727
2728 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2729 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2730 && CONST_INT_P (XEXP (opleft, 1))
2731 && CONST_INT_P (XEXP (opright, 1))
2732 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2733 == GET_MODE_PRECISION (mode)))
2734 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2735
2736 /* Same, but for ashift that has been "simplified" to a wider mode
2737 by simplify_shift_const. */
2738
2739 if (GET_CODE (opleft) == SUBREG
2740 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2741 && GET_CODE (opright) == LSHIFTRT
2742 && GET_CODE (XEXP (opright, 0)) == SUBREG
2743 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2744 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2745 && (GET_MODE_SIZE (GET_MODE (opleft))
2746 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2747 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2748 SUBREG_REG (XEXP (opright, 0)))
2749 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2750 && CONST_INT_P (XEXP (opright, 1))
2751 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2752 == GET_MODE_PRECISION (mode)))
2753 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2754 XEXP (SUBREG_REG (opleft), 1));
2755
2756 /* If we have (ior (and X C1) C2), simplify this by making
2757 C1 as small as possible if C1 actually changes. */
2758 if (CONST_INT_P (op1)
2759 && (HWI_COMPUTABLE_MODE_P (mode)
2760 || INTVAL (op1) > 0)
2761 && GET_CODE (op0) == AND
2762 && CONST_INT_P (XEXP (op0, 1))
2763 && CONST_INT_P (op1)
2764 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2765 return simplify_gen_binary (IOR, mode,
2766 simplify_gen_binary
2767 (AND, mode, XEXP (op0, 0),
2768 GEN_INT (UINTVAL (XEXP (op0, 1))
2769 & ~UINTVAL (op1))),
2770 op1);
2771
2772 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2773 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2774 the PLUS does not affect any of the bits in OP1: then we can do
2775 the IOR as a PLUS and we can associate. This is valid if OP1
2776 can be safely shifted left C bits. */
2777 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2778 && GET_CODE (XEXP (op0, 0)) == PLUS
2779 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2780 && CONST_INT_P (XEXP (op0, 1))
2781 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2782 {
2783 int count = INTVAL (XEXP (op0, 1));
2784 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2785
2786 if (mask >> count == INTVAL (trueop1)
2787 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2788 return simplify_gen_binary (ASHIFTRT, mode,
2789 plus_constant (mode, XEXP (op0, 0),
2790 mask),
2791 XEXP (op0, 1));
2792 }
2793
2794 tem = simplify_associative_operation (code, mode, op0, op1);
2795 if (tem)
2796 return tem;
2797 break;
2798
2799 case XOR:
2800 if (trueop1 == CONST0_RTX (mode))
2801 return op0;
2802 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2803 return simplify_gen_unary (NOT, mode, op0, mode);
2804 if (rtx_equal_p (trueop0, trueop1)
2805 && ! side_effects_p (op0)
2806 && GET_MODE_CLASS (mode) != MODE_CC)
2807 return CONST0_RTX (mode);
2808
2809 /* Canonicalize XOR of the most significant bit to PLUS. */
2810 if (CONST_SCALAR_INT_P (op1)
2811 && mode_signbit_p (mode, op1))
2812 return simplify_gen_binary (PLUS, mode, op0, op1);
2813 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2814 if (CONST_SCALAR_INT_P (op1)
2815 && GET_CODE (op0) == PLUS
2816 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2817 && mode_signbit_p (mode, XEXP (op0, 1)))
2818 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2819 simplify_gen_binary (XOR, mode, op1,
2820 XEXP (op0, 1)));
2821
2822 /* If we are XORing two things that have no bits in common,
2823 convert them into an IOR. This helps to detect rotation encoded
2824 using those methods and possibly other simplifications. */
2825
2826 if (HWI_COMPUTABLE_MODE_P (mode)
2827 && (nonzero_bits (op0, mode)
2828 & nonzero_bits (op1, mode)) == 0)
2829 return (simplify_gen_binary (IOR, mode, op0, op1));
2830
2831 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2832 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2833 (NOT y). */
2834 {
2835 int num_negated = 0;
2836
2837 if (GET_CODE (op0) == NOT)
2838 num_negated++, op0 = XEXP (op0, 0);
2839 if (GET_CODE (op1) == NOT)
2840 num_negated++, op1 = XEXP (op1, 0);
2841
2842 if (num_negated == 2)
2843 return simplify_gen_binary (XOR, mode, op0, op1);
2844 else if (num_negated == 1)
2845 return simplify_gen_unary (NOT, mode,
2846 simplify_gen_binary (XOR, mode, op0, op1),
2847 mode);
2848 }
2849
2850 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2851 correspond to a machine insn or result in further simplifications
2852 if B is a constant. */
2853
2854 if (GET_CODE (op0) == AND
2855 && rtx_equal_p (XEXP (op0, 1), op1)
2856 && ! side_effects_p (op1))
2857 return simplify_gen_binary (AND, mode,
2858 simplify_gen_unary (NOT, mode,
2859 XEXP (op0, 0), mode),
2860 op1);
2861
2862 else if (GET_CODE (op0) == AND
2863 && rtx_equal_p (XEXP (op0, 0), op1)
2864 && ! side_effects_p (op1))
2865 return simplify_gen_binary (AND, mode,
2866 simplify_gen_unary (NOT, mode,
2867 XEXP (op0, 1), mode),
2868 op1);
2869
2870 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2871 we can transform like this:
2872 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2873 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2874 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2875 Attempt a few simplifications when B and C are both constants. */
2876 if (GET_CODE (op0) == AND
2877 && CONST_INT_P (op1)
2878 && CONST_INT_P (XEXP (op0, 1)))
2879 {
2880 rtx a = XEXP (op0, 0);
2881 rtx b = XEXP (op0, 1);
2882 rtx c = op1;
2883 HOST_WIDE_INT bval = INTVAL (b);
2884 HOST_WIDE_INT cval = INTVAL (c);
2885
2886 rtx na_c
2887 = simplify_binary_operation (AND, mode,
2888 simplify_gen_unary (NOT, mode, a, mode),
2889 c);
2890 if ((~cval & bval) == 0)
2891 {
2892 /* Try to simplify ~A&C | ~B&C. */
2893 if (na_c != NULL_RTX)
2894 return simplify_gen_binary (IOR, mode, na_c,
2895 GEN_INT (~bval & cval));
2896 }
2897 else
2898 {
2899 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2900 if (na_c == const0_rtx)
2901 {
2902 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2903 GEN_INT (~cval & bval));
2904 return simplify_gen_binary (IOR, mode, a_nc_b,
2905 GEN_INT (~bval & cval));
2906 }
2907 }
2908 }
2909
2910 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2911 comparison if STORE_FLAG_VALUE is 1. */
2912 if (STORE_FLAG_VALUE == 1
2913 && trueop1 == const1_rtx
2914 && COMPARISON_P (op0)
2915 && (reversed = reversed_comparison (op0, mode)))
2916 return reversed;
2917
2918 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2919 is (lt foo (const_int 0)), so we can perform the above
2920 simplification if STORE_FLAG_VALUE is 1. */
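/* E.g. assuming 32-bit SImode, (lshiftrt x 31) is 1 exactly when
x < 0, i.e. (lt x 0); XORing that with 1 therefore yields (ge x 0),
which is what is returned below.  */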
2921
2922 if (STORE_FLAG_VALUE == 1
2923 && trueop1 == const1_rtx
2924 && GET_CODE (op0) == LSHIFTRT
2925 && CONST_INT_P (XEXP (op0, 1))
2926 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2927 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2928
2929 /* (xor (comparison foo bar) (const_int sign-bit))
2930 when STORE_FLAG_VALUE is the sign bit. */
2931 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2932 && trueop1 == const_true_rtx
2933 && COMPARISON_P (op0)
2934 && (reversed = reversed_comparison (op0, mode)))
2935 return reversed;
2936
2937 tem = simplify_associative_operation (code, mode, op0, op1);
2938 if (tem)
2939 return tem;
2940 break;
2941
2942 case AND:
2943 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2944 return trueop1;
2945 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2946 return op0;
2947 if (HWI_COMPUTABLE_MODE_P (mode))
2948 {
2949 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2950 HOST_WIDE_INT nzop1;
2951 if (CONST_INT_P (trueop1))
2952 {
2953 HOST_WIDE_INT val1 = INTVAL (trueop1);
2954 /* If we are turning off bits already known off in OP0, we need
2955 not do an AND. */
2956 if ((nzop0 & ~val1) == 0)
2957 return op0;
2958 }
2959 nzop1 = nonzero_bits (trueop1, mode);
2960 /* If we are clearing all the nonzero bits, the result is zero. */
2961 if ((nzop1 & nzop0) == 0
2962 && !side_effects_p (op0) && !side_effects_p (op1))
2963 return CONST0_RTX (mode);
2964 }
2965 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2966 && GET_MODE_CLASS (mode) != MODE_CC)
2967 return op0;
2968 /* A & (~A) -> 0 */
2969 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2970 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2971 && ! side_effects_p (op0)
2972 && GET_MODE_CLASS (mode) != MODE_CC)
2973 return CONST0_RTX (mode);
2974
2975 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2976 there are no nonzero bits of C outside of X's mode. */
2977 if ((GET_CODE (op0) == SIGN_EXTEND
2978 || GET_CODE (op0) == ZERO_EXTEND)
2979 && CONST_INT_P (trueop1)
2980 && HWI_COMPUTABLE_MODE_P (mode)
2981 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2982 & UINTVAL (trueop1)) == 0)
2983 {
2984 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2985 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2986 gen_int_mode (INTVAL (trueop1),
2987 imode));
2988 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2989 }
2990
2991 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2992 we might be able to further simplify the AND with X and potentially
2993 remove the truncation altogether. */
2994 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2995 {
2996 rtx x = XEXP (op0, 0);
2997 enum machine_mode xmode = GET_MODE (x);
2998 tem = simplify_gen_binary (AND, xmode, x,
2999 gen_int_mode (INTVAL (trueop1), xmode));
3000 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3001 }
3002
3003 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3004 if (GET_CODE (op0) == IOR
3005 && CONST_INT_P (trueop1)
3006 && CONST_INT_P (XEXP (op0, 1)))
3007 {
3008 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3009 return simplify_gen_binary (IOR, mode,
3010 simplify_gen_binary (AND, mode,
3011 XEXP (op0, 0), op1),
3012 gen_int_mode (tmp, mode));
3013 }
3014
3015 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3016 insn (and may simplify more). */
3017 if (GET_CODE (op0) == XOR
3018 && rtx_equal_p (XEXP (op0, 0), op1)
3019 && ! side_effects_p (op1))
3020 return simplify_gen_binary (AND, mode,
3021 simplify_gen_unary (NOT, mode,
3022 XEXP (op0, 1), mode),
3023 op1);
3024
3025 if (GET_CODE (op0) == XOR
3026 && rtx_equal_p (XEXP (op0, 1), op1)
3027 && ! side_effects_p (op1))
3028 return simplify_gen_binary (AND, mode,
3029 simplify_gen_unary (NOT, mode,
3030 XEXP (op0, 0), mode),
3031 op1);
3032
3033 /* Similarly for (~(A ^ B)) & A. */
3034 if (GET_CODE (op0) == NOT
3035 && GET_CODE (XEXP (op0, 0)) == XOR
3036 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3037 && ! side_effects_p (op1))
3038 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3039
3040 if (GET_CODE (op0) == NOT
3041 && GET_CODE (XEXP (op0, 0)) == XOR
3042 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3043 && ! side_effects_p (op1))
3044 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3045
3046 /* Convert (A | B) & A to A. */
3047 if (GET_CODE (op0) == IOR
3048 && (rtx_equal_p (XEXP (op0, 0), op1)
3049 || rtx_equal_p (XEXP (op0, 1), op1))
3050 && ! side_effects_p (XEXP (op0, 0))
3051 && ! side_effects_p (XEXP (op0, 1)))
3052 return op1;
3053
3054 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3055 ((A & N) + B) & M -> (A + B) & M
3056 Similarly if (N & M) == 0,
3057 ((A | N) + B) & M -> (A + B) & M
3058 and for - instead of + and/or ^ instead of |.
3059 Also, if (N & M) == 0, then
3060 (A +- N) & M -> A & M. */
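/* A worked instance (illustrative): with M == 0xff and N == 0xffff,
((A & 0xffff) + B) & 0xff == (A + B) & 0xff, because bit k of a sum
depends only on bits 0..k of the operands, so clearing bits of A
above the mask cannot change the masked result.  */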
3061 if (CONST_INT_P (trueop1)
3062 && HWI_COMPUTABLE_MODE_P (mode)
3063 && ~UINTVAL (trueop1)
3064 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3065 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3066 {
3067 rtx pmop[2];
3068 int which;
3069
3070 pmop[0] = XEXP (op0, 0);
3071 pmop[1] = XEXP (op0, 1);
3072
3073 if (CONST_INT_P (pmop[1])
3074 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3075 return simplify_gen_binary (AND, mode, pmop[0], op1);
3076
3077 for (which = 0; which < 2; which++)
3078 {
3079 tem = pmop[which];
3080 switch (GET_CODE (tem))
3081 {
3082 case AND:
3083 if (CONST_INT_P (XEXP (tem, 1))
3084 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3085 == UINTVAL (trueop1))
3086 pmop[which] = XEXP (tem, 0);
3087 break;
3088 case IOR:
3089 case XOR:
3090 if (CONST_INT_P (XEXP (tem, 1))
3091 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3092 pmop[which] = XEXP (tem, 0);
3093 break;
3094 default:
3095 break;
3096 }
3097 }
3098
3099 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3100 {
3101 tem = simplify_gen_binary (GET_CODE (op0), mode,
3102 pmop[0], pmop[1]);
3103 return simplify_gen_binary (code, mode, tem, op1);
3104 }
3105 }
3106
3107 /* (and X (ior (not X) Y)) -> (and X Y) */
3108 if (GET_CODE (op1) == IOR
3109 && GET_CODE (XEXP (op1, 0)) == NOT
3110 && op0 == XEXP (XEXP (op1, 0), 0))
3111 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3112
3113 /* (and (ior (not X) Y) X) -> (and X Y) */
3114 if (GET_CODE (op0) == IOR
3115 && GET_CODE (XEXP (op0, 0)) == NOT
3116 && op1 == XEXP (XEXP (op0, 0), 0))
3117 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3118
3119 tem = simplify_associative_operation (code, mode, op0, op1);
3120 if (tem)
3121 return tem;
3122 break;
3123
3124 case UDIV:
3125 /* 0/x is 0 (or x&0 if x has side-effects). */
3126 if (trueop0 == CONST0_RTX (mode))
3127 {
3128 if (side_effects_p (op1))
3129 return simplify_gen_binary (AND, mode, op1, trueop0);
3130 return trueop0;
3131 }
3132 /* x/1 is x. */
3133 if (trueop1 == CONST1_RTX (mode))
3134 {
3135 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3136 if (tem)
3137 return tem;
3138 }
3139 /* Convert divide by power of two into shift. */
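/* E.g. (udiv x (const_int 16)) becomes (lshiftrt x (const_int 4)).  */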
3140 if (CONST_INT_P (trueop1)
3141 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3142 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3143 break;
3144
3145 case DIV:
3146 /* Handle floating point and integers separately. */
3147 if (SCALAR_FLOAT_MODE_P (mode))
3148 {
3149 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3150 safe for modes with NaNs, since 0.0 / 0.0 will then be
3151 NaN rather than 0.0. Nor is it safe for modes with signed
3152 zeros, since dividing 0 by a negative number gives -0.0 */
3153 if (trueop0 == CONST0_RTX (mode)
3154 && !HONOR_NANS (mode)
3155 && !HONOR_SIGNED_ZEROS (mode)
3156 && ! side_effects_p (op1))
3157 return op0;
3158 /* x/1.0 is x. */
3159 if (trueop1 == CONST1_RTX (mode)
3160 && !HONOR_SNANS (mode))
3161 return op0;
3162
3163 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3164 && trueop1 != CONST0_RTX (mode))
3165 {
3166 REAL_VALUE_TYPE d;
3167 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3168
3169 /* x/-1.0 is -x. */
3170 if (REAL_VALUES_EQUAL (d, dconstm1)
3171 && !HONOR_SNANS (mode))
3172 return simplify_gen_unary (NEG, mode, op0, mode);
3173
3174 /* Change FP division by a constant into multiplication.
3175 Only do this with -freciprocal-math. */
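/* E.g. x / 4.0 becomes x * 0.25.  This is gated on
-freciprocal-math because 1/c is in general not exactly
representable.  */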
3176 if (flag_reciprocal_math
3177 && !REAL_VALUES_EQUAL (d, dconst0))
3178 {
3179 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3180 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3181 return simplify_gen_binary (MULT, mode, op0, tem);
3182 }
3183 }
3184 }
3185 else if (SCALAR_INT_MODE_P (mode))
3186 {
3187 /* 0/x is 0 (or x&0 if x has side-effects). */
3188 if (trueop0 == CONST0_RTX (mode)
3189 && !cfun->can_throw_non_call_exceptions)
3190 {
3191 if (side_effects_p (op1))
3192 return simplify_gen_binary (AND, mode, op1, trueop0);
3193 return trueop0;
3194 }
3195 /* x/1 is x. */
3196 if (trueop1 == CONST1_RTX (mode))
3197 {
3198 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3199 if (tem)
3200 return tem;
3201 }
3202 /* x/-1 is -x. */
3203 if (trueop1 == constm1_rtx)
3204 {
3205 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3206 if (x)
3207 return simplify_gen_unary (NEG, mode, x, mode);
3208 }
3209 }
3210 break;
3211
3212 case UMOD:
3213 /* 0%x is 0 (or x&0 if x has side-effects). */
3214 if (trueop0 == CONST0_RTX (mode))
3215 {
3216 if (side_effects_p (op1))
3217 return simplify_gen_binary (AND, mode, op1, trueop0);
3218 return trueop0;
3219 }
3220 /* x%1 is 0 (or x&0 if x has side-effects). */
3221 if (trueop1 == CONST1_RTX (mode))
3222 {
3223 if (side_effects_p (op0))
3224 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3225 return CONST0_RTX (mode);
3226 }
3227 /* Implement modulus by power of two as AND. */
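/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */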
3228 if (CONST_INT_P (trueop1)
3229 && exact_log2 (UINTVAL (trueop1)) > 0)
3230 return simplify_gen_binary (AND, mode, op0,
3231 GEN_INT (INTVAL (op1) - 1));
3232 break;
3233
3234 case MOD:
3235 /* 0%x is 0 (or x&0 if x has side-effects). */
3236 if (trueop0 == CONST0_RTX (mode))
3237 {
3238 if (side_effects_p (op1))
3239 return simplify_gen_binary (AND, mode, op1, trueop0);
3240 return trueop0;
3241 }
3242 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3243 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3244 {
3245 if (side_effects_p (op0))
3246 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3247 return CONST0_RTX (mode);
3248 }
3249 break;
3250
3251 case ROTATERT:
3252 case ROTATE:
3253 case ASHIFTRT:
3254 if (trueop1 == CONST0_RTX (mode))
3255 return op0;
3256 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3257 return op0;
3258 /* Rotating ~0 always results in ~0. */
3259 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3260 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3261 && ! side_effects_p (op1))
3262 return op0;
3263 canonicalize_shift:
3264 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3265 {
3266 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3267 if (val != INTVAL (op1))
3268 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3269 }
3270 break;
3271
3272 case ASHIFT:
3273 case SS_ASHIFT:
3274 case US_ASHIFT:
3275 if (trueop1 == CONST0_RTX (mode))
3276 return op0;
3277 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3278 return op0;
3279 goto canonicalize_shift;
3280
3281 case LSHIFTRT:
3282 if (trueop1 == CONST0_RTX (mode))
3283 return op0;
3284 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3285 return op0;
3286 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3287 if (GET_CODE (op0) == CLZ
3288 && CONST_INT_P (trueop1)
3289 && STORE_FLAG_VALUE == 1
3290 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3291 {
3292 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3293 unsigned HOST_WIDE_INT zero_val = 0;
3294
3295 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3296 && zero_val == GET_MODE_PRECISION (imode)
3297 && INTVAL (trueop1) == exact_log2 (zero_val))
3298 return simplify_gen_relational (EQ, mode, imode,
3299 XEXP (op0, 0), const0_rtx);
3300 }
3301 goto canonicalize_shift;
3302
3303 case SMIN:
3304 if (width <= HOST_BITS_PER_WIDE_INT
3305 && mode_signbit_p (mode, trueop1)
3306 && ! side_effects_p (op0))
3307 return op1;
3308 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3309 return op0;
3310 tem = simplify_associative_operation (code, mode, op0, op1);
3311 if (tem)
3312 return tem;
3313 break;
3314
3315 case SMAX:
3316 if (width <= HOST_BITS_PER_WIDE_INT
3317 && CONST_INT_P (trueop1)
3318 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3319 && ! side_effects_p (op0))
3320 return op1;
3321 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3322 return op0;
3323 tem = simplify_associative_operation (code, mode, op0, op1);
3324 if (tem)
3325 return tem;
3326 break;
3327
3328 case UMIN:
3329 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3330 return op1;
3331 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3332 return op0;
3333 tem = simplify_associative_operation (code, mode, op0, op1);
3334 if (tem)
3335 return tem;
3336 break;
3337
3338 case UMAX:
3339 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3340 return op1;
3341 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3342 return op0;
3343 tem = simplify_associative_operation (code, mode, op0, op1);
3344 if (tem)
3345 return tem;
3346 break;
3347
3348 case SS_PLUS:
3349 case US_PLUS:
3350 case SS_MINUS:
3351 case US_MINUS:
3352 case SS_MULT:
3353 case US_MULT:
3354 case SS_DIV:
3355 case US_DIV:
3356 /* ??? There are simplifications that can be done. */
3357 return 0;
3358
3359 case VEC_SELECT:
3360 if (!VECTOR_MODE_P (mode))
3361 {
3362 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3363 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3364 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3365 gcc_assert (XVECLEN (trueop1, 0) == 1);
3366 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3367
3368 if (GET_CODE (trueop0) == CONST_VECTOR)
3369 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3370 (trueop1, 0, 0)));
3371
3372 /* Extract a scalar element from a nested VEC_SELECT expression
3373 (with optional nested VEC_CONCAT expression). Some targets
3374 (i386) extract a scalar element from a vector using a chain of
3375 nested VEC_SELECT expressions. When the input operand is a
3376 memory operand, this operation can be simplified to a simple
3377 scalar load from an offset memory address. */
3378 if (GET_CODE (trueop0) == VEC_SELECT)
3379 {
3380 rtx op0 = XEXP (trueop0, 0);
3381 rtx op1 = XEXP (trueop0, 1);
3382
3383 enum machine_mode opmode = GET_MODE (op0);
3384 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3385 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3386
3387 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3388 int elem;
3389
3390 rtvec vec;
3391 rtx tmp_op, tmp;
3392
3393 gcc_assert (GET_CODE (op1) == PARALLEL);
3394 gcc_assert (i < n_elts);
3395
3396 /* Select the element pointed to by the nested selector. */
3397 elem = INTVAL (XVECEXP (op1, 0, i));
3398
3399 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3400 if (GET_CODE (op0) == VEC_CONCAT)
3401 {
3402 rtx op00 = XEXP (op0, 0);
3403 rtx op01 = XEXP (op0, 1);
3404
3405 enum machine_mode mode00, mode01;
3406 int n_elts00, n_elts01;
3407
3408 mode00 = GET_MODE (op00);
3409 mode01 = GET_MODE (op01);
3410
3411 /* Find out number of elements of each operand. */
3412 if (VECTOR_MODE_P (mode00))
3413 {
3414 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3415 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3416 }
3417 else
3418 n_elts00 = 1;
3419
3420 if (VECTOR_MODE_P (mode01))
3421 {
3422 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3423 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3424 }
3425 else
3426 n_elts01 = 1;
3427
3428 gcc_assert (n_elts == n_elts00 + n_elts01);
3429
3430 /* Select correct operand of VEC_CONCAT
3431 and adjust selector. */
3432 if (elem < n_elts01)
3433 tmp_op = op00;
3434 else
3435 {
3436 tmp_op = op01;
3437 elem -= n_elts00;
3438 }
3439 }
3440 else
3441 tmp_op = op0;
3442
3443 vec = rtvec_alloc (1);
3444 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3445
3446 tmp = gen_rtx_fmt_ee (code, mode,
3447 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3448 return tmp;
3449 }
3450 if (GET_CODE (trueop0) == VEC_DUPLICATE
3451 && GET_MODE (XEXP (trueop0, 0)) == mode)
3452 return XEXP (trueop0, 0);
3453 }
3454 else
3455 {
3456 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3457 gcc_assert (GET_MODE_INNER (mode)
3458 == GET_MODE_INNER (GET_MODE (trueop0)));
3459 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3460
3461 if (GET_CODE (trueop0) == CONST_VECTOR)
3462 {
3463 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3464 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3465 rtvec v = rtvec_alloc (n_elts);
3466 unsigned int i;
3467
3468 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3469 for (i = 0; i < n_elts; i++)
3470 {
3471 rtx x = XVECEXP (trueop1, 0, i);
3472
3473 gcc_assert (CONST_INT_P (x));
3474 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3475 INTVAL (x));
3476 }
3477
3478 return gen_rtx_CONST_VECTOR (mode, v);
3479 }
3480
3481 /* Recognize the identity. */
3482 if (GET_MODE (trueop0) == mode)
3483 {
3484 bool maybe_ident = true;
3485 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3486 {
3487 rtx j = XVECEXP (trueop1, 0, i);
3488 if (!CONST_INT_P (j) || INTVAL (j) != i)
3489 {
3490 maybe_ident = false;
3491 break;
3492 }
3493 }
3494 if (maybe_ident)
3495 return trueop0;
3496 }
3497
3498 /* If we build {a,b} then permute it, build the result directly. */
3499 if (XVECLEN (trueop1, 0) == 2
3500 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3501 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3502 && GET_CODE (trueop0) == VEC_CONCAT
3503 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3504 && GET_MODE (XEXP (trueop0, 0)) == mode
3505 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3506 && GET_MODE (XEXP (trueop0, 1)) == mode)
3507 {
3508 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3509 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3510 rtx subop0, subop1;
3511
3512 gcc_assert (i0 < 4 && i1 < 4);
3513 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3514 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3515
3516 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3517 }
3518
3519 if (XVECLEN (trueop1, 0) == 2
3520 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3521 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3522 && GET_CODE (trueop0) == VEC_CONCAT
3523 && GET_MODE (trueop0) == mode)
3524 {
3525 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3526 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3527 rtx subop0, subop1;
3528
3529 gcc_assert (i0 < 2 && i1 < 2);
3530 subop0 = XEXP (trueop0, i0);
3531 subop1 = XEXP (trueop0, i1);
3532
3533 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3534 }
3535 }
3536
3537 if (XVECLEN (trueop1, 0) == 1
3538 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3539 && GET_CODE (trueop0) == VEC_CONCAT)
3540 {
3541 rtx vec = trueop0;
3542 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3543
3544 /* Try to find the element in the VEC_CONCAT. */
3545 while (GET_MODE (vec) != mode
3546 && GET_CODE (vec) == VEC_CONCAT)
3547 {
3548 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3549 if (offset < vec_size)
3550 vec = XEXP (vec, 0);
3551 else
3552 {
3553 offset -= vec_size;
3554 vec = XEXP (vec, 1);
3555 }
3556 vec = avoid_constant_pool_reference (vec);
3557 }
3558
3559 if (GET_MODE (vec) == mode)
3560 return vec;
3561 }
3562
3563 return 0;
3564 case VEC_CONCAT:
3565 {
3566 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3567 ? GET_MODE (trueop0)
3568 : GET_MODE_INNER (mode));
3569 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3570 ? GET_MODE (trueop1)
3571 : GET_MODE_INNER (mode));
3572
3573 gcc_assert (VECTOR_MODE_P (mode));
3574 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3575 == GET_MODE_SIZE (mode));
3576
3577 if (VECTOR_MODE_P (op0_mode))
3578 gcc_assert (GET_MODE_INNER (mode)
3579 == GET_MODE_INNER (op0_mode));
3580 else
3581 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3582
3583 if (VECTOR_MODE_P (op1_mode))
3584 gcc_assert (GET_MODE_INNER (mode)
3585 == GET_MODE_INNER (op1_mode));
3586 else
3587 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3588
3589 if ((GET_CODE (trueop0) == CONST_VECTOR
3590 || CONST_SCALAR_INT_P (trueop0)
3591 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3592 && (GET_CODE (trueop1) == CONST_VECTOR
3593 || CONST_SCALAR_INT_P (trueop1)
3594 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3595 {
3596 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3597 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3598 rtvec v = rtvec_alloc (n_elts);
3599 unsigned int i;
3600 unsigned in_n_elts = 1;
3601
3602 if (VECTOR_MODE_P (op0_mode))
3603 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3604 for (i = 0; i < n_elts; i++)
3605 {
3606 if (i < in_n_elts)
3607 {
3608 if (!VECTOR_MODE_P (op0_mode))
3609 RTVEC_ELT (v, i) = trueop0;
3610 else
3611 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3612 }
3613 else
3614 {
3615 if (!VECTOR_MODE_P (op1_mode))
3616 RTVEC_ELT (v, i) = trueop1;
3617 else
3618 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3619 i - in_n_elts);
3620 }
3621 }
3622
3623 return gen_rtx_CONST_VECTOR (mode, v);
3624 }
3625
3626 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3627 Restrict the transformation to avoid generating a VEC_SELECT with a
3628 mode unrelated to its operand. */
3629 if (GET_CODE (trueop0) == VEC_SELECT
3630 && GET_CODE (trueop1) == VEC_SELECT
3631 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3632 && GET_MODE (XEXP (trueop0, 0)) == mode)
3633 {
3634 rtx par0 = XEXP (trueop0, 1);
3635 rtx par1 = XEXP (trueop1, 1);
3636 int len0 = XVECLEN (par0, 0);
3637 int len1 = XVECLEN (par1, 0);
3638 rtvec vec = rtvec_alloc (len0 + len1);
3639 for (int i = 0; i < len0; i++)
3640 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3641 for (int i = 0; i < len1; i++)
3642 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3643 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3644 gen_rtx_PARALLEL (VOIDmode, vec));
3645 }
3646 }
3647 return 0;
3648
3649 default:
3650 gcc_unreachable ();
3651 }
3652
3653 return 0;
3654 }
3655
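/* Try to fold the binary operation CODE applied to the constant operands
   OP0 and OP1 in mode MODE. Return the folded rtx, or NULL_RTX if the
   operands cannot be folded at compile time. */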
3656 rtx
3657 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3658 rtx op0, rtx op1)
3659 {
3660 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3661 HOST_WIDE_INT val;
3662 unsigned int width = GET_MODE_PRECISION (mode);
3663
3664 if (VECTOR_MODE_P (mode)
3665 && code != VEC_CONCAT
3666 && GET_CODE (op0) == CONST_VECTOR
3667 && GET_CODE (op1) == CONST_VECTOR)
3668 {
3669 unsigned n_elts = GET_MODE_NUNITS (mode);
3670 enum machine_mode op0mode = GET_MODE (op0);
3671 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3672 enum machine_mode op1mode = GET_MODE (op1);
3673 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3674 rtvec v = rtvec_alloc (n_elts);
3675 unsigned int i;
3676
3677 gcc_assert (op0_n_elts == n_elts);
3678 gcc_assert (op1_n_elts == n_elts);
3679 for (i = 0; i < n_elts; i++)
3680 {
3681 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3682 CONST_VECTOR_ELT (op0, i),
3683 CONST_VECTOR_ELT (op1, i));
3684 if (!x)
3685 return 0;
3686 RTVEC_ELT (v, i) = x;
3687 }
3688
3689 return gen_rtx_CONST_VECTOR (mode, v);
3690 }
3691
3692 if (VECTOR_MODE_P (mode)
3693 && code == VEC_CONCAT
3694 && (CONST_SCALAR_INT_P (op0)
3695 || GET_CODE (op0) == CONST_FIXED
3696 || CONST_DOUBLE_AS_FLOAT_P (op0))
3697 && (CONST_SCALAR_INT_P (op1)
3698 || CONST_DOUBLE_AS_FLOAT_P (op1)
3699 || GET_CODE (op1) == CONST_FIXED))
3700 {
3701 unsigned n_elts = GET_MODE_NUNITS (mode);
3702 rtvec v = rtvec_alloc (n_elts);
3703
3704 gcc_assert (n_elts >= 2);
3705 if (n_elts == 2)
3706 {
3707 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3708 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3709
3710 RTVEC_ELT (v, 0) = op0;
3711 RTVEC_ELT (v, 1) = op1;
3712 }
3713 else
3714 {
3715 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3716 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3717 unsigned i;
3718
3719 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3720 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3721 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3722
3723 for (i = 0; i < op0_n_elts; ++i)
3724 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3725 for (i = 0; i < op1_n_elts; ++i)
3726 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3727 }
3728
3729 return gen_rtx_CONST_VECTOR (mode, v);
3730 }
3731
3732 if (SCALAR_FLOAT_MODE_P (mode)
3733 && CONST_DOUBLE_AS_FLOAT_P (op0)
3734 && CONST_DOUBLE_AS_FLOAT_P (op1)
3735 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3736 {
3737 if (code == AND
3738 || code == IOR
3739 || code == XOR)
3740 {
3741 long tmp0[4];
3742 long tmp1[4];
3743 REAL_VALUE_TYPE r;
3744 int i;
3745
3746 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3747 GET_MODE (op0));
3748 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3749 GET_MODE (op1));
3750 for (i = 0; i < 4; i++)
3751 {
3752 switch (code)
3753 {
3754 case AND:
3755 tmp0[i] &= tmp1[i];
3756 break;
3757 case IOR:
3758 tmp0[i] |= tmp1[i];
3759 break;
3760 case XOR:
3761 tmp0[i] ^= tmp1[i];
3762 break;
3763 default:
3764 gcc_unreachable ();
3765 }
3766 }
3767 real_from_target (&r, tmp0, mode);
3768 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3769 }
3770 else
3771 {
3772 REAL_VALUE_TYPE f0, f1, value, result;
3773 bool inexact;
3774
3775 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3776 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3777 real_convert (&f0, mode, &f0);
3778 real_convert (&f1, mode, &f1);
3779
3780 if (HONOR_SNANS (mode)
3781 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3782 return 0;
3783
3784 if (code == DIV
3785 && REAL_VALUES_EQUAL (f1, dconst0)
3786 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3787 return 0;
3788
3789 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3790 && flag_trapping_math
3791 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3792 {
3793 int s0 = REAL_VALUE_NEGATIVE (f0);
3794 int s1 = REAL_VALUE_NEGATIVE (f1);
3795
3796 switch (code)
3797 {
3798 case PLUS:
3799 /* Inf + -Inf = NaN plus exception. */
3800 if (s0 != s1)
3801 return 0;
3802 break;
3803 case MINUS:
3804 /* Inf - Inf = NaN plus exception. */
3805 if (s0 == s1)
3806 return 0;
3807 break;
3808 case DIV:
3809 /* Inf / Inf = NaN plus exception. */
3810 return 0;
3811 default:
3812 break;
3813 }
3814 }
3815
3816 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3817 && flag_trapping_math
3818 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3819 || (REAL_VALUE_ISINF (f1)
3820 && REAL_VALUES_EQUAL (f0, dconst0))))
3821 /* Inf * 0 = NaN plus exception. */
3822 return 0;
3823
3824 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3825 &f0, &f1);
3826 real_convert (&result, mode, &value);
3827
3828 /* Don't constant fold this floating point operation if
3829 the result has overflowed and flag_trapping_math is set. */
3830
3831 if (flag_trapping_math
3832 && MODE_HAS_INFINITIES (mode)
3833 && REAL_VALUE_ISINF (result)
3834 && !REAL_VALUE_ISINF (f0)
3835 && !REAL_VALUE_ISINF (f1))
3836 /* Overflow plus exception. */
3837 return 0;
3838
3839 /* Don't constant fold this floating point operation if the
3840 result may depend upon the run-time rounding mode and
3841 flag_rounding_math is set, or if GCC's software emulation
3842 is unable to accurately represent the result. */
3843
3844 if ((flag_rounding_math
3845 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3846 && (inexact || !real_identical (&result, &value)))
3847 return NULL_RTX;
3848
3849 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3850 }
3851 }
3852
3853 /* We can fold some multi-word operations, i.e. those whose precision is HOST_BITS_PER_DOUBLE_INT. */
3854 if (GET_MODE_CLASS (mode) == MODE_INT
3855 && width == HOST_BITS_PER_DOUBLE_INT
3856 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3857 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3858 {
3859 double_int o0, o1, res, tmp;
3860 bool overflow;
3861
3862 o0 = rtx_to_double_int (op0);
3863 o1 = rtx_to_double_int (op1);
3864
3865 switch (code)
3866 {
3867 case MINUS:
3868 /* A - B == A + (-B). */
3869 o1 = -o1;
3870
3871 /* Fall through.... */
3872
3873 case PLUS:
3874 res = o0 + o1;
3875 break;
3876
3877 case MULT:
3878 res = o0 * o1;
3879 break;
3880
3881 case DIV:
3882 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3883 &tmp, &overflow);
3884 if (overflow)
3885 return 0;
3886 break;
3887
3888 case MOD:
3889 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3890 &res, &overflow);
3891 if (overflow)
3892 return 0;
3893 break;
3894
3895 case UDIV:
3896 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3897 &tmp, &overflow);
3898 if (overflow)
3899 return 0;
3900 break;
3901
3902 case UMOD:
3903 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3904 &res, &overflow);
3905 if (overflow)
3906 return 0;
3907 break;
3908
3909 case AND:
3910 res = o0 & o1;
3911 break;
3912
3913 case IOR:
3914 res = o0 | o1;
3915 break;
3916
3917 case XOR:
3918 res = o0 ^ o1;
3919 break;
3920
3921 case SMIN:
3922 res = o0.smin (o1);
3923 break;
3924
3925 case SMAX:
3926 res = o0.smax (o1);
3927 break;
3928
3929 case UMIN:
3930 res = o0.umin (o1);
3931 break;
3932
3933 case UMAX:
3934 res = o0.umax (o1);
3935 break;
3936
3937 case LSHIFTRT: case ASHIFTRT:
3938 case ASHIFT:
3939 case ROTATE: case ROTATERT:
3940 {
3941 unsigned HOST_WIDE_INT cnt;
3942
3943 if (SHIFT_COUNT_TRUNCATED)
3944 {
3945 o1.high = 0;
3946 o1.low &= GET_MODE_PRECISION (mode) - 1;
3947 }
3948
3949 if (!o1.fits_uhwi ()
3950 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3951 return 0;
3952
3953 cnt = o1.to_uhwi ();
3954 unsigned short prec = GET_MODE_PRECISION (mode);
3955
3956 if (code == LSHIFTRT || code == ASHIFTRT)
3957 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3958 else if (code == ASHIFT)
3959 res = o0.alshift (cnt, prec);
3960 else if (code == ROTATE)
3961 res = o0.lrotate (cnt, prec);
3962 else /* code == ROTATERT */
3963 res = o0.rrotate (cnt, prec);
3964 }
3965 break;
3966
3967 default:
3968 return 0;
3969 }
3970
3971 return immed_double_int_const (res, mode);
3972 }
3973
3974 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3975 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3976 {
3977 /* Get the integer argument values in two forms:
3978 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3979
3980 arg0 = INTVAL (op0);
3981 arg1 = INTVAL (op1);
3982
3983 if (width < HOST_BITS_PER_WIDE_INT)
3984 {
3985 arg0 &= GET_MODE_MASK (mode);
3986 arg1 &= GET_MODE_MASK (mode);
3987
3988 arg0s = arg0;
3989 if (val_signbit_known_set_p (mode, arg0s))
3990 arg0s |= ~GET_MODE_MASK (mode);
3991
3992 arg1s = arg1;
3993 if (val_signbit_known_set_p (mode, arg1s))
3994 arg1s |= ~GET_MODE_MASK (mode);
3995 }
3996 else
3997 {
3998 arg0s = arg0;
3999 arg1s = arg1;
4000 }
4001
4002 /* Compute the value of the arithmetic. */
4003
4004 switch (code)
4005 {
4006 case PLUS:
4007 val = arg0s + arg1s;
4008 break;
4009
4010 case MINUS:
4011 val = arg0s - arg1s;
4012 break;
4013
4014 case MULT:
4015 val = arg0s * arg1s;
4016 break;
4017
4018 case DIV:
4019 if (arg1s == 0
4020 || ((unsigned HOST_WIDE_INT) arg0s
4021 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4022 && arg1s == -1))
4023 return 0;
4024 val = arg0s / arg1s;
4025 break;
4026
4027 case MOD:
4028 if (arg1s == 0
4029 || ((unsigned HOST_WIDE_INT) arg0s
4030 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4031 && arg1s == -1))
4032 return 0;
4033 val = arg0s % arg1s;
4034 break;
4035
4036 case UDIV:
4037 if (arg1 == 0
4038 || ((unsigned HOST_WIDE_INT) arg0s
4039 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4040 && arg1s == -1))
4041 return 0;
4042 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4043 break;
4044
4045 case UMOD:
4046 if (arg1 == 0
4047 || ((unsigned HOST_WIDE_INT) arg0s
4048 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4049 && arg1s == -1))
4050 return 0;
4051 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4052 break;
4053
4054 case AND:
4055 val = arg0 & arg1;
4056 break;
4057
4058 case IOR:
4059 val = arg0 | arg1;
4060 break;
4061
4062 case XOR:
4063 val = arg0 ^ arg1;
4064 break;
4065
4066 case LSHIFTRT:
4067 case ASHIFT:
4068 case ASHIFTRT:
4069 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4070 the value is in range. We can't return any old value for
4071 out-of-range arguments because either the middle-end (via
4072 shift_truncation_mask) or the back-end might be relying on
4073 target-specific knowledge. Nor can we rely on
4074 shift_truncation_mask, since the shift might not be part of an
4075 ashlM3, lshrM3 or ashrM3 instruction. */
4076 if (SHIFT_COUNT_TRUNCATED)
4077 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4078 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4079 return 0;
4080
4081 val = (code == ASHIFT
4082 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4083 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4084
4085 /* Sign-extend the result for arithmetic right shifts. */
4086 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4087 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4088 break;
4089
4090 case ROTATERT:
4091 if (arg1 < 0)
4092 return 0;
4093
4094 arg1 %= width;
4095 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4096 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4097 break;
4098
4099 case ROTATE:
4100 if (arg1 < 0)
4101 return 0;
4102
4103 arg1 %= width;
4104 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4105 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4106 break;
4107
4108 case COMPARE:
4109 /* Do nothing here. */
4110 return 0;
4111
4112 case SMIN:
4113 val = arg0s <= arg1s ? arg0s : arg1s;
4114 break;
4115
4116 case UMIN:
4117 val = ((unsigned HOST_WIDE_INT) arg0
4118 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4119 break;
4120
4121 case SMAX:
4122 val = arg0s > arg1s ? arg0s : arg1s;
4123 break;
4124
4125 case UMAX:
4126 val = ((unsigned HOST_WIDE_INT) arg0
4127 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4128 break;
4129
4130 case SS_PLUS:
4131 case US_PLUS:
4132 case SS_MINUS:
4133 case US_MINUS:
4134 case SS_MULT:
4135 case US_MULT:
4136 case SS_DIV:
4137 case US_DIV:
4138 case SS_ASHIFT:
4139 case US_ASHIFT:
4140 /* ??? There are simplifications that can be done. */
4141 return 0;
4142
4143 default:
4144 gcc_unreachable ();
4145 }
4146
4147 return gen_int_mode (val, mode);
4148 }
4149
4150 return NULL_RTX;
4151 }
4152
4153
4154 \f
4155 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4156 PLUS or MINUS.
4157
4158 Rather than test for specific cases, we do this by a brute-force method
4159 and do all possible simplifications until no more changes occur. Then
4160 we rebuild the operation. */
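/* For example, simplifying (A - B) + (B + C) expands the operands into
   the terms A, -B, B and C; the pairwise loop then cancels B against -B,
   and the remaining terms are rebuilt as (plus A C). */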
4161
4162 struct simplify_plus_minus_op_data
4163 {
4164 rtx op;
4165 short neg;
4166 };
4167
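/* Ordering predicate for the insertion sort in simplify_plus_minus:
   return true if X should be placed after Y. Operands with higher
   commutative-operand precedence sort first, and REGs of equal
   precedence are ordered by register number so that equal registers
   end up adjacent and can be combined. */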
4168 static bool
4169 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4170 {
4171 int result;
4172
4173 result = (commutative_operand_precedence (y)
4174 - commutative_operand_precedence (x));
4175 if (result)
4176 return result > 0;
4177
4178 /* Group together equal REGs to do more simplification. */
4179 if (REG_P (x) && REG_P (y))
4180 return REGNO (x) > REGNO (y);
4181 else
4182 return false;
4183 }
4184
4185 static rtx
4186 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4187 rtx op1)
4188 {
4189 struct simplify_plus_minus_op_data ops[8];
4190 rtx result, tem;
4191 int n_ops = 2, input_ops = 2;
4192 int changed, n_constants = 0, canonicalized = 0;
4193 int i, j;
4194
4195 memset (ops, 0, sizeof ops);
4196
4197 /* Set up the two operands and then expand them until nothing has been
4198 changed. If we run out of room in our array, give up; this should
4199 almost never happen. */
4200
4201 ops[0].op = op0;
4202 ops[0].neg = 0;
4203 ops[1].op = op1;
4204 ops[1].neg = (code == MINUS);
4205
4206 do
4207 {
4208 changed = 0;
4209
4210 for (i = 0; i < n_ops; i++)
4211 {
4212 rtx this_op = ops[i].op;
4213 int this_neg = ops[i].neg;
4214 enum rtx_code this_code = GET_CODE (this_op);
4215
4216 switch (this_code)
4217 {
4218 case PLUS:
4219 case MINUS:
4220 if (n_ops == 7)
4221 return NULL_RTX;
4222
4223 ops[n_ops].op = XEXP (this_op, 1);
4224 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4225 n_ops++;
4226
4227 ops[i].op = XEXP (this_op, 0);
4228 input_ops++;
4229 changed = 1;
4230 canonicalized |= this_neg;
4231 break;
4232
4233 case NEG:
4234 ops[i].op = XEXP (this_op, 0);
4235 ops[i].neg = ! this_neg;
4236 changed = 1;
4237 canonicalized = 1;
4238 break;
4239
4240 case CONST:
4241 if (n_ops < 7
4242 && GET_CODE (XEXP (this_op, 0)) == PLUS
4243 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4244 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4245 {
4246 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4247 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4248 ops[n_ops].neg = this_neg;
4249 n_ops++;
4250 changed = 1;
4251 canonicalized = 1;
4252 }
4253 break;
4254
4255 case NOT:
4256 /* ~a -> (-a - 1) */
4257 if (n_ops != 7)
4258 {
4259 ops[n_ops].op = CONSTM1_RTX (mode);
4260 ops[n_ops++].neg = this_neg;
4261 ops[i].op = XEXP (this_op, 0);
4262 ops[i].neg = !this_neg;
4263 changed = 1;
4264 canonicalized = 1;
4265 }
4266 break;
4267
4268 case CONST_INT:
4269 n_constants++;
4270 if (this_neg)
4271 {
4272 ops[i].op = neg_const_int (mode, this_op);
4273 ops[i].neg = 0;
4274 changed = 1;
4275 canonicalized = 1;
4276 }
4277 break;
4278
4279 default:
4280 break;
4281 }
4282 }
4283 }
4284 while (changed);
4285
4286 if (n_constants > 1)
4287 canonicalized = 1;
4288
4289 gcc_assert (n_ops >= 2);
4290
4291 /* If we only have two operands, we can avoid the loops. */
4292 if (n_ops == 2)
4293 {
4294 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4295 rtx lhs, rhs;
4296
4297 /* Get the two operands. Be careful with the order, especially for
4298 the cases where code == MINUS. */
4299 if (ops[0].neg && ops[1].neg)
4300 {
4301 lhs = gen_rtx_NEG (mode, ops[0].op);
4302 rhs = ops[1].op;
4303 }
4304 else if (ops[0].neg)
4305 {
4306 lhs = ops[1].op;
4307 rhs = ops[0].op;
4308 }
4309 else
4310 {
4311 lhs = ops[0].op;
4312 rhs = ops[1].op;
4313 }
4314
4315 return simplify_const_binary_operation (code, mode, lhs, rhs);
4316 }
4317
4318 /* Now simplify each pair of operands until nothing changes. */
4319 do
4320 {
4321 /* Insertion sort is good enough for an eight-element array. */
4322 for (i = 1; i < n_ops; i++)
4323 {
4324 struct simplify_plus_minus_op_data save;
4325 j = i - 1;
4326 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4327 continue;
4328
4329 canonicalized = 1;
4330 save = ops[i];
4331 do
4332 ops[j + 1] = ops[j];
4333 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4334 ops[j + 1] = save;
4335 }
4336
4337 changed = 0;
4338 for (i = n_ops - 1; i > 0; i--)
4339 for (j = i - 1; j >= 0; j--)
4340 {
4341 rtx lhs = ops[j].op, rhs = ops[i].op;
4342 int lneg = ops[j].neg, rneg = ops[i].neg;
4343
4344 if (lhs != 0 && rhs != 0)
4345 {
4346 enum rtx_code ncode = PLUS;
4347
4348 if (lneg != rneg)
4349 {
4350 ncode = MINUS;
4351 if (lneg)
4352 tem = lhs, lhs = rhs, rhs = tem;
4353 }
4354 else if (swap_commutative_operands_p (lhs, rhs))
4355 tem = lhs, lhs = rhs, rhs = tem;
4356
4357 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4358 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4359 {
4360 rtx tem_lhs, tem_rhs;
4361
4362 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4363 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4364 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4365
4366 if (tem && !CONSTANT_P (tem))
4367 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4368 }
4369 else
4370 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4371
4372 /* Reject "simplifications" that just wrap the two
4373 arguments in a CONST. Failure to do so can result
4374 in infinite recursion with simplify_binary_operation
4375 when it calls us to simplify CONST operations. */
4376 if (tem
4377 && ! (GET_CODE (tem) == CONST
4378 && GET_CODE (XEXP (tem, 0)) == ncode
4379 && XEXP (XEXP (tem, 0), 0) == lhs
4380 && XEXP (XEXP (tem, 0), 1) == rhs))
4381 {
4382 lneg &= rneg;
4383 if (GET_CODE (tem) == NEG)
4384 tem = XEXP (tem, 0), lneg = !lneg;
4385 if (CONST_INT_P (tem) && lneg)
4386 tem = neg_const_int (mode, tem), lneg = 0;
4387
4388 ops[i].op = tem;
4389 ops[i].neg = lneg;
4390 ops[j].op = NULL_RTX;
4391 changed = 1;
4392 canonicalized = 1;
4393 }
4394 }
4395 }
4396
4397 /* If nothing changed, fail. */
4398 if (!canonicalized)
4399 return NULL_RTX;
4400
4401 /* Pack all the operands to the lower-numbered entries. */
4402 for (i = 0, j = 0; j < n_ops; j++)
4403 if (ops[j].op)
4404 {
4405 ops[i] = ops[j];
4406 i++;
4407 }
4408 n_ops = i;
4409 }
4410 while (changed);
4411
4412 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4413 if (n_ops == 2
4414 && CONST_INT_P (ops[1].op)
4415 && CONSTANT_P (ops[0].op)
4416 && ops[0].neg)
4417 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4418
4419 /* We suppressed creation of trivial CONST expressions in the
4420 combination loop to avoid recursion. Create one manually now.
4421 The combination loop should have ensured that there is exactly
4422 one CONST_INT, and the sort will have ensured that it is last
4423 in the array and that any other constant will be next-to-last. */
4424
4425 if (n_ops > 1
4426 && CONST_INT_P (ops[n_ops - 1].op)
4427 && CONSTANT_P (ops[n_ops - 2].op))
4428 {
4429 rtx value = ops[n_ops - 1].op;
4430 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4431 value = neg_const_int (mode, value);
4432 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4433 INTVAL (value));
4434 n_ops--;
4435 }
4436
4437 /* Put a non-negated operand first, if possible. */
4438
4439 for (i = 0; i < n_ops && ops[i].neg; i++)
4440 continue;
4441 if (i == n_ops)
4442 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4443 else if (i != 0)
4444 {
4445 tem = ops[0].op;
4446 ops[0] = ops[i];
4447 ops[i].op = tem;
4448 ops[i].neg = 1;
4449 }
4450
4451 /* Now make the result by performing the requested operations. */
4452 result = ops[0].op;
4453 for (i = 1; i < n_ops; i++)
4454 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4455 mode, result, ops[i].op);
4456
4457 return result;
4458 }
4459
4460 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4461 static bool
4462 plus_minus_operand_p (const_rtx x)
4463 {
4464 return GET_CODE (x) == PLUS
4465 || GET_CODE (x) == MINUS
4466 || (GET_CODE (x) == CONST
4467 && GET_CODE (XEXP (x, 0)) == PLUS
4468 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4469 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4470 }
4471
4472 /* Like simplify_binary_operation except used for relational operators.
4473 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4474 not both be VOIDmode.
4475
4476 CMP_MODE specifies the mode in which the comparison is done, so it is
4477 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4478 the operands or, if both are VOIDmode, the operands are compared in
4479 "infinite precision". */
4480 rtx
4481 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4482 enum machine_mode cmp_mode, rtx op0, rtx op1)
4483 {
4484 rtx tem, trueop0, trueop1;
4485
4486 if (cmp_mode == VOIDmode)
4487 cmp_mode = GET_MODE (op0);
4488 if (cmp_mode == VOIDmode)
4489 cmp_mode = GET_MODE (op1);
4490
4491 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4492 if (tem)
4493 {
4494 if (SCALAR_FLOAT_MODE_P (mode))
4495 {
4496 if (tem == const0_rtx)
4497 return CONST0_RTX (mode);
4498 #ifdef FLOAT_STORE_FLAG_VALUE
4499 {
4500 REAL_VALUE_TYPE val;
4501 val = FLOAT_STORE_FLAG_VALUE (mode);
4502 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4503 }
4504 #else
4505 return NULL_RTX;
4506 #endif
4507 }
4508 if (VECTOR_MODE_P (mode))
4509 {
4510 if (tem == const0_rtx)
4511 return CONST0_RTX (mode);
4512 #ifdef VECTOR_STORE_FLAG_VALUE
4513 {
4514 int i, units;
4515 rtvec v;
4516
4517 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4518 if (val == NULL_RTX)
4519 return NULL_RTX;
4520 if (val == const1_rtx)
4521 return CONST1_RTX (mode);
4522
4523 units = GET_MODE_NUNITS (mode);
4524 v = rtvec_alloc (units);
4525 for (i = 0; i < units; i++)
4526 RTVEC_ELT (v, i) = val;
4527 return gen_rtx_raw_CONST_VECTOR (mode, v);
4528 }
4529 #else
4530 return NULL_RTX;
4531 #endif
4532 }
4533
4534 return tem;
4535 }
4536
4537 /* For the following tests, ensure const0_rtx is op1. */
4538 if (swap_commutative_operands_p (op0, op1)
4539 || (op0 == const0_rtx && op1 != const0_rtx))
4540 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4541
4542 /* If op0 is a compare, extract the comparison arguments from it. */
4543 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4544 return simplify_gen_relational (code, mode, VOIDmode,
4545 XEXP (op0, 0), XEXP (op0, 1));
4546
4547 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4548 || CC0_P (op0))
4549 return NULL_RTX;
4550
4551 trueop0 = avoid_constant_pool_reference (op0);
4552 trueop1 = avoid_constant_pool_reference (op1);
4553 return simplify_relational_operation_1 (code, mode, cmp_mode,
4554 trueop0, trueop1);
4555 }
4556
4557 /* This part of simplify_relational_operation is only used when CMP_MODE
4558 is not in class MODE_CC (i.e. it is a real comparison).
4559
4560 MODE is the mode of the result, while CMP_MODE specifies the mode
4561 in which the comparison is done, so it is the mode of the operands. */
4562
4563 static rtx
4564 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4565 enum machine_mode cmp_mode, rtx op0, rtx op1)
4566 {
4567 enum rtx_code op0code = GET_CODE (op0);
4568
4569 if (op1 == const0_rtx && COMPARISON_P (op0))
4570 {
4571 /* If op0 is a comparison, extract the comparison arguments
4572 from it. */
4573 if (code == NE)
4574 {
4575 if (GET_MODE (op0) == mode)
4576 return simplify_rtx (op0);
4577 else
4578 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4579 XEXP (op0, 0), XEXP (op0, 1));
4580 }
4581 else if (code == EQ)
4582 {
4583 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4584 if (new_code != UNKNOWN)
4585 return simplify_gen_relational (new_code, mode, VOIDmode,
4586 XEXP (op0, 0), XEXP (op0, 1));
4587 }
4588 }
4589
4590 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4591 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4592 if ((code == LTU || code == GEU)
4593 && GET_CODE (op0) == PLUS
4594 && CONST_INT_P (XEXP (op0, 1))
4595 && (rtx_equal_p (op1, XEXP (op0, 0))
4596 || rtx_equal_p (op1, XEXP (op0, 1)))
4597 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4598 && XEXP (op0, 1) != const0_rtx)
4599 {
4600 rtx new_cmp
4601 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4602 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4603 cmp_mode, XEXP (op0, 0), new_cmp);
4604 }
4605
4606 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4607 if ((code == LTU || code == GEU)
4608 && GET_CODE (op0) == PLUS
4609 && rtx_equal_p (op1, XEXP (op0, 1))
4610 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4611 && !rtx_equal_p (op1, XEXP (op0, 0)))
4612 return simplify_gen_relational (code, mode, cmp_mode, op0,
4613 copy_rtx (XEXP (op0, 0)));
4614
4615 if (op1 == const0_rtx)
4616 {
4617 /* Canonicalize (GTU x 0) as (NE x 0). */
4618 if (code == GTU)
4619 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4620 /* Canonicalize (LEU x 0) as (EQ x 0). */
4621 if (code == LEU)
4622 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4623 }
4624 else if (op1 == const1_rtx)
4625 {
4626 switch (code)
4627 {
4628 case GE:
4629 /* Canonicalize (GE x 1) as (GT x 0). */
4630 return simplify_gen_relational (GT, mode, cmp_mode,
4631 op0, const0_rtx);
4632 case GEU:
4633 /* Canonicalize (GEU x 1) as (NE x 0). */
4634 return simplify_gen_relational (NE, mode, cmp_mode,
4635 op0, const0_rtx);
4636 case LT:
4637 /* Canonicalize (LT x 1) as (LE x 0). */
4638 return simplify_gen_relational (LE, mode, cmp_mode,
4639 op0, const0_rtx);
4640 case LTU:
4641 /* Canonicalize (LTU x 1) as (EQ x 0). */
4642 return simplify_gen_relational (EQ, mode, cmp_mode,
4643 op0, const0_rtx);
4644 default:
4645 break;
4646 }
4647 }
4648 else if (op1 == constm1_rtx)
4649 {
4650 /* Canonicalize (LE x -1) as (LT x 0). */
4651 if (code == LE)
4652 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4653 /* Canonicalize (GT x -1) as (GE x 0). */
4654 if (code == GT)
4655 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4656 }
4657
4658 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)). */
4659 if ((code == EQ || code == NE)
4660 && (op0code == PLUS || op0code == MINUS)
4661 && CONSTANT_P (op1)
4662 && CONSTANT_P (XEXP (op0, 1))
4663 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4664 {
4665 rtx x = XEXP (op0, 0);
4666 rtx c = XEXP (op0, 1);
4667 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4668 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4669
4670 /* Detect an infinitely recursive condition, where we oscillate at this
4671 simplification case between:
4672 A + B == C <---> C - B == A,
4673 where A, B, and C are all constants with non-simplifiable expressions,
4674 usually SYMBOL_REFs. */
4675 if (GET_CODE (tem) == invcode
4676 && CONSTANT_P (x)
4677 && rtx_equal_p (c, XEXP (tem, 1)))
4678 return NULL_RTX;
4679
4680 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4681 }
4682
4683 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4684 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4685 if (code == NE
4686 && op1 == const0_rtx
4687 && GET_MODE_CLASS (mode) == MODE_INT
4688 && cmp_mode != VOIDmode
4689 /* ??? Work-around BImode bugs in the ia64 backend. */
4690 && mode != BImode
4691 && cmp_mode != BImode
4692 && nonzero_bits (op0, cmp_mode) == 1
4693 && STORE_FLAG_VALUE == 1)
4694 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4695 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4696 : lowpart_subreg (mode, op0, cmp_mode);
4697
4698 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4699 if ((code == EQ || code == NE)
4700 && op1 == const0_rtx
4701 && op0code == XOR)
4702 return simplify_gen_relational (code, mode, cmp_mode,
4703 XEXP (op0, 0), XEXP (op0, 1));
4704
4705 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4706 if ((code == EQ || code == NE)
4707 && op0code == XOR
4708 && rtx_equal_p (XEXP (op0, 0), op1)
4709 && !side_effects_p (XEXP (op0, 0)))
4710 return simplify_gen_relational (code, mode, cmp_mode,
4711 XEXP (op0, 1), const0_rtx);
4712
4713 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4714 if ((code == EQ || code == NE)
4715 && op0code == XOR
4716 && rtx_equal_p (XEXP (op0, 1), op1)
4717 && !side_effects_p (XEXP (op0, 1)))
4718 return simplify_gen_relational (code, mode, cmp_mode,
4719 XEXP (op0, 0), const0_rtx);
4720
4721 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4722 if ((code == EQ || code == NE)
4723 && op0code == XOR
4724 && CONST_SCALAR_INT_P (op1)
4725 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4726 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4727 simplify_gen_binary (XOR, cmp_mode,
4728 XEXP (op0, 1), op1));
4729
4730 if (op0code == POPCOUNT && op1 == const0_rtx)
4731 switch (code)
4732 {
4733 case EQ:
4734 case LE:
4735 case LEU:
4736 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4737 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4738 XEXP (op0, 0), const0_rtx);
4739
4740 case NE:
4741 case GT:
4742 case GTU:
4743 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4744 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4745 XEXP (op0, 0), const0_rtx);
4746
4747 default:
4748 break;
4749 }
4750
4751 return NULL_RTX;
4752 }
4753
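/* Bitmasks describing the known result of a comparison; they are
   combined and mapped to an rtx by comparison_result below. */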
4754 enum
4755 {
4756 CMP_EQ = 1,
4757 CMP_LT = 2,
4758 CMP_GT = 4,
4759 CMP_LTU = 8,
4760 CMP_GTU = 16
4761 };
4762
4763
4764 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4765 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4766 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4767 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4768 For floating-point comparisons, assume that the operands were ordered. */
4769
4770 static rtx
4771 comparison_result (enum rtx_code code, int known_results)
4772 {
4773 switch (code)
4774 {
4775 case EQ:
4776 case UNEQ:
4777 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4778 case NE:
4779 case LTGT:
4780 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4781
4782 case LT:
4783 case UNLT:
4784 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4785 case GE:
4786 case UNGE:
4787 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4788
4789 case GT:
4790 case UNGT:
4791 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4792 case LE:
4793 case UNLE:
4794 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4795
4796 case LTU:
4797 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4798 case GEU:
4799 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4800
4801 case GTU:
4802 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4803 case LEU:
4804 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4805
4806 case ORDERED:
4807 return const_true_rtx;
4808 case UNORDERED:
4809 return const0_rtx;
4810 default:
4811 gcc_unreachable ();
4812 }
4813 }
4814
4815 /* Check if the given comparison (done in the given MODE) is actually a
4816 tautology or a contradiction.
4817 If no simplification is possible, this function returns zero.
4818 Otherwise, it returns either const_true_rtx or const0_rtx. */
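/* For instance, X <u 0 can never be true for any X, so
   (ltu X (const_int 0)) in a word-size integer mode folds to const0_rtx. */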
4819
4820 rtx
4821 simplify_const_relational_operation (enum rtx_code code,
4822 enum machine_mode mode,
4823 rtx op0, rtx op1)
4824 {
4825 rtx tem;
4826 rtx trueop0;
4827 rtx trueop1;
4828
4829 gcc_assert (mode != VOIDmode
4830 || (GET_MODE (op0) == VOIDmode
4831 && GET_MODE (op1) == VOIDmode));
4832
4833 /* If op0 is a compare, extract the comparison arguments from it. */
4834 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4835 {
4836 op1 = XEXP (op0, 1);
4837 op0 = XEXP (op0, 0);
4838
4839 if (GET_MODE (op0) != VOIDmode)
4840 mode = GET_MODE (op0);
4841 else if (GET_MODE (op1) != VOIDmode)
4842 mode = GET_MODE (op1);
4843 else
4844 return 0;
4845 }
4846
4847 /* We can't simplify MODE_CC values since we don't know what the
4848 actual comparison is. */
4849 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4850 return 0;
4851
4852 /* Make sure the constant is second. */
4853 if (swap_commutative_operands_p (op0, op1))
4854 {
4855 tem = op0, op0 = op1, op1 = tem;
4856 code = swap_condition (code);
4857 }
4858
4859 trueop0 = avoid_constant_pool_reference (op0);
4860 trueop1 = avoid_constant_pool_reference (op1);
4861
4862 /* For integer comparisons of A and B maybe we can simplify A - B and can
4863 then simplify a comparison of that with zero. If A and B are both either
4864 a register or a CONST_INT, this can't help; testing for these cases will
4865 prevent infinite recursion here and speed things up.
4866
4867 We can only do this for EQ and NE comparisons; otherwise we may
4868 lose or introduce overflow that we cannot disregard as undefined,
4869 since we do not know the signedness of the operation on either the
4870 left or the right hand side of the comparison. */
4871
4872 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4873 && (code == EQ || code == NE)
4874 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4875 && (REG_P (op1) || CONST_INT_P (trueop1)))
4876 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4877 /* We cannot do this if tem is a nonzero address. */
4878 && ! nonzero_address_p (tem))
4879 return simplify_const_relational_operation (signed_condition (code),
4880 mode, tem, const0_rtx);
4881
4882 if (! HONOR_NANS (mode) && code == ORDERED)
4883 return const_true_rtx;
4884
4885 if (! HONOR_NANS (mode) && code == UNORDERED)
4886 return const0_rtx;
4887
4888 /* For modes without NaNs, if the two operands are equal, we know the
4889 result except if they have side-effects. Even with NaNs we know
4890 the result of unordered comparisons and, if signaling NaNs are
4891 irrelevant, also the result of LT/GT/LTGT. */
4892 if ((! HONOR_NANS (GET_MODE (trueop0))
4893 || code == UNEQ || code == UNLE || code == UNGE
4894 || ((code == LT || code == GT || code == LTGT)
4895 && ! HONOR_SNANS (GET_MODE (trueop0))))
4896 && rtx_equal_p (trueop0, trueop1)
4897 && ! side_effects_p (trueop0))
4898 return comparison_result (code, CMP_EQ);
4899
4900 /* If the operands are floating-point constants, see if we can fold
4901 the result. */
4902 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4903 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4904 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4905 {
4906 REAL_VALUE_TYPE d0, d1;
4907
4908 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4909 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4910
4911 /* Comparisons are unordered iff at least one of the values is NaN. */
4912 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4913 switch (code)
4914 {
4915 case UNEQ:
4916 case UNLT:
4917 case UNGT:
4918 case UNLE:
4919 case UNGE:
4920 case NE:
4921 case UNORDERED:
4922 return const_true_rtx;
4923 case EQ:
4924 case LT:
4925 case GT:
4926 case LE:
4927 case GE:
4928 case LTGT:
4929 case ORDERED:
4930 return const0_rtx;
4931 default:
4932 return 0;
4933 }
4934
4935 return comparison_result (code,
4936 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4937 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4938 }
4939
4940 /* Otherwise, see if the operands are both integers. */
4941 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4942 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4943 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4944 {
4945 int width = GET_MODE_PRECISION (mode);
4946 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4947 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4948
4949 /* Get the two words comprising each integer constant. */
4950 if (CONST_DOUBLE_AS_INT_P (trueop0))
4951 {
4952 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4953 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4954 }
4955 else
4956 {
4957 l0u = l0s = INTVAL (trueop0);
4958 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4959 }
4960
4961 if (CONST_DOUBLE_AS_INT_P (trueop1))
4962 {
4963 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4964 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4965 }
4966 else
4967 {
4968 l1u = l1s = INTVAL (trueop1);
4969 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4970 }
4971
4972 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4973 we have to sign or zero-extend the values. */
4974 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4975 {
4976 l0u &= GET_MODE_MASK (mode);
4977 l1u &= GET_MODE_MASK (mode);
4978
4979 if (val_signbit_known_set_p (mode, l0s))
4980 l0s |= ~GET_MODE_MASK (mode);
4981
4982 if (val_signbit_known_set_p (mode, l1s))
4983 l1s |= ~GET_MODE_MASK (mode);
4984 }
4985 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4986 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4987
4988 if (h0u == h1u && l0u == l1u)
4989 return comparison_result (code, CMP_EQ);
4990 else
4991 {
4992 int cr;
4993 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4994 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4995 return comparison_result (code, cr);
4996 }
4997 }
4998
4999 /* Optimize comparisons with upper and lower bounds. */
5000 if (HWI_COMPUTABLE_MODE_P (mode)
5001 && CONST_INT_P (trueop1))
5002 {
5003 int sign;
5004 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5005 HOST_WIDE_INT val = INTVAL (trueop1);
5006 HOST_WIDE_INT mmin, mmax;
5007
5008 if (code == GEU
5009 || code == LEU
5010 || code == GTU
5011 || code == LTU)
5012 sign = 0;
5013 else
5014 sign = 1;
5015
5016 /* Get a reduced range if the sign bit is zero. */
5017 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5018 {
5019 mmin = 0;
5020 mmax = nonzero;
5021 }
5022 else
5023 {
5024 rtx mmin_rtx, mmax_rtx;
5025 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5026
5027 mmin = INTVAL (mmin_rtx);
5028 mmax = INTVAL (mmax_rtx);
5029 if (sign)
5030 {
5031 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5032
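/* The top SIGN_COPIES bits of TRUEOP0 are known to be copies of its
   sign bit, so its value lies in a correspondingly narrower signed
   range; shrink the bounds to match. */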
5033 mmin >>= (sign_copies - 1);
5034 mmax >>= (sign_copies - 1);
5035 }
5036 }
5037
5038 switch (code)
5039 {
5040 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5041 case GEU:
5042 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5043 return const_true_rtx;
5044 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5045 return const0_rtx;
5046 break;
5047 case GE:
5048 if (val <= mmin)
5049 return const_true_rtx;
5050 if (val > mmax)
5051 return const0_rtx;
5052 break;
5053
5054 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5055 case LEU:
5056 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5057 return const_true_rtx;
5058 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5059 return const0_rtx;
5060 break;
5061 case LE:
5062 if (val >= mmax)
5063 return const_true_rtx;
5064 if (val < mmin)
5065 return const0_rtx;
5066 break;
5067
5068 case EQ:
5069 /* x == y is always false for y out of range. */
5070 if (val < mmin || val > mmax)
5071 return const0_rtx;
5072 break;
5073
5074 /* x > y is always false for y >= mmax, always true for y < mmin. */
5075 case GTU:
5076 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5077 return const0_rtx;
5078 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5079 return const_true_rtx;
5080 break;
5081 case GT:
5082 if (val >= mmax)
5083 return const0_rtx;
5084 if (val < mmin)
5085 return const_true_rtx;
5086 break;
5087
5088 /* x < y is always false for y <= mmin, always true for y > mmax. */
5089 case LTU:
5090 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5091 return const0_rtx;
5092 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5093 return const_true_rtx;
5094 break;
5095 case LT:
5096 if (val <= mmin)
5097 return const0_rtx;
5098 if (val > mmax)
5099 return const_true_rtx;
5100 break;
5101
5102 case NE:
5103 /* x != y is always true for y out of range. */
5104 if (val < mmin || val > mmax)
5105 return const_true_rtx;
5106 break;
5107
5108 default:
5109 break;
5110 }
5111 }
5112
5113 /* Optimize integer comparisons with zero. */
5114 if (trueop1 == const0_rtx)
5115 {
5116 /* Some addresses are known to be nonzero. We don't know
5117 their sign, but equality comparisons are known. */
5118 if (nonzero_address_p (trueop0))
5119 {
5120 if (code == EQ || code == LEU)
5121 return const0_rtx;
5122 if (code == NE || code == GTU)
5123 return const_true_rtx;
5124 }
5125
5126 /* See if the first operand is an IOR with a constant. If so, we
5127 may be able to determine the result of this comparison. */
5128 if (GET_CODE (op0) == IOR)
5129 {
5130 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5131 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5132 {
5133 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5134 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5135 && (UINTVAL (inner_const)
5136 & ((unsigned HOST_WIDE_INT) 1
5137 << sign_bitnum)));
5138
5139 switch (code)
5140 {
5141 case EQ:
5142 case LEU:
5143 return const0_rtx;
5144 case NE:
5145 case GTU:
5146 return const_true_rtx;
5147 case LT:
5148 case LE:
5149 if (has_sign)
5150 return const_true_rtx;
5151 break;
5152 case GT:
5153 case GE:
5154 if (has_sign)
5155 return const0_rtx;
5156 break;
5157 default:
5158 break;
5159 }
5160 }
5161 }
5162 }
5163
5164 /* Optimize comparison of ABS with zero. */
5165 if (trueop1 == CONST0_RTX (mode)
5166 && (GET_CODE (trueop0) == ABS
5167 || (GET_CODE (trueop0) == FLOAT_EXTEND
5168 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5169 {
5170 switch (code)
5171 {
5172 case LT:
5173 /* Optimize abs(x) < 0.0. */
5174 if (!HONOR_SNANS (mode)
5175 && (!INTEGRAL_MODE_P (mode)
5176 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5177 {
5178 if (INTEGRAL_MODE_P (mode)
5179 && (issue_strict_overflow_warning
5180 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5181 warning (OPT_Wstrict_overflow,
5182 ("assuming signed overflow does not occur when "
5183 "assuming abs (x) < 0 is false"));
5184 return const0_rtx;
5185 }
5186 break;
5187
5188 case GE:
5189 /* Optimize abs(x) >= 0.0. */
5190 if (!HONOR_NANS (mode)
5191 && (!INTEGRAL_MODE_P (mode)
5192 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5193 {
5194 if (INTEGRAL_MODE_P (mode)
5195 && (issue_strict_overflow_warning
5196 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5197 warning (OPT_Wstrict_overflow,
5198 ("assuming signed overflow does not occur when "
5199 "assuming abs (x) >= 0 is true"));
5200 return const_true_rtx;
5201 }
5202 break;
5203
5204 case UNGE:
5205 /* Optimize ! (abs(x) < 0.0). */
5206 return const_true_rtx;
5207
5208 default:
5209 break;
5210 }
5211 }
5212
5213 return 0;
5214 }
5215 \f
5216 /* Simplify CODE, an operation with result mode MODE and three operands,
5217 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5218 a constant. Return 0 if no simplification is possible. */
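/* For instance, with BITS_BIG_ENDIAN clear,
   (zero_extract:SI (const_int 255) (const_int 4) (const_int 0))
   folds to (const_int 15). */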
5219
5220 rtx
5221 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5222 enum machine_mode op0_mode, rtx op0, rtx op1,
5223 rtx op2)
5224 {
5225 unsigned int width = GET_MODE_PRECISION (mode);
5226 bool any_change = false;
5227 rtx tem;
5228
5229 /* VOIDmode means "infinite" precision. */
5230 if (width == 0)
5231 width = HOST_BITS_PER_WIDE_INT;
5232
5233 switch (code)
5234 {
5235 case FMA:
5236 /* Simplify negations around the multiplication. */
5237 /* -a * -b + c => a * b + c. */
5238 if (GET_CODE (op0) == NEG)
5239 {
5240 tem = simplify_unary_operation (NEG, mode, op1, mode);
5241 if (tem)
5242 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5243 }
5244 else if (GET_CODE (op1) == NEG)
5245 {
5246 tem = simplify_unary_operation (NEG, mode, op0, mode);
5247 if (tem)
5248 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5249 }
5250
5251 /* Canonicalize the two multiplication operands. */
5252 /* a * -b + c => -b * a + c. */
5253 if (swap_commutative_operands_p (op0, op1))
5254 tem = op0, op0 = op1, op1 = tem, any_change = true;
5255
5256 if (any_change)
5257 return gen_rtx_FMA (mode, op0, op1, op2);
5258 return NULL_RTX;
5259
5260 case SIGN_EXTRACT:
5261 case ZERO_EXTRACT:
5262 if (CONST_INT_P (op0)
5263 && CONST_INT_P (op1)
5264 && CONST_INT_P (op2)
5265 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5266 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5267 {
5268 /* Extracting a bit-field from a constant. */
5269 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5270 HOST_WIDE_INT op1val = INTVAL (op1);
5271 HOST_WIDE_INT op2val = INTVAL (op2);
5272 if (BITS_BIG_ENDIAN)
5273 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5274 else
5275 val >>= op2val;
5276
5277 if (HOST_BITS_PER_WIDE_INT != op1val)
5278 {
5279 /* First zero-extend. */
5280 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5281 /* If desired, propagate sign bit. */
5282 if (code == SIGN_EXTRACT
5283 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5284 != 0)
5285 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5286 }
5287
5288 return gen_int_mode (val, mode);
5289 }
5290 break;
5291
5292 case IF_THEN_ELSE:
5293 if (CONST_INT_P (op0))
5294 return op0 != const0_rtx ? op1 : op2;
5295
5296 /* Convert c ? a : a into "a". */
5297 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5298 return op1;
5299
5300 /* Convert a != b ? a : b into "a". */
5301 if (GET_CODE (op0) == NE
5302 && ! side_effects_p (op0)
5303 && ! HONOR_NANS (mode)
5304 && ! HONOR_SIGNED_ZEROS (mode)
5305 && ((rtx_equal_p (XEXP (op0, 0), op1)
5306 && rtx_equal_p (XEXP (op0, 1), op2))
5307 || (rtx_equal_p (XEXP (op0, 0), op2)
5308 && rtx_equal_p (XEXP (op0, 1), op1))))
5309 return op1;
5310
5311 /* Convert a == b ? a : b into "b". */
5312 if (GET_CODE (op0) == EQ
5313 && ! side_effects_p (op0)
5314 && ! HONOR_NANS (mode)
5315 && ! HONOR_SIGNED_ZEROS (mode)
5316 && ((rtx_equal_p (XEXP (op0, 0), op1)
5317 && rtx_equal_p (XEXP (op0, 1), op2))
5318 || (rtx_equal_p (XEXP (op0, 0), op2)
5319 && rtx_equal_p (XEXP (op0, 1), op1))))
5320 return op2;
5321
5322 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5323 {
5324 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5325 ? GET_MODE (XEXP (op0, 1))
5326 : GET_MODE (XEXP (op0, 0)));
5327 rtx temp;
5328
5329 /* Look for happy constants in op1 and op2. */
5330 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5331 {
5332 HOST_WIDE_INT t = INTVAL (op1);
5333 HOST_WIDE_INT f = INTVAL (op2);
5334
5335 if (t == STORE_FLAG_VALUE && f == 0)
5336 code = GET_CODE (op0);
5337 else if (t == 0 && f == STORE_FLAG_VALUE)
5338 {
5339 enum rtx_code tmp;
5340 tmp = reversed_comparison_code (op0, NULL_RTX);
5341 if (tmp == UNKNOWN)
5342 break;
5343 code = tmp;
5344 }
5345 else
5346 break;
5347
5348 return simplify_gen_relational (code, mode, cmp_mode,
5349 XEXP (op0, 0), XEXP (op0, 1));
5350 }
5351
5352 if (cmp_mode == VOIDmode)
5353 cmp_mode = op0_mode;
5354 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5355 cmp_mode, XEXP (op0, 0),
5356 XEXP (op0, 1));
5357
5358 /* See if any simplifications were possible. */
5359 if (temp)
5360 {
5361 if (CONST_INT_P (temp))
5362 return temp == const0_rtx ? op2 : op1;
5363 else if (temp)
5364 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5365 }
5366 }
5367 break;
5368
5369 case VEC_MERGE:
5370 gcc_assert (GET_MODE (op0) == mode);
5371 gcc_assert (GET_MODE (op1) == mode);
5372 gcc_assert (VECTOR_MODE_P (mode));
5373 op2 = avoid_constant_pool_reference (op2);
5374 if (CONST_INT_P (op2))
5375 {
5376 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5377 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5378 int mask = (1 << n_elts) - 1;
5379
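/* Bit I of OP2 selects element I of OP0 when set and of OP1 when clear;
   an all-clear or all-set mask therefore picks one operand outright. */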
5380 if (!(INTVAL (op2) & mask))
5381 return op1;
5382 if ((INTVAL (op2) & mask) == mask)
5383 return op0;
5384
5385 op0 = avoid_constant_pool_reference (op0);
5386 op1 = avoid_constant_pool_reference (op1);
5387 if (GET_CODE (op0) == CONST_VECTOR
5388 && GET_CODE (op1) == CONST_VECTOR)
5389 {
5390 rtvec v = rtvec_alloc (n_elts);
5391 unsigned int i;
5392
5393 for (i = 0; i < n_elts; i++)
5394 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5395 ? CONST_VECTOR_ELT (op0, i)
5396 : CONST_VECTOR_ELT (op1, i));
5397 return gen_rtx_CONST_VECTOR (mode, v);
5398 }
5399 }
5400 break;
5401
5402 default:
5403 gcc_unreachable ();
5404 }
5405
5406 return 0;
5407 }
5408
5409 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5410 or CONST_VECTOR,
5411 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5412
5413 Works by unpacking OP into a collection of 8-bit values
5414 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5415 and then repacking them again for OUTERMODE. */
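/* For instance, on a little-endian target, extracting the QImode subreg
   at byte 0 of the HImode constant 0x1234 unpacks the bytes {0x34, 0x12}
   and repacks them to (const_int 0x34). */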
5416
5417 static rtx
5418 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5419 enum machine_mode innermode, unsigned int byte)
5420 {
5421 /* We support up to 512-bit values (for V8DFmode). */
5422 enum {
5423 max_bitsize = 512,
5424 value_bit = 8,
5425 value_mask = (1 << value_bit) - 1
5426 };
5427 unsigned char value[max_bitsize / value_bit];
5428 int value_start;
5429 int i;
5430 int elem;
5431
5432 int num_elem;
5433 rtx * elems;
5434 int elem_bitsize;
5435 rtx result_s;
5436 rtvec result_v = NULL;
5437 enum mode_class outer_class;
5438 enum machine_mode outer_submode;
5439
5440 /* Some ports misuse CCmode. */
5441 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5442 return op;
5443
5444 /* We have no way to represent a complex constant at the rtl level. */
5445 if (COMPLEX_MODE_P (outermode))
5446 return NULL_RTX;
5447
5448 /* Unpack the value. */
5449
5450 if (GET_CODE (op) == CONST_VECTOR)
5451 {
5452 num_elem = CONST_VECTOR_NUNITS (op);
5453 elems = &CONST_VECTOR_ELT (op, 0);
5454 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5455 }
5456 else
5457 {
5458 num_elem = 1;
5459 elems = &op;
5460 elem_bitsize = max_bitsize;
5461 }
5462 /* If this asserts, it is too complicated; reducing value_bit may help. */
5463 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5464 /* I don't know how to handle endianness of sub-units. */
5465 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5466
5467 for (elem = 0; elem < num_elem; elem++)
5468 {
5469 unsigned char * vp;
5470 rtx el = elems[elem];
5471
5472 /* Vectors are kept in target memory order. (This is probably
5473 a mistake.) */
5474 {
5475 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5476 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5477 / BITS_PER_UNIT);
5478 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5479 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5480 unsigned bytele = (subword_byte % UNITS_PER_WORD
5481 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5482 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5483 }
5484
5485 switch (GET_CODE (el))
5486 {
5487 case CONST_INT:
5488 for (i = 0;
5489 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5490 i += value_bit)
5491 *vp++ = INTVAL (el) >> i;
5492 /* CONST_INTs are always logically sign-extended. */
5493 for (; i < elem_bitsize; i += value_bit)
5494 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5495 break;
5496
5497 case CONST_DOUBLE:
5498 if (GET_MODE (el) == VOIDmode)
5499 {
5500 unsigned char extend = 0;
5501 /* If this triggers, someone should have generated a
5502 CONST_INT instead. */
5503 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5504
5505 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5506 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5507 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5508 {
5509 *vp++
5510 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5511 i += value_bit;
5512 }
5513
5514 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5515 extend = -1;
5516 for (; i < elem_bitsize; i += value_bit)
5517 *vp++ = extend;
5518 }
5519 else
5520 {
5521 long tmp[max_bitsize / 32];
5522 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5523
5524 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5525 gcc_assert (bitsize <= elem_bitsize);
5526 gcc_assert (bitsize % value_bit == 0);
5527
5528 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5529 GET_MODE (el));
5530
5531 /* real_to_target produces its result in words affected by
5532 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5533 and use WORDS_BIG_ENDIAN instead; see the documentation
5534 of SUBREG in rtl.texi. */
5535 for (i = 0; i < bitsize; i += value_bit)
5536 {
5537 int ibase;
5538 if (WORDS_BIG_ENDIAN)
5539 ibase = bitsize - 1 - i;
5540 else
5541 ibase = i;
5542 *vp++ = tmp[ibase / 32] >> i % 32;
5543 }
5544
5545 /* It shouldn't matter what's done here, so fill it with
5546 zero. */
5547 for (; i < elem_bitsize; i += value_bit)
5548 *vp++ = 0;
5549 }
5550 break;
5551
5552 case CONST_FIXED:
5553 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5554 {
5555 for (i = 0; i < elem_bitsize; i += value_bit)
5556 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5557 }
5558 else
5559 {
5560 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5561 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5562 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5563 i += value_bit)
5564 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5565 >> (i - HOST_BITS_PER_WIDE_INT);
5566 for (; i < elem_bitsize; i += value_bit)
5567 *vp++ = 0;
5568 }
5569 break;
5570
5571 default:
5572 gcc_unreachable ();
5573 }
5574 }
5575
5576 /* Now, pick the right byte to start with. */
5577 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5578 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5579 will already have offset 0. */
5580 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5581 {
5582 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5583 - byte);
5584 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5585 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5586 byte = (subword_byte % UNITS_PER_WORD
5587 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5588 }
5589
5590 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5591 so if it's become negative it will instead be very large.) */
5592 gcc_assert (byte < GET_MODE_SIZE (innermode));
5593
5594 /* Convert from bytes to chunks of size value_bit. */
5595 value_start = byte * (BITS_PER_UNIT / value_bit);
5596
5597 /* Re-pack the value. */
5598
5599 if (VECTOR_MODE_P (outermode))
5600 {
5601 num_elem = GET_MODE_NUNITS (outermode);
5602 result_v = rtvec_alloc (num_elem);
5603 elems = &RTVEC_ELT (result_v, 0);
5604 outer_submode = GET_MODE_INNER (outermode);
5605 }
5606 else
5607 {
5608 num_elem = 1;
5609 elems = &result_s;
5610 outer_submode = outermode;
5611 }
5612
5613 outer_class = GET_MODE_CLASS (outer_submode);
5614 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5615
5616 gcc_assert (elem_bitsize % value_bit == 0);
5617 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5618
5619 for (elem = 0; elem < num_elem; elem++)
5620 {
5621 unsigned char *vp;
5622
5623 /* Vectors are stored in target memory order. (This is probably
5624 a mistake.) */
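	/* For example (illustrative only): with a V4SImode OUTERMODE on a
	   big-endian target with 4-byte words, element 0 comes first in
	   memory and is therefore the most significant element, so BYTELE
	   below works out to 12; on a little-endian target BYTELE is
	   simply ELEM * 4.  */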
5625 {
5626 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5627 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5628 / BITS_PER_UNIT);
5629 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5630 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5631 unsigned bytele = (subword_byte % UNITS_PER_WORD
5632 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5633 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5634 }
5635
5636 switch (outer_class)
5637 {
5638 case MODE_INT:
5639 case MODE_PARTIAL_INT:
5640 {
5641 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5642
5643 for (i = 0;
5644 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5645 i += value_bit)
5646 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5647 for (; i < elem_bitsize; i += value_bit)
5648 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5649 << (i - HOST_BITS_PER_WIDE_INT);
5650
5651 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5652 know why. */
5653 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5654 elems[elem] = gen_int_mode (lo, outer_submode);
5655 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5656 elems[elem] = immed_double_const (lo, hi, outer_submode);
5657 else
5658 return NULL_RTX;
5659 }
5660 break;
5661
5662 case MODE_FLOAT:
5663 case MODE_DECIMAL_FLOAT:
5664 {
5665 REAL_VALUE_TYPE r;
5666 long tmp[max_bitsize / 32];
5667
5668 /* real_from_target wants its input in words affected by
5669 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5670 and use WORDS_BIG_ENDIAN instead; see the documentation
5671 of SUBREG in rtl.texi. */
5672 for (i = 0; i < max_bitsize / 32; i++)
5673 tmp[i] = 0;
5674 for (i = 0; i < elem_bitsize; i += value_bit)
5675 {
5676 int ibase;
5677 if (WORDS_BIG_ENDIAN)
5678 ibase = elem_bitsize - 1 - i;
5679 else
5680 ibase = i;
5681 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5682 }
5683
5684 real_from_target (&r, tmp, outer_submode);
5685 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5686 }
5687 break;
5688
5689 case MODE_FRACT:
5690 case MODE_UFRACT:
5691 case MODE_ACCUM:
5692 case MODE_UACCUM:
5693 {
5694 FIXED_VALUE_TYPE f;
5695 f.data.low = 0;
5696 f.data.high = 0;
5697 f.mode = outer_submode;
5698
5699 for (i = 0;
5700 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5701 i += value_bit)
5702 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5703 for (; i < elem_bitsize; i += value_bit)
5704 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5705 << (i - HOST_BITS_PER_WIDE_INT));
5706
5707 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5708 }
5709 break;
5710
5711 default:
5712 gcc_unreachable ();
5713 }
5714 }
5715 if (VECTOR_MODE_P (outermode))
5716 return gen_rtx_CONST_VECTOR (outermode, result_v);
5717 else
5718 return result_s;
5719 }
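/* Two illustrative examples of what simplify_immed_subreg computes
   (sketches, assuming a little-endian target; not verbatim RTL dumps):

     (subreg:SI (const_vector:V2SI [(const_int 1) (const_int 2)]) 0)
       folds to (const_int 1), and with byte offset 4 to (const_int 2);

     (subreg:SI (const_double:SF 1.0) 0)
       folds to (const_int 0x3f800000), the IEEE single-precision
       bit pattern of 1.0.  */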
5720
5721 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5722 Return 0 if no simplifications are possible. */
5723 rtx
5724 simplify_subreg (enum machine_mode outermode, rtx op,
5725 enum machine_mode innermode, unsigned int byte)
5726 {
5727 /* Little bit of sanity checking. */
5728 gcc_assert (innermode != VOIDmode);
5729 gcc_assert (outermode != VOIDmode);
5730 gcc_assert (innermode != BLKmode);
5731 gcc_assert (outermode != BLKmode);
5732
5733 gcc_assert (GET_MODE (op) == innermode
5734 || GET_MODE (op) == VOIDmode);
5735
5736 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5737 return NULL_RTX;
5738
5739 if (byte >= GET_MODE_SIZE (innermode))
5740 return NULL_RTX;
5741
5742 if (outermode == innermode && !byte)
5743 return op;
5744
5745 if (CONST_SCALAR_INT_P (op)
5746 || CONST_DOUBLE_AS_FLOAT_P (op)
5747 || GET_CODE (op) == CONST_FIXED
5748 || GET_CODE (op) == CONST_VECTOR)
5749 return simplify_immed_subreg (outermode, op, innermode, byte);
5750
5751 /* Changing mode twice with SUBREG => just change it once,
5752 or not at all if changing back to op's starting mode. */
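  /* For example (illustrative, assuming a little-endian target):
     (subreg:QI (subreg:HI (reg:SI r) 0) 0) simplifies to
     (subreg:QI (reg:SI r) 0), and when the outer mode matches the
     innermost mode, as in (subreg:SI (subreg:HI (reg:SI r) 0) 0),
     the result is simply (reg:SI r).  */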
5753 if (GET_CODE (op) == SUBREG)
5754 {
5755 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5756 int final_offset = byte + SUBREG_BYTE (op);
5757 rtx newx;
5758
5759 if (outermode == innermostmode
5760 && byte == 0 && SUBREG_BYTE (op) == 0)
5761 return SUBREG_REG (op);
5762
5763 /* The SUBREG_BYTE represents the offset, as if the value were stored
5764 in memory. An irritating exception is the paradoxical subreg, where
5765 we define SUBREG_BYTE to be 0; on big-endian machines this value
5766 would otherwise be negative. For a moment, undo this exception. */
5767 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5768 {
5769 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5770 if (WORDS_BIG_ENDIAN)
5771 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5772 if (BYTES_BIG_ENDIAN)
5773 final_offset += difference % UNITS_PER_WORD;
5774 }
5775 if (SUBREG_BYTE (op) == 0
5776 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5777 {
5778 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5779 if (WORDS_BIG_ENDIAN)
5780 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5781 if (BYTES_BIG_ENDIAN)
5782 final_offset += difference % UNITS_PER_WORD;
5783 }
5784
5785 /* See whether the resulting subreg will be paradoxical. */
5786 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5787 {
5788 /* In nonparadoxical subregs we can't handle negative offsets. */
5789 if (final_offset < 0)
5790 return NULL_RTX;
5791 /* Bail out in case the resulting subreg would be incorrect. */
5792 if (final_offset % GET_MODE_SIZE (outermode)
5793 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5794 return NULL_RTX;
5795 }
5796 else
5797 {
5798 int offset = 0;
5799 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5800
5801 /* In a paradoxical subreg, see if we are still looking at the lower
5802 part. If so, our SUBREG_BYTE will be 0. */
5803 if (WORDS_BIG_ENDIAN)
5804 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5805 if (BYTES_BIG_ENDIAN)
5806 offset += difference % UNITS_PER_WORD;
5807 if (offset == final_offset)
5808 final_offset = 0;
5809 else
5810 return NULL_RTX;
5811 }
5812
5813 /* Recurse for further possible simplifications. */
5814 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5815 final_offset);
5816 if (newx)
5817 return newx;
5818 if (validate_subreg (outermode, innermostmode,
5819 SUBREG_REG (op), final_offset))
5820 {
5821 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5822 if (SUBREG_PROMOTED_VAR_P (op)
5823 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5824 && GET_MODE_CLASS (outermode) == MODE_INT
5825 && IN_RANGE (GET_MODE_SIZE (outermode),
5826 GET_MODE_SIZE (innermode),
5827 GET_MODE_SIZE (innermostmode))
5828 && subreg_lowpart_p (newx))
5829 {
5830 SUBREG_PROMOTED_VAR_P (newx) = 1;
5831 SUBREG_PROMOTED_UNSIGNED_SET
5832 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5833 }
5834 return newx;
5835 }
5836 return NULL_RTX;
5837 }
5838
5839 /* SUBREG of a hard register => just change the register number
5840 and/or mode. If the hard register is not valid in that mode,
5841 suppress this simplification. If the hard register is the stack,
5842 frame, or argument pointer, leave this as a SUBREG. */
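  /* For example (illustrative, assuming a 32-bit little-endian target
     where a DImode value occupies hard registers 0 and 1):
     (subreg:SI (reg:DI 0) 0) can become (reg:SI 0) and
     (subreg:SI (reg:DI 0) 4) can become (reg:SI 1), provided SImode is
     valid in those registers.  */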
5843
5844 if (REG_P (op) && HARD_REGISTER_P (op))
5845 {
5846 unsigned int regno, final_regno;
5847
5848 regno = REGNO (op);
5849 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5850 if (HARD_REGISTER_NUM_P (final_regno))
5851 {
5852 rtx x;
5853 int final_offset = byte;
5854
5855 /* Adjust offset for paradoxical subregs. */
5856 if (byte == 0
5857 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5858 {
5859 int difference = (GET_MODE_SIZE (innermode)
5860 - GET_MODE_SIZE (outermode));
5861 if (WORDS_BIG_ENDIAN)
5862 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5863 if (BYTES_BIG_ENDIAN)
5864 final_offset += difference % UNITS_PER_WORD;
5865 }
5866
5867 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5868
5869 /* Propagate the original regno. We don't have any way to specify
5870 the offset inside the original regno, so do so only for the lowpart.
5871 The information is used only by alias analysis, which cannot
5872 grok a partial register anyway. */
5873
5874 if (subreg_lowpart_offset (outermode, innermode) == byte)
5875 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5876 return x;
5877 }
5878 }
5879
5880 /* If we have a SUBREG of a register that we are replacing and we are
5881 replacing it with a MEM, make a new MEM and try replacing the
5882 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5883 or if we would be widening it. */
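  /* For example (illustrative): (subreg:QI (mem:SI X) 2) can be rewritten
     via adjust_address_nv as a QImode memory reference at X plus 2 bytes,
     since SUBREG_BYTE is already a memory-order offset.  */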
5884
5885 if (MEM_P (op)
5886 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5887 /* Allow splitting of volatile memory references in case we don't
5888 have an instruction to move the whole thing. */
5889 && (! MEM_VOLATILE_P (op)
5890 || ! have_insn_for (SET, innermode))
5891 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5892 return adjust_address_nv (op, outermode, byte);
5893
5894 /* Handle complex values represented as CONCAT
5895 of real and imaginary part. */
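  /* For example (illustrative): with OP == (concat:SC (reg:SF x) (reg:SF y))
     and 4-byte SFmode, (subreg:SF OP 0) simplifies to (reg:SF x) and
     (subreg:SF OP 4) to (reg:SF y).  */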
5896 if (GET_CODE (op) == CONCAT)
5897 {
5898 unsigned int part_size, final_offset;
5899 rtx part, res;
5900
5901 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5902 if (byte < part_size)
5903 {
5904 part = XEXP (op, 0);
5905 final_offset = byte;
5906 }
5907 else
5908 {
5909 part = XEXP (op, 1);
5910 final_offset = byte - part_size;
5911 }
5912
5913 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5914 return NULL_RTX;
5915
5916 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5917 if (res)
5918 return res;
5919 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5920 return gen_rtx_SUBREG (outermode, part, final_offset);
5921 return NULL_RTX;
5922 }
5923
5924 /* A SUBREG resulting from a zero extension may fold to zero if
5925 it extracts higher bits than the ZERO_EXTEND's source provides. */
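  /* For example (illustrative, assuming a little-endian target):
     (subreg:SI (zero_extend:DI (reg:SI r)) 4) reads only bits that the
     extension filled with zeros, so it folds to (const_int 0).  */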
5926 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5927 {
5928 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5929 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5930 return CONST0_RTX (outermode);
5931 }
5932
5933 if (SCALAR_INT_MODE_P (outermode)
5934 && SCALAR_INT_MODE_P (innermode)
5935 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5936 && byte == subreg_lowpart_offset (outermode, innermode))
5937 {
5938 rtx tem = simplify_truncation (outermode, op, innermode);
5939 if (tem)
5940 return tem;
5941 }
5942
5943 return NULL_RTX;
5944 }
5945
5946 /* Make a SUBREG operation or equivalent if it folds. */
5947
5948 rtx
5949 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5950 enum machine_mode innermode, unsigned int byte)
5951 {
5952 rtx newx;
5953
5954 newx = simplify_subreg (outermode, op, innermode, byte);
5955 if (newx)
5956 return newx;
5957
5958 if (GET_CODE (op) == SUBREG
5959 || GET_CODE (op) == CONCAT
5960 || GET_MODE (op) == VOIDmode)
5961 return NULL_RTX;
5962
5963 if (validate_subreg (outermode, innermode, op, byte))
5964 return gen_rtx_SUBREG (outermode, op, byte);
5965
5966 return NULL_RTX;
5967 }
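/* A minimal usage sketch (illustrative only): obtaining the SImode
   lowpart of a DImode value OP, falling back to a plain SUBREG when no
   simplification applies:

     rtx lo = simplify_gen_subreg (SImode, op, DImode,
				   subreg_lowpart_offset (SImode, DImode));

   If the subreg cannot be simplified, the call returns a plain
   (subreg:SI ...) when validate_subreg allows it, and NULL_RTX when OP
   is itself a SUBREG, a CONCAT, or has VOIDmode, or when the subreg does
   not validate.  */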
5968
5969 /* Simplify X, an rtx expression.
5970
5971 Return the simplified expression or NULL if no simplifications
5972 were possible.
5973
5974 This is the preferred entry point into the simplification routines;
5975 however, we still allow passes to call the more specific routines.
5976
5977 Right now GCC has three (yes, three) major bodies of RTL simplification
5978 code that need to be unified.
5979
5980 1. fold_rtx in cse.c. This code uses various CSE specific
5981 information to aid in RTL simplification.
5982
5983 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5984 it uses combine specific information to aid in RTL
5985 simplification.
5986
5987 3. The routines in this file.
5988
5989
5990 Long term we want to only have one body of simplification code; to
5991 get to that state I recommend the following steps:
5992
5993 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5994 which do not depend on pass-specific state into these routines.
5995
5996 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5997 use this routine whenever possible.
5998
5999 3. Allow for pass dependent state to be provided to these
6000 routines and add simplifications based on the pass dependent
6001 state. Remove code from cse.c & combine.c that becomes
6002 redundant/dead.
6003
6004 It will take time, but ultimately the compiler will be easier to
6005 maintain and improve. It's totally silly that when we add a
6006 simplification it needs to be added to 4 places (3 for RTL
6007 simplification and 1 for tree simplification). */
6008
6009 rtx
6010 simplify_rtx (const_rtx x)
6011 {
6012 const enum rtx_code code = GET_CODE (x);
6013 const enum machine_mode mode = GET_MODE (x);
6014
6015 switch (GET_RTX_CLASS (code))
6016 {
6017 case RTX_UNARY:
6018 return simplify_unary_operation (code, mode,
6019 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6020 case RTX_COMM_ARITH:
6021 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6022 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6023
6024 /* Fall through.... */
6025
6026 case RTX_BIN_ARITH:
6027 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6028
6029 case RTX_TERNARY:
6030 case RTX_BITFIELD_OPS:
6031 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6032 XEXP (x, 0), XEXP (x, 1),
6033 XEXP (x, 2));
6034
6035 case RTX_COMPARE:
6036 case RTX_COMM_COMPARE:
6037 return simplify_relational_operation (code, mode,
6038 ((GET_MODE (XEXP (x, 0))
6039 != VOIDmode)
6040 ? GET_MODE (XEXP (x, 0))
6041 : GET_MODE (XEXP (x, 1))),
6042 XEXP (x, 0),
6043 XEXP (x, 1));
6044
6045 case RTX_EXTRA:
6046 if (code == SUBREG)
6047 return simplify_subreg (mode, SUBREG_REG (x),
6048 GET_MODE (SUBREG_REG (x)),
6049 SUBREG_BYTE (x));
6050 break;
6051
6052 case RTX_OBJ:
6053 if (code == LO_SUM)
6054 {
6055 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6056 if (GET_CODE (XEXP (x, 0)) == HIGH
6057 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6058 return XEXP (x, 1);
6059 }
6060 break;
6061
6062 default:
6063 break;
6064 }
6065 return NULL;
6066 }