1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
38
39 /* Simplification and canonicalization of RTL. */
40
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
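
/* Worked example of HWI_SIGN_EXTEND (illustrative sketch only; the
   values assume a 64-bit HOST_WIDE_INT).  A LOW value with its top bit
   set extends to an all-ones HIGH word, otherwise HIGH is zero.  */
#if 0
static void
hwi_sign_extend_example (void)
{
  unsigned HOST_WIDE_INT low1 = (unsigned HOST_WIDE_INT) -1; /* top bit set */
  unsigned HOST_WIDE_INT low2 = 7;                           /* top bit clear */
  HOST_WIDE_INT high1 = HWI_SIGN_EXTEND (low1);              /* -1 */
  HOST_WIDE_INT high2 = HWI_SIGN_EXTEND (low2);              /* 0 */
}
#endif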
47
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
61 \f
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
66 {
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
68 }
69
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
72
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
75 {
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
78
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
81
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
85
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 #if TARGET_SUPPORTS_WIDE_INT
90 else if (CONST_WIDE_INT_P (x))
91 {
92 unsigned int i;
93 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
94 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
95 return false;
96 for (i = 0; i < elts - 1; i++)
97 if (CONST_WIDE_INT_ELT (x, i) != 0)
98 return false;
99 val = CONST_WIDE_INT_ELT (x, elts - 1);
100 width %= HOST_BITS_PER_WIDE_INT;
101 if (width == 0)
102 width = HOST_BITS_PER_WIDE_INT;
103 }
104 #else
105 else if (width <= HOST_BITS_PER_DOUBLE_INT
106 && CONST_DOUBLE_AS_INT_P (x)
107 && CONST_DOUBLE_LOW (x) == 0)
108 {
109 val = CONST_DOUBLE_HIGH (x);
110 width -= HOST_BITS_PER_WIDE_INT;
111 }
112 #endif
113 else
114 /* X is not an integer constant. */
115 return false;
116
117 if (width < HOST_BITS_PER_WIDE_INT)
118 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
119 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
120 }
121
122 /* Test whether VAL is equal to the most significant bit of mode MODE
123 (after masking with the mode mask of MODE). Returns false if the
124 precision of MODE is too large to handle. */
125
126 bool
127 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
128 {
129 unsigned int width;
130
131 if (GET_MODE_CLASS (mode) != MODE_INT)
132 return false;
133
134 width = GET_MODE_PRECISION (mode);
135 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
136 return false;
137
138 val &= GET_MODE_MASK (mode);
139 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
140 }
141
142 /* Test whether the most significant bit of mode MODE is set in VAL.
143 Returns false if the precision of MODE is too large to handle. */
144 bool
145 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
146 {
147 unsigned int width;
148
149 if (GET_MODE_CLASS (mode) != MODE_INT)
150 return false;
151
152 width = GET_MODE_PRECISION (mode);
153 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
154 return false;
155
156 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
157 return val != 0;
158 }
159
160 /* Test whether the most significant bit of mode MODE is clear in VAL.
161 Returns false if the precision of MODE is too large to handle. */
162 bool
163 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
164 {
165 unsigned int width;
166
167 if (GET_MODE_CLASS (mode) != MODE_INT)
168 return false;
169
170 width = GET_MODE_PRECISION (mode);
171 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
172 return false;
173
174 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
175 return val == 0;
176 }
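
/* Worked examples for the val_signbit* predicates (illustrative sketch
   only; assumes a 32-bit SImode, so the sign bit is 1 << 31).  */
#if 0
static void
val_signbit_examples (void)
{
  bool a = val_signbit_p (SImode, 0x80000000);             /* true: exactly the sign bit */
  bool b = val_signbit_known_set_p (SImode, 0xffffffff);   /* true: sign bit is set */
  bool c = val_signbit_known_clear_p (SImode, 0x7fffffff); /* true: sign bit is clear */
}
#endif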
177 \f
178 /* Make a binary operation by properly ordering the operands and
179 seeing if the expression folds. */
180
181 rtx
182 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
183 rtx op1)
184 {
185 rtx tem;
186
187 /* If this simplifies, do it. */
188 tem = simplify_binary_operation (code, mode, op0, op1);
189 if (tem)
190 return tem;
191
192 /* Put complex operands first and constants second if commutative. */
193 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
194 && swap_commutative_operands_p (op0, op1))
195 tem = op0, op0 = op1, op1 = tem;
196
197 return gen_rtx_fmt_ee (code, mode, op0, op1);
198 }
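
/* Minimal usage sketch for simplify_gen_binary (illustrative only):
   constant operands fold immediately; otherwise the rtx is built with
   any constant placed second, the canonical operand order.  */
#if 0
static rtx
simplify_gen_binary_example (void)
{
  /* Yields (const_int 5).  With a REG as the first operand it would
     instead build (plus:SI (reg:SI N) (const_int 3)).  */
  return simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
}
#endif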
199 \f
200 /* If X is a MEM referencing the constant pool, return the real value.
201 Otherwise return X. */
202 rtx
203 avoid_constant_pool_reference (rtx x)
204 {
205 rtx c, tmp, addr;
206 enum machine_mode cmode;
207 HOST_WIDE_INT offset = 0;
208
209 switch (GET_CODE (x))
210 {
211 case MEM:
212 break;
213
214 case FLOAT_EXTEND:
215 /* Handle float extensions of constant pool references. */
216 tmp = XEXP (x, 0);
217 c = avoid_constant_pool_reference (tmp);
218 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
219 {
220 REAL_VALUE_TYPE d;
221
222 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
223 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
224 }
225 return x;
226
227 default:
228 return x;
229 }
230
231 if (GET_MODE (x) == BLKmode)
232 return x;
233
234 addr = XEXP (x, 0);
235
236 /* Call target hook to avoid the effects of -fpic etc.... */
237 addr = targetm.delegitimize_address (addr);
238
239 /* Split the address into a base and integer offset. */
240 if (GET_CODE (addr) == CONST
241 && GET_CODE (XEXP (addr, 0)) == PLUS
242 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
243 {
244 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
245 addr = XEXP (XEXP (addr, 0), 0);
246 }
247
248 if (GET_CODE (addr) == LO_SUM)
249 addr = XEXP (addr, 1);
250
251 /* If this is a constant pool reference, we can turn it into its
252 constant and hope that simplifications happen. */
253 if (GET_CODE (addr) == SYMBOL_REF
254 && CONSTANT_POOL_ADDRESS_P (addr))
255 {
256 c = get_pool_constant (addr);
257 cmode = get_pool_mode (addr);
258
259 /* If we're accessing the constant in a different mode than it was
260 originally stored, attempt to fix that up via subreg simplifications.
261 If that fails we have no choice but to return the original memory. */
262 if ((offset != 0 || cmode != GET_MODE (x))
263 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
264 {
265 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
266 if (tem && CONSTANT_P (tem))
267 return tem;
268 }
269 else
270 return c;
271 }
272
273 return x;
274 }
275 \f
276 /* Simplify a MEM based on its attributes. This is the default
277 delegitimize_address target hook, and it's recommended that every
278 overrider call it. */
279
280 rtx
281 delegitimize_mem_from_attrs (rtx x)
282 {
283 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
284 use their base addresses as equivalent. */
285 if (MEM_P (x)
286 && MEM_EXPR (x)
287 && MEM_OFFSET_KNOWN_P (x))
288 {
289 tree decl = MEM_EXPR (x);
290 enum machine_mode mode = GET_MODE (x);
291 HOST_WIDE_INT offset = 0;
292
293 switch (TREE_CODE (decl))
294 {
295 default:
296 decl = NULL;
297 break;
298
299 case VAR_DECL:
300 break;
301
302 case ARRAY_REF:
303 case ARRAY_RANGE_REF:
304 case COMPONENT_REF:
305 case BIT_FIELD_REF:
306 case REALPART_EXPR:
307 case IMAGPART_EXPR:
308 case VIEW_CONVERT_EXPR:
309 {
310 HOST_WIDE_INT bitsize, bitpos;
311 tree toffset;
312 int unsignedp, volatilep = 0;
313
314 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
315 &mode, &unsignedp, &volatilep, false);
316 if (bitsize != GET_MODE_BITSIZE (mode)
317 || (bitpos % BITS_PER_UNIT)
318 || (toffset && !tree_fits_shwi_p (toffset)))
319 decl = NULL;
320 else
321 {
322 offset += bitpos / BITS_PER_UNIT;
323 if (toffset)
324 offset += tree_to_hwi (toffset);
325 }
326 break;
327 }
328 }
329
330 if (decl
331 && mode == GET_MODE (x)
332 && TREE_CODE (decl) == VAR_DECL
333 && (TREE_STATIC (decl)
334 || DECL_THREAD_LOCAL_P (decl))
335 && DECL_RTL_SET_P (decl)
336 && MEM_P (DECL_RTL (decl)))
337 {
338 rtx newx;
339
340 offset += MEM_OFFSET (x);
341
342 newx = DECL_RTL (decl);
343
344 if (MEM_P (newx))
345 {
346 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
347
348 /* Avoid creating a new MEM needlessly if we already had
349                 the same address.  We do so if there's no OFFSET and the
350 old address X is identical to NEWX, or if X is of the
351 form (plus NEWX OFFSET), or the NEWX is of the form
352 (plus Y (const_int Z)) and X is that with the offset
353 added: (plus Y (const_int Z+OFFSET)). */
354 if (!((offset == 0
355 || (GET_CODE (o) == PLUS
356 && GET_CODE (XEXP (o, 1)) == CONST_INT
357 && (offset == INTVAL (XEXP (o, 1))
358 || (GET_CODE (n) == PLUS
359 && GET_CODE (XEXP (n, 1)) == CONST_INT
360 && (INTVAL (XEXP (n, 1)) + offset
361 == INTVAL (XEXP (o, 1)))
362 && (n = XEXP (n, 0))))
363 && (o = XEXP (o, 0))))
364 && rtx_equal_p (o, n)))
365 x = adjust_address_nv (newx, mode, offset);
366 }
367 else if (GET_MODE (x) == GET_MODE (newx)
368 && offset == 0)
369 x = newx;
370 }
371 }
372
373 return x;
374 }
375 \f
376 /* Make a unary operation by first seeing if it folds and otherwise making
377 the specified operation. */
378
379 rtx
380 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
381 enum machine_mode op_mode)
382 {
383 rtx tem;
384
385 /* If this simplifies, use it. */
386 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
387 return tem;
388
389 return gen_rtx_fmt_e (code, mode, op);
390 }
391
392 /* Likewise for ternary operations. */
393
394 rtx
395 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
397 {
398 rtx tem;
399
400 /* If this simplifies, use it. */
401 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
402 op0, op1, op2)))
403 return tem;
404
405 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
406 }
407
408 /* Likewise, for relational operations.
409    CMP_MODE specifies the mode in which the comparison is done.  */
410
411 rtx
412 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
413 enum machine_mode cmp_mode, rtx op0, rtx op1)
414 {
415 rtx tem;
416
417 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
418 op0, op1)))
419 return tem;
420
421 return gen_rtx_fmt_ee (code, mode, op0, op1);
422 }
423 \f
424 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
425 and simplify the result. If FN is non-NULL, call this callback on each
426 X, if it returns non-NULL, replace X with its return value and simplify the
427 result. */
428
429 rtx
430 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
431 rtx (*fn) (rtx, const_rtx, void *), void *data)
432 {
433 enum rtx_code code = GET_CODE (x);
434 enum machine_mode mode = GET_MODE (x);
435 enum machine_mode op_mode;
436 const char *fmt;
437 rtx op0, op1, op2, newx, op;
438 rtvec vec, newvec;
439 int i, j;
440
441 if (__builtin_expect (fn != NULL, 0))
442 {
443 newx = fn (x, old_rtx, data);
444 if (newx)
445 return newx;
446 }
447 else if (rtx_equal_p (x, old_rtx))
448 return copy_rtx ((rtx) data);
449
450 switch (GET_RTX_CLASS (code))
451 {
452 case RTX_UNARY:
453 op0 = XEXP (x, 0);
454 op_mode = GET_MODE (op0);
455 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
456 if (op0 == XEXP (x, 0))
457 return x;
458 return simplify_gen_unary (code, mode, op0, op_mode);
459
460 case RTX_BIN_ARITH:
461 case RTX_COMM_ARITH:
462 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
463 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
464 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
465 return x;
466 return simplify_gen_binary (code, mode, op0, op1);
467
468 case RTX_COMPARE:
469 case RTX_COMM_COMPARE:
470 op0 = XEXP (x, 0);
471 op1 = XEXP (x, 1);
472 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
473 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
474 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
475 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
476 return x;
477 return simplify_gen_relational (code, mode, op_mode, op0, op1);
478
479 case RTX_TERNARY:
480 case RTX_BITFIELD_OPS:
481 op0 = XEXP (x, 0);
482 op_mode = GET_MODE (op0);
483 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
484 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
485 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
486 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
487 return x;
488 if (op_mode == VOIDmode)
489 op_mode = GET_MODE (op0);
490 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
491
492 case RTX_EXTRA:
493 if (code == SUBREG)
494 {
495 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
496 if (op0 == SUBREG_REG (x))
497 return x;
498 op0 = simplify_gen_subreg (GET_MODE (x), op0,
499 GET_MODE (SUBREG_REG (x)),
500 SUBREG_BYTE (x));
501 return op0 ? op0 : x;
502 }
503 break;
504
505 case RTX_OBJ:
506 if (code == MEM)
507 {
508 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
509 if (op0 == XEXP (x, 0))
510 return x;
511 return replace_equiv_address_nv (x, op0);
512 }
513 else if (code == LO_SUM)
514 {
515 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
516 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
517
518 /* (lo_sum (high x) x) -> x */
519 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
520 return op1;
521
522 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
523 return x;
524 return gen_rtx_LO_SUM (mode, op0, op1);
525 }
526 break;
527
528 default:
529 break;
530 }
531
532 newx = x;
533 fmt = GET_RTX_FORMAT (code);
534 for (i = 0; fmt[i]; i++)
535 switch (fmt[i])
536 {
537 case 'E':
538 vec = XVEC (x, i);
539 newvec = XVEC (newx, i);
540 for (j = 0; j < GET_NUM_ELEM (vec); j++)
541 {
542 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
543 old_rtx, fn, data);
544 if (op != RTVEC_ELT (vec, j))
545 {
546 if (newvec == vec)
547 {
548 newvec = shallow_copy_rtvec (vec);
549 if (x == newx)
550 newx = shallow_copy_rtx (x);
551 XVEC (newx, i) = newvec;
552 }
553 RTVEC_ELT (newvec, j) = op;
554 }
555 }
556 break;
557
558 case 'e':
559 if (XEXP (x, i))
560 {
561 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
562 if (op != XEXP (x, i))
563 {
564 if (x == newx)
565 newx = shallow_copy_rtx (x);
566 XEXP (newx, i) = op;
567 }
568 }
569 break;
570 }
571 return newx;
572 }
573
574 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
575 resulting RTX. Return a new RTX which is as simplified as possible. */
576
577 rtx
578 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
579 {
580 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
581 }
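
/* Usage sketch for simplify_replace_rtx (illustrative only; the pseudo
   register number 100 is arbitrary).  Substituting a constant for the
   register lets the surrounding PLUS fold to a constant.  */
#if 0
static rtx
simplify_replace_rtx_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx expr = gen_rtx_PLUS (SImode, reg, GEN_INT (4));
  /* Replaces REG with (const_int 6) and simplifies: yields (const_int 10).  */
  return simplify_replace_rtx (expr, reg, GEN_INT (6));
}
#endif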
582 \f
583 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
584 Only handle cases where the truncated value is inherently an rvalue.
585
586 RTL provides two ways of truncating a value:
587
588 1. a lowpart subreg. This form is only a truncation when both
589 the outer and inner modes (here MODE and OP_MODE respectively)
590 are scalar integers, and only then when the subreg is used as
591 an rvalue.
592
593 It is only valid to form such truncating subregs if the
594 truncation requires no action by the target. The onus for
595 proving this is on the creator of the subreg -- e.g. the
596 caller to simplify_subreg or simplify_gen_subreg -- and typically
597 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
598
599 2. a TRUNCATE. This form handles both scalar and compound integers.
600
601 The first form is preferred where valid. However, the TRUNCATE
602 handling in simplify_unary_operation turns the second form into the
603 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
604 so it is generally safe to form rvalue truncations using:
605
606 simplify_gen_unary (TRUNCATE, ...)
607
608 and leave simplify_unary_operation to work out which representation
609 should be used.
610
611 Because of the proof requirements on (1), simplify_truncation must
612 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
613 regardless of whether the outer truncation came from a SUBREG or a
614 TRUNCATE. For example, if the caller has proven that an SImode
615 truncation of:
616
617 (and:DI X Y)
618
619 is a no-op and can be represented as a subreg, it does not follow
620 that SImode truncations of X and Y are also no-ops. On a target
621 like 64-bit MIPS that requires SImode values to be stored in
622 sign-extended form, an SImode truncation of:
623
624 (and:DI (reg:DI X) (const_int 63))
625
626 is trivially a no-op because only the lower 6 bits can be set.
627 However, X is still an arbitrary 64-bit number and so we cannot
628 assume that truncating it too is a no-op. */
629
630 static rtx
631 simplify_truncation (enum machine_mode mode, rtx op,
632 enum machine_mode op_mode)
633 {
634 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
635 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
636 gcc_assert (precision <= op_precision);
637
638 /* Optimize truncations of zero and sign extended values. */
639 if (GET_CODE (op) == ZERO_EXTEND
640 || GET_CODE (op) == SIGN_EXTEND)
641 {
642 /* There are three possibilities. If MODE is the same as the
643 origmode, we can omit both the extension and the subreg.
644 If MODE is not larger than the origmode, we can apply the
645 truncation without the extension. Finally, if the outermode
646 is larger than the origmode, we can just extend to the appropriate
647 mode. */
648 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
649 if (mode == origmode)
650 return XEXP (op, 0);
651 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
652 return simplify_gen_unary (TRUNCATE, mode,
653 XEXP (op, 0), origmode);
654 else
655 return simplify_gen_unary (GET_CODE (op), mode,
656 XEXP (op, 0), origmode);
657 }
658
659 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
660      to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))).  */
661 if (GET_CODE (op) == PLUS
662 || GET_CODE (op) == MINUS
663 || GET_CODE (op) == MULT)
664 {
665 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
666 if (op0)
667 {
668 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
669 if (op1)
670 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
671 }
672 }
673
674   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
675 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 /* Ensure that OP_MODE is at least twice as wide as MODE
680 to avoid the possibility that an outer LSHIFTRT shifts by more
681 than the sign extension's sign_bit_copies and introduces zeros
682 into the high bits of the result. */
683 && 2 * precision <= op_precision
684 && CONST_INT_P (XEXP (op, 1))
685 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
686 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
687 && UINTVAL (XEXP (op, 1)) < precision)
688 return simplify_gen_binary (ASHIFTRT, mode,
689 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
690
691   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
692 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
693 the outer subreg is effectively a truncation to the original mode. */
694 if ((GET_CODE (op) == LSHIFTRT
695 || GET_CODE (op) == ASHIFTRT)
696 && CONST_INT_P (XEXP (op, 1))
697 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
698 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
699 && UINTVAL (XEXP (op, 1)) < precision)
700 return simplify_gen_binary (LSHIFTRT, mode,
701 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
702
703   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
704 to (ashift:QI (x:QI) C), where C is a suitable small constant and
705 the outer subreg is effectively a truncation to the original mode. */
706 if (GET_CODE (op) == ASHIFT
707 && CONST_INT_P (XEXP (op, 1))
708 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
709 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
710 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
711 && UINTVAL (XEXP (op, 1)) < precision)
712 return simplify_gen_binary (ASHIFT, mode,
713 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
714
715 /* Recognize a word extraction from a multi-word subreg. */
716 if ((GET_CODE (op) == LSHIFTRT
717 || GET_CODE (op) == ASHIFTRT)
718 && SCALAR_INT_MODE_P (mode)
719 && SCALAR_INT_MODE_P (op_mode)
720 && precision >= BITS_PER_WORD
721 && 2 * precision <= op_precision
722 && CONST_INT_P (XEXP (op, 1))
723 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
724 && UINTVAL (XEXP (op, 1)) < op_precision)
725 {
726 int byte = subreg_lowpart_offset (mode, op_mode);
727 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
728 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
729 (WORDS_BIG_ENDIAN
730 ? byte - shifted_bytes
731 : byte + shifted_bytes));
732 }
733
734 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
735 and try replacing the TRUNCATE and shift with it. Don't do this
736 if the MEM has a mode-dependent address. */
737 if ((GET_CODE (op) == LSHIFTRT
738 || GET_CODE (op) == ASHIFTRT)
739 && SCALAR_INT_MODE_P (op_mode)
740 && MEM_P (XEXP (op, 0))
741 && CONST_INT_P (XEXP (op, 1))
742 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
743 && INTVAL (XEXP (op, 1)) > 0
744 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
745 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
746 MEM_ADDR_SPACE (XEXP (op, 0)))
747 && ! MEM_VOLATILE_P (XEXP (op, 0))
748 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
749 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
750 {
751 int byte = subreg_lowpart_offset (mode, op_mode);
752 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
753 return adjust_address_nv (XEXP (op, 0), mode,
754 (WORDS_BIG_ENDIAN
755 ? byte - shifted_bytes
756 : byte + shifted_bytes));
757 }
758
759 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
760 (OP:SI foo:SI) if OP is NEG or ABS. */
761 if ((GET_CODE (op) == ABS
762 || GET_CODE (op) == NEG)
763 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
764 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
765 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
766 return simplify_gen_unary (GET_CODE (op), mode,
767 XEXP (XEXP (op, 0), 0), mode);
768
769 /* (truncate:A (subreg:B (truncate:C X) 0)) is
770 (truncate:A X). */
771 if (GET_CODE (op) == SUBREG
772 && SCALAR_INT_MODE_P (mode)
773 && SCALAR_INT_MODE_P (op_mode)
774 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
775 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
776 && subreg_lowpart_p (op))
777 {
778 rtx inner = XEXP (SUBREG_REG (op), 0);
779 if (GET_MODE_PRECISION (mode)
780 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
781 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
782 else
783 /* If subreg above is paradoxical and C is narrower
784 than A, return (subreg:A (truncate:C X) 0). */
785 return simplify_gen_subreg (mode, SUBREG_REG (op),
786 GET_MODE (SUBREG_REG (op)), 0);
787 }
788
789 /* (truncate:A (truncate:B X)) is (truncate:A X). */
790 if (GET_CODE (op) == TRUNCATE)
791 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
792 GET_MODE (XEXP (op, 0)));
793
794 return NULL_RTX;
795 }
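
/* Sketch of the extension rule above (illustrative only; SI_REG is
   assumed to be an SImode REG).  Truncating a zero extension back to
   the original mode simply drops the extension.  */
#if 0
static rtx
simplify_truncation_example (rtx si_reg)
{
  rtx ext = gen_rtx_ZERO_EXTEND (DImode, si_reg);
  /* Returns SI_REG itself via the ZERO_EXTEND/SIGN_EXTEND case above.  */
  return simplify_gen_unary (TRUNCATE, SImode, ext, DImode);
}
#endif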
796 \f
797 /* Try to simplify a unary operation CODE whose output mode is to be
798 MODE with input operand OP whose mode was originally OP_MODE.
799 Return zero if no simplification can be made. */
800 rtx
801 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
802 rtx op, enum machine_mode op_mode)
803 {
804 rtx trueop, tem;
805
806 trueop = avoid_constant_pool_reference (op);
807
808 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
809 if (tem)
810 return tem;
811
812 return simplify_unary_operation_1 (code, mode, op);
813 }
814
815 /* Perform some simplifications we can do even if the operands
816 aren't constant. */
817 static rtx
818 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
819 {
820 enum rtx_code reversed;
821 rtx temp;
822
823 switch (code)
824 {
825 case NOT:
826 /* (not (not X)) == X. */
827 if (GET_CODE (op) == NOT)
828 return XEXP (op, 0);
829
830 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
831 comparison is all ones. */
832 if (COMPARISON_P (op)
833 && (mode == BImode || STORE_FLAG_VALUE == -1)
834 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
835 return simplify_gen_relational (reversed, mode, VOIDmode,
836 XEXP (op, 0), XEXP (op, 1));
837
838 /* (not (plus X -1)) can become (neg X). */
839 if (GET_CODE (op) == PLUS
840 && XEXP (op, 1) == constm1_rtx)
841 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
842
843 /* Similarly, (not (neg X)) is (plus X -1). */
844 if (GET_CODE (op) == NEG)
845 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
846 CONSTM1_RTX (mode));
847
848 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
849 if (GET_CODE (op) == XOR
850 && CONST_INT_P (XEXP (op, 1))
851 && (temp = simplify_unary_operation (NOT, mode,
852 XEXP (op, 1), mode)) != 0)
853 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
854
855 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
856 if (GET_CODE (op) == PLUS
857 && CONST_INT_P (XEXP (op, 1))
858 && mode_signbit_p (mode, XEXP (op, 1))
859 && (temp = simplify_unary_operation (NOT, mode,
860 XEXP (op, 1), mode)) != 0)
861 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
862
863
864 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
865 operands other than 1, but that is not valid. We could do a
866 similar simplification for (not (lshiftrt C X)) where C is
867 just the sign bit, but this doesn't seem common enough to
868 bother with. */
869 if (GET_CODE (op) == ASHIFT
870 && XEXP (op, 0) == const1_rtx)
871 {
872 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
873 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
874 }
875
876 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
877 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
878 so we can perform the above simplification. */
879 if (STORE_FLAG_VALUE == -1
880 && GET_CODE (op) == ASHIFTRT
881           && CONST_INT_P (XEXP (op, 1))
882 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
883 return simplify_gen_relational (GE, mode, VOIDmode,
884 XEXP (op, 0), const0_rtx);
885
886
887 if (GET_CODE (op) == SUBREG
888 && subreg_lowpart_p (op)
889 && (GET_MODE_SIZE (GET_MODE (op))
890 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
891 && GET_CODE (SUBREG_REG (op)) == ASHIFT
892 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
893 {
894 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
895 rtx x;
896
897 x = gen_rtx_ROTATE (inner_mode,
898 simplify_gen_unary (NOT, inner_mode, const1_rtx,
899 inner_mode),
900 XEXP (SUBREG_REG (op), 1));
901 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
902 if (temp)
903 return temp;
904 }
905
906 /* Apply De Morgan's laws to reduce number of patterns for machines
907 with negating logical insns (and-not, nand, etc.). If result has
908 only one NOT, put it first, since that is how the patterns are
909 coded. */
910 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
911 {
912 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
913 enum machine_mode op_mode;
914
915 op_mode = GET_MODE (in1);
916 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
917
918 op_mode = GET_MODE (in2);
919 if (op_mode == VOIDmode)
920 op_mode = mode;
921 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
922
923 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
924 {
925 rtx tem = in2;
926 in2 = in1; in1 = tem;
927 }
928
929 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
930 mode, in1, in2);
931 }
932
933 /* (not (bswap x)) -> (bswap (not x)). */
934 if (GET_CODE (op) == BSWAP)
935 {
936 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
937 return simplify_gen_unary (BSWAP, mode, x, mode);
938 }
939 break;
940
941 case NEG:
942 /* (neg (neg X)) == X. */
943 if (GET_CODE (op) == NEG)
944 return XEXP (op, 0);
945
946 /* (neg (plus X 1)) can become (not X). */
947 if (GET_CODE (op) == PLUS
948 && XEXP (op, 1) == const1_rtx)
949 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
950
951 /* Similarly, (neg (not X)) is (plus X 1). */
952 if (GET_CODE (op) == NOT)
953 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
954 CONST1_RTX (mode));
955
956 /* (neg (minus X Y)) can become (minus Y X). This transformation
957 isn't safe for modes with signed zeros, since if X and Y are
958 both +0, (minus Y X) is the same as (minus X Y). If the
959 rounding mode is towards +infinity (or -infinity) then the two
960 expressions will be rounded differently. */
961 if (GET_CODE (op) == MINUS
962 && !HONOR_SIGNED_ZEROS (mode)
963 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
964 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
965
966 if (GET_CODE (op) == PLUS
967 && !HONOR_SIGNED_ZEROS (mode)
968 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
969 {
970 /* (neg (plus A C)) is simplified to (minus -C A). */
971 if (CONST_SCALAR_INT_P (XEXP (op, 1))
972 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
973 {
974 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
975 if (temp)
976 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
977 }
978
979 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
980 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
981 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
982 }
983
984 /* (neg (mult A B)) becomes (mult A (neg B)).
985 This works even for floating-point values. */
986 if (GET_CODE (op) == MULT
987 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
988 {
989 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
990 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
991 }
992
993 /* NEG commutes with ASHIFT since it is multiplication. Only do
994 this if we can then eliminate the NEG (e.g., if the operand
995 is a constant). */
996 if (GET_CODE (op) == ASHIFT)
997 {
998 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
999 if (temp)
1000 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1001 }
1002
1003 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1004 C is equal to the width of MODE minus 1. */
1005 if (GET_CODE (op) == ASHIFTRT
1006 && CONST_INT_P (XEXP (op, 1))
1007 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1008 return simplify_gen_binary (LSHIFTRT, mode,
1009 XEXP (op, 0), XEXP (op, 1));
1010
1011 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1012 C is equal to the width of MODE minus 1. */
1013 if (GET_CODE (op) == LSHIFTRT
1014 && CONST_INT_P (XEXP (op, 1))
1015 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1016 return simplify_gen_binary (ASHIFTRT, mode,
1017 XEXP (op, 0), XEXP (op, 1));
1018
1019 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1020 if (GET_CODE (op) == XOR
1021 && XEXP (op, 1) == const1_rtx
1022 && nonzero_bits (XEXP (op, 0), mode) == 1)
1023 return plus_constant (mode, XEXP (op, 0), -1);
1024
1025 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1026 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1027 if (GET_CODE (op) == LT
1028 && XEXP (op, 1) == const0_rtx
1029 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1030 {
1031 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1032 int isize = GET_MODE_PRECISION (inner);
1033 if (STORE_FLAG_VALUE == 1)
1034 {
1035 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1036 GEN_INT (isize - 1));
1037 if (mode == inner)
1038 return temp;
1039 if (GET_MODE_PRECISION (mode) > isize)
1040 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1041 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1042 }
1043 else if (STORE_FLAG_VALUE == -1)
1044 {
1045 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1046 GEN_INT (isize - 1));
1047 if (mode == inner)
1048 return temp;
1049 if (GET_MODE_PRECISION (mode) > isize)
1050 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1051 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1052 }
1053 }
1054 break;
1055
1056 case TRUNCATE:
1057 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1058 with the umulXi3_highpart patterns. */
1059 if (GET_CODE (op) == LSHIFTRT
1060 && GET_CODE (XEXP (op, 0)) == MULT)
1061 break;
1062
1063 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1064 {
1065 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1066 {
1067 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1068 if (temp)
1069 return temp;
1070 }
1071 /* We can't handle truncation to a partial integer mode here
1072 because we don't know the real bitsize of the partial
1073 integer mode. */
1074 break;
1075 }
1076
1077 if (GET_MODE (op) != VOIDmode)
1078 {
1079 temp = simplify_truncation (mode, op, GET_MODE (op));
1080 if (temp)
1081 return temp;
1082 }
1083
1084 /* If we know that the value is already truncated, we can
1085 replace the TRUNCATE with a SUBREG. */
1086 if (GET_MODE_NUNITS (mode) == 1
1087 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1088 || truncated_to_mode (mode, op)))
1089 {
1090 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1091 if (temp)
1092 return temp;
1093 }
1094
1095 /* A truncate of a comparison can be replaced with a subreg if
1096 STORE_FLAG_VALUE permits. This is like the previous test,
1097 but it works even if the comparison is done in a mode larger
1098 than HOST_BITS_PER_WIDE_INT. */
1099 if (HWI_COMPUTABLE_MODE_P (mode)
1100 && COMPARISON_P (op)
1101 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1102 {
1103 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1104 if (temp)
1105 return temp;
1106 }
1107
1108 /* A truncate of a memory is just loading the low part of the memory
1109 if we are not changing the meaning of the address. */
1110 if (GET_CODE (op) == MEM
1111 && !VECTOR_MODE_P (mode)
1112 && !MEM_VOLATILE_P (op)
1113 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1114 {
1115 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1116 if (temp)
1117 return temp;
1118 }
1119
1120 break;
1121
1122 case FLOAT_TRUNCATE:
1123 if (DECIMAL_FLOAT_MODE_P (mode))
1124 break;
1125
1126 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1127 if (GET_CODE (op) == FLOAT_EXTEND
1128 && GET_MODE (XEXP (op, 0)) == mode)
1129 return XEXP (op, 0);
1130
1131 /* (float_truncate:SF (float_truncate:DF foo:XF))
1132 = (float_truncate:SF foo:XF).
1133 This may eliminate double rounding, so it is unsafe.
1134
1135 (float_truncate:SF (float_extend:XF foo:DF))
1136 = (float_truncate:SF foo:DF).
1137
1138 (float_truncate:DF (float_extend:XF foo:SF))
1139             = (float_extend:DF foo:SF).  */
1140 if ((GET_CODE (op) == FLOAT_TRUNCATE
1141 && flag_unsafe_math_optimizations)
1142 || GET_CODE (op) == FLOAT_EXTEND)
1143 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1144 0)))
1145 > GET_MODE_SIZE (mode)
1146 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1147 mode,
1148 XEXP (op, 0), mode);
1149
1150 /* (float_truncate (float x)) is (float x) */
1151 if (GET_CODE (op) == FLOAT
1152 && (flag_unsafe_math_optimizations
1153 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1154 && ((unsigned)significand_size (GET_MODE (op))
1155 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1156 - num_sign_bit_copies (XEXP (op, 0),
1157 GET_MODE (XEXP (op, 0))))))))
1158 return simplify_gen_unary (FLOAT, mode,
1159 XEXP (op, 0),
1160 GET_MODE (XEXP (op, 0)));
1161
1162       /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
1163 (OP:SF foo:SF) if OP is NEG or ABS. */
1164 if ((GET_CODE (op) == ABS
1165 || GET_CODE (op) == NEG)
1166 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1167 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1168 return simplify_gen_unary (GET_CODE (op), mode,
1169 XEXP (XEXP (op, 0), 0), mode);
1170
1171 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1172 is (float_truncate:SF x). */
1173 if (GET_CODE (op) == SUBREG
1174 && subreg_lowpart_p (op)
1175 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1176 return SUBREG_REG (op);
1177 break;
1178
1179 case FLOAT_EXTEND:
1180 if (DECIMAL_FLOAT_MODE_P (mode))
1181 break;
1182
1183 /* (float_extend (float_extend x)) is (float_extend x)
1184
1185 (float_extend (float x)) is (float x) assuming that double
1186 rounding can't happen.
1187 */
1188 if (GET_CODE (op) == FLOAT_EXTEND
1189 || (GET_CODE (op) == FLOAT
1190 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1191 && ((unsigned)significand_size (GET_MODE (op))
1192 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1193 - num_sign_bit_copies (XEXP (op, 0),
1194 GET_MODE (XEXP (op, 0)))))))
1195 return simplify_gen_unary (GET_CODE (op), mode,
1196 XEXP (op, 0),
1197 GET_MODE (XEXP (op, 0)));
1198
1199 break;
1200
1201 case ABS:
1202 /* (abs (neg <foo>)) -> (abs <foo>) */
1203 if (GET_CODE (op) == NEG)
1204 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1205 GET_MODE (XEXP (op, 0)));
1206
1207 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1208 do nothing. */
1209 if (GET_MODE (op) == VOIDmode)
1210 break;
1211
1212 /* If operand is something known to be positive, ignore the ABS. */
1213 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1214 || val_signbit_known_clear_p (GET_MODE (op),
1215 nonzero_bits (op, GET_MODE (op))))
1216 return op;
1217
1218 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1219 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1220 return gen_rtx_NEG (mode, op);
1221
1222 break;
1223
1224 case FFS:
1225 /* (ffs (*_extend <X>)) = (ffs <X>) */
1226 if (GET_CODE (op) == SIGN_EXTEND
1227 || GET_CODE (op) == ZERO_EXTEND)
1228 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1229 GET_MODE (XEXP (op, 0)));
1230 break;
1231
1232 case POPCOUNT:
1233 switch (GET_CODE (op))
1234 {
1235 case BSWAP:
1236 case ZERO_EXTEND:
1237 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1238 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1239 GET_MODE (XEXP (op, 0)));
1240
1241 case ROTATE:
1242 case ROTATERT:
1243 /* Rotations don't affect popcount. */
1244 if (!side_effects_p (XEXP (op, 1)))
1245 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1246 GET_MODE (XEXP (op, 0)));
1247 break;
1248
1249 default:
1250 break;
1251 }
1252 break;
1253
1254 case PARITY:
1255 switch (GET_CODE (op))
1256 {
1257 case NOT:
1258 case BSWAP:
1259 case ZERO_EXTEND:
1260 case SIGN_EXTEND:
1261 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1262 GET_MODE (XEXP (op, 0)));
1263
1264 case ROTATE:
1265 case ROTATERT:
1266 /* Rotations don't affect parity. */
1267 if (!side_effects_p (XEXP (op, 1)))
1268 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1269 GET_MODE (XEXP (op, 0)));
1270 break;
1271
1272 default:
1273 break;
1274 }
1275 break;
1276
1277 case BSWAP:
1278 /* (bswap (bswap x)) -> x. */
1279 if (GET_CODE (op) == BSWAP)
1280 return XEXP (op, 0);
1281 break;
1282
1283 case FLOAT:
1284 /* (float (sign_extend <X>)) = (float <X>). */
1285 if (GET_CODE (op) == SIGN_EXTEND)
1286 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1287 GET_MODE (XEXP (op, 0)));
1288 break;
1289
1290 case SIGN_EXTEND:
1291 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1292 becomes just the MINUS if its mode is MODE. This allows
1293 folding switch statements on machines using casesi (such as
1294 the VAX). */
1295 if (GET_CODE (op) == TRUNCATE
1296 && GET_MODE (XEXP (op, 0)) == mode
1297 && GET_CODE (XEXP (op, 0)) == MINUS
1298 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1299 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1300 return XEXP (op, 0);
1301
1302 /* Extending a widening multiplication should be canonicalized to
1303 a wider widening multiplication. */
1304 if (GET_CODE (op) == MULT)
1305 {
1306 rtx lhs = XEXP (op, 0);
1307 rtx rhs = XEXP (op, 1);
1308 enum rtx_code lcode = GET_CODE (lhs);
1309 enum rtx_code rcode = GET_CODE (rhs);
1310
1311 /* Widening multiplies usually extend both operands, but sometimes
1312 they use a shift to extract a portion of a register. */
1313 if ((lcode == SIGN_EXTEND
1314 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1315 && (rcode == SIGN_EXTEND
1316 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1317 {
1318 enum machine_mode lmode = GET_MODE (lhs);
1319 enum machine_mode rmode = GET_MODE (rhs);
1320 int bits;
1321
1322 if (lcode == ASHIFTRT)
1323 /* Number of bits not shifted off the end. */
1324 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1325 else /* lcode == SIGN_EXTEND */
1326 /* Size of inner mode. */
1327 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1328
1329 if (rcode == ASHIFTRT)
1330 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1331 else /* rcode == SIGN_EXTEND */
1332 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1333
1334              /* We can only widen multiplies if the result is mathematically
1335                 equivalent, i.e. if overflow was impossible.  */
1336 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1337 return simplify_gen_binary
1338 (MULT, mode,
1339 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1340 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1341 }
1342 }
1343
1344 /* Check for a sign extension of a subreg of a promoted
1345 variable, where the promotion is sign-extended, and the
1346 target mode is the same as the variable's promotion. */
1347 if (GET_CODE (op) == SUBREG
1348 && SUBREG_PROMOTED_VAR_P (op)
1349 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1350 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1351 {
1352 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1353 if (temp)
1354 return temp;
1355 }
1356
1357 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1358 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1359 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1360 {
1361 gcc_assert (GET_MODE_BITSIZE (mode)
1362 > GET_MODE_BITSIZE (GET_MODE (op)));
1363 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1364 GET_MODE (XEXP (op, 0)));
1365 }
1366
1367 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1368 is (sign_extend:M (subreg:O <X>)) if there is mode with
1369 GET_MODE_BITSIZE (N) - I bits.
1370 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1371 is similarly (zero_extend:M (subreg:O <X>)). */
1372 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1373 && GET_CODE (XEXP (op, 0)) == ASHIFT
1374 && CONST_INT_P (XEXP (op, 1))
1375 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1376 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1377 {
1378 enum machine_mode tmode
1379 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1380 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1381 gcc_assert (GET_MODE_BITSIZE (mode)
1382 > GET_MODE_BITSIZE (GET_MODE (op)));
1383 if (tmode != BLKmode)
1384 {
1385 rtx inner =
1386 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1387 if (inner)
1388 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1389 ? SIGN_EXTEND : ZERO_EXTEND,
1390 mode, inner, tmode);
1391 }
1392 }
1393
1394 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1395 /* As we do not know which address space the pointer is referring to,
1396 we can do this only if the target does not support different pointer
1397 or address modes depending on the address space. */
1398 if (target_default_pointer_address_modes_p ()
1399 && ! POINTERS_EXTEND_UNSIGNED
1400 && mode == Pmode && GET_MODE (op) == ptr_mode
1401 && (CONSTANT_P (op)
1402 || (GET_CODE (op) == SUBREG
1403 && REG_P (SUBREG_REG (op))
1404 && REG_POINTER (SUBREG_REG (op))
1405 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1406 return convert_memory_address (Pmode, op);
1407 #endif
1408 break;
1409
1410 case ZERO_EXTEND:
1411 /* Check for a zero extension of a subreg of a promoted
1412 variable, where the promotion is zero-extended, and the
1413 target mode is the same as the variable's promotion. */
1414 if (GET_CODE (op) == SUBREG
1415 && SUBREG_PROMOTED_VAR_P (op)
1416 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1417 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1418 {
1419 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1420 if (temp)
1421 return temp;
1422 }
1423
1424 /* Extending a widening multiplication should be canonicalized to
1425 a wider widening multiplication. */
1426 if (GET_CODE (op) == MULT)
1427 {
1428 rtx lhs = XEXP (op, 0);
1429 rtx rhs = XEXP (op, 1);
1430 enum rtx_code lcode = GET_CODE (lhs);
1431 enum rtx_code rcode = GET_CODE (rhs);
1432
1433 /* Widening multiplies usually extend both operands, but sometimes
1434 they use a shift to extract a portion of a register. */
1435 if ((lcode == ZERO_EXTEND
1436 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1437 && (rcode == ZERO_EXTEND
1438 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1439 {
1440 enum machine_mode lmode = GET_MODE (lhs);
1441 enum machine_mode rmode = GET_MODE (rhs);
1442 int bits;
1443
1444 if (lcode == LSHIFTRT)
1445 /* Number of bits not shifted off the end. */
1446 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1447 else /* lcode == ZERO_EXTEND */
1448 /* Size of inner mode. */
1449 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1450
1451 if (rcode == LSHIFTRT)
1452 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1453 else /* rcode == ZERO_EXTEND */
1454 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1455
1456              /* We can only widen multiplies if the result is mathematically
1457                 equivalent, i.e. if overflow was impossible.  */
1458 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1459 return simplify_gen_binary
1460 (MULT, mode,
1461 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1462 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1463 }
1464 }
1465
1466 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1467 if (GET_CODE (op) == ZERO_EXTEND)
1468 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1469 GET_MODE (XEXP (op, 0)));
1470
1471 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1472 is (zero_extend:M (subreg:O <X>)) if there is mode with
1473 GET_MODE_BITSIZE (N) - I bits. */
1474 if (GET_CODE (op) == LSHIFTRT
1475 && GET_CODE (XEXP (op, 0)) == ASHIFT
1476 && CONST_INT_P (XEXP (op, 1))
1477 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1478 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1479 {
1480 enum machine_mode tmode
1481 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1482 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1483 if (tmode != BLKmode)
1484 {
1485 rtx inner =
1486 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1487 if (inner)
1488 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1489 }
1490 }
1491
1492 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1493 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1494 of mode N. E.g.
1495 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1496 (and:SI (reg:SI) (const_int 63)). */
1497 if (GET_CODE (op) == SUBREG
1498 && GET_MODE_PRECISION (GET_MODE (op))
1499 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1500 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1501 <= HOST_BITS_PER_WIDE_INT
1502 && GET_MODE_PRECISION (mode)
1503 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1504 && subreg_lowpart_p (op)
1505 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1506 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1507 {
1508 if (GET_MODE_PRECISION (mode)
1509 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1510 return SUBREG_REG (op);
1511 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1512 GET_MODE (SUBREG_REG (op)));
1513 }
1514
1515 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1516 /* As we do not know which address space the pointer is referring to,
1517 we can do this only if the target does not support different pointer
1518 or address modes depending on the address space. */
1519 if (target_default_pointer_address_modes_p ()
1520 && POINTERS_EXTEND_UNSIGNED > 0
1521 && mode == Pmode && GET_MODE (op) == ptr_mode
1522 && (CONSTANT_P (op)
1523 || (GET_CODE (op) == SUBREG
1524 && REG_P (SUBREG_REG (op))
1525 && REG_POINTER (SUBREG_REG (op))
1526 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1527 return convert_memory_address (Pmode, op);
1528 #endif
1529 break;
1530
1531 default:
1532 break;
1533 }
1534
1535 return 0;
1536 }
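
/* Sketch of the (not (not X)) rule above (illustrative only; X is
   assumed to be an SImode REG).  Double negation cancels, so the
   original operand is returned unchanged.  */
#if 0
static rtx
simplify_not_not_example (rtx x)
{
  rtx inner = simplify_gen_unary (NOT, SImode, x, SImode);
  return simplify_gen_unary (NOT, SImode, inner, SImode);  /* yields X */
}
#endif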
1537
1538 /* Try to compute the value of a unary operation CODE whose output mode is to
1539 be MODE with input operand OP whose mode was originally OP_MODE.
1540 Return zero if the value cannot be computed. */
1541 rtx
1542 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1543 rtx op, enum machine_mode op_mode)
1544 {
1545 unsigned int width = GET_MODE_PRECISION (mode);
1546
1547 if (code == VEC_DUPLICATE)
1548 {
1549 gcc_assert (VECTOR_MODE_P (mode));
1550 if (GET_MODE (op) != VOIDmode)
1551 {
1552 if (!VECTOR_MODE_P (GET_MODE (op)))
1553 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1554 else
1555 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1556 (GET_MODE (op)));
1557 }
1558 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1559 || GET_CODE (op) == CONST_VECTOR)
1560 {
1561 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1562 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1563 rtvec v = rtvec_alloc (n_elts);
1564 unsigned int i;
1565
1566 if (GET_CODE (op) != CONST_VECTOR)
1567 for (i = 0; i < n_elts; i++)
1568 RTVEC_ELT (v, i) = op;
1569 else
1570 {
1571 enum machine_mode inmode = GET_MODE (op);
1572 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1573 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1574
1575 gcc_assert (in_n_elts < n_elts);
1576 gcc_assert ((n_elts % in_n_elts) == 0);
1577 for (i = 0; i < n_elts; i++)
1578 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1579 }
1580 return gen_rtx_CONST_VECTOR (mode, v);
1581 }
1582 }
1583
1584 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1585 {
1586 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1587 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1588 enum machine_mode opmode = GET_MODE (op);
1589 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1590 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1591 rtvec v = rtvec_alloc (n_elts);
1592 unsigned int i;
1593
1594 gcc_assert (op_n_elts == n_elts);
1595 for (i = 0; i < n_elts; i++)
1596 {
1597 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1598 CONST_VECTOR_ELT (op, i),
1599 GET_MODE_INNER (opmode));
1600 if (!x)
1601 return 0;
1602 RTVEC_ELT (v, i) = x;
1603 }
1604 return gen_rtx_CONST_VECTOR (mode, v);
1605 }
1606
1607 /* The order of these tests is critical so that, for example, we don't
1608 check the wrong mode (input vs. output) for a conversion operation,
1609 such as FIX. At some point, this should be simplified. */
1610
1611 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1612 {
1613 REAL_VALUE_TYPE d;
1614
1615 if (op_mode == VOIDmode)
1616 {
1617          /* CONST_INTs have VOIDmode as their mode.  We assume that all
1618             the bits of the constant are significant, though this is a
1619             dangerous assumption: CONST_INTs are often created and used
1620             with garbage in the bits outside the precision of their
1621             implied mode.  */
1622 op_mode = mode_for_size (MAX_BITSIZE_MODE_ANY_INT, MODE_INT, 0);
1623 }
1624
1625 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1626 d = real_value_truncate (mode, d);
1627 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1628 }
1629 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1630 {
1631 REAL_VALUE_TYPE d;
1632
1633 if (op_mode == VOIDmode)
1634 {
1635          /* CONST_INTs have VOIDmode as their mode.  We assume that all
1636             the bits of the constant are significant, though this is a
1637             dangerous assumption: CONST_INTs are often created and used
1638             with garbage in the bits outside the precision of their
1639             implied mode.  */
1640 op_mode = mode_for_size (MAX_BITSIZE_MODE_ANY_INT, MODE_INT, 0);
1641 }
1642
1643 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1644 d = real_value_truncate (mode, d);
1645 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1646 }
1647
1648 if (CONST_SCALAR_INT_P (op) && width > 0)
1649 {
1650 wide_int result;
1651 enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1652 rtx_mode_t op0 = std::make_pair (op, imode);
1653
1654 #if TARGET_SUPPORTS_WIDE_INT == 0
1655 /* This assert keeps the simplification from producing a result
1656 that cannot be represented in a CONST_DOUBLE but a lot of
1657 upstream callers expect that this function never fails to
1658          simplify something and so if you added this to the test
1659 above the code would die later anyway. If this assert
1660 happens, you just need to make the port support wide int. */
1661 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1662 #endif
1663
1664 switch (code)
1665 {
1666 case NOT:
1667 result = wi::bit_not (op0);
1668 break;
1669
1670 case NEG:
1671 result = wi::neg (op0);
1672 break;
1673
1674 case ABS:
1675 result = wi::abs (op0);
1676 break;
1677
1678 case FFS:
1679 result = wi::shwi (wi::ffs (op0), mode);
1680 break;
1681
1682 case CLZ:
1683 result = wi::shwi (wi::clz (op0), mode);
1684 break;
1685
1686 case CLRSB:
1687 result = wi::shwi (wi::clrsb (op0), mode);
1688 break;
1689
1690 case CTZ:
1691 result = wi::shwi (wi::ctz (op0), mode);
1692 break;
1693
1694 case POPCOUNT:
1695 result = wi::shwi (wi::popcount (op0), mode);
1696 break;
1697
1698 case PARITY:
1699 result = wi::shwi (wi::parity (op0), mode);
1700 break;
1701
1702 case BSWAP:
1703 result = wide_int (op0).bswap ();
1704 break;
1705
1706 case TRUNCATE:
1707 case ZERO_EXTEND:
1708 result = wide_int::from (op0, width, UNSIGNED);
1709 break;
1710
1711 case SIGN_EXTEND:
1712 result = wide_int::from (op0, width, SIGNED);
1713 break;
1714
1715 case SQRT:
1716 default:
1717 return 0;
1718 }
1719
1720 return immed_wide_int_const (result, mode);
1721 }
1722
1723 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1724 && SCALAR_FLOAT_MODE_P (mode)
1725 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1726 {
1727 REAL_VALUE_TYPE d, t;
1728 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1729
1730 switch (code)
1731 {
1732 case SQRT:
1733 if (HONOR_SNANS (mode) && real_isnan (&d))
1734 return 0;
1735 real_sqrt (&t, mode, &d);
1736 d = t;
1737 break;
1738 case ABS:
1739 d = real_value_abs (&d);
1740 break;
1741 case NEG:
1742 d = real_value_negate (&d);
1743 break;
1744 case FLOAT_TRUNCATE:
1745 d = real_value_truncate (mode, d);
1746 break;
1747 case FLOAT_EXTEND:
1748 /* All this does is change the mode, unless the mode
1749 class changes as well. */
1750 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1751 real_convert (&d, mode, &d);
1752 break;
1753 case FIX:
1754 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1755 break;
1756 case NOT:
1757 {
1758 long tmp[4];
1759 int i;
1760
1761 real_to_target (tmp, &d, GET_MODE (op));
1762 for (i = 0; i < 4; i++)
1763 tmp[i] = ~tmp[i];
1764 real_from_target (&d, tmp, mode);
1765 break;
1766 }
1767 default:
1768 gcc_unreachable ();
1769 }
1770 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1771 }
1772 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1773 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1774 && GET_MODE_CLASS (mode) == MODE_INT
1775 && width > 0)
1776 {
1777 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1778 operators are intentionally left unspecified (to ease implementation
1779 by target backends), for consistency, this routine implements the
1780 same semantics for constant folding as used by the middle-end. */
1781
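/* Concretely, for a 32-bit integer MODE the checks below saturate:
   FIX of 3.0e10 folds to 0x7fffffff, UNSIGNED_FIX of any negative
   value folds to 0, and a NaN operand folds to 0 for both codes. */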
1782 /* This was formerly used only for non-IEEE float.
1783 eggert@twinsun.com says it is safe for IEEE also. */
1784 REAL_VALUE_TYPE x, t;
1785 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1786 wide_int wmax, wmin;
1787 /* This is part of the ABI of real_to_integer, but we check
1788 things before making this call. */
1789 bool fail;
1790
1791 switch (code)
1792 {
1793 case FIX:
1794 if (REAL_VALUE_ISNAN (x))
1795 return const0_rtx;
1796
1797 /* Test against the signed upper bound. */
1798 wmax = wi::max_value (width, SIGNED);
1799 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1800 if (REAL_VALUES_LESS (t, x))
1801 return immed_wide_int_const (wmax, mode);
1802
1803 /* Test against the signed lower bound. */
1804 wmin = wi::min_value (width, SIGNED);
1805 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1806 if (REAL_VALUES_LESS (x, t))
1807 return immed_wide_int_const (wmin, mode);
1808
1809 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1810 break;
1811
1812 case UNSIGNED_FIX:
1813 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1814 return const0_rtx;
1815
1816 /* Test against the unsigned upper bound. */
1817 wmax = wi::max_value (width, UNSIGNED);
1818 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1819 if (REAL_VALUES_LESS (t, x))
1820 return immed_wide_int_const (wmax, mode);
1821
1822 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1823 mode);
1824 break;
1825
1826 default:
1827 gcc_unreachable ();
1828 }
1829 }
1830
1831 return NULL_RTX;
1832 }
1833 \f
1834 /* Subroutine of simplify_binary_operation to simplify a binary operation
1835 CODE that can commute with byte swapping, with result mode MODE and
1836 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1837 Return zero if no simplification or canonicalization is possible. */
1838
1839 static rtx
1840 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
1841 rtx op0, rtx op1)
1842 {
1843 rtx tem;
1844
1845 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1846 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1847 {
1848 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1849 simplify_gen_unary (BSWAP, mode, op1, mode));
1850 return simplify_gen_unary (BSWAP, mode, tem, mode);
1851 }
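/* For example, with SImode operands, (and (bswap x) (const_int 0xff))
   masks the low byte of the swapped value; it is rewritten here as
   (bswap (and x C2)) where C2 is 0xff byte-swapped, i.e. 0xff000000. */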
1852
1853 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1854 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1855 {
1856 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1857 return simplify_gen_unary (BSWAP, mode, tem, mode);
1858 }
1859
1860 return NULL_RTX;
1861 }
1862
1863 /* Subroutine of simplify_binary_operation to simplify a commutative,
1864 associative binary operation CODE with result mode MODE, operating
1865 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1866 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1867 canonicalization is possible. */
1868
1869 static rtx
1870 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1871 rtx op0, rtx op1)
1872 {
1873 rtx tem;
1874
1875 /* Linearize the operator to the left. */
1876 if (GET_CODE (op1) == code)
1877 {
1878 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
1879 if (GET_CODE (op0) == code)
1880 {
1881 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1882 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1883 }
1884
1885 /* "a op (b op c)" becomes "(b op c) op a". */
1886 if (! swap_commutative_operands_p (op1, op0))
1887 return simplify_gen_binary (code, mode, op1, op0);
1888
1889 tem = op0;
1890 op0 = op1;
1891 op1 = tem;
1892 }
1893
1894 if (GET_CODE (op0) == code)
1895 {
1896 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1897 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1898 {
1899 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1900 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1901 }
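/* For instance, (plus (plus x (const_int 4)) y) is canonicalized
   here to (plus (plus x y) (const_int 4)), moving the constant
   outermost so later folds can combine it. */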
1902
1903 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1904 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1905 if (tem != 0)
1906 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1907
1908 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1909 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1910 if (tem != 0)
1911 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1912 }
1913
1914 return 0;
1915 }
1916
1917
1918 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1919 and OP1. Return 0 if no simplification is possible.
1920
1921 Don't use this for relational operations such as EQ or LT.
1922 Use simplify_relational_operation instead. */
1923 rtx
1924 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1925 rtx op0, rtx op1)
1926 {
1927 rtx trueop0, trueop1;
1928 rtx tem;
1929
1930 /* Relational operations don't work here. We must know the mode
1931 of the operands in order to do the comparison correctly.
1932 Assuming a full word can give incorrect results.
1933 Consider comparing 128 with -128 in QImode. */
1934 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1935 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1936
1937 /* Make sure the constant is second. */
1938 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1939 && swap_commutative_operands_p (op0, op1))
1940 {
1941 tem = op0, op0 = op1, op1 = tem;
1942 }
1943
1944 trueop0 = avoid_constant_pool_reference (op0);
1945 trueop1 = avoid_constant_pool_reference (op1);
1946
1947 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1948 if (tem)
1949 return tem;
1950 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1951 }
1952
1953 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1954 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1955 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1956 actual constants. */
1957
1958 static rtx
1959 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1960 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1961 {
1962 rtx tem, reversed, opleft, opright;
1963 HOST_WIDE_INT val;
1964 unsigned int width = GET_MODE_PRECISION (mode);
1965
1966 /* Even if we can't compute a constant result,
1967 there are some cases worth simplifying. */
1968
1969 switch (code)
1970 {
1971 case PLUS:
1972 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1973 when x is NaN, infinite, or finite and nonzero. They aren't
1974 when x is -0 and the rounding mode is not towards -infinity,
1975 since (-0) + 0 is then 0. */
1976 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1977 return op0;
1978
1979 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1980 transformations are safe even for IEEE. */
1981 if (GET_CODE (op0) == NEG)
1982 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1983 else if (GET_CODE (op1) == NEG)
1984 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1985
1986 /* (~a) + 1 -> -a */
1987 if (INTEGRAL_MODE_P (mode)
1988 && GET_CODE (op0) == NOT
1989 && trueop1 == const1_rtx)
1990 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1991
1992 /* Handle both-operands-constant cases. We can only add
1993 CONST_INTs to constants since the sum of relocatable symbols
1994 can't be handled by most assemblers. Don't add CONST_INT
1995 to CONST_INT since overflow won't be computed properly if wider
1996 than HOST_BITS_PER_WIDE_INT. */
1997
1998 if ((GET_CODE (op0) == CONST
1999 || GET_CODE (op0) == SYMBOL_REF
2000 || GET_CODE (op0) == LABEL_REF)
2001 && CONST_INT_P (op1))
2002 return plus_constant (mode, op0, INTVAL (op1));
2003 else if ((GET_CODE (op1) == CONST
2004 || GET_CODE (op1) == SYMBOL_REF
2005 || GET_CODE (op1) == LABEL_REF)
2006 && CONST_INT_P (op0))
2007 return plus_constant (mode, op1, INTVAL (op0));
2008
2009 /* See if this is something like X * C + X or vice versa, or
2010 if the multiplication is written as a shift. If so, we can
2011 distribute and make a new multiply, shift, or maybe just
2012 have X (e.g. for (-X) + X * 2). But don't make
2013 something more expensive than we had before. */
2014
2015 if (SCALAR_INT_MODE_P (mode))
2016 {
2017 rtx lhs = op0, rhs = op1;
2018
2019 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2020 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2021
2022 if (GET_CODE (lhs) == NEG)
2023 {
2024 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2025 lhs = XEXP (lhs, 0);
2026 }
2027 else if (GET_CODE (lhs) == MULT
2028 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2029 {
2030 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2031 lhs = XEXP (lhs, 0);
2032 }
2033 else if (GET_CODE (lhs) == ASHIFT
2034 && CONST_INT_P (XEXP (lhs, 1))
2035 && INTVAL (XEXP (lhs, 1)) >= 0
2036 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2037 {
2038 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2039 GET_MODE_PRECISION (mode));
2040 lhs = XEXP (lhs, 0);
2041 }
2042
2043 if (GET_CODE (rhs) == NEG)
2044 {
2045 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2046 rhs = XEXP (rhs, 0);
2047 }
2048 else if (GET_CODE (rhs) == MULT
2049 && CONST_INT_P (XEXP (rhs, 1)))
2050 {
2051 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2052 rhs = XEXP (rhs, 0);
2053 }
2054 else if (GET_CODE (rhs) == ASHIFT
2055 && CONST_INT_P (XEXP (rhs, 1))
2056 && INTVAL (XEXP (rhs, 1)) >= 0
2057 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2058 {
2059 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2060 GET_MODE_PRECISION (mode));
2061 rhs = XEXP (rhs, 0);
2062 }
2063
2064 if (rtx_equal_p (lhs, rhs))
2065 {
2066 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2067 rtx coeff;
2068 bool speed = optimize_function_for_speed_p (cfun);
2069
2070 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2071
2072 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2073 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2074 ? tem : 0;
2075 }
2076 }
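/* For example, (plus (mult x (const_int 3)) x) has coefficients
   3 and 1, so it folds to (mult x (const_int 4)); likewise
   (plus (ashift x (const_int 2)) x) folds to (mult x (const_int 5)),
   provided the new form is not costlier than the original. */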
2077
2078 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2079 if (CONST_SCALAR_INT_P (op1)
2080 && GET_CODE (op0) == XOR
2081 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2082 && mode_signbit_p (mode, op1))
2083 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2084 simplify_gen_binary (XOR, mode, op1,
2085 XEXP (op0, 1)));
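/* E.g. in QImode, (plus (xor x (const_int 0x40)) (const_int -128))
   becomes (xor x (const_int -64)), because adding the sign bit is
   the same as xoring it in. */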
2086
2087 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2088 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2089 && GET_CODE (op0) == MULT
2090 && GET_CODE (XEXP (op0, 0)) == NEG)
2091 {
2092 rtx in1, in2;
2093
2094 in1 = XEXP (XEXP (op0, 0), 0);
2095 in2 = XEXP (op0, 1);
2096 return simplify_gen_binary (MINUS, mode, op1,
2097 simplify_gen_binary (MULT, mode,
2098 in1, in2));
2099 }
2100
2101 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2102 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2103 is 1. */
2104 if (COMPARISON_P (op0)
2105 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2106 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2107 && (reversed = reversed_comparison (op0, mode)))
2108 return
2109 simplify_gen_unary (NEG, mode, reversed, mode);
2110
2111 /* If one of the operands is a PLUS or a MINUS, see if we can
2112 simplify this by the associative law.
2113 Don't use the associative law for floating point.
2114 The inaccuracy makes it nonassociative,
2115 and subtle programs can break if operations are associated. */
2116
2117 if (INTEGRAL_MODE_P (mode)
2118 && (plus_minus_operand_p (op0)
2119 || plus_minus_operand_p (op1))
2120 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2121 return tem;
2122
2123 /* Reassociate floating point addition only when the user
2124 specifies associative math operations. */
2125 if (FLOAT_MODE_P (mode)
2126 && flag_associative_math)
2127 {
2128 tem = simplify_associative_operation (code, mode, op0, op1);
2129 if (tem)
2130 return tem;
2131 }
2132 break;
2133
2134 case COMPARE:
2135 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2136 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2137 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2138 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2139 {
2140 rtx xop00 = XEXP (op0, 0);
2141 rtx xop10 = XEXP (op1, 0);
2142
2143 #ifdef HAVE_cc0
2144 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2145 #else
2146 if (REG_P (xop00) && REG_P (xop10)
2147 && GET_MODE (xop00) == GET_MODE (xop10)
2148 && REGNO (xop00) == REGNO (xop10)
2149 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2150 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2151 #endif
2152 return xop00;
2153 }
2154 break;
2155
2156 case MINUS:
2157 /* We can't assume x-x is 0 even with non-IEEE floating point,
2158 but since it is zero except in very strange circumstances, we
2159 will treat it as zero with -ffinite-math-only. */
2160 if (rtx_equal_p (trueop0, trueop1)
2161 && ! side_effects_p (op0)
2162 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2163 return CONST0_RTX (mode);
2164
2165 /* Change subtraction from zero into negation. (0 - x) is the
2166 same as -x when x is NaN, infinite, or finite and nonzero.
2167 But if the mode has signed zeros, and does not round towards
2168 -infinity, then 0 - 0 is 0, not -0. */
2169 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2170 return simplify_gen_unary (NEG, mode, op1, mode);
2171
2172 /* (-1 - a) is ~a. */
2173 if (trueop0 == constm1_rtx)
2174 return simplify_gen_unary (NOT, mode, op1, mode);
2175
2176 /* Subtracting 0 has no effect unless the mode has signed zeros
2177 and supports rounding towards -infinity. In such a case,
2178 0 - 0 is -0. */
2179 if (!(HONOR_SIGNED_ZEROS (mode)
2180 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2181 && trueop1 == CONST0_RTX (mode))
2182 return op0;
2183
2184 /* See if this is something like X * C - X or vice versa or
2185 if the multiplication is written as a shift. If so, we can
2186 distribute and make a new multiply, shift, or maybe just
2187 have X (if C is 2 in the example above). But don't make
2188 something more expensive than we had before. */
2189
2190 if (SCALAR_INT_MODE_P (mode))
2191 {
2192 rtx lhs = op0, rhs = op1;
2193
2194 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2195 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2196
2197 if (GET_CODE (lhs) == NEG)
2198 {
2199 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2200 lhs = XEXP (lhs, 0);
2201 }
2202 else if (GET_CODE (lhs) == MULT
2203 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2204 {
2205 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2206 lhs = XEXP (lhs, 0);
2207 }
2208 else if (GET_CODE (lhs) == ASHIFT
2209 && CONST_INT_P (XEXP (lhs, 1))
2210 && INTVAL (XEXP (lhs, 1)) >= 0
2211 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2212 {
2213 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2214 GET_MODE_PRECISION (mode));
2215 lhs = XEXP (lhs, 0);
2216 }
2217
2218 if (GET_CODE (rhs) == NEG)
2219 {
2220 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2221 rhs = XEXP (rhs, 0);
2222 }
2223 else if (GET_CODE (rhs) == MULT
2224 && CONST_INT_P (XEXP (rhs, 1)))
2225 {
2226 negcoeff1 = -wide_int (std::make_pair (XEXP (rhs, 1), mode));
2227 rhs = XEXP (rhs, 0);
2228 }
2229 else if (GET_CODE (rhs) == ASHIFT
2230 && CONST_INT_P (XEXP (rhs, 1))
2231 && INTVAL (XEXP (rhs, 1)) >= 0
2232 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2233 {
2234 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2235 GET_MODE_PRECISION (mode));
2236 negcoeff1 = -negcoeff1;
2237 rhs = XEXP (rhs, 0);
2238 }
2239
2240 if (rtx_equal_p (lhs, rhs))
2241 {
2242 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2243 rtx coeff;
2244 bool speed = optimize_function_for_speed_p (cfun);
2245
2246 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2247
2248 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2249 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2250 ? tem : 0;
2251 }
2252 }
2253
2254 /* (a - (-b)) -> (a + b). True even for IEEE. */
2255 if (GET_CODE (op1) == NEG)
2256 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2257
2258 /* (-x - c) may be simplified as (-c - x). */
2259 if (GET_CODE (op0) == NEG
2260 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2261 {
2262 tem = simplify_unary_operation (NEG, mode, op1, mode);
2263 if (tem)
2264 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2265 }
2266
2267 /* Don't let a relocatable value get a negative coeff. */
2268 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2269 return simplify_gen_binary (PLUS, mode,
2270 op0,
2271 neg_const_int (mode, op1));
2272
2273 /* (x - (x & y)) -> (x & ~y) */
2274 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2275 {
2276 if (rtx_equal_p (op0, XEXP (op1, 0)))
2277 {
2278 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2279 GET_MODE (XEXP (op1, 1)));
2280 return simplify_gen_binary (AND, mode, op0, tem);
2281 }
2282 if (rtx_equal_p (op0, XEXP (op1, 1)))
2283 {
2284 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2285 GET_MODE (XEXP (op1, 0)));
2286 return simplify_gen_binary (AND, mode, op0, tem);
2287 }
2288 }
2289
2290 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2291 by reversing the comparison code if valid. */
2292 if (STORE_FLAG_VALUE == 1
2293 && trueop0 == const1_rtx
2294 && COMPARISON_P (op1)
2295 && (reversed = reversed_comparison (op1, mode)))
2296 return reversed;
2297
2298 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2299 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2300 && GET_CODE (op1) == MULT
2301 && GET_CODE (XEXP (op1, 0)) == NEG)
2302 {
2303 rtx in1, in2;
2304
2305 in1 = XEXP (XEXP (op1, 0), 0);
2306 in2 = XEXP (op1, 1);
2307 return simplify_gen_binary (PLUS, mode,
2308 simplify_gen_binary (MULT, mode,
2309 in1, in2),
2310 op0);
2311 }
2312
2313 /* Canonicalize (minus (neg A) (mult B C)) to
2314 (minus (mult (neg B) C) A). */
2315 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2316 && GET_CODE (op1) == MULT
2317 && GET_CODE (op0) == NEG)
2318 {
2319 rtx in1, in2;
2320
2321 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2322 in2 = XEXP (op1, 1);
2323 return simplify_gen_binary (MINUS, mode,
2324 simplify_gen_binary (MULT, mode,
2325 in1, in2),
2326 XEXP (op0, 0));
2327 }
2328
2329 /* If one of the operands is a PLUS or a MINUS, see if we can
2330 simplify this by the associative law. This will, for example,
2331 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2332 Don't use the associative law for floating point.
2333 The inaccuracy makes it nonassociative,
2334 and subtle programs can break if operations are associated. */
2335
2336 if (INTEGRAL_MODE_P (mode)
2337 && (plus_minus_operand_p (op0)
2338 || plus_minus_operand_p (op1))
2339 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2340 return tem;
2341 break;
2342
2343 case MULT:
2344 if (trueop1 == constm1_rtx)
2345 return simplify_gen_unary (NEG, mode, op0, mode);
2346
2347 if (GET_CODE (op0) == NEG)
2348 {
2349 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2350 /* If op1 is a MULT as well and simplify_unary_operation
2351 just moved the NEG to the second operand, simplify_gen_binary
2352 below could, through simplify_associative_operation, move
2353 the NEG around again and recurse endlessly. */
2354 if (temp
2355 && GET_CODE (op1) == MULT
2356 && GET_CODE (temp) == MULT
2357 && XEXP (op1, 0) == XEXP (temp, 0)
2358 && GET_CODE (XEXP (temp, 1)) == NEG
2359 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2360 temp = NULL_RTX;
2361 if (temp)
2362 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2363 }
2364 if (GET_CODE (op1) == NEG)
2365 {
2366 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2367 /* If op0 is a MULT as well and simplify_unary_operation
2368 just moved the NEG to the second operand, simplify_gen_binary
2369 below could, through simplify_associative_operation, move
2370 the NEG around again and recurse endlessly. */
2371 if (temp
2372 && GET_CODE (op0) == MULT
2373 && GET_CODE (temp) == MULT
2374 && XEXP (op0, 0) == XEXP (temp, 0)
2375 && GET_CODE (XEXP (temp, 1)) == NEG
2376 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2377 temp = NULL_RTX;
2378 if (temp)
2379 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2380 }
2381
2382 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2383 x is NaN, since x * 0 is then also NaN. Nor is it valid
2384 when the mode has signed zeros, since multiplying a negative
2385 number by 0 will give -0, not 0. */
2386 if (!HONOR_NANS (mode)
2387 && !HONOR_SIGNED_ZEROS (mode)
2388 && trueop1 == CONST0_RTX (mode)
2389 && ! side_effects_p (op0))
2390 return op1;
2391
2392 /* In IEEE floating point, x*1 is not equivalent to x for
2393 signalling NaNs. */
2394 if (!HONOR_SNANS (mode)
2395 && trueop1 == CONST1_RTX (mode))
2396 return op0;
2397
2398 /* Convert multiply by constant power of two into shift. */
2399 if (CONST_SCALAR_INT_P (trueop1))
2400 {
2401 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2402 if (val >= 0 && val < GET_MODE_BITSIZE (mode))
2403 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2404 }
2405
2406 /* x*2 is x+x and x*(-1) is -x */
2407 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2408 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2409 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2410 && GET_MODE (op0) == mode)
2411 {
2412 REAL_VALUE_TYPE d;
2413 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2414
2415 if (REAL_VALUES_EQUAL (d, dconst2))
2416 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2417
2418 if (!HONOR_SNANS (mode)
2419 && REAL_VALUES_EQUAL (d, dconstm1))
2420 return simplify_gen_unary (NEG, mode, op0, mode);
2421 }
2422
2423 /* Optimize -x * -x as x * x. */
2424 if (FLOAT_MODE_P (mode)
2425 && GET_CODE (op0) == NEG
2426 && GET_CODE (op1) == NEG
2427 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2428 && !side_effects_p (XEXP (op0, 0)))
2429 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2430
2431 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2432 if (SCALAR_FLOAT_MODE_P (mode)
2433 && GET_CODE (op0) == ABS
2434 && GET_CODE (op1) == ABS
2435 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2436 && !side_effects_p (XEXP (op0, 0)))
2437 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2438
2439 /* Reassociate multiplication, but for floating point MULTs
2440 only when the user specifies unsafe math optimizations. */
2441 if (! FLOAT_MODE_P (mode)
2442 || flag_unsafe_math_optimizations)
2443 {
2444 tem = simplify_associative_operation (code, mode, op0, op1);
2445 if (tem)
2446 return tem;
2447 }
2448 break;
2449
2450 case IOR:
2451 if (trueop1 == CONST0_RTX (mode))
2452 return op0;
2453 if (INTEGRAL_MODE_P (mode)
2454 && trueop1 == CONSTM1_RTX (mode)
2455 && !side_effects_p (op0))
2456 return op1;
2457 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2458 return op0;
2459 /* A | (~A) -> -1 */
2460 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2461 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2462 && ! side_effects_p (op0)
2463 && SCALAR_INT_MODE_P (mode))
2464 return constm1_rtx;
2465
2466 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2467 if (CONST_INT_P (op1)
2468 && HWI_COMPUTABLE_MODE_P (mode)
2469 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2470 && !side_effects_p (op0))
2471 return op1;
2472
2473 /* Canonicalize (X & C1) | C2. */
2474 if (GET_CODE (op0) == AND
2475 && CONST_INT_P (trueop1)
2476 && CONST_INT_P (XEXP (op0, 1)))
2477 {
2478 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2479 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2480 HOST_WIDE_INT c2 = INTVAL (trueop1);
2481
2482 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2483 if ((c1 & c2) == c1
2484 && !side_effects_p (XEXP (op0, 0)))
2485 return trueop1;
2486
2487 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2488 if (((c1|c2) & mask) == mask)
2489 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2490
2491 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2492 if (((c1 & ~c2) & mask) != (c1 & mask))
2493 {
2494 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2495 gen_int_mode (c1 & ~c2, mode));
2496 return simplify_gen_binary (IOR, mode, tem, op1);
2497 }
2498 }
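/* Worked example: (ior (and x (const_int 0x0f)) (const_int 0x0c))
   hits the last rule and becomes
   (ior (and x (const_int 0x03)) (const_int 0x0c)), since the bits
   of C1 that are already set in C2 are redundant in the AND. */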
2499
2500 /* Convert (A & B) | A to A. */
2501 if (GET_CODE (op0) == AND
2502 && (rtx_equal_p (XEXP (op0, 0), op1)
2503 || rtx_equal_p (XEXP (op0, 1), op1))
2504 && ! side_effects_p (XEXP (op0, 0))
2505 && ! side_effects_p (XEXP (op0, 1)))
2506 return op1;
2507
2508 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2509 mode size to (rotate A CX). */
2510
2511 if (GET_CODE (op1) == ASHIFT
2512 || GET_CODE (op1) == SUBREG)
2513 {
2514 opleft = op1;
2515 opright = op0;
2516 }
2517 else
2518 {
2519 opright = op1;
2520 opleft = op0;
2521 }
2522
2523 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2524 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2525 && CONST_INT_P (XEXP (opleft, 1))
2526 && CONST_INT_P (XEXP (opright, 1))
2527 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2528 == GET_MODE_PRECISION (mode)))
2529 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
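/* For instance, in SImode
   (ior (ashift x (const_int 8)) (lshiftrt x (const_int 24)))
   matches here, since 8 + 24 equals the precision, and becomes
   (rotate x (const_int 8)). */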
2530
2531 /* Same, but for ashift that has been "simplified" to a wider mode
2532 by simplify_shift_const. */
2533
2534 if (GET_CODE (opleft) == SUBREG
2535 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2536 && GET_CODE (opright) == LSHIFTRT
2537 && GET_CODE (XEXP (opright, 0)) == SUBREG
2538 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2539 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2540 && (GET_MODE_SIZE (GET_MODE (opleft))
2541 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2542 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2543 SUBREG_REG (XEXP (opright, 0)))
2544 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2545 && CONST_INT_P (XEXP (opright, 1))
2546 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2547 == GET_MODE_PRECISION (mode)))
2548 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2549 XEXP (SUBREG_REG (opleft), 1));
2550
2551 /* If we have (ior (and X C1) C2), simplify this by making
2552 C1 as small as possible if C1 actually changes. */
2553 if (CONST_INT_P (op1)
2554 && (HWI_COMPUTABLE_MODE_P (mode)
2555 || INTVAL (op1) > 0)
2556 && GET_CODE (op0) == AND
2557 && CONST_INT_P (XEXP (op0, 1))
2558 && CONST_INT_P (op1)
2559 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2560 {
2561 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2562 gen_int_mode (UINTVAL (XEXP (op0, 1))
2563 & ~UINTVAL (op1),
2564 mode));
2565 return simplify_gen_binary (IOR, mode, tmp, op1);
2566 }
2567
2568 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2569 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2570 the PLUS does not affect any of the bits in OP1: then we can do
2571 the IOR as a PLUS and we can associate. This is valid if OP1
2572 can be safely shifted left C bits. */
2573 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2574 && GET_CODE (XEXP (op0, 0)) == PLUS
2575 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2576 && CONST_INT_P (XEXP (op0, 1))
2577 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2578 {
2579 int count = INTVAL (XEXP (op0, 1));
2580 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2581
2582 if (mask >> count == INTVAL (trueop1)
2583 && trunc_int_for_mode (mask, mode) == mask
2584 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2585 return simplify_gen_binary (ASHIFTRT, mode,
2586 plus_constant (mode, XEXP (op0, 0),
2587 mask),
2588 XEXP (op0, 1));
2589 }
2590
2591 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2592 if (tem)
2593 return tem;
2594
2595 tem = simplify_associative_operation (code, mode, op0, op1);
2596 if (tem)
2597 return tem;
2598 break;
2599
2600 case XOR:
2601 if (trueop1 == CONST0_RTX (mode))
2602 return op0;
2603 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2604 return simplify_gen_unary (NOT, mode, op0, mode);
2605 if (rtx_equal_p (trueop0, trueop1)
2606 && ! side_effects_p (op0)
2607 && GET_MODE_CLASS (mode) != MODE_CC)
2608 return CONST0_RTX (mode);
2609
2610 /* Canonicalize XOR of the most significant bit to PLUS. */
2611 if (CONST_SCALAR_INT_P (op1)
2612 && mode_signbit_p (mode, op1))
2613 return simplify_gen_binary (PLUS, mode, op0, op1);
2614 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2615 if (CONST_SCALAR_INT_P (op1)
2616 && GET_CODE (op0) == PLUS
2617 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2618 && mode_signbit_p (mode, XEXP (op0, 1)))
2619 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2620 simplify_gen_binary (XOR, mode, op1,
2621 XEXP (op0, 1)));
2622
2623 /* If we are XORing two things that have no bits in common,
2624 convert them into an IOR. This helps to detect rotation encoded
2625 using those methods and possibly other simplifications. */
2626
2627 if (HWI_COMPUTABLE_MODE_P (mode)
2628 && (nonzero_bits (op0, mode)
2629 & nonzero_bits (op1, mode)) == 0)
2630 return (simplify_gen_binary (IOR, mode, op0, op1));
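/* E.g. (xor (and x (const_int 0x0f)) (and y (const_int 0xf0)))
   has disjoint nonzero bits, so the XOR is rewritten as an IOR. */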
2631
2632 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2633 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2634 (NOT y). */
2635 {
2636 int num_negated = 0;
2637
2638 if (GET_CODE (op0) == NOT)
2639 num_negated++, op0 = XEXP (op0, 0);
2640 if (GET_CODE (op1) == NOT)
2641 num_negated++, op1 = XEXP (op1, 0);
2642
2643 if (num_negated == 2)
2644 return simplify_gen_binary (XOR, mode, op0, op1);
2645 else if (num_negated == 1)
2646 return simplify_gen_unary (NOT, mode,
2647 simplify_gen_binary (XOR, mode, op0, op1),
2648 mode);
2649 }
2650
2651 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2652 correspond to a machine insn or result in further simplifications
2653 if B is a constant. */
2654
2655 if (GET_CODE (op0) == AND
2656 && rtx_equal_p (XEXP (op0, 1), op1)
2657 && ! side_effects_p (op1))
2658 return simplify_gen_binary (AND, mode,
2659 simplify_gen_unary (NOT, mode,
2660 XEXP (op0, 0), mode),
2661 op1);
2662
2663 else if (GET_CODE (op0) == AND
2664 && rtx_equal_p (XEXP (op0, 0), op1)
2665 && ! side_effects_p (op1))
2666 return simplify_gen_binary (AND, mode,
2667 simplify_gen_unary (NOT, mode,
2668 XEXP (op0, 1), mode),
2669 op1);
2670
2671 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2672 we can transform like this:
2673 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2674 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2675 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2676 Attempt a few simplifications when B and C are both constants. */
2677 if (GET_CODE (op0) == AND
2678 && CONST_INT_P (op1)
2679 && CONST_INT_P (XEXP (op0, 1)))
2680 {
2681 rtx a = XEXP (op0, 0);
2682 rtx b = XEXP (op0, 1);
2683 rtx c = op1;
2684 HOST_WIDE_INT bval = INTVAL (b);
2685 HOST_WIDE_INT cval = INTVAL (c);
2686
2687 rtx na_c
2688 = simplify_binary_operation (AND, mode,
2689 simplify_gen_unary (NOT, mode, a, mode),
2690 c);
2691 if ((~cval & bval) == 0)
2692 {
2693 /* Try to simplify ~A&C | ~B&C. */
2694 if (na_c != NULL_RTX)
2695 return simplify_gen_binary (IOR, mode, na_c,
2696 gen_int_mode (~bval & cval, mode));
2697 }
2698 else
2699 {
2700 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2701 if (na_c == const0_rtx)
2702 {
2703 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2704 gen_int_mode (~cval & bval,
2705 mode));
2706 return simplify_gen_binary (IOR, mode, a_nc_b,
2707 gen_int_mode (~bval & cval,
2708 mode));
2709 }
2710 }
2711 }
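/* Sanity check of the algebra above with B = 0x03 and C = 0x0f
   (so ~C & B == 0): for any A, (A & 0x03) ^ 0x0f == (~A & 0x0f) | 0x0c,
   which is ~A&C | ~B&C with the A&(~C&B) term dropping out.
   The rewrite is only emitted when the ~A&C half itself folds. */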
2712
2713 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2714 comparison if STORE_FLAG_VALUE is 1. */
2715 if (STORE_FLAG_VALUE == 1
2716 && trueop1 == const1_rtx
2717 && COMPARISON_P (op0)
2718 && (reversed = reversed_comparison (op0, mode)))
2719 return reversed;
2720
2721 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2722 is (lt foo (const_int 0)), so we can perform the above
2723 simplification if STORE_FLAG_VALUE is 1. */
2724
2725 if (STORE_FLAG_VALUE == 1
2726 && trueop1 == const1_rtx
2727 && GET_CODE (op0) == LSHIFTRT
2728 && CONST_INT_P (XEXP (op0, 1))
2729 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2730 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2731
2732 /* (xor (comparison foo bar) (const_int sign-bit))
2733 when STORE_FLAG_VALUE is the sign bit. */
2734 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2735 && trueop1 == const_true_rtx
2736 && COMPARISON_P (op0)
2737 && (reversed = reversed_comparison (op0, mode)))
2738 return reversed;
2739
2740 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2741 if (tem)
2742 return tem;
2743
2744 tem = simplify_associative_operation (code, mode, op0, op1);
2745 if (tem)
2746 return tem;
2747 break;
2748
2749 case AND:
2750 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2751 return trueop1;
2752 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2753 return op0;
2754 if (HWI_COMPUTABLE_MODE_P (mode))
2755 {
2756 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2757 HOST_WIDE_INT nzop1;
2758 if (CONST_INT_P (trueop1))
2759 {
2760 HOST_WIDE_INT val1 = INTVAL (trueop1);
2761 /* If we are turning off bits already known off in OP0, we need
2762 not do an AND. */
2763 if ((nzop0 & ~val1) == 0)
2764 return op0;
2765 }
2766 nzop1 = nonzero_bits (trueop1, mode);
2767 /* If we are clearing all the nonzero bits, the result is zero. */
2768 if ((nzop1 & nzop0) == 0
2769 && !side_effects_p (op0) && !side_effects_p (op1))
2770 return CONST0_RTX (mode);
2771 }
2772 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2773 && GET_MODE_CLASS (mode) != MODE_CC)
2774 return op0;
2775 /* A & (~A) -> 0 */
2776 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2777 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2778 && ! side_effects_p (op0)
2779 && GET_MODE_CLASS (mode) != MODE_CC)
2780 return CONST0_RTX (mode);
2781
2782 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2783 there are no nonzero bits of C outside of X's mode. */
2784 if ((GET_CODE (op0) == SIGN_EXTEND
2785 || GET_CODE (op0) == ZERO_EXTEND)
2786 && CONST_INT_P (trueop1)
2787 && HWI_COMPUTABLE_MODE_P (mode)
2788 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2789 & UINTVAL (trueop1)) == 0)
2790 {
2791 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2792 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2793 gen_int_mode (INTVAL (trueop1),
2794 imode));
2795 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2796 }
2797
2798 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2799 we might be able to further simplify the AND with X and potentially
2800 remove the truncation altogether. */
2801 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2802 {
2803 rtx x = XEXP (op0, 0);
2804 enum machine_mode xmode = GET_MODE (x);
2805 tem = simplify_gen_binary (AND, xmode, x,
2806 gen_int_mode (INTVAL (trueop1), xmode));
2807 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2808 }
2809
2810 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2811 if (GET_CODE (op0) == IOR
2812 && CONST_INT_P (trueop1)
2813 && CONST_INT_P (XEXP (op0, 1)))
2814 {
2815 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2816 return simplify_gen_binary (IOR, mode,
2817 simplify_gen_binary (AND, mode,
2818 XEXP (op0, 0), op1),
2819 gen_int_mode (tmp, mode));
2820 }
2821
2822 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2823 insn (and may simplify more). */
2824 if (GET_CODE (op0) == XOR
2825 && rtx_equal_p (XEXP (op0, 0), op1)
2826 && ! side_effects_p (op1))
2827 return simplify_gen_binary (AND, mode,
2828 simplify_gen_unary (NOT, mode,
2829 XEXP (op0, 1), mode),
2830 op1);
2831
2832 if (GET_CODE (op0) == XOR
2833 && rtx_equal_p (XEXP (op0, 1), op1)
2834 && ! side_effects_p (op1))
2835 return simplify_gen_binary (AND, mode,
2836 simplify_gen_unary (NOT, mode,
2837 XEXP (op0, 0), mode),
2838 op1);
2839
2840 /* Similarly for (~(A ^ B)) & A. */
2841 if (GET_CODE (op0) == NOT
2842 && GET_CODE (XEXP (op0, 0)) == XOR
2843 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2844 && ! side_effects_p (op1))
2845 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2846
2847 if (GET_CODE (op0) == NOT
2848 && GET_CODE (XEXP (op0, 0)) == XOR
2849 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2850 && ! side_effects_p (op1))
2851 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2852
2853 /* Convert (A | B) & A to A. */
2854 if (GET_CODE (op0) == IOR
2855 && (rtx_equal_p (XEXP (op0, 0), op1)
2856 || rtx_equal_p (XEXP (op0, 1), op1))
2857 && ! side_effects_p (XEXP (op0, 0))
2858 && ! side_effects_p (XEXP (op0, 1)))
2859 return op1;
2860
2861 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2862 ((A & N) + B) & M -> (A + B) & M
2863 Similarly if (N & M) == 0,
2864 ((A | N) + B) & M -> (A + B) & M
2865 and for - instead of + and/or ^ instead of |.
2866 Also, if (N & M) == 0, then
2867 (A +- N) & M -> A & M. */
2868 if (CONST_INT_P (trueop1)
2869 && HWI_COMPUTABLE_MODE_P (mode)
2870 && ~UINTVAL (trueop1)
2871 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2872 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2873 {
2874 rtx pmop[2];
2875 int which;
2876
2877 pmop[0] = XEXP (op0, 0);
2878 pmop[1] = XEXP (op0, 1);
2879
2880 if (CONST_INT_P (pmop[1])
2881 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2882 return simplify_gen_binary (AND, mode, pmop[0], op1);
2883
2884 for (which = 0; which < 2; which++)
2885 {
2886 tem = pmop[which];
2887 switch (GET_CODE (tem))
2888 {
2889 case AND:
2890 if (CONST_INT_P (XEXP (tem, 1))
2891 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2892 == UINTVAL (trueop1))
2893 pmop[which] = XEXP (tem, 0);
2894 break;
2895 case IOR:
2896 case XOR:
2897 if (CONST_INT_P (XEXP (tem, 1))
2898 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2899 pmop[which] = XEXP (tem, 0);
2900 break;
2901 default:
2902 break;
2903 }
2904 }
2905
2906 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2907 {
2908 tem = simplify_gen_binary (GET_CODE (op0), mode,
2909 pmop[0], pmop[1]);
2910 return simplify_gen_binary (code, mode, tem, op1);
2911 }
2912 }
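/* Example with M = 0x0f: (and (plus (and a (const_int 0xff)) b)
   (const_int 0x0f)) can drop the inner AND, giving
   (and (plus a b) (const_int 0x0f)), because only the low four
   bits of the sum survive the outer mask. */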
2913
2914 /* (and X (ior (not X) Y)) -> (and X Y) */
2915 if (GET_CODE (op1) == IOR
2916 && GET_CODE (XEXP (op1, 0)) == NOT
2917 && op0 == XEXP (XEXP (op1, 0), 0))
2918 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2919
2920 /* (and (ior (not X) Y) X) -> (and X Y) */
2921 if (GET_CODE (op0) == IOR
2922 && GET_CODE (XEXP (op0, 0)) == NOT
2923 && op1 == XEXP (XEXP (op0, 0), 0))
2924 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2925
2926 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2927 if (tem)
2928 return tem;
2929
2930 tem = simplify_associative_operation (code, mode, op0, op1);
2931 if (tem)
2932 return tem;
2933 break;
2934
2935 case UDIV:
2936 /* 0/x is 0 (or x&0 if x has side-effects). */
2937 if (trueop0 == CONST0_RTX (mode))
2938 {
2939 if (side_effects_p (op1))
2940 return simplify_gen_binary (AND, mode, op1, trueop0);
2941 return trueop0;
2942 }
2943 /* x/1 is x. */
2944 if (trueop1 == CONST1_RTX (mode))
2945 {
2946 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2947 if (tem)
2948 return tem;
2949 }
2950 /* Convert divide by power of two into shift. */
2951 if (CONST_INT_P (trueop1)
2952 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2953 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2954 break;
2955
2956 case DIV:
2957 /* Handle floating point and integers separately. */
2958 if (SCALAR_FLOAT_MODE_P (mode))
2959 {
2960 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2961 safe for modes with NaNs, since 0.0 / 0.0 will then be
2962 NaN rather than 0.0. Nor is it safe for modes with signed
2963 zeros, since dividing 0 by a negative number gives -0.0 */
2964 if (trueop0 == CONST0_RTX (mode)
2965 && !HONOR_NANS (mode)
2966 && !HONOR_SIGNED_ZEROS (mode)
2967 && ! side_effects_p (op1))
2968 return op0;
2969 /* x/1.0 is x. */
2970 if (trueop1 == CONST1_RTX (mode)
2971 && !HONOR_SNANS (mode))
2972 return op0;
2973
2974 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2975 && trueop1 != CONST0_RTX (mode))
2976 {
2977 REAL_VALUE_TYPE d;
2978 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2979
2980 /* x/-1.0 is -x. */
2981 if (REAL_VALUES_EQUAL (d, dconstm1)
2982 && !HONOR_SNANS (mode))
2983 return simplify_gen_unary (NEG, mode, op0, mode);
2984
2985 /* Change FP division by a constant into multiplication.
2986 Only do this with -freciprocal-math. */
2987 if (flag_reciprocal_math
2988 && !REAL_VALUES_EQUAL (d, dconst0))
2989 {
2990 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2991 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2992 return simplify_gen_binary (MULT, mode, op0, tem);
2993 }
2994 }
2995 }
2996 else if (SCALAR_INT_MODE_P (mode))
2997 {
2998 /* 0/x is 0 (or x&0 if x has side-effects). */
2999 if (trueop0 == CONST0_RTX (mode)
3000 && !cfun->can_throw_non_call_exceptions)
3001 {
3002 if (side_effects_p (op1))
3003 return simplify_gen_binary (AND, mode, op1, trueop0);
3004 return trueop0;
3005 }
3006 /* x/1 is x. */
3007 if (trueop1 == CONST1_RTX (mode))
3008 {
3009 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3010 if (tem)
3011 return tem;
3012 }
3013 /* x/-1 is -x. */
3014 if (trueop1 == constm1_rtx)
3015 {
3016 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3017 if (x)
3018 return simplify_gen_unary (NEG, mode, x, mode);
3019 }
3020 }
3021 break;
3022
3023 case UMOD:
3024 /* 0%x is 0 (or x&0 if x has side-effects). */
3025 if (trueop0 == CONST0_RTX (mode))
3026 {
3027 if (side_effects_p (op1))
3028 return simplify_gen_binary (AND, mode, op1, trueop0);
3029 return trueop0;
3030 }
3031 /* x%1 is 0 (or x&0 if x has side-effects). */
3032 if (trueop1 == CONST1_RTX (mode))
3033 {
3034 if (side_effects_p (op0))
3035 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3036 return CONST0_RTX (mode);
3037 }
3038 /* Implement modulus by power of two as AND. */
3039 if (CONST_INT_P (trueop1)
3040 && exact_log2 (UINTVAL (trueop1)) > 0)
3041 return simplify_gen_binary (AND, mode, op0,
3042 gen_int_mode (INTVAL (op1) - 1, mode));
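/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)). */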
3043 break;
3044
3045 case MOD:
3046 /* 0%x is 0 (or x&0 if x has side-effects). */
3047 if (trueop0 == CONST0_RTX (mode))
3048 {
3049 if (side_effects_p (op1))
3050 return simplify_gen_binary (AND, mode, op1, trueop0);
3051 return trueop0;
3052 }
3053 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3054 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3055 {
3056 if (side_effects_p (op0))
3057 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3058 return CONST0_RTX (mode);
3059 }
3060 break;
3061
3062 case ROTATERT:
3063 case ROTATE:
3064 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3065 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3066 bitsize - 1, use the other rotate direction with an amount of
3067 1 .. bitsize / 2 - 1 instead. */
3068 if (CONST_INT_P (trueop1)
3069 && IN_RANGE (INTVAL (trueop1),
3070 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3071 GET_MODE_BITSIZE (mode) - 1))
3072 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3073 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3074 - INTVAL (trueop1)));
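/* For example, in SImode (rotate x (const_int 24)) is rewritten
   as (rotatert x (const_int 8)), and (rotatert x (const_int 24))
   as (rotate x (const_int 8)). */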
3075 /* FALLTHRU */
3076 case ASHIFTRT:
3077 if (trueop1 == CONST0_RTX (mode))
3078 return op0;
3079 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3080 return op0;
3081 /* Rotating ~0 always results in ~0. */
3082 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3083 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3084 && ! side_effects_p (op1))
3085 return op0;
3086 canonicalize_shift:
3087 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3088 {
3089 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3090 if (val != INTVAL (op1))
3091 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3092 }
3093 break;
3094
3095 case ASHIFT:
3096 case SS_ASHIFT:
3097 case US_ASHIFT:
3098 if (trueop1 == CONST0_RTX (mode))
3099 return op0;
3100 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3101 return op0;
3102 goto canonicalize_shift;
3103
3104 case LSHIFTRT:
3105 if (trueop1 == CONST0_RTX (mode))
3106 return op0;
3107 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3108 return op0;
3109 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3110 if (GET_CODE (op0) == CLZ
3111 && CONST_INT_P (trueop1)
3112 && STORE_FLAG_VALUE == 1
3113 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3114 {
3115 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3116 unsigned HOST_WIDE_INT zero_val = 0;
3117
3118 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3119 && zero_val == GET_MODE_PRECISION (imode)
3120 && INTVAL (trueop1) == exact_log2 (zero_val))
3121 return simplify_gen_relational (EQ, mode, imode,
3122 XEXP (op0, 0), const0_rtx);
3123 }
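/* With CLZ defined to return the mode precision for a zero input
   (e.g. 32 in SImode), (lshiftrt (clz:SI x) (const_int 5)) is
   nonzero exactly when x is zero, hence the (eq x 0) form above. */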
3124 goto canonicalize_shift;
3125
3126 case SMIN:
3127 if (width <= HOST_BITS_PER_WIDE_INT
3128 && mode_signbit_p (mode, trueop1)
3129 && ! side_effects_p (op0))
3130 return op1;
3131 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3132 return op0;
3133 tem = simplify_associative_operation (code, mode, op0, op1);
3134 if (tem)
3135 return tem;
3136 break;
3137
3138 case SMAX:
3139 if (width <= HOST_BITS_PER_WIDE_INT
3140 && CONST_INT_P (trueop1)
3141 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3142 && ! side_effects_p (op0))
3143 return op1;
3144 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3145 return op0;
3146 tem = simplify_associative_operation (code, mode, op0, op1);
3147 if (tem)
3148 return tem;
3149 break;
3150
3151 case UMIN:
3152 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3153 return op1;
3154 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3155 return op0;
3156 tem = simplify_associative_operation (code, mode, op0, op1);
3157 if (tem)
3158 return tem;
3159 break;
3160
3161 case UMAX:
3162 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3163 return op1;
3164 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3165 return op0;
3166 tem = simplify_associative_operation (code, mode, op0, op1);
3167 if (tem)
3168 return tem;
3169 break;
3170
3171 case SS_PLUS:
3172 case US_PLUS:
3173 case SS_MINUS:
3174 case US_MINUS:
3175 case SS_MULT:
3176 case US_MULT:
3177 case SS_DIV:
3178 case US_DIV:
3179 /* ??? There are simplifications that can be done. */
3180 return 0;
3181
3182 case VEC_SELECT:
3183 if (!VECTOR_MODE_P (mode))
3184 {
3185 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3186 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3187 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3188 gcc_assert (XVECLEN (trueop1, 0) == 1);
3189 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3190
3191 if (GET_CODE (trueop0) == CONST_VECTOR)
3192 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3193 (trueop1, 0, 0)));
3194
3195 /* Extract a scalar element from a nested VEC_SELECT expression
3196 (with an optional nested VEC_CONCAT expression). Some targets
3197 (i386) extract a scalar element from a vector using a chain of
3198 nested VEC_SELECT expressions. When the input operand is a memory
3199 operand, this operation can be simplified to a simple scalar
3200 load from an offset memory address. */
3201 if (GET_CODE (trueop0) == VEC_SELECT)
3202 {
3203 rtx op0 = XEXP (trueop0, 0);
3204 rtx op1 = XEXP (trueop0, 1);
3205
3206 enum machine_mode opmode = GET_MODE (op0);
3207 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3208 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3209
3210 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3211 int elem;
3212
3213 rtvec vec;
3214 rtx tmp_op, tmp;
3215
3216 gcc_assert (GET_CODE (op1) == PARALLEL);
3217 gcc_assert (i < n_elts);
3218
3219 /* Select the element pointed to by the nested selector. */
3220 elem = INTVAL (XVECEXP (op1, 0, i));
3221
3222 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3223 if (GET_CODE (op0) == VEC_CONCAT)
3224 {
3225 rtx op00 = XEXP (op0, 0);
3226 rtx op01 = XEXP (op0, 1);
3227
3228 enum machine_mode mode00, mode01;
3229 int n_elts00, n_elts01;
3230
3231 mode00 = GET_MODE (op00);
3232 mode01 = GET_MODE (op01);
3233
3234 /* Find out the number of elements of each operand. */
3235 if (VECTOR_MODE_P (mode00))
3236 {
3237 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3238 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3239 }
3240 else
3241 n_elts00 = 1;
3242
3243 if (VECTOR_MODE_P (mode01))
3244 {
3245 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3246 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3247 }
3248 else
3249 n_elts01 = 1;
3250
3251 gcc_assert (n_elts == n_elts00 + n_elts01);
3252
3253 /* Select the correct operand of VEC_CONCAT
3254 and adjust the selector. */
3255 if (elem < n_elts01)
3256 tmp_op = op00;
3257 else
3258 {
3259 tmp_op = op01;
3260 elem -= n_elts00;
3261 }
3262 }
3263 else
3264 tmp_op = op0;
3265
3266 vec = rtvec_alloc (1);
3267 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3268
3269 tmp = gen_rtx_fmt_ee (code, mode,
3270 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3271 return tmp;
3272 }
3273 if (GET_CODE (trueop0) == VEC_DUPLICATE
3274 && GET_MODE (XEXP (trueop0, 0)) == mode)
3275 return XEXP (trueop0, 0);
3276 }
3277 else
3278 {
3279 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3280 gcc_assert (GET_MODE_INNER (mode)
3281 == GET_MODE_INNER (GET_MODE (trueop0)));
3282 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3283
3284 if (GET_CODE (trueop0) == CONST_VECTOR)
3285 {
3286 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3287 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3288 rtvec v = rtvec_alloc (n_elts);
3289 unsigned int i;
3290
3291 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3292 for (i = 0; i < n_elts; i++)
3293 {
3294 rtx x = XVECEXP (trueop1, 0, i);
3295
3296 gcc_assert (CONST_INT_P (x));
3297 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3298 INTVAL (x));
3299 }
3300
3301 return gen_rtx_CONST_VECTOR (mode, v);
3302 }
3303
3304 /* Recognize the identity. */
3305 if (GET_MODE (trueop0) == mode)
3306 {
3307 bool maybe_ident = true;
3308 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3309 {
3310 rtx j = XVECEXP (trueop1, 0, i);
3311 if (!CONST_INT_P (j) || INTVAL (j) != i)
3312 {
3313 maybe_ident = false;
3314 break;
3315 }
3316 }
3317 if (maybe_ident)
3318 return trueop0;
3319 }
3320
3321 /* If we build {a,b} then permute it, build the result directly. */
3322 if (XVECLEN (trueop1, 0) == 2
3323 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3324 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3325 && GET_CODE (trueop0) == VEC_CONCAT
3326 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3327 && GET_MODE (XEXP (trueop0, 0)) == mode
3328 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3329 && GET_MODE (XEXP (trueop0, 1)) == mode)
3330 {
3331 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3332 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3333 rtx subop0, subop1;
3334
3335 gcc_assert (i0 < 4 && i1 < 4);
3336 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3337 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3338
3339 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3340 }
3341
3342 if (XVECLEN (trueop1, 0) == 2
3343 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3344 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3345 && GET_CODE (trueop0) == VEC_CONCAT
3346 && GET_MODE (trueop0) == mode)
3347 {
3348 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3349 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3350 rtx subop0, subop1;
3351
3352 gcc_assert (i0 < 2 && i1 < 2);
3353 subop0 = XEXP (trueop0, i0);
3354 subop1 = XEXP (trueop0, i1);
3355
3356 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3357 }
3358 }
3359
3360 if (XVECLEN (trueop1, 0) == 1
3361 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3362 && GET_CODE (trueop0) == VEC_CONCAT)
3363 {
3364 rtx vec = trueop0;
3365 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3366
3367 /* Try to find the element in the VEC_CONCAT. */
3368 while (GET_MODE (vec) != mode
3369 && GET_CODE (vec) == VEC_CONCAT)
3370 {
3371 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3372 if (offset < vec_size)
3373 vec = XEXP (vec, 0);
3374 else
3375 {
3376 offset -= vec_size;
3377 vec = XEXP (vec, 1);
3378 }
3379 vec = avoid_constant_pool_reference (vec);
3380 }
3381
3382 if (GET_MODE (vec) == mode)
3383 return vec;
3384 }
3385
3386 /* If we select elements in a vec_merge that all come from the same
3387 operand, select from that operand directly. */
3388 if (GET_CODE (op0) == VEC_MERGE)
3389 {
3390 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3391 if (CONST_INT_P (trueop02))
3392 {
3393 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3394 bool all_operand0 = true;
3395 bool all_operand1 = true;
3396 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3397 {
3398 rtx j = XVECEXP (trueop1, 0, i);
3399 if (sel & (1 << UINTVAL (j)))
3400 all_operand1 = false;
3401 else
3402 all_operand0 = false;
3403 }
3404 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3405 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3406 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3407 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3408 }
3409 }
3410
3411 return 0;
3412 case VEC_CONCAT:
3413 {
3414 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3415 ? GET_MODE (trueop0)
3416 : GET_MODE_INNER (mode));
3417 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3418 ? GET_MODE (trueop1)
3419 : GET_MODE_INNER (mode));
3420
3421 gcc_assert (VECTOR_MODE_P (mode));
3422 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3423 == GET_MODE_SIZE (mode));
3424
3425 if (VECTOR_MODE_P (op0_mode))
3426 gcc_assert (GET_MODE_INNER (mode)
3427 == GET_MODE_INNER (op0_mode));
3428 else
3429 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3430
3431 if (VECTOR_MODE_P (op1_mode))
3432 gcc_assert (GET_MODE_INNER (mode)
3433 == GET_MODE_INNER (op1_mode));
3434 else
3435 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3436
3437 if ((GET_CODE (trueop0) == CONST_VECTOR
3438 || CONST_SCALAR_INT_P (trueop0)
3439 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3440 && (GET_CODE (trueop1) == CONST_VECTOR
3441 || CONST_SCALAR_INT_P (trueop1)
3442 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3443 {
3444 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3445 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3446 rtvec v = rtvec_alloc (n_elts);
3447 unsigned int i;
3448 unsigned in_n_elts = 1;
3449
3450 if (VECTOR_MODE_P (op0_mode))
3451 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3452 for (i = 0; i < n_elts; i++)
3453 {
3454 if (i < in_n_elts)
3455 {
3456 if (!VECTOR_MODE_P (op0_mode))
3457 RTVEC_ELT (v, i) = trueop0;
3458 else
3459 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3460 }
3461 else
3462 {
3463 if (!VECTOR_MODE_P (op1_mode))
3464 RTVEC_ELT (v, i) = trueop1;
3465 else
3466 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3467 i - in_n_elts);
3468 }
3469 }
3470
3471 return gen_rtx_CONST_VECTOR (mode, v);
3472 }
3473
3474 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3475 Restrict the transformation to avoid generating a VEC_SELECT with a
3476 mode unrelated to its operand. */
3477 if (GET_CODE (trueop0) == VEC_SELECT
3478 && GET_CODE (trueop1) == VEC_SELECT
3479 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3480 && GET_MODE (XEXP (trueop0, 0)) == mode)
3481 {
3482 rtx par0 = XEXP (trueop0, 1);
3483 rtx par1 = XEXP (trueop1, 1);
3484 int len0 = XVECLEN (par0, 0);
3485 int len1 = XVECLEN (par1, 0);
3486 rtvec vec = rtvec_alloc (len0 + len1);
3487 for (int i = 0; i < len0; i++)
3488 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3489 for (int i = 0; i < len1; i++)
3490 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3491 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3492 gen_rtx_PARALLEL (VOIDmode, vec));
3493 }
3494 }
3495 return 0;
3496
3497 default:
3498 gcc_unreachable ();
3499 }
3500
3501 return 0;
3502 }
3503
3504 rtx
3505 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3506 rtx op0, rtx op1)
3507 {
3508 unsigned int width = GET_MODE_PRECISION (mode);
3509
3510 if (VECTOR_MODE_P (mode)
3511 && code != VEC_CONCAT
3512 && GET_CODE (op0) == CONST_VECTOR
3513 && GET_CODE (op1) == CONST_VECTOR)
3514 {
3515 unsigned n_elts = GET_MODE_NUNITS (mode);
3516 enum machine_mode op0mode = GET_MODE (op0);
3517 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3518 enum machine_mode op1mode = GET_MODE (op1);
3519 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3520 rtvec v = rtvec_alloc (n_elts);
3521 unsigned int i;
3522
3523 gcc_assert (op0_n_elts == n_elts);
3524 gcc_assert (op1_n_elts == n_elts);
3525 for (i = 0; i < n_elts; i++)
3526 {
3527 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3528 CONST_VECTOR_ELT (op0, i),
3529 CONST_VECTOR_ELT (op1, i));
3530 if (!x)
3531 return 0;
3532 RTVEC_ELT (v, i) = x;
3533 }
3534
3535 return gen_rtx_CONST_VECTOR (mode, v);
3536 }
3537
3538 if (VECTOR_MODE_P (mode)
3539 && code == VEC_CONCAT
3540 && (CONST_SCALAR_INT_P (op0)
3541 || GET_CODE (op0) == CONST_FIXED
3542 || CONST_DOUBLE_AS_FLOAT_P (op0))
3543 && (CONST_SCALAR_INT_P (op1)
3544 || CONST_DOUBLE_AS_FLOAT_P (op1)
3545 || GET_CODE (op1) == CONST_FIXED))
3546 {
3547 unsigned n_elts = GET_MODE_NUNITS (mode);
3548 rtvec v = rtvec_alloc (n_elts);
3549
3550 gcc_assert (n_elts >= 2);
3551 if (n_elts == 2)
3552 {
3553 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3554 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3555
3556 RTVEC_ELT (v, 0) = op0;
3557 RTVEC_ELT (v, 1) = op1;
3558 }
3559 else
3560 {
3561 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3562 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3563 unsigned i;
3564
3565 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3566 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3567 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3568
3569 for (i = 0; i < op0_n_elts; ++i)
3570 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3571 for (i = 0; i < op1_n_elts; ++i)
3572 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3573 }
3574
3575 return gen_rtx_CONST_VECTOR (mode, v);
3576 }
3577
3578 if (SCALAR_FLOAT_MODE_P (mode)
3579 && CONST_DOUBLE_AS_FLOAT_P (op0)
3580 && CONST_DOUBLE_AS_FLOAT_P (op1)
3581 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3582 {
3583 if (code == AND
3584 || code == IOR
3585 || code == XOR)
3586 {
3587 long tmp0[4];
3588 long tmp1[4];
3589 REAL_VALUE_TYPE r;
3590 int i;
3591
3592 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3593 GET_MODE (op0));
3594 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3595 GET_MODE (op1));
3596 for (i = 0; i < 4; i++)
3597 {
3598 switch (code)
3599 {
3600 case AND:
3601 tmp0[i] &= tmp1[i];
3602 break;
3603 case IOR:
3604 tmp0[i] |= tmp1[i];
3605 break;
3606 case XOR:
3607 tmp0[i] ^= tmp1[i];
3608 break;
3609 default:
3610 gcc_unreachable ();
3611 }
3612 }
3613 real_from_target (&r, tmp0, mode);
3614 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3615 }
3616 else
3617 {
3618 REAL_VALUE_TYPE f0, f1, value, result;
3619 bool inexact;
3620
3621 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3622 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3623 real_convert (&f0, mode, &f0);
3624 real_convert (&f1, mode, &f1);
3625
3626 if (HONOR_SNANS (mode)
3627 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3628 return 0;
3629
3630 if (code == DIV
3631 && REAL_VALUES_EQUAL (f1, dconst0)
3632 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3633 return 0;
3634
3635 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3636 && flag_trapping_math
3637 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3638 {
3639 int s0 = REAL_VALUE_NEGATIVE (f0);
3640 int s1 = REAL_VALUE_NEGATIVE (f1);
3641
3642 switch (code)
3643 {
3644 case PLUS:
3645 /* Inf + -Inf = NaN plus exception. */
3646 if (s0 != s1)
3647 return 0;
3648 break;
3649 case MINUS:
3650 /* Inf - Inf = NaN plus exception. */
3651 if (s0 == s1)
3652 return 0;
3653 break;
3654 case DIV:
3655 /* Inf / Inf = NaN plus exception. */
3656 return 0;
3657 default:
3658 break;
3659 }
3660 }
3661
3662 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3663 && flag_trapping_math
3664 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3665 || (REAL_VALUE_ISINF (f1)
3666 && REAL_VALUES_EQUAL (f0, dconst0))))
3667 /* Inf * 0 = NaN plus exception. */
3668 return 0;
3669
3670 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3671 &f0, &f1);
3672 real_convert (&result, mode, &value);
3673
3674 /* Don't constant fold this floating point operation if
3675 the result has overflowed and flag_trapping_math is set. */
3676
3677 if (flag_trapping_math
3678 && MODE_HAS_INFINITIES (mode)
3679 && REAL_VALUE_ISINF (result)
3680 && !REAL_VALUE_ISINF (f0)
3681 && !REAL_VALUE_ISINF (f1))
3682 /* Overflow plus exception. */
3683 return 0;
3684
3685 /* Don't constant fold this floating point operation if the
3686 result may depend upon the run-time rounding mode and
3687 flag_rounding_math is set, or if GCC's software emulation
3688 is unable to accurately represent the result. */
3689
3690 if ((flag_rounding_math
3691 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3692 && (inexact || !real_identical (&result, &value)))
3693 return NULL_RTX;
3694
3695 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3696 }
3697 }
3698
3699 /* We can fold some multi-word operations. */
3700 if (GET_MODE_CLASS (mode) == MODE_INT
3701 && CONST_SCALAR_INT_P (op0)
3702 && CONST_SCALAR_INT_P (op1))
3703 {
3704 wide_int result;
3705 bool overflow;
3706 unsigned int bitsize = GET_MODE_BITSIZE (mode);
3707 rtx_mode_t pop0 = std::make_pair (op0, mode);
3708 rtx_mode_t pop1 = std::make_pair (op1, mode);
3709
3710 #if TARGET_SUPPORTS_WIDE_INT == 0
3711 /* This assert keeps the simplification from producing a result
3712 that cannot be represented in a CONST_DOUBLE, but a lot of
3713 upstream callers expect that this function never fails to
3714 simplify something, so if this condition were added to the test
3715 above, the code would just die later anyway. If this assert
3716 fires, you just need to make the port support wide int. */
3717 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3718 #endif
3719 switch (code)
3720 {
3721 case MINUS:
3722 result = wi::sub (pop0, pop1);
3723 break;
3724
3725 case PLUS:
3726 result = wi::add (pop0, pop1);
3727 break;
3728
3729 case MULT:
3730 result = wi::mul (pop0, pop1);
3731 break;
3732
3733 case DIV:
3734 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3735 if (overflow)
3736 return NULL_RTX;
3737 break;
3738
3739 case MOD:
3740 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3741 if (overflow)
3742 return NULL_RTX;
3743 break;
3744
3745 case UDIV:
3746 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3747 if (overflow)
3748 return NULL_RTX;
3749 break;
3750
3751 case UMOD:
3752 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3753 if (overflow)
3754 return NULL_RTX;
3755 break;
3756
3757 case AND:
3758 result = wi::bit_and (pop0, pop1);
3759 break;
3760
3761 case IOR:
3762 result = wi::bit_or (pop0, pop1);
3763 break;
3764
3765 case XOR:
3766 result = wi::bit_xor (pop0, pop1);
3767 break;
3768
3769 case SMIN:
3770 result = wi::smin (pop0, pop1);
3771 break;
3772
3773 case SMAX:
3774 result = wi::smax (pop0, pop1);
3775 break;
3776
3777 case UMIN:
3778 result = wi::umin (pop0, pop1);
3779 break;
3780
3781 case UMAX:
3782 result = wi::umax (pop0, pop1);
3783 break;
3784
3785 case LSHIFTRT:
3786 case ASHIFTRT:
3787 case ASHIFT:
3788 case ROTATE:
3789 case ROTATERT:
3790 {
3791 wide_int wop1 = pop1;
3792 if (wi::neg_p (wop1))
3793 return NULL_RTX;
3794
3795 if (SHIFT_COUNT_TRUNCATED)
3796 wop1 = wi::umod_trunc (wop1, width);
3797
3798 switch (code)
3799 {
3800 case LSHIFTRT:
3801 result = wi::lrshift (pop0, wop1, bitsize);
3802 break;
3803
3804 case ASHIFTRT:
3805 result = wi::arshift (pop0, wop1, bitsize);
3806 break;
3807
3808 case ASHIFT:
3809 result = wi::lshift (pop0, wop1, bitsize);
3810 break;
3811
3812 case ROTATE:
3813 result = wi::lrotate (pop0, wop1);
3814 break;
3815
3816 case ROTATERT:
3817 result = wi::rrotate (pop0, wop1);
3818 break;
3819
3820 default:
3821 gcc_unreachable ();
3822 }
3823 break;
3824 }
3825 default:
3826 return NULL_RTX;
3827 }
3828 return immed_wide_int_const (result, mode);
3829 }
3830
3831 return NULL_RTX;
3832 }
3833
3834
3835 \f
3836 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3837 PLUS or MINUS.
3838
3839 Rather than test for specific cases, we do this by a brute-force method
3840 and do all possible simplifications until no more changes occur. Then
3841 we rebuild the operation. */
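/* Illustrative sketch of the idea (a, b and c are hypothetical operands):
   (minus (plus a b) (plus a c)) is expanded into the entries
   { +a, +b, -a, -c }; the +a/-a pair cancels in the pairwise combination
   loop below, and the surviving entries are rebuilt as (minus b c).  */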
3842
3843 struct simplify_plus_minus_op_data
3844 {
3845 rtx op;
3846 short neg;
3847 };
3848
3849 static bool
3850 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3851 {
3852 int result;
3853
3854 result = (commutative_operand_precedence (y)
3855 - commutative_operand_precedence (x));
3856 if (result)
3857 return result > 0;
3858
3859 /* Group together equal REGs to do more simplification. */
3860 if (REG_P (x) && REG_P (y))
3861 return REGNO (x) > REGNO (y);
3862 else
3863 return false;
3864 }
3865
3866 static rtx
3867 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3868 rtx op1)
3869 {
3870 struct simplify_plus_minus_op_data ops[8];
3871 rtx result, tem;
3872 int n_ops = 2, input_ops = 2;
3873 int changed, n_constants = 0, canonicalized = 0;
3874 int i, j;
3875
3876 memset (ops, 0, sizeof ops);
3877
3878 /* Set up the two operands and then expand them until nothing has been
3879 changed. If we run out of room in our array, give up; this should
3880 almost never happen. */
3881
3882 ops[0].op = op0;
3883 ops[0].neg = 0;
3884 ops[1].op = op1;
3885 ops[1].neg = (code == MINUS);
3886
3887 do
3888 {
3889 changed = 0;
3890
3891 for (i = 0; i < n_ops; i++)
3892 {
3893 rtx this_op = ops[i].op;
3894 int this_neg = ops[i].neg;
3895 enum rtx_code this_code = GET_CODE (this_op);
3896
3897 switch (this_code)
3898 {
3899 case PLUS:
3900 case MINUS:
3901 if (n_ops == 7)
3902 return NULL_RTX;
3903
3904 ops[n_ops].op = XEXP (this_op, 1);
3905 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3906 n_ops++;
3907
3908 ops[i].op = XEXP (this_op, 0);
3909 input_ops++;
3910 changed = 1;
3911 canonicalized |= this_neg;
3912 break;
3913
3914 case NEG:
3915 ops[i].op = XEXP (this_op, 0);
3916 ops[i].neg = ! this_neg;
3917 changed = 1;
3918 canonicalized = 1;
3919 break;
3920
3921 case CONST:
3922 if (n_ops < 7
3923 && GET_CODE (XEXP (this_op, 0)) == PLUS
3924 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3925 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3926 {
3927 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3928 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3929 ops[n_ops].neg = this_neg;
3930 n_ops++;
3931 changed = 1;
3932 canonicalized = 1;
3933 }
3934 break;
3935
3936 case NOT:
3937 /* ~a -> (-a - 1) */
3938 if (n_ops != 7)
3939 {
3940 ops[n_ops].op = CONSTM1_RTX (mode);
3941 ops[n_ops++].neg = this_neg;
3942 ops[i].op = XEXP (this_op, 0);
3943 ops[i].neg = !this_neg;
3944 changed = 1;
3945 canonicalized = 1;
3946 }
3947 break;
3948
3949 case CONST_INT:
3950 n_constants++;
3951 if (this_neg)
3952 {
3953 ops[i].op = neg_const_int (mode, this_op);
3954 ops[i].neg = 0;
3955 changed = 1;
3956 canonicalized = 1;
3957 }
3958 break;
3959
3960 default:
3961 break;
3962 }
3963 }
3964 }
3965 while (changed);
3966
3967 if (n_constants > 1)
3968 canonicalized = 1;
3969
3970 gcc_assert (n_ops >= 2);
3971
3972 /* If we only have two operands, we can avoid the loops. */
3973 if (n_ops == 2)
3974 {
3975 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3976 rtx lhs, rhs;
3977
3978 /* Get the two operands. Be careful with the order, especially for
3979 the cases where code == MINUS. */
3980 if (ops[0].neg && ops[1].neg)
3981 {
3982 lhs = gen_rtx_NEG (mode, ops[0].op);
3983 rhs = ops[1].op;
3984 }
3985 else if (ops[0].neg)
3986 {
3987 lhs = ops[1].op;
3988 rhs = ops[0].op;
3989 }
3990 else
3991 {
3992 lhs = ops[0].op;
3993 rhs = ops[1].op;
3994 }
3995
3996 return simplify_const_binary_operation (code, mode, lhs, rhs);
3997 }
3998
3999 /* Now simplify each pair of operands until nothing changes. */
4000 do
4001 {
4002 /* Insertion sort is good enough for an eight-element array. */
4003 for (i = 1; i < n_ops; i++)
4004 {
4005 struct simplify_plus_minus_op_data save;
4006 j = i - 1;
4007 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4008 continue;
4009
4010 canonicalized = 1;
4011 save = ops[i];
4012 do
4013 ops[j + 1] = ops[j];
4014 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4015 ops[j + 1] = save;
4016 }
4017
4018 changed = 0;
4019 for (i = n_ops - 1; i > 0; i--)
4020 for (j = i - 1; j >= 0; j--)
4021 {
4022 rtx lhs = ops[j].op, rhs = ops[i].op;
4023 int lneg = ops[j].neg, rneg = ops[i].neg;
4024
4025 if (lhs != 0 && rhs != 0)
4026 {
4027 enum rtx_code ncode = PLUS;
4028
4029 if (lneg != rneg)
4030 {
4031 ncode = MINUS;
4032 if (lneg)
4033 tem = lhs, lhs = rhs, rhs = tem;
4034 }
4035 else if (swap_commutative_operands_p (lhs, rhs))
4036 tem = lhs, lhs = rhs, rhs = tem;
4037
4038 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4039 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4040 {
4041 rtx tem_lhs, tem_rhs;
4042
4043 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4044 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4045 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4046
4047 if (tem && !CONSTANT_P (tem))
4048 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4049 }
4050 else
4051 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4052
4053 /* Reject "simplifications" that just wrap the two
4054 arguments in a CONST. Failure to do so can result
4055 in infinite recursion with simplify_binary_operation
4056 when it calls us to simplify CONST operations. */
4057 if (tem
4058 && ! (GET_CODE (tem) == CONST
4059 && GET_CODE (XEXP (tem, 0)) == ncode
4060 && XEXP (XEXP (tem, 0), 0) == lhs
4061 && XEXP (XEXP (tem, 0), 1) == rhs))
4062 {
4063 lneg &= rneg;
4064 if (GET_CODE (tem) == NEG)
4065 tem = XEXP (tem, 0), lneg = !lneg;
4066 if (CONST_INT_P (tem) && lneg)
4067 tem = neg_const_int (mode, tem), lneg = 0;
4068
4069 ops[i].op = tem;
4070 ops[i].neg = lneg;
4071 ops[j].op = NULL_RTX;
4072 changed = 1;
4073 canonicalized = 1;
4074 }
4075 }
4076 }
4077
4078 /* If nothing changed, fail. */
4079 if (!canonicalized)
4080 return NULL_RTX;
4081
4082 /* Pack all the operands to the lower-numbered entries. */
4083 for (i = 0, j = 0; j < n_ops; j++)
4084 if (ops[j].op)
4085 {
4086 ops[i] = ops[j];
4087 i++;
4088 }
4089 n_ops = i;
4090 }
4091 while (changed);
4092
4093 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4094 if (n_ops == 2
4095 && CONST_INT_P (ops[1].op)
4096 && CONSTANT_P (ops[0].op)
4097 && ops[0].neg)
4098 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4099
4100 /* We suppressed creation of trivial CONST expressions in the
4101 combination loop to avoid recursion. Create one manually now.
4102 The combination loop should have ensured that there is exactly
4103 one CONST_INT, and the sort will have ensured that it is last
4104 in the array and that any other constant will be next-to-last. */
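/* Illustrative sketch (sym is a hypothetical SYMBOL_REF): if the array
   ends in { ..., (symbol_ref sym), (const_int 12) }, the CONST_INT is
   folded into the preceding constant via plus_constant, leaving the
   single entry (const (plus (symbol_ref sym) (const_int 12))).  */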
4105
4106 if (n_ops > 1
4107 && CONST_INT_P (ops[n_ops - 1].op)
4108 && CONSTANT_P (ops[n_ops - 2].op))
4109 {
4110 rtx value = ops[n_ops - 1].op;
4111 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4112 value = neg_const_int (mode, value);
4113 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4114 INTVAL (value));
4115 n_ops--;
4116 }
4117
4118 /* Put a non-negated operand first, if possible. */
4119
4120 for (i = 0; i < n_ops && ops[i].neg; i++)
4121 continue;
4122 if (i == n_ops)
4123 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4124 else if (i != 0)
4125 {
4126 tem = ops[0].op;
4127 ops[0] = ops[i];
4128 ops[i].op = tem;
4129 ops[i].neg = 1;
4130 }
4131
4132 /* Now make the result by performing the requested operations. */
4133 result = ops[0].op;
4134 for (i = 1; i < n_ops; i++)
4135 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4136 mode, result, ops[i].op);
4137
4138 return result;
4139 }
4140
4141 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4142 static bool
4143 plus_minus_operand_p (const_rtx x)
4144 {
4145 return GET_CODE (x) == PLUS
4146 || GET_CODE (x) == MINUS
4147 || (GET_CODE (x) == CONST
4148 && GET_CODE (XEXP (x, 0)) == PLUS
4149 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4150 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4151 }
4152
4153 /* Like simplify_binary_operation except used for relational operators.
4154 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4155 not both be VOIDmode.
4156
4157 CMP_MODE specifies the mode in which the comparison is done, so it is
4158 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4159 the operands or, if both are VOIDmode, the operands are compared in
4160 "infinite precision". */
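/* Illustrative call (a sketch; r1 is a hypothetical SImode register):
     simplify_relational_operation (EQ, SImode, SImode, r1, r1)
   can fold to (const_int 1) on a target where STORE_FLAG_VALUE is 1,
   because the operands are identical and have no side effects.  */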
4161 rtx
4162 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4163 enum machine_mode cmp_mode, rtx op0, rtx op1)
4164 {
4165 rtx tem, trueop0, trueop1;
4166
4167 if (cmp_mode == VOIDmode)
4168 cmp_mode = GET_MODE (op0);
4169 if (cmp_mode == VOIDmode)
4170 cmp_mode = GET_MODE (op1);
4171
4172 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4173 if (tem)
4174 {
4175 if (SCALAR_FLOAT_MODE_P (mode))
4176 {
4177 if (tem == const0_rtx)
4178 return CONST0_RTX (mode);
4179 #ifdef FLOAT_STORE_FLAG_VALUE
4180 {
4181 REAL_VALUE_TYPE val;
4182 val = FLOAT_STORE_FLAG_VALUE (mode);
4183 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4184 }
4185 #else
4186 return NULL_RTX;
4187 #endif
4188 }
4189 if (VECTOR_MODE_P (mode))
4190 {
4191 if (tem == const0_rtx)
4192 return CONST0_RTX (mode);
4193 #ifdef VECTOR_STORE_FLAG_VALUE
4194 {
4195 int i, units;
4196 rtvec v;
4197
4198 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4199 if (val == NULL_RTX)
4200 return NULL_RTX;
4201 if (val == const1_rtx)
4202 return CONST1_RTX (mode);
4203
4204 units = GET_MODE_NUNITS (mode);
4205 v = rtvec_alloc (units);
4206 for (i = 0; i < units; i++)
4207 RTVEC_ELT (v, i) = val;
4208 return gen_rtx_raw_CONST_VECTOR (mode, v);
4209 }
4210 #else
4211 return NULL_RTX;
4212 #endif
4213 }
4214
4215 return tem;
4216 }
4217
4218 /* For the following tests, ensure const0_rtx is op1. */
4219 if (swap_commutative_operands_p (op0, op1)
4220 || (op0 == const0_rtx && op1 != const0_rtx))
4221 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4222
4223 /* If op0 is a compare, extract the comparison arguments from it. */
4224 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4225 return simplify_gen_relational (code, mode, VOIDmode,
4226 XEXP (op0, 0), XEXP (op0, 1));
4227
4228 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4229 || CC0_P (op0))
4230 return NULL_RTX;
4231
4232 trueop0 = avoid_constant_pool_reference (op0);
4233 trueop1 = avoid_constant_pool_reference (op1);
4234 return simplify_relational_operation_1 (code, mode, cmp_mode,
4235 trueop0, trueop1);
4236 }
4237
4238 /* This part of simplify_relational_operation is only used when CMP_MODE
4239 is not in class MODE_CC (i.e. it is a real comparison).
4240
4241 MODE is the mode of the result, while CMP_MODE specifies the mode
4242 in which the comparison is done, so it is the mode of the operands. */
4243
4244 static rtx
4245 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4246 enum machine_mode cmp_mode, rtx op0, rtx op1)
4247 {
4248 enum rtx_code op0code = GET_CODE (op0);
4249
4250 if (op1 == const0_rtx && COMPARISON_P (op0))
4251 {
4252 /* If op0 is a comparison, extract the comparison arguments
4253 from it. */
4254 if (code == NE)
4255 {
4256 if (GET_MODE (op0) == mode)
4257 return simplify_rtx (op0);
4258 else
4259 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4260 XEXP (op0, 0), XEXP (op0, 1));
4261 }
4262 else if (code == EQ)
4263 {
4264 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4265 if (new_code != UNKNOWN)
4266 return simplify_gen_relational (new_code, mode, VOIDmode,
4267 XEXP (op0, 0), XEXP (op0, 1));
4268 }
4269 }
4270
4271 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4272 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4273 if ((code == LTU || code == GEU)
4274 && GET_CODE (op0) == PLUS
4275 && CONST_INT_P (XEXP (op0, 1))
4276 && (rtx_equal_p (op1, XEXP (op0, 0))
4277 || rtx_equal_p (op1, XEXP (op0, 1)))
4278 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4279 && XEXP (op0, 1) != const0_rtx)
4280 {
4281 rtx new_cmp
4282 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4283 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4284 cmp_mode, XEXP (op0, 0), new_cmp);
4285 }
4286
4287 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4288 if ((code == LTU || code == GEU)
4289 && GET_CODE (op0) == PLUS
4290 && rtx_equal_p (op1, XEXP (op0, 1))
4291 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4292 && !rtx_equal_p (op1, XEXP (op0, 0)))
4293 return simplify_gen_relational (code, mode, cmp_mode, op0,
4294 copy_rtx (XEXP (op0, 0)));
4295
4296 if (op1 == const0_rtx)
4297 {
4298 /* Canonicalize (GTU x 0) as (NE x 0). */
4299 if (code == GTU)
4300 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4301 /* Canonicalize (LEU x 0) as (EQ x 0). */
4302 if (code == LEU)
4303 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4304 }
4305 else if (op1 == const1_rtx)
4306 {
4307 switch (code)
4308 {
4309 case GE:
4310 /* Canonicalize (GE x 1) as (GT x 0). */
4311 return simplify_gen_relational (GT, mode, cmp_mode,
4312 op0, const0_rtx);
4313 case GEU:
4314 /* Canonicalize (GEU x 1) as (NE x 0). */
4315 return simplify_gen_relational (NE, mode, cmp_mode,
4316 op0, const0_rtx);
4317 case LT:
4318 /* Canonicalize (LT x 1) as (LE x 0). */
4319 return simplify_gen_relational (LE, mode, cmp_mode,
4320 op0, const0_rtx);
4321 case LTU:
4322 /* Canonicalize (LTU x 1) as (EQ x 0). */
4323 return simplify_gen_relational (EQ, mode, cmp_mode,
4324 op0, const0_rtx);
4325 default:
4326 break;
4327 }
4328 }
4329 else if (op1 == constm1_rtx)
4330 {
4331 /* Canonicalize (LE x -1) as (LT x 0). */
4332 if (code == LE)
4333 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4334 /* Canonicalize (GT x -1) as (GE x 0). */
4335 if (code == GT)
4336 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4337 }
4338
4339 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4340 if ((code == EQ || code == NE)
4341 && (op0code == PLUS || op0code == MINUS)
4342 && CONSTANT_P (op1)
4343 && CONSTANT_P (XEXP (op0, 1))
4344 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4345 {
4346 rtx x = XEXP (op0, 0);
4347 rtx c = XEXP (op0, 1);
4348 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4349 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4350
4351 /* Detect an infinite recursion, where we would oscillate in this
4352 simplification case between:
4353 A + B == C <---> C - B == A,
4354 where A, B, and C are all constants with non-simplifiable expressions,
4355 usually SYMBOL_REFs. */
4356 if (GET_CODE (tem) == invcode
4357 && CONSTANT_P (x)
4358 && rtx_equal_p (c, XEXP (tem, 1)))
4359 return NULL_RTX;
4360
4361 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4362 }
4363
4364 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4365 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4366 if (code == NE
4367 && op1 == const0_rtx
4368 && GET_MODE_CLASS (mode) == MODE_INT
4369 && cmp_mode != VOIDmode
4370 /* ??? Work-around BImode bugs in the ia64 backend. */
4371 && mode != BImode
4372 && cmp_mode != BImode
4373 && nonzero_bits (op0, cmp_mode) == 1
4374 && STORE_FLAG_VALUE == 1)
4375 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4376 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4377 : lowpart_subreg (mode, op0, cmp_mode);
4378
4379 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4380 if ((code == EQ || code == NE)
4381 && op1 == const0_rtx
4382 && op0code == XOR)
4383 return simplify_gen_relational (code, mode, cmp_mode,
4384 XEXP (op0, 0), XEXP (op0, 1));
4385
4386 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4387 if ((code == EQ || code == NE)
4388 && op0code == XOR
4389 && rtx_equal_p (XEXP (op0, 0), op1)
4390 && !side_effects_p (XEXP (op0, 0)))
4391 return simplify_gen_relational (code, mode, cmp_mode,
4392 XEXP (op0, 1), const0_rtx);
4393
4394 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4395 if ((code == EQ || code == NE)
4396 && op0code == XOR
4397 && rtx_equal_p (XEXP (op0, 1), op1)
4398 && !side_effects_p (XEXP (op0, 1)))
4399 return simplify_gen_relational (code, mode, cmp_mode,
4400 XEXP (op0, 0), const0_rtx);
4401
4402 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4403 if ((code == EQ || code == NE)
4404 && op0code == XOR
4405 && CONST_SCALAR_INT_P (op1)
4406 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4407 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4408 simplify_gen_binary (XOR, cmp_mode,
4409 XEXP (op0, 1), op1));
4410
4411 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4412 if ((code == EQ || code == NE)
4413 && GET_CODE (op0) == BSWAP
4414 && CONST_SCALAR_INT_P (op1))
4415 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4416 simplify_gen_unary (BSWAP, cmp_mode,
4417 op1, cmp_mode));
4418
4419 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4420 if ((code == EQ || code == NE)
4421 && GET_CODE (op0) == BSWAP
4422 && GET_CODE (op1) == BSWAP)
4423 return simplify_gen_relational (code, mode, cmp_mode,
4424 XEXP (op0, 0), XEXP (op1, 0));
4425
4426 if (op0code == POPCOUNT && op1 == const0_rtx)
4427 switch (code)
4428 {
4429 case EQ:
4430 case LE:
4431 case LEU:
4432 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4433 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4434 XEXP (op0, 0), const0_rtx);
4435
4436 case NE:
4437 case GT:
4438 case GTU:
4439 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4440 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4441 XEXP (op0, 0), const0_rtx);
4442
4443 default:
4444 break;
4445 }
4446
4447 return NULL_RTX;
4448 }
4449
4450 enum
4451 {
4452 CMP_EQ = 1,
4453 CMP_LT = 2,
4454 CMP_GT = 4,
4455 CMP_LTU = 8,
4456 CMP_GTU = 16
4457 };
4458
4459
4460 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4461 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4462 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4463 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4464 For floating-point comparisons, assume that the operands were ordered. */
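/* Illustrative sketch: if comparing two constants established
   KNOWN_RESULT = CMP_LT | CMP_GTU (signed less-than but unsigned
   greater-than, e.g. -1 against 1), then the result is const_true_rtx
   for LT and GEU, and const0_rtx for GE and LEU.  */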
4465
4466 static rtx
4467 comparison_result (enum rtx_code code, int known_results)
4468 {
4469 switch (code)
4470 {
4471 case EQ:
4472 case UNEQ:
4473 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4474 case NE:
4475 case LTGT:
4476 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4477
4478 case LT:
4479 case UNLT:
4480 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4481 case GE:
4482 case UNGE:
4483 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4484
4485 case GT:
4486 case UNGT:
4487 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4488 case LE:
4489 case UNLE:
4490 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4491
4492 case LTU:
4493 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4494 case GEU:
4495 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4496
4497 case GTU:
4498 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4499 case LEU:
4500 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4501
4502 case ORDERED:
4503 return const_true_rtx;
4504 case UNORDERED:
4505 return const0_rtx;
4506 default:
4507 gcc_unreachable ();
4508 }
4509 }
4510
4511 /* Check if the given comparison (done in the given MODE) is actually
4512 a tautology or a contradiction. If the mode is VOIDmode, the
4513 comparison is done in "infinite precision". If no simplification
4514 is possible, this function returns zero. Otherwise, it returns
4515 either const_true_rtx or const0_rtx. */
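/* Illustrative call (a sketch):
     simplify_const_relational_operation (LTU, SImode,
                                          constm1_rtx, const1_rtx)
   yields const0_rtx, since -1 reads as 0xffffffff in an unsigned
   SImode comparison and is therefore not below 1.  */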
4516
4517 rtx
4518 simplify_const_relational_operation (enum rtx_code code,
4519 enum machine_mode mode,
4520 rtx op0, rtx op1)
4521 {
4522 rtx tem;
4523 rtx trueop0;
4524 rtx trueop1;
4525
4526 gcc_assert (mode != VOIDmode
4527 || (GET_MODE (op0) == VOIDmode
4528 && GET_MODE (op1) == VOIDmode));
4529
4530 /* If op0 is a compare, extract the comparison arguments from it. */
4531 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4532 {
4533 op1 = XEXP (op0, 1);
4534 op0 = XEXP (op0, 0);
4535
4536 if (GET_MODE (op0) != VOIDmode)
4537 mode = GET_MODE (op0);
4538 else if (GET_MODE (op1) != VOIDmode)
4539 mode = GET_MODE (op1);
4540 else
4541 return 0;
4542 }
4543
4544 /* We can't simplify MODE_CC values since we don't know what the
4545 actual comparison is. */
4546 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4547 return 0;
4548
4549 /* Make sure the constant is second. */
4550 if (swap_commutative_operands_p (op0, op1))
4551 {
4552 tem = op0, op0 = op1, op1 = tem;
4553 code = swap_condition (code);
4554 }
4555
4556 trueop0 = avoid_constant_pool_reference (op0);
4557 trueop1 = avoid_constant_pool_reference (op1);
4558
4559 /* For integer comparisons of A and B maybe we can simplify A - B and can
4560 then simplify a comparison of that with zero. If A and B are both either
4561 a register or a CONST_INT, this can't help; testing for these cases will
4562 prevent infinite recursion here and speed things up.
4563
4564 We can only do this for EQ and NE comparisons as otherwise we may
4565 lose or introduce overflow which we cannot disregard as undefined as
4566 we do not know the signedness of the operation on either the left or
4567 the right hand side of the comparison. */
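/* Illustrative sketch (r is a hypothetical SImode register): for
   (eq (plus r (const_int 3)) (plus r (const_int 5))), the MINUS of the
   two sides simplifies to (const_int -2), and comparing that against
   zero yields const0_rtx.  */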
4568
4569 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4570 && (code == EQ || code == NE)
4571 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4572 && (REG_P (op1) || CONST_INT_P (trueop1)))
4573 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4574 /* We cannot do this if tem is a nonzero address. */
4575 && ! nonzero_address_p (tem))
4576 return simplify_const_relational_operation (signed_condition (code),
4577 mode, tem, const0_rtx);
4578
4579 if (! HONOR_NANS (mode) && code == ORDERED)
4580 return const_true_rtx;
4581
4582 if (! HONOR_NANS (mode) && code == UNORDERED)
4583 return const0_rtx;
4584
4585 /* For modes without NaNs, if the two operands are equal, we know the
4586 result except if they have side-effects. Even with NaNs we know
4587 the result of unordered comparisons and, if signaling NaNs are
4588 irrelevant, also the result of LT/GT/LTGT. */
4589 if ((! HONOR_NANS (GET_MODE (trueop0))
4590 || code == UNEQ || code == UNLE || code == UNGE
4591 || ((code == LT || code == GT || code == LTGT)
4592 && ! HONOR_SNANS (GET_MODE (trueop0))))
4593 && rtx_equal_p (trueop0, trueop1)
4594 && ! side_effects_p (trueop0))
4595 return comparison_result (code, CMP_EQ);
4596
4597 /* If the operands are floating-point constants, see if we can fold
4598 the result. */
4599 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4600 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4601 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4602 {
4603 REAL_VALUE_TYPE d0, d1;
4604
4605 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4606 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4607
4608 /* Comparisons are unordered iff at least one of the values is NaN. */
4609 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4610 switch (code)
4611 {
4612 case UNEQ:
4613 case UNLT:
4614 case UNGT:
4615 case UNLE:
4616 case UNGE:
4617 case NE:
4618 case UNORDERED:
4619 return const_true_rtx;
4620 case EQ:
4621 case LT:
4622 case GT:
4623 case LE:
4624 case GE:
4625 case LTGT:
4626 case ORDERED:
4627 return const0_rtx;
4628 default:
4629 return 0;
4630 }
4631
4632 return comparison_result (code,
4633 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4634 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4635 }
4636
4637 /* Otherwise, see if the operands are both integers. */
4638 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4639 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4640 {
4641 /* It would be nice if we really had a mode here. However, the
4642 largest int representable on the target is as good as
4643 infinite. */
4644 enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4645 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4646 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4647
4648 if (wi::eq_p (ptrueop0, ptrueop1))
4649 return comparison_result (code, CMP_EQ);
4650 else
4651 {
4652 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4653 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4654 return comparison_result (code, cr);
4655 }
4656 }
4657
4658 /* Optimize comparisons with upper and lower bounds. */
4659 if (HWI_COMPUTABLE_MODE_P (mode)
4660 && CONST_INT_P (trueop1))
4661 {
4662 int sign;
4663 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4664 HOST_WIDE_INT val = INTVAL (trueop1);
4665 HOST_WIDE_INT mmin, mmax;
4666
4667 if (code == GEU
4668 || code == LEU
4669 || code == GTU
4670 || code == LTU)
4671 sign = 0;
4672 else
4673 sign = 1;
4674
4675 /* Get a reduced range if the sign bit is zero. */
4676 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4677 {
4678 mmin = 0;
4679 mmax = nonzero;
4680 }
4681 else
4682 {
4683 rtx mmin_rtx, mmax_rtx;
4684 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4685
4686 mmin = INTVAL (mmin_rtx);
4687 mmax = INTVAL (mmax_rtx);
4688 if (sign)
4689 {
4690 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4691
4692 mmin >>= (sign_copies - 1);
4693 mmax >>= (sign_copies - 1);
4694 }
4695 }
4696
4697 switch (code)
4698 {
4699 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4700 case GEU:
4701 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4702 return const_true_rtx;
4703 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4704 return const0_rtx;
4705 break;
4706 case GE:
4707 if (val <= mmin)
4708 return const_true_rtx;
4709 if (val > mmax)
4710 return const0_rtx;
4711 break;
4712
4713 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4714 case LEU:
4715 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4716 return const_true_rtx;
4717 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4718 return const0_rtx;
4719 break;
4720 case LE:
4721 if (val >= mmax)
4722 return const_true_rtx;
4723 if (val < mmin)
4724 return const0_rtx;
4725 break;
4726
4727 case EQ:
4728 /* x == y is always false for y out of range. */
4729 if (val < mmin || val > mmax)
4730 return const0_rtx;
4731 break;
4732
4733 /* x > y is always false for y >= mmax, always true for y < mmin. */
4734 case GTU:
4735 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4736 return const0_rtx;
4737 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4738 return const_true_rtx;
4739 break;
4740 case GT:
4741 if (val >= mmax)
4742 return const0_rtx;
4743 if (val < mmin)
4744 return const_true_rtx;
4745 break;
4746
4747 /* x < y is always false for y <= mmin, always true for y > mmax. */
4748 case LTU:
4749 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4750 return const0_rtx;
4751 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4752 return const_true_rtx;
4753 break;
4754 case LT:
4755 if (val <= mmin)
4756 return const0_rtx;
4757 if (val > mmax)
4758 return const_true_rtx;
4759 break;
4760
4761 case NE:
4762 /* x != y is always true for y out of range. */
4763 if (val < mmin || val > mmax)
4764 return const_true_rtx;
4765 break;
4766
4767 default:
4768 break;
4769 }
4770 }
4771
4772 /* Optimize integer comparisons with zero. */
4773 if (trueop1 == const0_rtx)
4774 {
4775 /* Some addresses are known to be nonzero. We don't know
4776 their sign, but equality comparisons are known. */
4777 if (nonzero_address_p (trueop0))
4778 {
4779 if (code == EQ || code == LEU)
4780 return const0_rtx;
4781 if (code == NE || code == GTU)
4782 return const_true_rtx;
4783 }
4784
4785 /* See if the first operand is an IOR with a constant. If so, we
4786 may be able to determine the result of this comparison. */
4787 if (GET_CODE (op0) == IOR)
4788 {
4789 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4790 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4791 {
4792 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4793 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4794 && (UINTVAL (inner_const)
4795 & ((unsigned HOST_WIDE_INT) 1
4796 << sign_bitnum)));
4797
4798 switch (code)
4799 {
4800 case EQ:
4801 case LEU:
4802 return const0_rtx;
4803 case NE:
4804 case GTU:
4805 return const_true_rtx;
4806 case LT:
4807 case LE:
4808 if (has_sign)
4809 return const_true_rtx;
4810 break;
4811 case GT:
4812 case GE:
4813 if (has_sign)
4814 return const0_rtx;
4815 break;
4816 default:
4817 break;
4818 }
4819 }
4820 }
4821 }
4822
4823 /* Optimize comparison of ABS with zero. */
4824 if (trueop1 == CONST0_RTX (mode)
4825 && (GET_CODE (trueop0) == ABS
4826 || (GET_CODE (trueop0) == FLOAT_EXTEND
4827 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4828 {
4829 switch (code)
4830 {
4831 case LT:
4832 /* Optimize abs(x) < 0.0. */
4833 if (!HONOR_SNANS (mode)
4834 && (!INTEGRAL_MODE_P (mode)
4835 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4836 {
4837 if (INTEGRAL_MODE_P (mode)
4838 && (issue_strict_overflow_warning
4839 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4840 warning (OPT_Wstrict_overflow,
4841 ("assuming signed overflow does not occur when "
4842 "assuming abs (x) < 0 is false"));
4843 return const0_rtx;
4844 }
4845 break;
4846
4847 case GE:
4848 /* Optimize abs(x) >= 0.0. */
4849 if (!HONOR_NANS (mode)
4850 && (!INTEGRAL_MODE_P (mode)
4851 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4852 {
4853 if (INTEGRAL_MODE_P (mode)
4854 && (issue_strict_overflow_warning
4855 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4856 warning (OPT_Wstrict_overflow,
4857 ("assuming signed overflow does not occur when "
4858 "assuming abs (x) >= 0 is true"));
4859 return const_true_rtx;
4860 }
4861 break;
4862
4863 case UNGE:
4864 /* Optimize ! (abs(x) < 0.0). */
4865 return const_true_rtx;
4866
4867 default:
4868 break;
4869 }
4870 }
4871
4872 return 0;
4873 }
4874 \f
4875 /* Simplify CODE, an operation with result mode MODE and three operands,
4876 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4877 a constant. Return 0 if no simplification is possible. */
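/* Illustrative call (a sketch): with a constant condition,
     simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
                                 const1_rtx, op1, op2)
   returns OP1, since any nonzero condition selects the first arm.  */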
4878
4879 rtx
4880 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4881 enum machine_mode op0_mode, rtx op0, rtx op1,
4882 rtx op2)
4883 {
4884 unsigned int width = GET_MODE_PRECISION (mode);
4885 bool any_change = false;
4886 rtx tem, trueop2;
4887
4888 /* VOIDmode means "infinite" precision. */
4889 if (width == 0)
4890 width = HOST_BITS_PER_WIDE_INT;
4891
4892 switch (code)
4893 {
4894 case FMA:
4895 /* Simplify negations around the multiplication. */
4896 /* -a * -b + c => a * b + c. */
4897 if (GET_CODE (op0) == NEG)
4898 {
4899 tem = simplify_unary_operation (NEG, mode, op1, mode);
4900 if (tem)
4901 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4902 }
4903 else if (GET_CODE (op1) == NEG)
4904 {
4905 tem = simplify_unary_operation (NEG, mode, op0, mode);
4906 if (tem)
4907 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4908 }
4909
4910 /* Canonicalize the two multiplication operands. */
4911 /* a * -b + c => -b * a + c. */
4912 if (swap_commutative_operands_p (op0, op1))
4913 tem = op0, op0 = op1, op1 = tem, any_change = true;
4914
4915 if (any_change)
4916 return gen_rtx_FMA (mode, op0, op1, op2);
4917 return NULL_RTX;
4918
4919 case SIGN_EXTRACT:
4920 case ZERO_EXTRACT:
4921 if (CONST_INT_P (op0)
4922 && CONST_INT_P (op1)
4923 && CONST_INT_P (op2)
4924 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4925 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4926 {
4927 /* Extracting a bit-field from a constant */
4928 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4929 HOST_WIDE_INT op1val = INTVAL (op1);
4930 HOST_WIDE_INT op2val = INTVAL (op2);
4931 if (BITS_BIG_ENDIAN)
4932 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4933 else
4934 val >>= op2val;
4935
4936 if (HOST_BITS_PER_WIDE_INT != op1val)
4937 {
4938 /* First zero-extend. */
4939 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4940 /* If desired, propagate sign bit. */
4941 if (code == SIGN_EXTRACT
4942 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4943 != 0)
4944 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4945 }
4946
4947 return gen_int_mode (val, mode);
4948 }
4949 break;
4950
4951 case IF_THEN_ELSE:
4952 if (CONST_INT_P (op0))
4953 return op0 != const0_rtx ? op1 : op2;
4954
4955 /* Convert c ? a : a into "a". */
4956 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4957 return op1;
4958
4959 /* Convert a != b ? a : b into "a". */
4960 if (GET_CODE (op0) == NE
4961 && ! side_effects_p (op0)
4962 && ! HONOR_NANS (mode)
4963 && ! HONOR_SIGNED_ZEROS (mode)
4964 && ((rtx_equal_p (XEXP (op0, 0), op1)
4965 && rtx_equal_p (XEXP (op0, 1), op2))
4966 || (rtx_equal_p (XEXP (op0, 0), op2)
4967 && rtx_equal_p (XEXP (op0, 1), op1))))
4968 return op1;
4969
4970 /* Convert a == b ? a : b into "b". */
4971 if (GET_CODE (op0) == EQ
4972 && ! side_effects_p (op0)
4973 && ! HONOR_NANS (mode)
4974 && ! HONOR_SIGNED_ZEROS (mode)
4975 && ((rtx_equal_p (XEXP (op0, 0), op1)
4976 && rtx_equal_p (XEXP (op0, 1), op2))
4977 || (rtx_equal_p (XEXP (op0, 0), op2)
4978 && rtx_equal_p (XEXP (op0, 1), op1))))
4979 return op2;
4980
4981 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4982 {
4983 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4984 ? GET_MODE (XEXP (op0, 1))
4985 : GET_MODE (XEXP (op0, 0)));
4986 rtx temp;
4987
4988 /* Look for happy constants in op1 and op2. */
4989 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4990 {
4991 HOST_WIDE_INT t = INTVAL (op1);
4992 HOST_WIDE_INT f = INTVAL (op2);
4993
4994 if (t == STORE_FLAG_VALUE && f == 0)
4995 code = GET_CODE (op0);
4996 else if (t == 0 && f == STORE_FLAG_VALUE)
4997 {
4998 enum rtx_code tmp;
4999 tmp = reversed_comparison_code (op0, NULL_RTX);
5000 if (tmp == UNKNOWN)
5001 break;
5002 code = tmp;
5003 }
5004 else
5005 break;
5006
5007 return simplify_gen_relational (code, mode, cmp_mode,
5008 XEXP (op0, 0), XEXP (op0, 1));
5009 }
5010
5011 if (cmp_mode == VOIDmode)
5012 cmp_mode = op0_mode;
5013 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5014 cmp_mode, XEXP (op0, 0),
5015 XEXP (op0, 1));
5016
5017 /* See if any simplifications were possible. */
5018 if (temp)
5019 {
5020 if (CONST_INT_P (temp))
5021 return temp == const0_rtx ? op2 : op1;
5022 else if (temp)
5023 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5024 }
5025 }
5026 break;
5027
5028 case VEC_MERGE:
5029 gcc_assert (GET_MODE (op0) == mode);
5030 gcc_assert (GET_MODE (op1) == mode);
5031 gcc_assert (VECTOR_MODE_P (mode));
5032 trueop2 = avoid_constant_pool_reference (op2);
5033 if (CONST_INT_P (trueop2))
5034 {
5035 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5036 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5037 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5038 unsigned HOST_WIDE_INT mask;
5039 if (n_elts == HOST_BITS_PER_WIDE_INT)
5040 mask = -1;
5041 else
5042 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5043
5044 if (!(sel & mask) && !side_effects_p (op0))
5045 return op1;
5046 if ((sel & mask) == mask && !side_effects_p (op1))
5047 return op0;
5048
5049 rtx trueop0 = avoid_constant_pool_reference (op0);
5050 rtx trueop1 = avoid_constant_pool_reference (op1);
5051 if (GET_CODE (trueop0) == CONST_VECTOR
5052 && GET_CODE (trueop1) == CONST_VECTOR)
5053 {
5054 rtvec v = rtvec_alloc (n_elts);
5055 unsigned int i;
5056
5057 for (i = 0; i < n_elts; i++)
5058 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5059 ? CONST_VECTOR_ELT (trueop0, i)
5060 : CONST_VECTOR_ELT (trueop1, i));
5061 return gen_rtx_CONST_VECTOR (mode, v);
5062 }
5063
5064 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5065 if no element from a appears in the result. */
5066 if (GET_CODE (op0) == VEC_MERGE)
5067 {
5068 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5069 if (CONST_INT_P (tem))
5070 {
5071 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5072 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5073 return simplify_gen_ternary (code, mode, mode,
5074 XEXP (op0, 1), op1, op2);
5075 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5076 return simplify_gen_ternary (code, mode, mode,
5077 XEXP (op0, 0), op1, op2);
5078 }
5079 }
5080 if (GET_CODE (op1) == VEC_MERGE)
5081 {
5082 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5083 if (CONST_INT_P (tem))
5084 {
5085 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5086 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5087 return simplify_gen_ternary (code, mode, mode,
5088 op0, XEXP (op1, 1), op2);
5089 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5090 return simplify_gen_ternary (code, mode, mode,
5091 op0, XEXP (op1, 0), op2);
5092 }
5093 }
5094 }
5095
5096 if (rtx_equal_p (op0, op1)
5097 && !side_effects_p (op2) && !side_effects_p (op1))
5098 return op0;
5099
5100 break;
5101
5102 default:
5103 gcc_unreachable ();
5104 }
5105
5106 return 0;
5107 }
5108
5109 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5110 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5111 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5112
5113 Works by unpacking OP into a collection of 8-bit values
5114 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5115 and then repacking them again for OUTERMODE. */
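/* Illustrative sketch (a little-endian layout is assumed): evaluating
     (subreg:QI (const_int 0x12345678) 0)
   with INNERMODE SImode unpacks the constant into the byte array
   { 0x78, 0x56, 0x34, 0x12 }, selects byte 0, and repacks it as
   (const_int 0x78).  */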
5116
5117 static rtx
5118 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5119 enum machine_mode innermode, unsigned int byte)
5120 {
5121 enum {
5122 value_bit = 8,
5123 value_mask = (1 << value_bit) - 1
5124 };
5125 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE/value_bit];
5126 int value_start;
5127 int i;
5128 int elem;
5129
5130 int num_elem;
5131 rtx * elems;
5132 int elem_bitsize;
5133 rtx result_s;
5134 rtvec result_v = NULL;
5135 enum mode_class outer_class;
5136 enum machine_mode outer_submode;
5137 int max_bitsize;
5138
5139 /* Some ports misuse CCmode. */
5140 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5141 return op;
5142
5143 /* We have no way to represent a complex constant at the rtl level. */
5144 if (COMPLEX_MODE_P (outermode))
5145 return NULL_RTX;
5146
5147 /* We support any size mode. */
5148 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5149 GET_MODE_BITSIZE (innermode));
5150
5151 /* Unpack the value. */
5152
5153 if (GET_CODE (op) == CONST_VECTOR)
5154 {
5155 num_elem = CONST_VECTOR_NUNITS (op);
5156 elems = &CONST_VECTOR_ELT (op, 0);
5157 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5158 }
5159 else
5160 {
5161 num_elem = 1;
5162 elems = &op;
5163 elem_bitsize = max_bitsize;
5164 }
5165 /* If this asserts, it is too complicated; reducing value_bit may help. */
5166 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5167 /* I don't know how to handle endianness of sub-units. */
5168 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5169
5170 for (elem = 0; elem < num_elem; elem++)
5171 {
5172 unsigned char * vp;
5173 rtx el = elems[elem];
5174
5175 /* Vectors are kept in target memory order. (This is probably
5176 a mistake.) */
5177 {
5178 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5179 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5180 / BITS_PER_UNIT);
5181 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5182 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5183 unsigned bytele = (subword_byte % UNITS_PER_WORD
5184 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5185 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5186 }
5187
5188 switch (GET_CODE (el))
5189 {
5190 case CONST_INT:
5191 for (i = 0;
5192 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5193 i += value_bit)
5194 *vp++ = INTVAL (el) >> i;
5195 /* CONST_INTs are always logically sign-extended. */
5196 for (; i < elem_bitsize; i += value_bit)
5197 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5198 break;
5199
5200 case CONST_WIDE_INT:
5201 {
5202 rtx_mode_t val = std::make_pair (el, innermode);
5203 unsigned char extend = wi::sign_mask (val);
5204
5205 for (i = 0; i < elem_bitsize; i += value_bit)
5206 *vp++ = wi::extract_uhwi (val, i, value_bit);
5207 for (; i < elem_bitsize; i += value_bit)
5208 *vp++ = extend;
5209 }
5210 break;
5211
5212 case CONST_DOUBLE:
5213 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5214 {
5215 unsigned char extend = 0;
5216 /* If this triggers, someone should have generated a
5217 CONST_INT instead. */
5218 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5219
5220 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5221 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5222 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5223 {
5224 *vp++
5225 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5226 i += value_bit;
5227 }
5228
5229 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5230 extend = -1;
5231 for (; i < elem_bitsize; i += value_bit)
5232 *vp++ = extend;
5233 }
5234 else
5235 {
5236 /* This is big enough for anything on the platform. */
5237 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5238 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5239
5240 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5241 gcc_assert (bitsize <= elem_bitsize);
5242 gcc_assert (bitsize % value_bit == 0);
5243
5244 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5245 GET_MODE (el));
5246
5247 /* real_to_target produces its result in words affected by
5248 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5249 and use WORDS_BIG_ENDIAN instead; see the documentation
5250 of SUBREG in rtl.texi. */
5251 for (i = 0; i < bitsize; i += value_bit)
5252 {
5253 int ibase;
5254 if (WORDS_BIG_ENDIAN)
5255 ibase = bitsize - 1 - i;
5256 else
5257 ibase = i;
5258 *vp++ = tmp[ibase / 32] >> i % 32;
5259 }
5260
5261 /* It shouldn't matter what's done here, so fill it with
5262 zero. */
5263 for (; i < elem_bitsize; i += value_bit)
5264 *vp++ = 0;
5265 }
5266 break;
5267
5268 case CONST_FIXED:
5269 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5270 {
5271 for (i = 0; i < elem_bitsize; i += value_bit)
5272 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5273 }
5274 else
5275 {
5276 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5277 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5278 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5279 i += value_bit)
5280 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5281 >> (i - HOST_BITS_PER_WIDE_INT);
5282 for (; i < elem_bitsize; i += value_bit)
5283 *vp++ = 0;
5284 }
5285 break;
5286
5287 default:
5288 gcc_unreachable ();
5289 }
5290 }
5291
5292 /* Now, pick the right byte to start with. */
5293 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5294 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5295 will already have offset 0. */
5296 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5297 {
5298 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5299 - byte);
5300 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5301 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5302 byte = (subword_byte % UNITS_PER_WORD
5303 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5304 }
5305
5306 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5307 so if it's become negative it will instead be very large.) */
5308 gcc_assert (byte < GET_MODE_SIZE (innermode));
5309
5310 /* Convert from bytes to chunks of size value_bit. */
5311 value_start = byte * (BITS_PER_UNIT / value_bit);
5312
5313 /* Re-pack the value. */
5314
5315 if (VECTOR_MODE_P (outermode))
5316 {
5317 num_elem = GET_MODE_NUNITS (outermode);
5318 result_v = rtvec_alloc (num_elem);
5319 elems = &RTVEC_ELT (result_v, 0);
5320 outer_submode = GET_MODE_INNER (outermode);
5321 }
5322 else
5323 {
5324 num_elem = 1;
5325 elems = &result_s;
5326 outer_submode = outermode;
5327 }
5328
5329 outer_class = GET_MODE_CLASS (outer_submode);
5330 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5331
5332 gcc_assert (elem_bitsize % value_bit == 0);
5333 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5334
5335 for (elem = 0; elem < num_elem; elem++)
5336 {
5337 unsigned char *vp;
5338
5339 /* Vectors are stored in target memory order. (This is probably
5340 a mistake.) */
5341 {
5342 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5343 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5344 / BITS_PER_UNIT);
5345 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5346 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5347 unsigned bytele = (subword_byte % UNITS_PER_WORD
5348 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5349 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5350 }
5351
5352 switch (outer_class)
5353 {
5354 case MODE_INT:
5355 case MODE_PARTIAL_INT:
5356 {
5357 int u;
5358 int base = 0;
5359 int units
5360 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5361 / HOST_BITS_PER_WIDE_INT;
5362 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5363 wide_int r;
5364
5365 for (u = 0; u < units; u++)
5366 {
5367 unsigned HOST_WIDE_INT buf = 0;
5368 for (i = 0;
5369 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5370 i += value_bit)
5371 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5372
5373 tmp[u] = buf;
5374 base += HOST_BITS_PER_WIDE_INT;
5375 }
5376 r = wide_int::from_array (tmp, units,
5377 GET_MODE_PRECISION (outer_submode));
5378 elems[elem] = immed_wide_int_const (r, outer_submode);
5379 }
5380 break;
5381
5382 case MODE_FLOAT:
5383 case MODE_DECIMAL_FLOAT:
5384 {
5385 REAL_VALUE_TYPE r;
5386 long tmp[MAX_BITSIZE_MODE_ANY_INT / 32];
5387
5388 /* real_from_target wants its input in words affected by
5389 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5390 and use WORDS_BIG_ENDIAN instead; see the documentation
5391 of SUBREG in rtl.texi. */
5392 for (i = 0; i < max_bitsize / 32; i++)
5393 tmp[i] = 0;
5394 for (i = 0; i < elem_bitsize; i += value_bit)
5395 {
5396 int ibase;
5397 if (WORDS_BIG_ENDIAN)
5398 ibase = elem_bitsize - 1 - i;
5399 else
5400 ibase = i;
5401 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5402 }
5403
5404 real_from_target (&r, tmp, outer_submode);
5405 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5406 }
5407 break;
5408
5409 case MODE_FRACT:
5410 case MODE_UFRACT:
5411 case MODE_ACCUM:
5412 case MODE_UACCUM:
5413 {
5414 FIXED_VALUE_TYPE f;
5415 f.data.low = 0;
5416 f.data.high = 0;
5417 f.mode = outer_submode;
5418
5419 for (i = 0;
5420 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5421 i += value_bit)
5422 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5423 for (; i < elem_bitsize; i += value_bit)
5424 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5425 << (i - HOST_BITS_PER_WIDE_INT));
5426
5427 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5428 }
5429 break;
5430
5431 default:
5432 gcc_unreachable ();
5433 }
5434 }
5435 if (VECTOR_MODE_P (outermode))
5436 return gen_rtx_CONST_VECTOR (outermode, result_v);
5437 else
5438 return result_s;
5439 }
5440
5441 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5442 Return 0 if no simplifications are possible. */
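/* Illustrative sketch (x is a hypothetical TImode pseudo): the nested
     (subreg:SI (subreg:DI (reg:TI x) 0) 0)
   collapses to (subreg:SI (reg:TI x) 0) through the "changing mode
   twice" rule below, rather than keeping two stacked SUBREGs.  */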
5443 rtx
5444 simplify_subreg (enum machine_mode outermode, rtx op,
5445 enum machine_mode innermode, unsigned int byte)
5446 {
5447 /* Little bit of sanity checking. */
5448 gcc_assert (innermode != VOIDmode);
5449 gcc_assert (outermode != VOIDmode);
5450 gcc_assert (innermode != BLKmode);
5451 gcc_assert (outermode != BLKmode);
5452
5453 gcc_assert (GET_MODE (op) == innermode
5454 || GET_MODE (op) == VOIDmode);
5455
5456 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5457 return NULL_RTX;
5458
5459 if (byte >= GET_MODE_SIZE (innermode))
5460 return NULL_RTX;
5461
5462 if (outermode == innermode && !byte)
5463 return op;
5464
5465 if (CONST_SCALAR_INT_P (op)
5466 || CONST_DOUBLE_AS_FLOAT_P (op)
5467 || GET_CODE (op) == CONST_FIXED
5468 || GET_CODE (op) == CONST_VECTOR)
5469 return simplify_immed_subreg (outermode, op, innermode, byte);
5470
5471 /* Changing mode twice with SUBREG => just change it once,
5472 or not at all if changing back to op's starting mode. */
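/* For example, (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
   (subreg:QI (reg:SI R) 0), and (subreg:SI (subreg:HI (reg:SI R) 0) 0)
   collapses to (reg:SI R) itself.  */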
5473 if (GET_CODE (op) == SUBREG)
5474 {
5475 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5476 int final_offset = byte + SUBREG_BYTE (op);
5477 rtx newx;
5478
5479 if (outermode == innermostmode
5480 && byte == 0 && SUBREG_BYTE (op) == 0)
5481 return SUBREG_REG (op);
5482
5483 /* The SUBREG_BYTE represents the offset, as if the value were stored
5484 in memory. An irritating exception is a paradoxical subreg, where
5485 we define SUBREG_BYTE to be 0; on big-endian machines, this
5486 value should be negative. For a moment, undo this exception. */
5487 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5488 {
5489 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5490 if (WORDS_BIG_ENDIAN)
5491 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5492 if (BYTES_BIG_ENDIAN)
5493 final_offset += difference % UNITS_PER_WORD;
5494 }
5495 if (SUBREG_BYTE (op) == 0
5496 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5497 {
5498 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5499 if (WORDS_BIG_ENDIAN)
5500 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5501 if (BYTES_BIG_ENDIAN)
5502 final_offset += difference % UNITS_PER_WORD;
5503 }
5504
5505 /* See whether resulting subreg will be paradoxical. */
5506 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5507 {
5508 /* In nonparadoxical subregs we can't handle negative offsets. */
5509 if (final_offset < 0)
5510 return NULL_RTX;
5511 /* Bail out in case resulting subreg would be incorrect. */
5512 if (final_offset % GET_MODE_SIZE (outermode)
5513 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5514 return NULL_RTX;
5515 }
5516 else
5517 {
5518 int offset = 0;
5519 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5520
5521 /* In a paradoxical subreg, see if we are still looking at the lower part.
5522 If so, our SUBREG_BYTE will be 0. */
5523 if (WORDS_BIG_ENDIAN)
5524 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5525 if (BYTES_BIG_ENDIAN)
5526 offset += difference % UNITS_PER_WORD;
5527 if (offset == final_offset)
5528 final_offset = 0;
5529 else
5530 return NULL_RTX;
5531 }
5532
5533 /* Recurse for further possible simplifications. */
5534 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5535 final_offset);
5536 if (newx)
5537 return newx;
5538 if (validate_subreg (outermode, innermostmode,
5539 SUBREG_REG (op), final_offset))
5540 {
5541 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5542 if (SUBREG_PROMOTED_VAR_P (op)
5543 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5544 && GET_MODE_CLASS (outermode) == MODE_INT
5545 && IN_RANGE (GET_MODE_SIZE (outermode),
5546 GET_MODE_SIZE (innermode),
5547 GET_MODE_SIZE (innermostmode))
5548 && subreg_lowpart_p (newx))
5549 {
5550 SUBREG_PROMOTED_VAR_P (newx) = 1;
5551 SUBREG_PROMOTED_UNSIGNED_SET
5552 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5553 }
5554 return newx;
5555 }
5556 return NULL_RTX;
5557 }
5558
5559 /* SUBREG of a hard register => just change the register number
5560 and/or mode. If the hard register is not valid in that mode,
5561 suppress this simplification. If the hard register is the stack,
5562 frame, or argument pointer, leave this as a SUBREG. */
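/* For example, on a little-endian target where hard register R is valid
   in both SImode and HImode and the lowpart stays in the same register,
   (subreg:HI (reg:SI R) 0) typically becomes (reg:HI R).  */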
5563
5564 if (REG_P (op) && HARD_REGISTER_P (op))
5565 {
5566 unsigned int regno, final_regno;
5567
5568 regno = REGNO (op);
5569 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5570 if (HARD_REGISTER_NUM_P (final_regno))
5571 {
5572 rtx x;
5573 int final_offset = byte;
5574
5575 /* Adjust offset for paradoxical subregs. */
5576 if (byte == 0
5577 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5578 {
5579 int difference = (GET_MODE_SIZE (innermode)
5580 - GET_MODE_SIZE (outermode));
5581 if (WORDS_BIG_ENDIAN)
5582 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5583 if (BYTES_BIG_ENDIAN)
5584 final_offset += difference % UNITS_PER_WORD;
5585 }
5586
5587 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5588
5589 /* Propagate the original regno. We don't have any way to specify
5590 the offset inside the original regno, so do so only for the lowpart.
5591 The information is used only by alias analysis, which cannot
5592 grok partial registers anyway. */
5593
5594 if (subreg_lowpart_offset (outermode, innermode) == byte)
5595 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5596 return x;
5597 }
5598 }
5599
5600 /* If we have a SUBREG of a register that we are replacing and we are
5601 replacing it with a MEM, make a new MEM and try replacing the
5602 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5603 or if we would be widening it. */
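/* For example, if X is a register holding the address,
   (subreg:SI (mem:DI X) 4) can be rewritten via adjust_address_nv as
   roughly (mem:SI (plus X (const_int 4))).  */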
5604
5605 if (MEM_P (op)
5606 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5607 /* Allow splitting of volatile memory references in case we don't
5608 have an instruction to move the whole thing. */
5609 && (! MEM_VOLATILE_P (op)
5610 || ! have_insn_for (SET, innermode))
5611 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5612 return adjust_address_nv (op, outermode, byte);
5613
5614 /* Handle complex values represented as CONCAT
5615 of real and imaginary parts. */
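/* For example, assuming 4-byte SFmode,
   (subreg:SF (concat:SC (reg:SF A) (reg:SF B)) 4) simplifies to
   (reg:SF B), the imaginary part.  */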
5616 if (GET_CODE (op) == CONCAT)
5617 {
5618 unsigned int part_size, final_offset;
5619 rtx part, res;
5620
5621 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5622 if (byte < part_size)
5623 {
5624 part = XEXP (op, 0);
5625 final_offset = byte;
5626 }
5627 else
5628 {
5629 part = XEXP (op, 1);
5630 final_offset = byte - part_size;
5631 }
5632
5633 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5634 return NULL_RTX;
5635
5636 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5637 if (res)
5638 return res;
5639 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5640 return gen_rtx_SUBREG (outermode, part, final_offset);
5641 return NULL_RTX;
5642 }
5643
5644 /* A SUBREG resulting from a zero extension may fold to zero if
5645 it extracts bits higher than the ZERO_EXTEND's source bits. */
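/* For example, on a little-endian target,
   (subreg:SI (zero_extend:DI (reg:SI X)) 4) reads only the zero-filled
   high half and folds to (const_int 0).  */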
5646 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5647 {
5648 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5649 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5650 return CONST0_RTX (outermode);
5651 }
5652
5653 if (SCALAR_INT_MODE_P (outermode)
5654 && SCALAR_INT_MODE_P (innermode)
5655 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5656 && byte == subreg_lowpart_offset (outermode, innermode))
5657 {
5658 rtx tem = simplify_truncation (outermode, op, innermode);
5659 if (tem)
5660 return tem;
5661 }
5662
5663 return NULL_RTX;
5664 }
5665
5666 /* Make a SUBREG operation or equivalent if it folds. */
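/* Unlike simplify_subreg, this falls back to creating a fresh SUBREG rtx
   when no simplification applies but the requested subreg is valid.  A
   minimal usage sketch, for some SImode rtx X (hypothetical caller):

     rtx low = simplify_gen_subreg (QImode, x, SImode,
                                    subreg_lowpart_offset (QImode, SImode));

   which either folds X or wraps it in (subreg:QI X ...).  */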
5667
5668 rtx
5669 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5670 enum machine_mode innermode, unsigned int byte)
5671 {
5672 rtx newx;
5673
5674 newx = simplify_subreg (outermode, op, innermode, byte);
5675 if (newx)
5676 return newx;
5677
5678 if (GET_CODE (op) == SUBREG
5679 || GET_CODE (op) == CONCAT
5680 || GET_MODE (op) == VOIDmode)
5681 return NULL_RTX;
5682
5683 if (validate_subreg (outermode, innermode, op, byte))
5684 return gen_rtx_SUBREG (outermode, op, byte);
5685
5686 return NULL_RTX;
5687 }
5688
5689 /* Simplify X, an rtx expression.
5690
5691 Return the simplified expression or NULL if no simplifications
5692 were possible.
5693
5694 This is the preferred entry point into the simplification routines;
5695 however, we still allow passes to call the more specific routines.
5696
5697 Right now GCC has three (yes, three) major bodies of RTL simplification
5698 code that need to be unified.
5699
5700 1. fold_rtx in cse.c. This code uses various CSE specific
5701 information to aid in RTL simplification.
5702
5703 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5704 it uses combine specific information to aid in RTL
5705 simplification.
5706
5707 3. The routines in this file.
5708
5709
5710 Long term we want to have only one body of simplification code; to
5711 get to that state I recommend the following steps:
5712
5713 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5714 which do not depend on pass-specific state into these routines.
5715
5716 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5717 use this routine whenever possible.
5718
5719 3. Allow for pass dependent state to be provided to these
5720 routines and add simplifications based on the pass dependent
5721 state. Remove code from cse.c & combine.c that becomes
5722 redundant/dead.
5723
5724 It will take time, but ultimately the compiler will be easier to
5725 maintain and improve. It's totally silly that when we add a
5726 simplification it needs to be added to 4 places (3 for RTL
5727 simplification and 1 for tree simplification). */
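/* For example, simplify_rtx applied to (plus:SI (reg:SI A) (const_int 0))
   dispatches to simplify_binary_operation and returns (reg:SI A); when no
   rule matches, NULL is returned.  */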
5728
5729 rtx
5730 simplify_rtx (const_rtx x)
5731 {
5732 const enum rtx_code code = GET_CODE (x);
5733 const enum machine_mode mode = GET_MODE (x);
5734
5735 switch (GET_RTX_CLASS (code))
5736 {
5737 case RTX_UNARY:
5738 return simplify_unary_operation (code, mode,
5739 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5740 case RTX_COMM_ARITH:
5741 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5742 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5743
5744 /* Fall through.... */
5745
5746 case RTX_BIN_ARITH:
5747 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5748
5749 case RTX_TERNARY:
5750 case RTX_BITFIELD_OPS:
5751 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5752 XEXP (x, 0), XEXP (x, 1),
5753 XEXP (x, 2));
5754
5755 case RTX_COMPARE:
5756 case RTX_COMM_COMPARE:
5757 return simplify_relational_operation (code, mode,
5758 ((GET_MODE (XEXP (x, 0))
5759 != VOIDmode)
5760 ? GET_MODE (XEXP (x, 0))
5761 : GET_MODE (XEXP (x, 1))),
5762 XEXP (x, 0),
5763 XEXP (x, 1));
5764
5765 case RTX_EXTRA:
5766 if (code == SUBREG)
5767 return simplify_subreg (mode, SUBREG_REG (x),
5768 GET_MODE (SUBREG_REG (x)),
5769 SUBREG_BYTE (x));
5770 break;
5771
5772 case RTX_OBJ:
5773 if (code == LO_SUM)
5774 {
5775 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5776 if (GET_CODE (XEXP (x, 0)) == HIGH
5777 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5778 return XEXP (x, 1);
5779 }
5780 break;
5781
5782 default:
5783 break;
5784 }
5785 return NULL;
5786 }