1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "function.h"
34 #include "expr.h"
35 #include "diagnostic-core.h"
36 #include "ggc.h"
37 #include "target.h"
38
39 /* Simplification and canonicalization of RTL. */
40
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
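/* For example, assuming a 64-bit HOST_WIDE_INT,
   HWI_SIGN_EXTEND (0xffffffff90000000) yields (HOST_WIDE_INT) -1
   because the low word is negative when read as a signed value,
   whereas HWI_SIGN_EXTEND (0x70000000) yields 0.  */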
47
48 static rtx neg_const_int (enum machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
51 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
52 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
53 unsigned int);
54 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
55 rtx, rtx);
56 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
57 enum machine_mode, rtx, rtx);
58 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
59 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
60 rtx, rtx, rtx, rtx);
61 \f
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, const_rtx i)
66 {
67 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
68 }
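/* For example, negating the QImode constant -128 overflows the
   8-bit signed range; the unsigned negation gives 128 and
   gen_int_mode truncates that back into QImode, so the result is
   again (const_int -128) rather than an out-of-range value.  */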
69
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
72
73 bool
74 mode_signbit_p (enum machine_mode mode, const_rtx x)
75 {
76 unsigned HOST_WIDE_INT val;
77 unsigned int width;
78
79 if (GET_MODE_CLASS (mode) != MODE_INT)
80 return false;
81
82 width = GET_MODE_PRECISION (mode);
83 if (width == 0)
84 return false;
85
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && CONST_INT_P (x))
88 val = INTVAL (x);
89 #if TARGET_SUPPORTS_WIDE_INT
90 else if (CONST_WIDE_INT_P (x))
91 {
92 unsigned int i;
93 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
94 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
95 return false;
96 for (i = 0; i < elts - 1; i++)
97 if (CONST_WIDE_INT_ELT (x, i) != 0)
98 return false;
99 val = CONST_WIDE_INT_ELT (x, elts - 1);
100 width %= HOST_BITS_PER_WIDE_INT;
101 if (width == 0)
102 width = HOST_BITS_PER_WIDE_INT;
103 }
104 #else
105 else if (width <= HOST_BITS_PER_DOUBLE_INT
106 && CONST_DOUBLE_AS_INT_P (x)
107 && CONST_DOUBLE_LOW (x) == 0)
108 {
109 val = CONST_DOUBLE_HIGH (x);
110 width -= HOST_BITS_PER_WIDE_INT;
111 }
112 #endif
113 else
114 /* X is not an integer constant. */
115 return false;
116
117 if (width < HOST_BITS_PER_WIDE_INT)
118 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
119 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
120 }
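/* For example, on a host with a 64-bit HOST_WIDE_INT this returns
   true for an SImode constant whose value, masked to 32 bits, is
   0x80000000 -- only bit 31, the sign bit, set -- and false for
   values such as 0x40000000 or 0xc0000000.  */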
121
122 /* Test whether VAL is equal to the most significant bit of mode MODE
123 (after masking with the mode mask of MODE). Returns false if the
124 precision of MODE is too large to handle. */
125
126 bool
127 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
128 {
129 unsigned int width;
130
131 if (GET_MODE_CLASS (mode) != MODE_INT)
132 return false;
133
134 width = GET_MODE_PRECISION (mode);
135 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
136 return false;
137
138 val &= GET_MODE_MASK (mode);
139 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
140 }
141
142 /* Test whether the most significant bit of mode MODE is set in VAL.
143 Returns false if the precision of MODE is too large to handle. */
144 bool
145 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
146 {
147 unsigned int width;
148
149 if (GET_MODE_CLASS (mode) != MODE_INT)
150 return false;
151
152 width = GET_MODE_PRECISION (mode);
153 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
154 return false;
155
156 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
157 return val != 0;
158 }
159
160 /* Test whether the most significant bit of mode MODE is clear in VAL.
161 Returns false if the precision of MODE is too large to handle. */
162 bool
163 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
164 {
165 unsigned int width;
166
167 if (GET_MODE_CLASS (mode) != MODE_INT)
168 return false;
169
170 width = GET_MODE_PRECISION (mode);
171 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
172 return false;
173
174 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
175 return val == 0;
176 }
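/* A quick illustration of the three predicates above, for QImode
   (8-bit precision) and VAL == 0x85: val_signbit_p is false because
   bits other than the sign bit survive the masking,
   val_signbit_known_set_p is true because bit 7 is set, and
   val_signbit_known_clear_p is therefore false.  */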
177 \f
178 /* Make a binary operation by properly ordering the operands and
179 seeing if the expression folds. */
180
181 rtx
182 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
183 rtx op1)
184 {
185 rtx tem;
186
187 /* If this simplifies, do it. */
188 tem = simplify_binary_operation (code, mode, op0, op1);
189 if (tem)
190 return tem;
191
192 /* Put complex operands first and constants second if commutative. */
193 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
194 && swap_commutative_operands_p (op0, op1))
195 tem = op0, op0 = op1, op1 = tem;
196
197 return gen_rtx_fmt_ee (code, mode, op0, op1);
198 }
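/* For instance, simplify_gen_binary (PLUS, SImode, const1_rtx, reg)
   with a plain pseudo REG first fails to fold, then swaps the
   operands because PLUS is commutative and constants go second,
   producing (plus:SI reg (const_int 1)).  */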
199 \f
200 /* If X is a MEM referencing the constant pool, return the real value.
201 Otherwise return X. */
202 rtx
203 avoid_constant_pool_reference (rtx x)
204 {
205 rtx c, tmp, addr;
206 enum machine_mode cmode;
207 HOST_WIDE_INT offset = 0;
208
209 switch (GET_CODE (x))
210 {
211 case MEM:
212 break;
213
214 case FLOAT_EXTEND:
215 /* Handle float extensions of constant pool references. */
216 tmp = XEXP (x, 0);
217 c = avoid_constant_pool_reference (tmp);
218 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
219 {
220 REAL_VALUE_TYPE d;
221
222 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
223 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
224 }
225 return x;
226
227 default:
228 return x;
229 }
230
231 if (GET_MODE (x) == BLKmode)
232 return x;
233
234 addr = XEXP (x, 0);
235
236 /* Call target hook to avoid the effects of -fpic etc.... */
237 addr = targetm.delegitimize_address (addr);
238
239 /* Split the address into a base and integer offset. */
240 if (GET_CODE (addr) == CONST
241 && GET_CODE (XEXP (addr, 0)) == PLUS
242 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
243 {
244 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
245 addr = XEXP (XEXP (addr, 0), 0);
246 }
247
248 if (GET_CODE (addr) == LO_SUM)
249 addr = XEXP (addr, 1);
250
251 /* If this is a constant pool reference, we can turn it into its
252 constant and hope that simplifications happen. */
253 if (GET_CODE (addr) == SYMBOL_REF
254 && CONSTANT_POOL_ADDRESS_P (addr))
255 {
256 c = get_pool_constant (addr);
257 cmode = get_pool_mode (addr);
258
259 /* If we're accessing the constant in a different mode than it was
260 originally stored, attempt to fix that up via subreg simplifications.
261 If that fails we have no choice but to return the original memory. */
262 if ((offset != 0 || cmode != GET_MODE (x))
263 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
264 {
265 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
266 if (tem && CONSTANT_P (tem))
267 return tem;
268 }
269 else
270 return c;
271 }
272
273 return x;
274 }
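/* For example, if X is (mem:DF (symbol_ref)) referring to a constant
   pool entry that holds the DFmode value 1.0, the CONST_DOUBLE for
   1.0 is returned directly; reading the same entry in a narrower
   mode or at a nonzero offset goes through simplify_subreg instead,
   and the original MEM is returned if that fails.  */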
275 \f
276 /* Simplify a MEM based on its attributes. This is the default
277 delegitimize_address target hook, and it's recommended that every
278 overrider call it. */
279
280 rtx
281 delegitimize_mem_from_attrs (rtx x)
282 {
283 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
284 use their base addresses as equivalent. */
285 if (MEM_P (x)
286 && MEM_EXPR (x)
287 && MEM_OFFSET_KNOWN_P (x))
288 {
289 tree decl = MEM_EXPR (x);
290 enum machine_mode mode = GET_MODE (x);
291 HOST_WIDE_INT offset = 0;
292
293 switch (TREE_CODE (decl))
294 {
295 default:
296 decl = NULL;
297 break;
298
299 case VAR_DECL:
300 break;
301
302 case ARRAY_REF:
303 case ARRAY_RANGE_REF:
304 case COMPONENT_REF:
305 case BIT_FIELD_REF:
306 case REALPART_EXPR:
307 case IMAGPART_EXPR:
308 case VIEW_CONVERT_EXPR:
309 {
310 HOST_WIDE_INT bitsize, bitpos;
311 tree toffset;
312 int unsignedp, volatilep = 0;
313
314 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
315 &mode, &unsignedp, &volatilep, false);
316 if (bitsize != GET_MODE_BITSIZE (mode)
317 || (bitpos % BITS_PER_UNIT)
318 || (toffset && !tree_fits_shwi_p (toffset)))
319 decl = NULL;
320 else
321 {
322 offset += bitpos / BITS_PER_UNIT;
323 if (toffset)
324 offset += tree_to_hwi (toffset);
325 }
326 break;
327 }
328 }
329
330 if (decl
331 && mode == GET_MODE (x)
332 && TREE_CODE (decl) == VAR_DECL
333 && (TREE_STATIC (decl)
334 || DECL_THREAD_LOCAL_P (decl))
335 && DECL_RTL_SET_P (decl)
336 && MEM_P (DECL_RTL (decl)))
337 {
338 rtx newx;
339
340 offset += MEM_OFFSET (x);
341
342 newx = DECL_RTL (decl);
343
344 if (MEM_P (newx))
345 {
346 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
347
348 /* Avoid creating a new MEM needlessly if we already had
349 the same address. We do if there's no OFFSET and the
350 old address X is identical to NEWX, or if X is of the
351 form (plus NEWX OFFSET), or the NEWX is of the form
352 (plus Y (const_int Z)) and X is that with the offset
353 added: (plus Y (const_int Z+OFFSET)). */
354 if (!((offset == 0
355 || (GET_CODE (o) == PLUS
356 && GET_CODE (XEXP (o, 1)) == CONST_INT
357 && (offset == INTVAL (XEXP (o, 1))
358 || (GET_CODE (n) == PLUS
359 && GET_CODE (XEXP (n, 1)) == CONST_INT
360 && (INTVAL (XEXP (n, 1)) + offset
361 == INTVAL (XEXP (o, 1)))
362 && (n = XEXP (n, 0))))
363 && (o = XEXP (o, 0))))
364 && rtx_equal_p (o, n)))
365 x = adjust_address_nv (newx, mode, offset);
366 }
367 else if (GET_MODE (x) == GET_MODE (newx)
368 && offset == 0)
369 x = newx;
370 }
371 }
372
373 return x;
374 }
375 \f
376 /* Make a unary operation by first seeing if it folds and otherwise making
377 the specified operation. */
378
379 rtx
380 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
381 enum machine_mode op_mode)
382 {
383 rtx tem;
384
385 /* If this simplifies, use it. */
386 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
387 return tem;
388
389 return gen_rtx_fmt_e (code, mode, op);
390 }
391
392 /* Likewise for ternary operations. */
393
394 rtx
395 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
396 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
397 {
398 rtx tem;
399
400 /* If this simplifies, use it. */
401 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
402 op0, op1, op2)))
403 return tem;
404
405 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
406 }
407
408 /* Likewise, for relational operations.
409 CMP_MODE specifies mode comparison is done in. */
410
411 rtx
412 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
413 enum machine_mode cmp_mode, rtx op0, rtx op1)
414 {
415 rtx tem;
416
417 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
418 op0, op1)))
419 return tem;
420
421 return gen_rtx_fmt_ee (code, mode, op0, op1);
422 }
423 \f
424 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
425 and simplify the result. If FN is non-NULL, call this callback on each
426 X, if it returns non-NULL, replace X with its return value and simplify the
427 result. */
428
429 rtx
430 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
431 rtx (*fn) (rtx, const_rtx, void *), void *data)
432 {
433 enum rtx_code code = GET_CODE (x);
434 enum machine_mode mode = GET_MODE (x);
435 enum machine_mode op_mode;
436 const char *fmt;
437 rtx op0, op1, op2, newx, op;
438 rtvec vec, newvec;
439 int i, j;
440
441 if (__builtin_expect (fn != NULL, 0))
442 {
443 newx = fn (x, old_rtx, data);
444 if (newx)
445 return newx;
446 }
447 else if (rtx_equal_p (x, old_rtx))
448 return copy_rtx ((rtx) data);
449
450 switch (GET_RTX_CLASS (code))
451 {
452 case RTX_UNARY:
453 op0 = XEXP (x, 0);
454 op_mode = GET_MODE (op0);
455 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
456 if (op0 == XEXP (x, 0))
457 return x;
458 return simplify_gen_unary (code, mode, op0, op_mode);
459
460 case RTX_BIN_ARITH:
461 case RTX_COMM_ARITH:
462 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
463 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
464 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
465 return x;
466 return simplify_gen_binary (code, mode, op0, op1);
467
468 case RTX_COMPARE:
469 case RTX_COMM_COMPARE:
470 op0 = XEXP (x, 0);
471 op1 = XEXP (x, 1);
472 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
473 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
474 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
475 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
476 return x;
477 return simplify_gen_relational (code, mode, op_mode, op0, op1);
478
479 case RTX_TERNARY:
480 case RTX_BITFIELD_OPS:
481 op0 = XEXP (x, 0);
482 op_mode = GET_MODE (op0);
483 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
484 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
485 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
486 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
487 return x;
488 if (op_mode == VOIDmode)
489 op_mode = GET_MODE (op0);
490 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
491
492 case RTX_EXTRA:
493 if (code == SUBREG)
494 {
495 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
496 if (op0 == SUBREG_REG (x))
497 return x;
498 op0 = simplify_gen_subreg (GET_MODE (x), op0,
499 GET_MODE (SUBREG_REG (x)),
500 SUBREG_BYTE (x));
501 return op0 ? op0 : x;
502 }
503 break;
504
505 case RTX_OBJ:
506 if (code == MEM)
507 {
508 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
509 if (op0 == XEXP (x, 0))
510 return x;
511 return replace_equiv_address_nv (x, op0);
512 }
513 else if (code == LO_SUM)
514 {
515 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
516 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
517
518 /* (lo_sum (high x) x) -> x */
519 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
520 return op1;
521
522 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
523 return x;
524 return gen_rtx_LO_SUM (mode, op0, op1);
525 }
526 break;
527
528 default:
529 break;
530 }
531
532 newx = x;
533 fmt = GET_RTX_FORMAT (code);
534 for (i = 0; fmt[i]; i++)
535 switch (fmt[i])
536 {
537 case 'E':
538 vec = XVEC (x, i);
539 newvec = XVEC (newx, i);
540 for (j = 0; j < GET_NUM_ELEM (vec); j++)
541 {
542 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
543 old_rtx, fn, data);
544 if (op != RTVEC_ELT (vec, j))
545 {
546 if (newvec == vec)
547 {
548 newvec = shallow_copy_rtvec (vec);
549 if (x == newx)
550 newx = shallow_copy_rtx (x);
551 XVEC (newx, i) = newvec;
552 }
553 RTVEC_ELT (newvec, j) = op;
554 }
555 }
556 break;
557
558 case 'e':
559 if (XEXP (x, i))
560 {
561 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
562 if (op != XEXP (x, i))
563 {
564 if (x == newx)
565 newx = shallow_copy_rtx (x);
566 XEXP (newx, i) = op;
567 }
568 }
569 break;
570 }
571 return newx;
572 }
573
574 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
575 resulting RTX. Return a new RTX which is as simplified as possible. */
576
577 rtx
578 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
579 {
580 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
581 }
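/* A typical use, where reg100 stands for an existing (reg:SI 100)
   rtx: given X == (plus:SI (reg:SI 100) (const_int 4)),
   simplify_replace_rtx (x, reg100, const0_rtx) substitutes the
   register and lets simplify_gen_binary fold the sum down to
   (const_int 4).  */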
582 \f
583 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
584 Only handle cases where the truncated value is inherently an rvalue.
585
586 RTL provides two ways of truncating a value:
587
588 1. a lowpart subreg. This form is only a truncation when both
589 the outer and inner modes (here MODE and OP_MODE respectively)
590 are scalar integers, and only then when the subreg is used as
591 an rvalue.
592
593 It is only valid to form such truncating subregs if the
594 truncation requires no action by the target. The onus for
595 proving this is on the creator of the subreg -- e.g. the
596 caller to simplify_subreg or simplify_gen_subreg -- and typically
597 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
598
599 2. a TRUNCATE. This form handles both scalar and compound integers.
600
601 The first form is preferred where valid. However, the TRUNCATE
602 handling in simplify_unary_operation turns the second form into the
603 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
604 so it is generally safe to form rvalue truncations using:
605
606 simplify_gen_unary (TRUNCATE, ...)
607
608 and leave simplify_unary_operation to work out which representation
609 should be used.
610
611 Because of the proof requirements on (1), simplify_truncation must
612 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
613 regardless of whether the outer truncation came from a SUBREG or a
614 TRUNCATE. For example, if the caller has proven that an SImode
615 truncation of:
616
617 (and:DI X Y)
618
619 is a no-op and can be represented as a subreg, it does not follow
620 that SImode truncations of X and Y are also no-ops. On a target
621 like 64-bit MIPS that requires SImode values to be stored in
622 sign-extended form, an SImode truncation of:
623
624 (and:DI (reg:DI X) (const_int 63))
625
626 is trivially a no-op because only the lower 6 bits can be set.
627 However, X is still an arbitrary 64-bit number and so we cannot
628 assume that truncating it too is a no-op. */
629
630 static rtx
631 simplify_truncation (enum machine_mode mode, rtx op,
632 enum machine_mode op_mode)
633 {
634 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
635 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
636 gcc_assert (precision <= op_precision);
637
638 /* Optimize truncations of zero and sign extended values. */
639 if (GET_CODE (op) == ZERO_EXTEND
640 || GET_CODE (op) == SIGN_EXTEND)
641 {
642 /* There are three possibilities. If MODE is the same as the
643 origmode, we can omit both the extension and the truncation.
644 If MODE is not larger than the origmode, we can apply the
645 truncation without the extension. Finally, if MODE
646 is larger than the origmode, we can just extend to the appropriate
647 mode. */
648 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
649 if (mode == origmode)
650 return XEXP (op, 0);
651 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
652 return simplify_gen_unary (TRUNCATE, mode,
653 XEXP (op, 0), origmode);
654 else
655 return simplify_gen_unary (GET_CODE (op), mode,
656 XEXP (op, 0), origmode);
657 }
658
659 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
660 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
661 if (GET_CODE (op) == PLUS
662 || GET_CODE (op) == MINUS
663 || GET_CODE (op) == MULT)
664 {
665 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
666 if (op0)
667 {
668 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
669 if (op1)
670 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
671 }
672 }
673
674 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
675 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op) == LSHIFTRT
678 || GET_CODE (op) == ASHIFTRT)
679 /* Ensure that OP_MODE is at least twice as wide as MODE
680 to avoid the possibility that an outer LSHIFTRT shifts by more
681 than the sign extension's sign_bit_copies and introduces zeros
682 into the high bits of the result. */
683 && 2 * precision <= op_precision
684 && CONST_INT_P (XEXP (op, 1))
685 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
686 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
687 && UINTVAL (XEXP (op, 1)) < precision)
688 return simplify_gen_binary (ASHIFTRT, mode,
689 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
690
691 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
692 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
693 the outer subreg is effectively a truncation to the original mode. */
694 if ((GET_CODE (op) == LSHIFTRT
695 || GET_CODE (op) == ASHIFTRT)
696 && CONST_INT_P (XEXP (op, 1))
697 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
698 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
699 && UINTVAL (XEXP (op, 1)) < precision)
700 return simplify_gen_binary (LSHIFTRT, mode,
701 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
702
703 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
704 (ashift:QI (x:QI) C), where C is a suitable small constant and
705 the outer subreg is effectively a truncation to the original mode. */
706 if (GET_CODE (op) == ASHIFT
707 && CONST_INT_P (XEXP (op, 1))
708 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
709 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
710 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
711 && UINTVAL (XEXP (op, 1)) < precision)
712 return simplify_gen_binary (ASHIFT, mode,
713 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
714
715 /* Recognize a word extraction from a multi-word subreg. */
716 if ((GET_CODE (op) == LSHIFTRT
717 || GET_CODE (op) == ASHIFTRT)
718 && SCALAR_INT_MODE_P (mode)
719 && SCALAR_INT_MODE_P (op_mode)
720 && precision >= BITS_PER_WORD
721 && 2 * precision <= op_precision
722 && CONST_INT_P (XEXP (op, 1))
723 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
724 && UINTVAL (XEXP (op, 1)) < op_precision)
725 {
726 int byte = subreg_lowpart_offset (mode, op_mode);
727 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
728 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
729 (WORDS_BIG_ENDIAN
730 ? byte - shifted_bytes
731 : byte + shifted_bytes));
732 }
733
734 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
735 and try replacing the TRUNCATE and shift with it. Don't do this
736 if the MEM has a mode-dependent address. */
737 if ((GET_CODE (op) == LSHIFTRT
738 || GET_CODE (op) == ASHIFTRT)
739 && SCALAR_INT_MODE_P (op_mode)
740 && MEM_P (XEXP (op, 0))
741 && CONST_INT_P (XEXP (op, 1))
742 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
743 && INTVAL (XEXP (op, 1)) > 0
744 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
745 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
746 MEM_ADDR_SPACE (XEXP (op, 0)))
747 && ! MEM_VOLATILE_P (XEXP (op, 0))
748 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
749 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
750 {
751 int byte = subreg_lowpart_offset (mode, op_mode);
752 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
753 return adjust_address_nv (XEXP (op, 0), mode,
754 (WORDS_BIG_ENDIAN
755 ? byte - shifted_bytes
756 : byte + shifted_bytes));
757 }
758
759 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
760 (OP:SI foo:SI) if OP is NEG or ABS. */
761 if ((GET_CODE (op) == ABS
762 || GET_CODE (op) == NEG)
763 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
764 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
765 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
766 return simplify_gen_unary (GET_CODE (op), mode,
767 XEXP (XEXP (op, 0), 0), mode);
768
769 /* (truncate:A (subreg:B (truncate:C X) 0)) is
770 (truncate:A X). */
771 if (GET_CODE (op) == SUBREG
772 && SCALAR_INT_MODE_P (mode)
773 && SCALAR_INT_MODE_P (op_mode)
774 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
775 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
776 && subreg_lowpart_p (op))
777 {
778 rtx inner = XEXP (SUBREG_REG (op), 0);
779 if (GET_MODE_PRECISION (mode)
780 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
781 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
782 else
783 /* If subreg above is paradoxical and C is narrower
784 than A, return (subreg:A (truncate:C X) 0). */
785 return simplify_gen_subreg (mode, SUBREG_REG (op),
786 GET_MODE (SUBREG_REG (op)), 0);
787 }
788
789 /* (truncate:A (truncate:B X)) is (truncate:A X). */
790 if (GET_CODE (op) == TRUNCATE)
791 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
792 GET_MODE (XEXP (op, 0)));
793
794 return NULL_RTX;
795 }
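/* For example, an SImode truncation of (zero_extend:DI (reg:SI X))
   folds back to (reg:SI X) via the extension case above, while an
   SImode truncation of (plus:DI A B) is distributed into
   (plus:SI (truncate:SI A) (truncate:SI B)) so that each operand
   gets another chance to simplify.  */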
796 \f
797 /* Try to simplify a unary operation CODE whose output mode is to be
798 MODE with input operand OP whose mode was originally OP_MODE.
799 Return zero if no simplification can be made. */
800 rtx
801 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
802 rtx op, enum machine_mode op_mode)
803 {
804 rtx trueop, tem;
805
806 trueop = avoid_constant_pool_reference (op);
807
808 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
809 if (tem)
810 return tem;
811
812 return simplify_unary_operation_1 (code, mode, op);
813 }
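/* For instance, simplify_unary_operation (NEG, SImode, const1_rtx,
   SImode) is folded by the constant case to (const_int -1), while
   applying NEG to a non-constant (neg:SI (reg:SI X)) falls through
   to simplify_unary_operation_1 and yields (reg:SI X).  */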
814
815 /* Perform some simplifications we can do even if the operands
816 aren't constant. */
817 static rtx
818 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
819 {
820 enum rtx_code reversed;
821 rtx temp;
822
823 switch (code)
824 {
825 case NOT:
826 /* (not (not X)) == X. */
827 if (GET_CODE (op) == NOT)
828 return XEXP (op, 0);
829
830 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
831 comparison is all ones. */
832 if (COMPARISON_P (op)
833 && (mode == BImode || STORE_FLAG_VALUE == -1)
834 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
835 return simplify_gen_relational (reversed, mode, VOIDmode,
836 XEXP (op, 0), XEXP (op, 1));
837
838 /* (not (plus X -1)) can become (neg X). */
839 if (GET_CODE (op) == PLUS
840 && XEXP (op, 1) == constm1_rtx)
841 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
842
843 /* Similarly, (not (neg X)) is (plus X -1). */
844 if (GET_CODE (op) == NEG)
845 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
846 CONSTM1_RTX (mode));
847
848 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
849 if (GET_CODE (op) == XOR
850 && CONST_INT_P (XEXP (op, 1))
851 && (temp = simplify_unary_operation (NOT, mode,
852 XEXP (op, 1), mode)) != 0)
853 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
854
855 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
856 if (GET_CODE (op) == PLUS
857 && CONST_INT_P (XEXP (op, 1))
858 && mode_signbit_p (mode, XEXP (op, 1))
859 && (temp = simplify_unary_operation (NOT, mode,
860 XEXP (op, 1), mode)) != 0)
861 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
862
863
864 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
865 operands other than 1, but that is not valid. We could do a
866 similar simplification for (not (lshiftrt C X)) where C is
867 just the sign bit, but this doesn't seem common enough to
868 bother with. */
869 if (GET_CODE (op) == ASHIFT
870 && XEXP (op, 0) == const1_rtx)
871 {
872 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
873 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
874 }
875
876 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
877 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
878 so we can perform the above simplification. */
879 if (STORE_FLAG_VALUE == -1
880 && GET_CODE (op) == ASHIFTRT
881 && CONST_INT_P (XEXP (op, 1))
882 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
883 return simplify_gen_relational (GE, mode, VOIDmode,
884 XEXP (op, 0), const0_rtx);
885
886
887 if (GET_CODE (op) == SUBREG
888 && subreg_lowpart_p (op)
889 && (GET_MODE_SIZE (GET_MODE (op))
890 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
891 && GET_CODE (SUBREG_REG (op)) == ASHIFT
892 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
893 {
894 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
895 rtx x;
896
897 x = gen_rtx_ROTATE (inner_mode,
898 simplify_gen_unary (NOT, inner_mode, const1_rtx,
899 inner_mode),
900 XEXP (SUBREG_REG (op), 1));
901 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
902 if (temp)
903 return temp;
904 }
905
906 /* Apply De Morgan's laws to reduce number of patterns for machines
907 with negating logical insns (and-not, nand, etc.). If result has
908 only one NOT, put it first, since that is how the patterns are
909 coded. */
910 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
911 {
912 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
913 enum machine_mode op_mode;
914
915 op_mode = GET_MODE (in1);
916 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
917
918 op_mode = GET_MODE (in2);
919 if (op_mode == VOIDmode)
920 op_mode = mode;
921 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
922
923 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
924 {
925 rtx tem = in2;
926 in2 = in1; in1 = tem;
927 }
928
929 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
930 mode, in1, in2);
931 }
932
933 /* (not (bswap x)) -> (bswap (not x)). */
934 if (GET_CODE (op) == BSWAP)
935 {
936 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
937 return simplify_gen_unary (BSWAP, mode, x, mode);
938 }
939 break;
940
941 case NEG:
942 /* (neg (neg X)) == X. */
943 if (GET_CODE (op) == NEG)
944 return XEXP (op, 0);
945
946 /* (neg (plus X 1)) can become (not X). */
947 if (GET_CODE (op) == PLUS
948 && XEXP (op, 1) == const1_rtx)
949 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
950
951 /* Similarly, (neg (not X)) is (plus X 1). */
952 if (GET_CODE (op) == NOT)
953 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
954 CONST1_RTX (mode));
955
956 /* (neg (minus X Y)) can become (minus Y X). This transformation
957 isn't safe for modes with signed zeros, since if X and Y are
958 both +0, (minus Y X) is +0 whereas (neg (minus X Y)) is -0. If the
959 rounding mode is towards +infinity (or -infinity) then the two
960 expressions will be rounded differently. */
961 if (GET_CODE (op) == MINUS
962 && !HONOR_SIGNED_ZEROS (mode)
963 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
964 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
965
966 if (GET_CODE (op) == PLUS
967 && !HONOR_SIGNED_ZEROS (mode)
968 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
969 {
970 /* (neg (plus A C)) is simplified to (minus -C A). */
971 if (CONST_SCALAR_INT_P (XEXP (op, 1))
972 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
973 {
974 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
975 if (temp)
976 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
977 }
978
979 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
980 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
981 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
982 }
983
984 /* (neg (mult A B)) becomes (mult A (neg B)).
985 This works even for floating-point values. */
986 if (GET_CODE (op) == MULT
987 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
988 {
989 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
990 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
991 }
992
993 /* NEG commutes with ASHIFT since it is multiplication. Only do
994 this if we can then eliminate the NEG (e.g., if the operand
995 is a constant). */
996 if (GET_CODE (op) == ASHIFT)
997 {
998 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
999 if (temp)
1000 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1001 }
1002
1003 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1004 C is equal to the width of MODE minus 1. */
1005 if (GET_CODE (op) == ASHIFTRT
1006 && CONST_INT_P (XEXP (op, 1))
1007 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1008 return simplify_gen_binary (LSHIFTRT, mode,
1009 XEXP (op, 0), XEXP (op, 1));
1010
1011 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1012 C is equal to the width of MODE minus 1. */
1013 if (GET_CODE (op) == LSHIFTRT
1014 && CONST_INT_P (XEXP (op, 1))
1015 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1016 return simplify_gen_binary (ASHIFTRT, mode,
1017 XEXP (op, 0), XEXP (op, 1));
1018
1019 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1020 if (GET_CODE (op) == XOR
1021 && XEXP (op, 1) == const1_rtx
1022 && nonzero_bits (XEXP (op, 0), mode) == 1)
1023 return plus_constant (mode, XEXP (op, 0), -1);
1024
1025 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1026 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1027 if (GET_CODE (op) == LT
1028 && XEXP (op, 1) == const0_rtx
1029 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1030 {
1031 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1032 int isize = GET_MODE_PRECISION (inner);
1033 if (STORE_FLAG_VALUE == 1)
1034 {
1035 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1036 GEN_INT (isize - 1));
1037 if (mode == inner)
1038 return temp;
1039 if (GET_MODE_PRECISION (mode) > isize)
1040 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1041 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1042 }
1043 else if (STORE_FLAG_VALUE == -1)
1044 {
1045 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1046 GEN_INT (isize - 1));
1047 if (mode == inner)
1048 return temp;
1049 if (GET_MODE_PRECISION (mode) > isize)
1050 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1051 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1052 }
1053 }
1054 break;
1055
1056 case TRUNCATE:
1057 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1058 with the umulXi3_highpart patterns. */
1059 if (GET_CODE (op) == LSHIFTRT
1060 && GET_CODE (XEXP (op, 0)) == MULT)
1061 break;
1062
1063 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1064 {
1065 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1066 {
1067 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1068 if (temp)
1069 return temp;
1070 }
1071 /* We can't handle truncation to a partial integer mode here
1072 because we don't know the real bitsize of the partial
1073 integer mode. */
1074 break;
1075 }
1076
1077 if (GET_MODE (op) != VOIDmode)
1078 {
1079 temp = simplify_truncation (mode, op, GET_MODE (op));
1080 if (temp)
1081 return temp;
1082 }
1083
1084 /* If we know that the value is already truncated, we can
1085 replace the TRUNCATE with a SUBREG. */
1086 if (GET_MODE_NUNITS (mode) == 1
1087 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1088 || truncated_to_mode (mode, op)))
1089 {
1090 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1091 if (temp)
1092 return temp;
1093 }
1094
1095 /* A truncate of a comparison can be replaced with a subreg if
1096 STORE_FLAG_VALUE permits. This is like the previous test,
1097 but it works even if the comparison is done in a mode larger
1098 than HOST_BITS_PER_WIDE_INT. */
1099 if (HWI_COMPUTABLE_MODE_P (mode)
1100 && COMPARISON_P (op)
1101 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1102 {
1103 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1104 if (temp)
1105 return temp;
1106 }
1107
1108 /* A truncate of a memory is just loading the low part of the memory
1109 if we are not changing the meaning of the address. */
1110 if (GET_CODE (op) == MEM
1111 && !VECTOR_MODE_P (mode)
1112 && !MEM_VOLATILE_P (op)
1113 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1114 {
1115 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1116 if (temp)
1117 return temp;
1118 }
1119
1120 break;
1121
1122 case FLOAT_TRUNCATE:
1123 if (DECIMAL_FLOAT_MODE_P (mode))
1124 break;
1125
1126 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1127 if (GET_CODE (op) == FLOAT_EXTEND
1128 && GET_MODE (XEXP (op, 0)) == mode)
1129 return XEXP (op, 0);
1130
1131 /* (float_truncate:SF (float_truncate:DF foo:XF))
1132 = (float_truncate:SF foo:XF).
1133 This may eliminate double rounding, so it is unsafe.
1134
1135 (float_truncate:SF (float_extend:XF foo:DF))
1136 = (float_truncate:SF foo:DF).
1137
1138 (float_truncate:DF (float_extend:XF foo:SF))
1139 = (float_extend:DF foo:SF). */
1140 if ((GET_CODE (op) == FLOAT_TRUNCATE
1141 && flag_unsafe_math_optimizations)
1142 || GET_CODE (op) == FLOAT_EXTEND)
1143 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1144 0)))
1145 > GET_MODE_SIZE (mode)
1146 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1147 mode,
1148 XEXP (op, 0), mode);
1149
1150 /* (float_truncate (float x)) is (float x) */
1151 if (GET_CODE (op) == FLOAT
1152 && (flag_unsafe_math_optimizations
1153 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1154 && ((unsigned)significand_size (GET_MODE (op))
1155 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1156 - num_sign_bit_copies (XEXP (op, 0),
1157 GET_MODE (XEXP (op, 0))))))))
1158 return simplify_gen_unary (FLOAT, mode,
1159 XEXP (op, 0),
1160 GET_MODE (XEXP (op, 0)));
1161
1162 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1163 (OP:SF foo:SF) if OP is NEG or ABS. */
1164 if ((GET_CODE (op) == ABS
1165 || GET_CODE (op) == NEG)
1166 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1167 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1168 return simplify_gen_unary (GET_CODE (op), mode,
1169 XEXP (XEXP (op, 0), 0), mode);
1170
1171 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1172 is (float_truncate:SF x). */
1173 if (GET_CODE (op) == SUBREG
1174 && subreg_lowpart_p (op)
1175 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1176 return SUBREG_REG (op);
1177 break;
1178
1179 case FLOAT_EXTEND:
1180 if (DECIMAL_FLOAT_MODE_P (mode))
1181 break;
1182
1183 /* (float_extend (float_extend x)) is (float_extend x)
1184
1185 (float_extend (float x)) is (float x) assuming that double
1186 rounding can't happen.
1187 */
1188 if (GET_CODE (op) == FLOAT_EXTEND
1189 || (GET_CODE (op) == FLOAT
1190 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1191 && ((unsigned)significand_size (GET_MODE (op))
1192 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1193 - num_sign_bit_copies (XEXP (op, 0),
1194 GET_MODE (XEXP (op, 0)))))))
1195 return simplify_gen_unary (GET_CODE (op), mode,
1196 XEXP (op, 0),
1197 GET_MODE (XEXP (op, 0)));
1198
1199 break;
1200
1201 case ABS:
1202 /* (abs (neg <foo>)) -> (abs <foo>) */
1203 if (GET_CODE (op) == NEG)
1204 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1205 GET_MODE (XEXP (op, 0)));
1206
1207 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1208 do nothing. */
1209 if (GET_MODE (op) == VOIDmode)
1210 break;
1211
1212 /* If operand is something known to be positive, ignore the ABS. */
1213 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1214 || val_signbit_known_clear_p (GET_MODE (op),
1215 nonzero_bits (op, GET_MODE (op))))
1216 return op;
1217
1218 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1219 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1220 return gen_rtx_NEG (mode, op);
1221
1222 break;
1223
1224 case FFS:
1225 /* (ffs (*_extend <X>)) = (ffs <X>) */
1226 if (GET_CODE (op) == SIGN_EXTEND
1227 || GET_CODE (op) == ZERO_EXTEND)
1228 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1229 GET_MODE (XEXP (op, 0)));
1230 break;
1231
1232 case POPCOUNT:
1233 switch (GET_CODE (op))
1234 {
1235 case BSWAP:
1236 case ZERO_EXTEND:
1237 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1238 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1239 GET_MODE (XEXP (op, 0)));
1240
1241 case ROTATE:
1242 case ROTATERT:
1243 /* Rotations don't affect popcount. */
1244 if (!side_effects_p (XEXP (op, 1)))
1245 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1246 GET_MODE (XEXP (op, 0)));
1247 break;
1248
1249 default:
1250 break;
1251 }
1252 break;
1253
1254 case PARITY:
1255 switch (GET_CODE (op))
1256 {
1257 case NOT:
1258 case BSWAP:
1259 case ZERO_EXTEND:
1260 case SIGN_EXTEND:
1261 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1262 GET_MODE (XEXP (op, 0)));
1263
1264 case ROTATE:
1265 case ROTATERT:
1266 /* Rotations don't affect parity. */
1267 if (!side_effects_p (XEXP (op, 1)))
1268 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1269 GET_MODE (XEXP (op, 0)));
1270 break;
1271
1272 default:
1273 break;
1274 }
1275 break;
1276
1277 case BSWAP:
1278 /* (bswap (bswap x)) -> x. */
1279 if (GET_CODE (op) == BSWAP)
1280 return XEXP (op, 0);
1281 break;
1282
1283 case FLOAT:
1284 /* (float (sign_extend <X>)) = (float <X>). */
1285 if (GET_CODE (op) == SIGN_EXTEND)
1286 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1287 GET_MODE (XEXP (op, 0)));
1288 break;
1289
1290 case SIGN_EXTEND:
1291 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1292 becomes just the MINUS if its mode is MODE. This allows
1293 folding switch statements on machines using casesi (such as
1294 the VAX). */
1295 if (GET_CODE (op) == TRUNCATE
1296 && GET_MODE (XEXP (op, 0)) == mode
1297 && GET_CODE (XEXP (op, 0)) == MINUS
1298 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1299 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1300 return XEXP (op, 0);
1301
1302 /* Extending a widening multiplication should be canonicalized to
1303 a wider widening multiplication. */
1304 if (GET_CODE (op) == MULT)
1305 {
1306 rtx lhs = XEXP (op, 0);
1307 rtx rhs = XEXP (op, 1);
1308 enum rtx_code lcode = GET_CODE (lhs);
1309 enum rtx_code rcode = GET_CODE (rhs);
1310
1311 /* Widening multiplies usually extend both operands, but sometimes
1312 they use a shift to extract a portion of a register. */
1313 if ((lcode == SIGN_EXTEND
1314 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1315 && (rcode == SIGN_EXTEND
1316 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1317 {
1318 enum machine_mode lmode = GET_MODE (lhs);
1319 enum machine_mode rmode = GET_MODE (rhs);
1320 int bits;
1321
1322 if (lcode == ASHIFTRT)
1323 /* Number of bits not shifted off the end. */
1324 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1325 else /* lcode == SIGN_EXTEND */
1326 /* Size of inner mode. */
1327 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1328
1329 if (rcode == ASHIFTRT)
1330 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1331 else /* rcode == SIGN_EXTEND */
1332 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1333
1334 /* We can only widen multiplies if the result is mathematically
1335 equivalent. I.e. if overflow was impossible. */
1336 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1337 return simplify_gen_binary
1338 (MULT, mode,
1339 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1340 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1341 }
1342 }
1343
1344 /* Check for a sign extension of a subreg of a promoted
1345 variable, where the promotion is sign-extended, and the
1346 target mode is the same as the variable's promotion. */
1347 if (GET_CODE (op) == SUBREG
1348 && SUBREG_PROMOTED_VAR_P (op)
1349 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1350 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1351 {
1352 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1353 if (temp)
1354 return temp;
1355 }
1356
1357 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1358 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1359 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1360 {
1361 gcc_assert (GET_MODE_BITSIZE (mode)
1362 > GET_MODE_BITSIZE (GET_MODE (op)));
1363 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1364 GET_MODE (XEXP (op, 0)));
1365 }
1366
1367 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1368 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1369 GET_MODE_BITSIZE (N) - I bits.
1370 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1371 is similarly (zero_extend:M (subreg:O <X>)). */
1372 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1373 && GET_CODE (XEXP (op, 0)) == ASHIFT
1374 && CONST_INT_P (XEXP (op, 1))
1375 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1376 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1377 {
1378 enum machine_mode tmode
1379 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1380 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1381 gcc_assert (GET_MODE_BITSIZE (mode)
1382 > GET_MODE_BITSIZE (GET_MODE (op)));
1383 if (tmode != BLKmode)
1384 {
1385 rtx inner =
1386 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1387 if (inner)
1388 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1389 ? SIGN_EXTEND : ZERO_EXTEND,
1390 mode, inner, tmode);
1391 }
1392 }
1393
1394 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1395 /* As we do not know which address space the pointer is referring to,
1396 we can do this only if the target does not support different pointer
1397 or address modes depending on the address space. */
1398 if (target_default_pointer_address_modes_p ()
1399 && ! POINTERS_EXTEND_UNSIGNED
1400 && mode == Pmode && GET_MODE (op) == ptr_mode
1401 && (CONSTANT_P (op)
1402 || (GET_CODE (op) == SUBREG
1403 && REG_P (SUBREG_REG (op))
1404 && REG_POINTER (SUBREG_REG (op))
1405 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1406 return convert_memory_address (Pmode, op);
1407 #endif
1408 break;
1409
1410 case ZERO_EXTEND:
1411 /* Check for a zero extension of a subreg of a promoted
1412 variable, where the promotion is zero-extended, and the
1413 target mode is the same as the variable's promotion. */
1414 if (GET_CODE (op) == SUBREG
1415 && SUBREG_PROMOTED_VAR_P (op)
1416 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1417 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1418 {
1419 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1420 if (temp)
1421 return temp;
1422 }
1423
1424 /* Extending a widening multiplication should be canonicalized to
1425 a wider widening multiplication. */
1426 if (GET_CODE (op) == MULT)
1427 {
1428 rtx lhs = XEXP (op, 0);
1429 rtx rhs = XEXP (op, 1);
1430 enum rtx_code lcode = GET_CODE (lhs);
1431 enum rtx_code rcode = GET_CODE (rhs);
1432
1433 /* Widening multiplies usually extend both operands, but sometimes
1434 they use a shift to extract a portion of a register. */
1435 if ((lcode == ZERO_EXTEND
1436 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1437 && (rcode == ZERO_EXTEND
1438 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1439 {
1440 enum machine_mode lmode = GET_MODE (lhs);
1441 enum machine_mode rmode = GET_MODE (rhs);
1442 int bits;
1443
1444 if (lcode == LSHIFTRT)
1445 /* Number of bits not shifted off the end. */
1446 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1447 else /* lcode == ZERO_EXTEND */
1448 /* Size of inner mode. */
1449 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1450
1451 if (rcode == LSHIFTRT)
1452 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1453 else /* rcode == ZERO_EXTEND */
1454 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1455
1456 /* We can only widen multiplies if the result is mathematically
1457 equivalent. I.e. if overflow was impossible. */
1458 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1459 return simplify_gen_binary
1460 (MULT, mode,
1461 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1462 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1463 }
1464 }
1465
1466 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1467 if (GET_CODE (op) == ZERO_EXTEND)
1468 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1469 GET_MODE (XEXP (op, 0)));
1470
1471 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1472 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1473 GET_MODE_BITSIZE (N) - I bits. */
1474 if (GET_CODE (op) == LSHIFTRT
1475 && GET_CODE (XEXP (op, 0)) == ASHIFT
1476 && CONST_INT_P (XEXP (op, 1))
1477 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1478 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1479 {
1480 enum machine_mode tmode
1481 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1482 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1483 if (tmode != BLKmode)
1484 {
1485 rtx inner =
1486 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1487 if (inner)
1488 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1489 }
1490 }
1491
1492 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1493 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1494 of mode N. E.g.
1495 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1496 (and:SI (reg:SI) (const_int 63)). */
1497 if (GET_CODE (op) == SUBREG
1498 && GET_MODE_PRECISION (GET_MODE (op))
1499 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1500 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1501 <= HOST_BITS_PER_WIDE_INT
1502 && GET_MODE_PRECISION (mode)
1503 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1504 && subreg_lowpart_p (op)
1505 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1506 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1507 {
1508 if (GET_MODE_PRECISION (mode)
1509 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1510 return SUBREG_REG (op);
1511 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1512 GET_MODE (SUBREG_REG (op)));
1513 }
1514
1515 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1516 /* As we do not know which address space the pointer is referring to,
1517 we can do this only if the target does not support different pointer
1518 or address modes depending on the address space. */
1519 if (target_default_pointer_address_modes_p ()
1520 && POINTERS_EXTEND_UNSIGNED > 0
1521 && mode == Pmode && GET_MODE (op) == ptr_mode
1522 && (CONSTANT_P (op)
1523 || (GET_CODE (op) == SUBREG
1524 && REG_P (SUBREG_REG (op))
1525 && REG_POINTER (SUBREG_REG (op))
1526 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1527 return convert_memory_address (Pmode, op);
1528 #endif
1529 break;
1530
1531 default:
1532 break;
1533 }
1534
1535 return 0;
1536 }
1537
1538 /* Try to compute the value of a unary operation CODE whose output mode is to
1539 be MODE with input operand OP whose mode was originally OP_MODE.
1540 Return zero if the value cannot be computed. */
1541 rtx
1542 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1543 rtx op, enum machine_mode op_mode)
1544 {
1545 unsigned int width = GET_MODE_PRECISION (mode);
1546
1547 if (code == VEC_DUPLICATE)
1548 {
1549 gcc_assert (VECTOR_MODE_P (mode));
1550 if (GET_MODE (op) != VOIDmode)
1551 {
1552 if (!VECTOR_MODE_P (GET_MODE (op)))
1553 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1554 else
1555 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1556 (GET_MODE (op)));
1557 }
1558 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1559 || GET_CODE (op) == CONST_VECTOR)
1560 {
1561 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1562 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1563 rtvec v = rtvec_alloc (n_elts);
1564 unsigned int i;
1565
1566 if (GET_CODE (op) != CONST_VECTOR)
1567 for (i = 0; i < n_elts; i++)
1568 RTVEC_ELT (v, i) = op;
1569 else
1570 {
1571 enum machine_mode inmode = GET_MODE (op);
1572 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1573 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1574
1575 gcc_assert (in_n_elts < n_elts);
1576 gcc_assert ((n_elts % in_n_elts) == 0);
1577 for (i = 0; i < n_elts; i++)
1578 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1579 }
1580 return gen_rtx_CONST_VECTOR (mode, v);
1581 }
1582 }
1583
1584 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1585 {
1586 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1587 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1588 enum machine_mode opmode = GET_MODE (op);
1589 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1590 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1591 rtvec v = rtvec_alloc (n_elts);
1592 unsigned int i;
1593
1594 gcc_assert (op_n_elts == n_elts);
1595 for (i = 0; i < n_elts; i++)
1596 {
1597 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1598 CONST_VECTOR_ELT (op, i),
1599 GET_MODE_INNER (opmode));
1600 if (!x)
1601 return 0;
1602 RTVEC_ELT (v, i) = x;
1603 }
1604 return gen_rtx_CONST_VECTOR (mode, v);
1605 }
1606
1607 /* The order of these tests is critical so that, for example, we don't
1608 check the wrong mode (input vs. output) for a conversion operation,
1609 such as FIX. At some point, this should be simplified. */
1610
1611 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1612 {
1613 REAL_VALUE_TYPE d;
1614
1615 if (op_mode == VOIDmode)
1616 {
1617 /* CONST_INTs have VOIDmode as their mode. We assume that all
1618 the bits of the constant are significant, though this is
1619 a dangerous assumption, as CONST_INTs are often
1620 created and used with garbage in the bits outside of the
1621 precision of the implied mode of the const_int. */
1622 op_mode = mode_for_size (MAX_BITSIZE_MODE_ANY_INT, MODE_INT, 0);
1623 }
1624
1625 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1626 d = real_value_truncate (mode, d);
1627 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1628 }
1629 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1630 {
1631 REAL_VALUE_TYPE d;
1632
1633 if (op_mode == VOIDmode)
1634 {
1635 /* CONST_INTs have VOIDmode as their mode. We assume that all
1636 the bits of the constant are significant, though this is
1637 a dangerous assumption, as CONST_INTs are often
1638 created and used with garbage in the bits outside of the
1639 precision of the implied mode of the const_int. */
1640 op_mode = mode_for_size (MAX_BITSIZE_MODE_ANY_INT, MODE_INT, 0);
1641 }
1642
1643 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1644 d = real_value_truncate (mode, d);
1645 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1646 }
1647
1648 if (CONST_SCALAR_INT_P (op) && width > 0)
1649 {
1650 wide_int result;
1651 enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1652 rtx_mode_t op0 = std::make_pair (op, imode);
1653
1654 #if TARGET_SUPPORTS_WIDE_INT == 0
1655 /* This assert keeps the simplification from producing a result
1656 that cannot be represented in a CONST_DOUBLE but a lot of
1657 upstream callers expect that this function never fails to
1658 simplify something, so if you added this to the test
1659 above, the code would die later anyway. If this assert
1660 fires, you just need to make the port support wide int. */
1661 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1662 #endif
1663
1664 switch (code)
1665 {
1666 case NOT:
1667 result = wi::bit_not (op0);
1668 break;
1669
1670 case NEG:
1671 result = wi::neg (op0);
1672 break;
1673
1674 case ABS:
1675 result = wi::abs (op0);
1676 break;
1677
1678 case FFS:
1679 result = wi::shwi (wi::ffs (op0), mode);
1680 break;
1681
1682 case CLZ:
1683 result = wi::shwi (wi::clz (op0), mode);
1684 break;
1685
1686 case CLRSB:
1687 result = wi::shwi (wi::clrsb (op0), mode);
1688 break;
1689
1690 case CTZ:
1691 result = wi::shwi (wi::ctz (op0), mode);
1692 break;
1693
1694 case POPCOUNT:
1695 result = wi::shwi (wi::popcount (op0), mode);
1696 break;
1697
1698 case PARITY:
1699 result = wi::shwi (wi::parity (op0), mode);
1700 break;
1701
1702 case BSWAP:
1703 result = wide_int (op0).bswap ();
1704 break;
1705
1706 case TRUNCATE:
1707 case ZERO_EXTEND:
1708 result = wide_int::from (op0, width, UNSIGNED);
1709 break;
1710
1711 case SIGN_EXTEND:
1712 result = wide_int::from (op0, width, SIGNED);
1713 break;
1714
1715 case SQRT:
1716 default:
1717 return 0;
1718 }
1719
1720 return immed_wide_int_const (result, mode);
1721 }
1722
1723 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1724 && SCALAR_FLOAT_MODE_P (mode)
1725 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1726 {
1727 REAL_VALUE_TYPE d, t;
1728 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1729
1730 switch (code)
1731 {
1732 case SQRT:
1733 if (HONOR_SNANS (mode) && real_isnan (&d))
1734 return 0;
1735 real_sqrt (&t, mode, &d);
1736 d = t;
1737 break;
1738 case ABS:
1739 d = real_value_abs (&d);
1740 break;
1741 case NEG:
1742 d = real_value_negate (&d);
1743 break;
1744 case FLOAT_TRUNCATE:
1745 d = real_value_truncate (mode, d);
1746 break;
1747 case FLOAT_EXTEND:
1748 /* All this does is change the mode, unless we are
1749 changing the mode class.  */
1750 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1751 real_convert (&d, mode, &d);
1752 break;
1753 case FIX:
1754 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1755 break;
1756 case NOT:
1757 {
1758 long tmp[4];
1759 int i;
1760
1761 real_to_target (tmp, &d, GET_MODE (op));
1762 for (i = 0; i < 4; i++)
1763 tmp[i] = ~tmp[i];
1764 real_from_target (&d, tmp, mode);
1765 break;
1766 }
1767 default:
1768 gcc_unreachable ();
1769 }
1770 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1771 }
1772 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1773 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1774 && GET_MODE_CLASS (mode) == MODE_INT
1775 && width > 0)
1776 {
1777 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1778 operators are intentionally left unspecified (to ease implementation
1779 by target backends), for consistency, this routine implements the
1780 same semantics for constant folding as used by the middle-end. */
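/* For example, with a 32-bit signed result the code below folds a NaN
   operand to 0 and saturates an out-of-range value such as 1.0e10 to the
   upper bound 2147483647.  */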
1781
1782 /* This was formerly used only for non-IEEE float.
1783 eggert@twinsun.com says it is safe for IEEE also. */
1784 REAL_VALUE_TYPE x, t;
1785 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1786 wide_int wmax, wmin;
1787 /* FAIL is part of the interface to real_to_integer, but we check
1788 things before making this call, so it is never examined here.  */
1789 bool fail;
1790
1791 switch (code)
1792 {
1793 case FIX:
1794 if (REAL_VALUE_ISNAN (x))
1795 return const0_rtx;
1796
1797 /* Test against the signed upper bound. */
1798 wmax = wi::max_value (width, SIGNED);
1799 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1800 if (REAL_VALUES_LESS (t, x))
1801 return immed_wide_int_const (wmax, mode);
1802
1803 /* Test against the signed lower bound. */
1804 wmin = wi::min_value (width, SIGNED);
1805 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1806 if (REAL_VALUES_LESS (x, t))
1807 return immed_wide_int_const (wmin, mode);
1808
1809 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1810 break;
1811
1812 case UNSIGNED_FIX:
1813 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1814 return const0_rtx;
1815
1816 /* Test against the unsigned upper bound. */
1817 wmax = wi::max_value (width, UNSIGNED);
1818 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1819 if (REAL_VALUES_LESS (t, x))
1820 return immed_wide_int_const (wmax, mode);
1821
1822 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1823 break;
1824
1825 default:
1826 gcc_unreachable ();
1827 }
1828 }
1829
1830 return NULL_RTX;
1831 }
1832 \f
1833 /* Subroutine of simplify_binary_operation to simplify a binary operation
1834 CODE that can commute with byte swapping, with result mode MODE and
1835 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1836 Return zero if no simplification or canonicalization is possible. */
1837
1838 static rtx
1839 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
1840 rtx op0, rtx op1)
1841 {
1842 rtx tem;
1843
1844 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
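/* E.g. in SImode, (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))), since the byte-swap of 0xff
   is 0xff000000.  */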
1845 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1846 {
1847 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1848 simplify_gen_unary (BSWAP, mode, op1, mode));
1849 return simplify_gen_unary (BSWAP, mode, tem, mode);
1850 }
1851
1852 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1853 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1854 {
1855 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1856 return simplify_gen_unary (BSWAP, mode, tem, mode);
1857 }
1858
1859 return NULL_RTX;
1860 }
1861
1862 /* Subroutine of simplify_binary_operation to simplify a commutative,
1863 associative binary operation CODE with result mode MODE, operating
1864 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1865 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1866 canonicalization is possible. */
1867
1868 static rtx
1869 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1870 rtx op0, rtx op1)
1871 {
1872 rtx tem;
1873
1874 /* Linearize the operator to the left. */
1875 if (GET_CODE (op1) == code)
1876 {
1877 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1878 if (GET_CODE (op0) == code)
1879 {
1880 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1881 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1882 }
1883
1884 /* "a op (b op c)" becomes "(b op c) op a". */
1885 if (! swap_commutative_operands_p (op1, op0))
1886 return simplify_gen_binary (code, mode, op1, op0);
1887
1888 tem = op0;
1889 op0 = op1;
1890 op1 = tem;
1891 }
1892
1893 if (GET_CODE (op0) == code)
1894 {
1895 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1896 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1897 {
1898 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1899 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1900 }
1901
1902 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1903 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1904 if (tem != 0)
1905 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1906
1907 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1908 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1909 if (tem != 0)
1910 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1911 }
1912
1913 return 0;
1914 }
1915
1916
1917 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1918 and OP1. Return 0 if no simplification is possible.
1919
1920 Don't use this for relational operations such as EQ or LT.
1921 Use simplify_relational_operation instead. */
1922 rtx
1923 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1924 rtx op0, rtx op1)
1925 {
1926 rtx trueop0, trueop1;
1927 rtx tem;
1928
1929 /* Relational operations don't work here. We must know the mode
1930 of the operands in order to do the comparison correctly.
1931 Assuming a full word can give incorrect results.
1932 Consider comparing 128 with -128 in QImode. */
1933 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1934 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1935
1936 /* Make sure the constant is second. */
1937 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1938 && swap_commutative_operands_p (op0, op1))
1939 {
1940 tem = op0, op0 = op1, op1 = tem;
1941 }
1942
1943 trueop0 = avoid_constant_pool_reference (op0);
1944 trueop1 = avoid_constant_pool_reference (op1);
1945
1946 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1947 if (tem)
1948 return tem;
1949 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1950 }
1951
1952 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1953 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1954 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1955 actual constants. */
1956
1957 static rtx
1958 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1959 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1960 {
1961 rtx tem, reversed, opleft, opright;
1962 HOST_WIDE_INT val;
1963 unsigned int width = GET_MODE_PRECISION (mode);
1964
1965 /* Even if we can't compute a constant result,
1966 there are some cases worth simplifying. */
1967
1968 switch (code)
1969 {
1970 case PLUS:
1971 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1972 when x is NaN, infinite, or finite and nonzero. They aren't
1973 when x is -0 and the rounding mode is not towards -infinity,
1974 since (-0) + 0 is then 0. */
1975 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1976 return op0;
1977
1978 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1979 transformations are safe even for IEEE. */
1980 if (GET_CODE (op0) == NEG)
1981 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1982 else if (GET_CODE (op1) == NEG)
1983 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1984
1985 /* (~a) + 1 -> -a */
1986 if (INTEGRAL_MODE_P (mode)
1987 && GET_CODE (op0) == NOT
1988 && trueop1 == const1_rtx)
1989 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1990
1991 /* Handle both-operands-constant cases. We can only add
1992 CONST_INTs to constants since the sum of relocatable symbols
1993 can't be handled by most assemblers. Don't add CONST_INT
1994 to CONST_INT since overflow won't be computed properly if wider
1995 than HOST_BITS_PER_WIDE_INT. */
1996
1997 if ((GET_CODE (op0) == CONST
1998 || GET_CODE (op0) == SYMBOL_REF
1999 || GET_CODE (op0) == LABEL_REF)
2000 && CONST_INT_P (op1))
2001 return plus_constant (mode, op0, INTVAL (op1));
2002 else if ((GET_CODE (op1) == CONST
2003 || GET_CODE (op1) == SYMBOL_REF
2004 || GET_CODE (op1) == LABEL_REF)
2005 && CONST_INT_P (op0))
2006 return plus_constant (mode, op1, INTVAL (op0));
2007
2008 /* See if this is something like X * C - X or vice versa or
2009 if the multiplication is written as a shift. If so, we can
2010 distribute and make a new multiply, shift, or maybe just
2011 have X (if C is 2 in the example above). But don't make
2012 something more expensive than we had before. */
2013
2014 if (SCALAR_INT_MODE_P (mode))
2015 {
2016 rtx lhs = op0, rhs = op1;
2017
2018 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2019 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2020
2021 if (GET_CODE (lhs) == NEG)
2022 {
2023 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2024 lhs = XEXP (lhs, 0);
2025 }
2026 else if (GET_CODE (lhs) == MULT
2027 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2028 {
2029 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2030 lhs = XEXP (lhs, 0);
2031 }
2032 else if (GET_CODE (lhs) == ASHIFT
2033 && CONST_INT_P (XEXP (lhs, 1))
2034 && INTVAL (XEXP (lhs, 1)) >= 0
2035 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2036 {
2037 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2038 GET_MODE_PRECISION (mode));
2039 lhs = XEXP (lhs, 0);
2040 }
2041
2042 if (GET_CODE (rhs) == NEG)
2043 {
2044 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2045 rhs = XEXP (rhs, 0);
2046 }
2047 else if (GET_CODE (rhs) == MULT
2048 && CONST_INT_P (XEXP (rhs, 1)))
2049 {
2050 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2051 rhs = XEXP (rhs, 0);
2052 }
2053 else if (GET_CODE (rhs) == ASHIFT
2054 && CONST_INT_P (XEXP (rhs, 1))
2055 && INTVAL (XEXP (rhs, 1)) >= 0
2056 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2057 {
2058 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2059 GET_MODE_PRECISION (mode));
2060 rhs = XEXP (rhs, 0);
2061 }
2062
2063 if (rtx_equal_p (lhs, rhs))
2064 {
2065 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2066 rtx coeff;
2067 bool speed = optimize_function_for_speed_p (cfun);
2068
2069 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2070
2071 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2072 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2073 ? tem : 0;
2074 }
2075 }
2076
2077 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
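/* Adding the sign-bit constant can only flip the sign bit (any carry
   falls off the top), so it is equivalent to XORing it in; e.g. in
   QImode, C1 = 0x7f and C2 = 0x80 combine to C1^C2 = 0xff.  */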
2078 if (CONST_SCALAR_INT_P (op1)
2079 && GET_CODE (op0) == XOR
2080 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2081 && mode_signbit_p (mode, op1))
2082 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2083 simplify_gen_binary (XOR, mode, op1,
2084 XEXP (op0, 1)));
2085
2086 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2087 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2088 && GET_CODE (op0) == MULT
2089 && GET_CODE (XEXP (op0, 0)) == NEG)
2090 {
2091 rtx in1, in2;
2092
2093 in1 = XEXP (XEXP (op0, 0), 0);
2094 in2 = XEXP (op0, 1);
2095 return simplify_gen_binary (MINUS, mode, op1,
2096 simplify_gen_binary (MULT, mode,
2097 in1, in2));
2098 }
2099
2100 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2101 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2102 is 1. */
2103 if (COMPARISON_P (op0)
2104 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2105 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2106 && (reversed = reversed_comparison (op0, mode)))
2107 return
2108 simplify_gen_unary (NEG, mode, reversed, mode);
2109
2110 /* If one of the operands is a PLUS or a MINUS, see if we can
2111 simplify this by the associative law.
2112 Don't use the associative law for floating point.
2113 The inaccuracy makes it nonassociative,
2114 and subtle programs can break if operations are associated. */
2115
2116 if (INTEGRAL_MODE_P (mode)
2117 && (plus_minus_operand_p (op0)
2118 || plus_minus_operand_p (op1))
2119 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2120 return tem;
2121
2122 /* Reassociate floating point addition only when the user
2123 specifies associative math operations. */
2124 if (FLOAT_MODE_P (mode)
2125 && flag_associative_math)
2126 {
2127 tem = simplify_associative_operation (code, mode, op0, op1);
2128 if (tem)
2129 return tem;
2130 }
2131 break;
2132
2133 case COMPARE:
2134 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2135 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2136 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2137 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2138 {
2139 rtx xop00 = XEXP (op0, 0);
2140 rtx xop10 = XEXP (op1, 0);
2141
2142 #ifdef HAVE_cc0
2143 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2144 #else
2145 if (REG_P (xop00) && REG_P (xop10)
2146 && GET_MODE (xop00) == GET_MODE (xop10)
2147 && REGNO (xop00) == REGNO (xop10)
2148 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2149 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2150 #endif
2151 return xop00;
2152 }
2153 break;
2154
2155 case MINUS:
2156 /* We can't assume x-x is 0 even with non-IEEE floating point,
2157 but since it is zero except in very strange circumstances, we
2158 will treat it as zero with -ffinite-math-only. */
2159 if (rtx_equal_p (trueop0, trueop1)
2160 && ! side_effects_p (op0)
2161 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2162 return CONST0_RTX (mode);
2163
2164 /* Change subtraction from zero into negation. (0 - x) is the
2165 same as -x when x is NaN, infinite, or finite and nonzero.
2166 But if the mode has signed zeros, and does not round towards
2167 -infinity, then 0 - 0 is 0, not -0. */
2168 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2169 return simplify_gen_unary (NEG, mode, op1, mode);
2170
2171 /* (-1 - a) is ~a. */
2172 if (trueop0 == constm1_rtx)
2173 return simplify_gen_unary (NOT, mode, op1, mode);
2174
2175 /* Subtracting 0 has no effect unless the mode has signed zeros
2176 and supports rounding towards -infinity. In such a case,
2177 0 - 0 is -0. */
2178 if (!(HONOR_SIGNED_ZEROS (mode)
2179 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2180 && trueop1 == CONST0_RTX (mode))
2181 return op0;
2182
2183 /* See if this is something like X * C - X or vice versa or
2184 if the multiplication is written as a shift. If so, we can
2185 distribute and make a new multiply, shift, or maybe just
2186 have X (if C is 2 in the example above). But don't make
2187 something more expensive than we had before. */
2188
2189 if (SCALAR_INT_MODE_P (mode))
2190 {
2191 rtx lhs = op0, rhs = op1;
2192
2193 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2194 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2195
2196 if (GET_CODE (lhs) == NEG)
2197 {
2198 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2199 lhs = XEXP (lhs, 0);
2200 }
2201 else if (GET_CODE (lhs) == MULT
2202 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2203 {
2204 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2205 lhs = XEXP (lhs, 0);
2206 }
2207 else if (GET_CODE (lhs) == ASHIFT
2208 && CONST_INT_P (XEXP (lhs, 1))
2209 && INTVAL (XEXP (lhs, 1)) >= 0
2210 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2211 {
2212 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2213 GET_MODE_PRECISION (mode));
2214 lhs = XEXP (lhs, 0);
2215 }
2216
2217 if (GET_CODE (rhs) == NEG)
2218 {
2219 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2220 rhs = XEXP (rhs, 0);
2221 }
2222 else if (GET_CODE (rhs) == MULT
2223 && CONST_INT_P (XEXP (rhs, 1)))
2224 {
2225 negcoeff1 = -wide_int (std::make_pair (XEXP (rhs, 1), mode));
2226 rhs = XEXP (rhs, 0);
2227 }
2228 else if (GET_CODE (rhs) == ASHIFT
2229 && CONST_INT_P (XEXP (rhs, 1))
2230 && INTVAL (XEXP (rhs, 1)) >= 0
2231 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2232 {
2233 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2234 GET_MODE_PRECISION (mode));
2235 negcoeff1 = -negcoeff1;
2236 rhs = XEXP (rhs, 0);
2237 }
2238
2239 if (rtx_equal_p (lhs, rhs))
2240 {
2241 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2242 rtx coeff;
2243 bool speed = optimize_function_for_speed_p (cfun);
2244
2245 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2246
2247 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2248 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2249 ? tem : 0;
2250 }
2251 }
2252
2253 /* (a - (-b)) -> (a + b). True even for IEEE. */
2254 if (GET_CODE (op1) == NEG)
2255 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2256
2257 /* (-x - c) may be simplified as (-c - x). */
2258 if (GET_CODE (op0) == NEG
2259 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2260 {
2261 tem = simplify_unary_operation (NEG, mode, op1, mode);
2262 if (tem)
2263 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2264 }
2265
2266 /* Don't let a relocatable value get a negative coeff. */
2267 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2268 return simplify_gen_binary (PLUS, mode,
2269 op0,
2270 neg_const_int (mode, op1));
2271
2272 /* (x - (x & y)) -> (x & ~y) */
2273 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2274 {
2275 if (rtx_equal_p (op0, XEXP (op1, 0)))
2276 {
2277 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2278 GET_MODE (XEXP (op1, 1)));
2279 return simplify_gen_binary (AND, mode, op0, tem);
2280 }
2281 if (rtx_equal_p (op0, XEXP (op1, 1)))
2282 {
2283 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2284 GET_MODE (XEXP (op1, 0)));
2285 return simplify_gen_binary (AND, mode, op0, tem);
2286 }
2287 }
2288
2289 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2290 by reversing the comparison code if valid. */
2291 if (STORE_FLAG_VALUE == 1
2292 && trueop0 == const1_rtx
2293 && COMPARISON_P (op1)
2294 && (reversed = reversed_comparison (op1, mode)))
2295 return reversed;
2296
2297 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2298 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2299 && GET_CODE (op1) == MULT
2300 && GET_CODE (XEXP (op1, 0)) == NEG)
2301 {
2302 rtx in1, in2;
2303
2304 in1 = XEXP (XEXP (op1, 0), 0);
2305 in2 = XEXP (op1, 1);
2306 return simplify_gen_binary (PLUS, mode,
2307 simplify_gen_binary (MULT, mode,
2308 in1, in2),
2309 op0);
2310 }
2311
2312 /* Canonicalize (minus (neg A) (mult B C)) to
2313 (minus (mult (neg B) C) A). */
2314 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2315 && GET_CODE (op1) == MULT
2316 && GET_CODE (op0) == NEG)
2317 {
2318 rtx in1, in2;
2319
2320 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2321 in2 = XEXP (op1, 1);
2322 return simplify_gen_binary (MINUS, mode,
2323 simplify_gen_binary (MULT, mode,
2324 in1, in2),
2325 XEXP (op0, 0));
2326 }
2327
2328 /* If one of the operands is a PLUS or a MINUS, see if we can
2329 simplify this by the associative law. This will, for example,
2330 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2331 Don't use the associative law for floating point.
2332 The inaccuracy makes it nonassociative,
2333 and subtle programs can break if operations are associated. */
2334
2335 if (INTEGRAL_MODE_P (mode)
2336 && (plus_minus_operand_p (op0)
2337 || plus_minus_operand_p (op1))
2338 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2339 return tem;
2340 break;
2341
2342 case MULT:
2343 if (trueop1 == constm1_rtx)
2344 return simplify_gen_unary (NEG, mode, op0, mode);
2345
2346 if (GET_CODE (op0) == NEG)
2347 {
2348 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2349 /* If op1 is a MULT as well and simplify_unary_operation
2350 just moved the NEG to the second operand, simplify_gen_binary
2351 below could, through simplify_associative_operation, move
2352 the NEG around again and recurse endlessly.  */
2353 if (temp
2354 && GET_CODE (op1) == MULT
2355 && GET_CODE (temp) == MULT
2356 && XEXP (op1, 0) == XEXP (temp, 0)
2357 && GET_CODE (XEXP (temp, 1)) == NEG
2358 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2359 temp = NULL_RTX;
2360 if (temp)
2361 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2362 }
2363 if (GET_CODE (op1) == NEG)
2364 {
2365 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2366 /* If op0 is a MULT as well and simplify_unary_operation
2367 just moved the NEG to the second operand, simplify_gen_binary
2368 below could, through simplify_associative_operation, move
2369 the NEG around again and recurse endlessly.  */
2370 if (temp
2371 && GET_CODE (op0) == MULT
2372 && GET_CODE (temp) == MULT
2373 && XEXP (op0, 0) == XEXP (temp, 0)
2374 && GET_CODE (XEXP (temp, 1)) == NEG
2375 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2376 temp = NULL_RTX;
2377 if (temp)
2378 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2379 }
2380
2381 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2382 x is NaN, since x * 0 is then also NaN. Nor is it valid
2383 when the mode has signed zeros, since multiplying a negative
2384 number by 0 will give -0, not 0. */
2385 if (!HONOR_NANS (mode)
2386 && !HONOR_SIGNED_ZEROS (mode)
2387 && trueop1 == CONST0_RTX (mode)
2388 && ! side_effects_p (op0))
2389 return op1;
2390
2391 /* In IEEE floating point, x*1 is not equivalent to x for
2392 signalling NaNs. */
2393 if (!HONOR_SNANS (mode)
2394 && trueop1 == CONST1_RTX (mode))
2395 return op0;
2396
2397 /* Convert multiply by constant power of two into shift. */
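/* E.g. (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3)).  */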
2398 if (CONST_SCALAR_INT_P (trueop1))
2399 {
2400 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2401 if (val >= 0 && val < GET_MODE_BITSIZE (mode))
2402 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2403 }
2404
2405 /* x*2 is x+x and x*(-1) is -x */
2406 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2407 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2408 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2409 && GET_MODE (op0) == mode)
2410 {
2411 REAL_VALUE_TYPE d;
2412 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2413
2414 if (REAL_VALUES_EQUAL (d, dconst2))
2415 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2416
2417 if (!HONOR_SNANS (mode)
2418 && REAL_VALUES_EQUAL (d, dconstm1))
2419 return simplify_gen_unary (NEG, mode, op0, mode);
2420 }
2421
2422 /* Optimize -x * -x as x * x. */
2423 if (FLOAT_MODE_P (mode)
2424 && GET_CODE (op0) == NEG
2425 && GET_CODE (op1) == NEG
2426 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2427 && !side_effects_p (XEXP (op0, 0)))
2428 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2429
2430 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2431 if (SCALAR_FLOAT_MODE_P (mode)
2432 && GET_CODE (op0) == ABS
2433 && GET_CODE (op1) == ABS
2434 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2435 && !side_effects_p (XEXP (op0, 0)))
2436 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2437
2438 /* Reassociate multiplication, but for floating point MULTs
2439 only when the user specifies unsafe math optimizations. */
2440 if (! FLOAT_MODE_P (mode)
2441 || flag_unsafe_math_optimizations)
2442 {
2443 tem = simplify_associative_operation (code, mode, op0, op1);
2444 if (tem)
2445 return tem;
2446 }
2447 break;
2448
2449 case IOR:
2450 if (trueop1 == CONST0_RTX (mode))
2451 return op0;
2452 if (INTEGRAL_MODE_P (mode)
2453 && trueop1 == CONSTM1_RTX (mode)
2454 && !side_effects_p (op0))
2455 return op1;
2456 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2457 return op0;
2458 /* A | (~A) -> -1 */
2459 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2460 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2461 && ! side_effects_p (op0)
2462 && SCALAR_INT_MODE_P (mode))
2463 return constm1_rtx;
2464
2465 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2466 if (CONST_INT_P (op1)
2467 && HWI_COMPUTABLE_MODE_P (mode)
2468 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2469 && !side_effects_p (op0))
2470 return op1;
2471
2472 /* Canonicalize (X & C1) | C2. */
2473 if (GET_CODE (op0) == AND
2474 && CONST_INT_P (trueop1)
2475 && CONST_INT_P (XEXP (op0, 1)))
2476 {
2477 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2478 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2479 HOST_WIDE_INT c2 = INTVAL (trueop1);
2480
2481 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2482 if ((c1 & c2) == c1
2483 && !side_effects_p (XEXP (op0, 0)))
2484 return trueop1;
2485
2486 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2487 if (((c1|c2) & mask) == mask)
2488 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2489
2490 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2491 if (((c1 & ~c2) & mask) != (c1 & mask))
2492 {
2493 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2494 gen_int_mode (c1 & ~c2, mode));
2495 return simplify_gen_binary (IOR, mode, tem, op1);
2496 }
2497 }
2498
2499 /* Convert (A & B) | A to A. */
2500 if (GET_CODE (op0) == AND
2501 && (rtx_equal_p (XEXP (op0, 0), op1)
2502 || rtx_equal_p (XEXP (op0, 1), op1))
2503 && ! side_effects_p (XEXP (op0, 0))
2504 && ! side_effects_p (XEXP (op0, 1)))
2505 return op1;
2506
2507 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2508 mode size to (rotate A CX). */
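/* E.g. in SImode, (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
   becomes (rotate x (const_int 24)), since 24 + 8 == 32.  */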
2509
2510 if (GET_CODE (op1) == ASHIFT
2511 || GET_CODE (op1) == SUBREG)
2512 {
2513 opleft = op1;
2514 opright = op0;
2515 }
2516 else
2517 {
2518 opright = op1;
2519 opleft = op0;
2520 }
2521
2522 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2523 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2524 && CONST_INT_P (XEXP (opleft, 1))
2525 && CONST_INT_P (XEXP (opright, 1))
2526 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2527 == GET_MODE_PRECISION (mode)))
2528 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2529
2530 /* Same, but for ashift that has been "simplified" to a wider mode
2531 by simplify_shift_const. */
2532
2533 if (GET_CODE (opleft) == SUBREG
2534 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2535 && GET_CODE (opright) == LSHIFTRT
2536 && GET_CODE (XEXP (opright, 0)) == SUBREG
2537 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2538 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2539 && (GET_MODE_SIZE (GET_MODE (opleft))
2540 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2541 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2542 SUBREG_REG (XEXP (opright, 0)))
2543 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2544 && CONST_INT_P (XEXP (opright, 1))
2545 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2546 == GET_MODE_PRECISION (mode)))
2547 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2548 XEXP (SUBREG_REG (opleft), 1));
2549
2550 /* If we have (ior (and X C1) C2), simplify this by making
2551 C1 as small as possible if C1 actually changes.  */
2552 if (CONST_INT_P (op1)
2553 && (HWI_COMPUTABLE_MODE_P (mode)
2554 || INTVAL (op1) > 0)
2555 && GET_CODE (op0) == AND
2556 && CONST_INT_P (XEXP (op0, 1))
2557 && CONST_INT_P (op1)
2558 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2559 {
2560 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2561 gen_int_mode (UINTVAL (XEXP (op0, 1))
2562 & ~UINTVAL (op1),
2563 mode));
2564 return simplify_gen_binary (IOR, mode, tmp, op1);
2565 }
2566
2567 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2568 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2569 the PLUS does not affect any of the bits in OP1: then we can do
2570 the IOR as a PLUS and we can associate. This is valid if OP1
2571 can be safely shifted left C bits. */
2572 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2573 && GET_CODE (XEXP (op0, 0)) == PLUS
2574 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2575 && CONST_INT_P (XEXP (op0, 1))
2576 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2577 {
2578 int count = INTVAL (XEXP (op0, 1));
2579 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2580
2581 if (mask >> count == INTVAL (trueop1)
2582 && trunc_int_for_mode (mask, mode) == mask
2583 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2584 return simplify_gen_binary (ASHIFTRT, mode,
2585 plus_constant (mode, XEXP (op0, 0),
2586 mask),
2587 XEXP (op0, 1));
2588 }
2589
2590 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2591 if (tem)
2592 return tem;
2593
2594 tem = simplify_associative_operation (code, mode, op0, op1);
2595 if (tem)
2596 return tem;
2597 break;
2598
2599 case XOR:
2600 if (trueop1 == CONST0_RTX (mode))
2601 return op0;
2602 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2603 return simplify_gen_unary (NOT, mode, op0, mode);
2604 if (rtx_equal_p (trueop0, trueop1)
2605 && ! side_effects_p (op0)
2606 && GET_MODE_CLASS (mode) != MODE_CC)
2607 return CONST0_RTX (mode);
2608
2609 /* Canonicalize XOR of the most significant bit to PLUS. */
2610 if (CONST_SCALAR_INT_P (op1)
2611 && mode_signbit_p (mode, op1))
2612 return simplify_gen_binary (PLUS, mode, op0, op1);
2613 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2614 if (CONST_SCALAR_INT_P (op1)
2615 && GET_CODE (op0) == PLUS
2616 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2617 && mode_signbit_p (mode, XEXP (op0, 1)))
2618 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2619 simplify_gen_binary (XOR, mode, op1,
2620 XEXP (op0, 1)));
2621
2622 /* If we are XORing two things that have no bits in common,
2623 convert them into an IOR. This helps to detect rotation encoded
2624 using those methods and possibly other simplifications. */
2625
2626 if (HWI_COMPUTABLE_MODE_P (mode)
2627 && (nonzero_bits (op0, mode)
2628 & nonzero_bits (op1, mode)) == 0)
2629 return (simplify_gen_binary (IOR, mode, op0, op1));
2630
2631 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2632 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2633 (NOT y). */
2634 {
2635 int num_negated = 0;
2636
2637 if (GET_CODE (op0) == NOT)
2638 num_negated++, op0 = XEXP (op0, 0);
2639 if (GET_CODE (op1) == NOT)
2640 num_negated++, op1 = XEXP (op1, 0);
2641
2642 if (num_negated == 2)
2643 return simplify_gen_binary (XOR, mode, op0, op1);
2644 else if (num_negated == 1)
2645 return simplify_gen_unary (NOT, mode,
2646 simplify_gen_binary (XOR, mode, op0, op1),
2647 mode);
2648 }
2649
2650 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2651 correspond to a machine insn or result in further simplifications
2652 if B is a constant. */
2653
2654 if (GET_CODE (op0) == AND
2655 && rtx_equal_p (XEXP (op0, 1), op1)
2656 && ! side_effects_p (op1))
2657 return simplify_gen_binary (AND, mode,
2658 simplify_gen_unary (NOT, mode,
2659 XEXP (op0, 0), mode),
2660 op1);
2661
2662 else if (GET_CODE (op0) == AND
2663 && rtx_equal_p (XEXP (op0, 0), op1)
2664 && ! side_effects_p (op1))
2665 return simplify_gen_binary (AND, mode,
2666 simplify_gen_unary (NOT, mode,
2667 XEXP (op0, 1), mode),
2668 op1);
2669
2670 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2671 we can transform like this:
2672 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2673 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2674 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2675 Attempt a few simplifications when B and C are both constants. */
2676 if (GET_CODE (op0) == AND
2677 && CONST_INT_P (op1)
2678 && CONST_INT_P (XEXP (op0, 1)))
2679 {
2680 rtx a = XEXP (op0, 0);
2681 rtx b = XEXP (op0, 1);
2682 rtx c = op1;
2683 HOST_WIDE_INT bval = INTVAL (b);
2684 HOST_WIDE_INT cval = INTVAL (c);
2685
2686 rtx na_c
2687 = simplify_binary_operation (AND, mode,
2688 simplify_gen_unary (NOT, mode, a, mode),
2689 c);
2690 if ((~cval & bval) == 0)
2691 {
2692 /* Try to simplify ~A&C | ~B&C. */
2693 if (na_c != NULL_RTX)
2694 return simplify_gen_binary (IOR, mode, na_c,
2695 gen_int_mode (~bval & cval, mode));
2696 }
2697 else
2698 {
2699 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2700 if (na_c == const0_rtx)
2701 {
2702 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2703 gen_int_mode (~cval & bval,
2704 mode));
2705 return simplify_gen_binary (IOR, mode, a_nc_b,
2706 gen_int_mode (~bval & cval,
2707 mode));
2708 }
2709 }
2710 }
2711
2712 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2713 comparison if STORE_FLAG_VALUE is 1. */
2714 if (STORE_FLAG_VALUE == 1
2715 && trueop1 == const1_rtx
2716 && COMPARISON_P (op0)
2717 && (reversed = reversed_comparison (op0, mode)))
2718 return reversed;
2719
2720 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2721 is (lt foo (const_int 0)), so we can perform the above
2722 simplification if STORE_FLAG_VALUE is 1. */
2723
2724 if (STORE_FLAG_VALUE == 1
2725 && trueop1 == const1_rtx
2726 && GET_CODE (op0) == LSHIFTRT
2727 && CONST_INT_P (XEXP (op0, 1))
2728 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2729 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2730
2731 /* (xor (comparison foo bar) (const_int sign-bit))
2732 when STORE_FLAG_VALUE is the sign bit. */
2733 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2734 && trueop1 == const_true_rtx
2735 && COMPARISON_P (op0)
2736 && (reversed = reversed_comparison (op0, mode)))
2737 return reversed;
2738
2739 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2740 if (tem)
2741 return tem;
2742
2743 tem = simplify_associative_operation (code, mode, op0, op1);
2744 if (tem)
2745 return tem;
2746 break;
2747
2748 case AND:
2749 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2750 return trueop1;
2751 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2752 return op0;
2753 if (HWI_COMPUTABLE_MODE_P (mode))
2754 {
2755 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2756 HOST_WIDE_INT nzop1;
2757 if (CONST_INT_P (trueop1))
2758 {
2759 HOST_WIDE_INT val1 = INTVAL (trueop1);
2760 /* If we are turning off bits already known off in OP0, we need
2761 not do an AND. */
2762 if ((nzop0 & ~val1) == 0)
2763 return op0;
2764 }
2765 nzop1 = nonzero_bits (trueop1, mode);
2766 /* If we are clearing all the nonzero bits, the result is zero. */
2767 if ((nzop1 & nzop0) == 0
2768 && !side_effects_p (op0) && !side_effects_p (op1))
2769 return CONST0_RTX (mode);
2770 }
2771 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2772 && GET_MODE_CLASS (mode) != MODE_CC)
2773 return op0;
2774 /* A & (~A) -> 0 */
2775 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2776 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2777 && ! side_effects_p (op0)
2778 && GET_MODE_CLASS (mode) != MODE_CC)
2779 return CONST0_RTX (mode);
2780
2781 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2782 there are no nonzero bits of C outside of X's mode. */
2783 if ((GET_CODE (op0) == SIGN_EXTEND
2784 || GET_CODE (op0) == ZERO_EXTEND)
2785 && CONST_INT_P (trueop1)
2786 && HWI_COMPUTABLE_MODE_P (mode)
2787 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2788 & UINTVAL (trueop1)) == 0)
2789 {
2790 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2791 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2792 gen_int_mode (INTVAL (trueop1),
2793 imode));
2794 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2795 }
2796
2797 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2798 we might be able to further simplify the AND with X and potentially
2799 remove the truncation altogether. */
2800 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2801 {
2802 rtx x = XEXP (op0, 0);
2803 enum machine_mode xmode = GET_MODE (x);
2804 tem = simplify_gen_binary (AND, xmode, x,
2805 gen_int_mode (INTVAL (trueop1), xmode));
2806 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2807 }
2808
2809 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2810 if (GET_CODE (op0) == IOR
2811 && CONST_INT_P (trueop1)
2812 && CONST_INT_P (XEXP (op0, 1)))
2813 {
2814 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2815 return simplify_gen_binary (IOR, mode,
2816 simplify_gen_binary (AND, mode,
2817 XEXP (op0, 0), op1),
2818 gen_int_mode (tmp, mode));
2819 }
2820
2821 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2822 insn (and may simplify more). */
2823 if (GET_CODE (op0) == XOR
2824 && rtx_equal_p (XEXP (op0, 0), op1)
2825 && ! side_effects_p (op1))
2826 return simplify_gen_binary (AND, mode,
2827 simplify_gen_unary (NOT, mode,
2828 XEXP (op0, 1), mode),
2829 op1);
2830
2831 if (GET_CODE (op0) == XOR
2832 && rtx_equal_p (XEXP (op0, 1), op1)
2833 && ! side_effects_p (op1))
2834 return simplify_gen_binary (AND, mode,
2835 simplify_gen_unary (NOT, mode,
2836 XEXP (op0, 0), mode),
2837 op1);
2838
2839 /* Similarly for (~(A ^ B)) & A. */
2840 if (GET_CODE (op0) == NOT
2841 && GET_CODE (XEXP (op0, 0)) == XOR
2842 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2843 && ! side_effects_p (op1))
2844 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2845
2846 if (GET_CODE (op0) == NOT
2847 && GET_CODE (XEXP (op0, 0)) == XOR
2848 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2849 && ! side_effects_p (op1))
2850 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2851
2852 /* Convert (A | B) & A to A. */
2853 if (GET_CODE (op0) == IOR
2854 && (rtx_equal_p (XEXP (op0, 0), op1)
2855 || rtx_equal_p (XEXP (op0, 1), op1))
2856 && ! side_effects_p (XEXP (op0, 0))
2857 && ! side_effects_p (XEXP (op0, 1)))
2858 return op1;
2859
2860 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2861 ((A & N) + B) & M -> (A + B) & M
2862 Similarly if (N & M) == 0,
2863 ((A | N) + B) & M -> (A + B) & M
2864 and for - instead of + and/or ^ instead of |.
2865 Also, if (N & M) == 0, then
2866 (A +- N) & M -> A & M. */
2867 if (CONST_INT_P (trueop1)
2868 && HWI_COMPUTABLE_MODE_P (mode)
2869 && ~UINTVAL (trueop1)
2870 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2871 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2872 {
2873 rtx pmop[2];
2874 int which;
2875
2876 pmop[0] = XEXP (op0, 0);
2877 pmop[1] = XEXP (op0, 1);
2878
2879 if (CONST_INT_P (pmop[1])
2880 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2881 return simplify_gen_binary (AND, mode, pmop[0], op1);
2882
2883 for (which = 0; which < 2; which++)
2884 {
2885 tem = pmop[which];
2886 switch (GET_CODE (tem))
2887 {
2888 case AND:
2889 if (CONST_INT_P (XEXP (tem, 1))
2890 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2891 == UINTVAL (trueop1))
2892 pmop[which] = XEXP (tem, 0);
2893 break;
2894 case IOR:
2895 case XOR:
2896 if (CONST_INT_P (XEXP (tem, 1))
2897 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2898 pmop[which] = XEXP (tem, 0);
2899 break;
2900 default:
2901 break;
2902 }
2903 }
2904
2905 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2906 {
2907 tem = simplify_gen_binary (GET_CODE (op0), mode,
2908 pmop[0], pmop[1]);
2909 return simplify_gen_binary (code, mode, tem, op1);
2910 }
2911 }
2912
2913 /* (and X (ior (not X) Y)) -> (and X Y) */
2914 if (GET_CODE (op1) == IOR
2915 && GET_CODE (XEXP (op1, 0)) == NOT
2916 && op0 == XEXP (XEXP (op1, 0), 0))
2917 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2918
2919 /* (and (ior (not X) Y) X) -> (and X Y) */
2920 if (GET_CODE (op0) == IOR
2921 && GET_CODE (XEXP (op0, 0)) == NOT
2922 && op1 == XEXP (XEXP (op0, 0), 0))
2923 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2924
2925 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2926 if (tem)
2927 return tem;
2928
2929 tem = simplify_associative_operation (code, mode, op0, op1);
2930 if (tem)
2931 return tem;
2932 break;
2933
2934 case UDIV:
2935 /* 0/x is 0 (or x&0 if x has side-effects). */
2936 if (trueop0 == CONST0_RTX (mode))
2937 {
2938 if (side_effects_p (op1))
2939 return simplify_gen_binary (AND, mode, op1, trueop0);
2940 return trueop0;
2941 }
2942 /* x/1 is x. */
2943 if (trueop1 == CONST1_RTX (mode))
2944 {
2945 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2946 if (tem)
2947 return tem;
2948 }
2949 /* Convert divide by power of two into shift. */
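/* E.g. (udiv:SI x (const_int 16)) becomes (lshiftrt:SI x (const_int 4)).  */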
2950 if (CONST_INT_P (trueop1)
2951 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2952 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2953 break;
2954
2955 case DIV:
2956 /* Handle floating point and integers separately. */
2957 if (SCALAR_FLOAT_MODE_P (mode))
2958 {
2959 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2960 safe for modes with NaNs, since 0.0 / 0.0 will then be
2961 NaN rather than 0.0. Nor is it safe for modes with signed
2962 zeros, since dividing 0 by a negative number gives -0.0 */
2963 if (trueop0 == CONST0_RTX (mode)
2964 && !HONOR_NANS (mode)
2965 && !HONOR_SIGNED_ZEROS (mode)
2966 && ! side_effects_p (op1))
2967 return op0;
2968 /* x/1.0 is x. */
2969 if (trueop1 == CONST1_RTX (mode)
2970 && !HONOR_SNANS (mode))
2971 return op0;
2972
2973 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2974 && trueop1 != CONST0_RTX (mode))
2975 {
2976 REAL_VALUE_TYPE d;
2977 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2978
2979 /* x/-1.0 is -x. */
2980 if (REAL_VALUES_EQUAL (d, dconstm1)
2981 && !HONOR_SNANS (mode))
2982 return simplify_gen_unary (NEG, mode, op0, mode);
2983
2984 /* Change FP division by a constant into multiplication.
2985 Only do this with -freciprocal-math. */
2986 if (flag_reciprocal_math
2987 && !REAL_VALUES_EQUAL (d, dconst0))
2988 {
2989 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2990 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2991 return simplify_gen_binary (MULT, mode, op0, tem);
2992 }
2993 }
2994 }
2995 else if (SCALAR_INT_MODE_P (mode))
2996 {
2997 /* 0/x is 0 (or x&0 if x has side-effects). */
2998 if (trueop0 == CONST0_RTX (mode)
2999 && !cfun->can_throw_non_call_exceptions)
3000 {
3001 if (side_effects_p (op1))
3002 return simplify_gen_binary (AND, mode, op1, trueop0);
3003 return trueop0;
3004 }
3005 /* x/1 is x. */
3006 if (trueop1 == CONST1_RTX (mode))
3007 {
3008 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3009 if (tem)
3010 return tem;
3011 }
3012 /* x/-1 is -x. */
3013 if (trueop1 == constm1_rtx)
3014 {
3015 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3016 if (x)
3017 return simplify_gen_unary (NEG, mode, x, mode);
3018 }
3019 }
3020 break;
3021
3022 case UMOD:
3023 /* 0%x is 0 (or x&0 if x has side-effects). */
3024 if (trueop0 == CONST0_RTX (mode))
3025 {
3026 if (side_effects_p (op1))
3027 return simplify_gen_binary (AND, mode, op1, trueop0);
3028 return trueop0;
3029 }
3030 /* x%1 is 0 (or x&0 if x has side-effects).  */
3031 if (trueop1 == CONST1_RTX (mode))
3032 {
3033 if (side_effects_p (op0))
3034 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3035 return CONST0_RTX (mode);
3036 }
3037 /* Implement modulus by power of two as AND. */
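/* E.g. (umod:SI x (const_int 16)) becomes (and:SI x (const_int 15)).  */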
3038 if (CONST_INT_P (trueop1)
3039 && exact_log2 (UINTVAL (trueop1)) > 0)
3040 return simplify_gen_binary (AND, mode, op0,
3041 gen_int_mode (INTVAL (op1) - 1, mode));
3042 break;
3043
3044 case MOD:
3045 /* 0%x is 0 (or x&0 if x has side-effects). */
3046 if (trueop0 == CONST0_RTX (mode))
3047 {
3048 if (side_effects_p (op1))
3049 return simplify_gen_binary (AND, mode, op1, trueop0);
3050 return trueop0;
3051 }
3052 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
3053 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3054 {
3055 if (side_effects_p (op0))
3056 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3057 return CONST0_RTX (mode);
3058 }
3059 break;
3060
3061 case ROTATERT:
3062 case ROTATE:
3063 /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3064 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3065 bitsize - 1, use the other direction of rotate with a
3066 1 .. bitsize / 2 - 1 amount instead.  */
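/* E.g. in SImode, (rotate x (const_int 25)) becomes
   (rotatert x (const_int 7)) and (rotatert x (const_int 16)) becomes
   (rotate x (const_int 16)).  */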
3067 if (CONST_INT_P (trueop1)
3068 && IN_RANGE (INTVAL (trueop1),
3069 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3070 GET_MODE_BITSIZE (mode) - 1))
3071 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3072 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3073 - INTVAL (trueop1)));
3074 /* FALLTHRU */
3075 case ASHIFTRT:
3076 if (trueop1 == CONST0_RTX (mode))
3077 return op0;
3078 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3079 return op0;
3080 /* Rotating ~0 always results in ~0. */
3081 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3082 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3083 && ! side_effects_p (op1))
3084 return op0;
3085 canonicalize_shift:
3086 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3087 {
3088 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3089 if (val != INTVAL (op1))
3090 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3091 }
3092 break;
3093
3094 case ASHIFT:
3095 case SS_ASHIFT:
3096 case US_ASHIFT:
3097 if (trueop1 == CONST0_RTX (mode))
3098 return op0;
3099 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3100 return op0;
3101 goto canonicalize_shift;
3102
3103 case LSHIFTRT:
3104 if (trueop1 == CONST0_RTX (mode))
3105 return op0;
3106 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3107 return op0;
3108 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
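/* This holds when CLZ of zero is defined to be the precision of X's mode
   and C is log2 of that precision: bit C of the CLZ result is set only
   when X is zero, so the shift yields (eq X 0) given STORE_FLAG_VALUE == 1.  */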
3109 if (GET_CODE (op0) == CLZ
3110 && CONST_INT_P (trueop1)
3111 && STORE_FLAG_VALUE == 1
3112 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3113 {
3114 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3115 unsigned HOST_WIDE_INT zero_val = 0;
3116
3117 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3118 && zero_val == GET_MODE_PRECISION (imode)
3119 && INTVAL (trueop1) == exact_log2 (zero_val))
3120 return simplify_gen_relational (EQ, mode, imode,
3121 XEXP (op0, 0), const0_rtx);
3122 }
3123 goto canonicalize_shift;
3124
3125 case SMIN:
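/* (smin X C) is C when C is the most negative value of the mode (only
   the sign bit set), since no signed value is smaller.  */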
3126 if (width <= HOST_BITS_PER_WIDE_INT
3127 && mode_signbit_p (mode, trueop1)
3128 && ! side_effects_p (op0))
3129 return op1;
3130 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3131 return op0;
3132 tem = simplify_associative_operation (code, mode, op0, op1);
3133 if (tem)
3134 return tem;
3135 break;
3136
3137 case SMAX:
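/* (smax X C) is C when C is the largest signed value of the mode
   (GET_MODE_MASK (mode) >> 1), since no signed value is larger.  */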
3138 if (width <= HOST_BITS_PER_WIDE_INT
3139 && CONST_INT_P (trueop1)
3140 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3141 && ! side_effects_p (op0))
3142 return op1;
3143 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3144 return op0;
3145 tem = simplify_associative_operation (code, mode, op0, op1);
3146 if (tem)
3147 return tem;
3148 break;
3149
3150 case UMIN:
3151 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3152 return op1;
3153 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3154 return op0;
3155 tem = simplify_associative_operation (code, mode, op0, op1);
3156 if (tem)
3157 return tem;
3158 break;
3159
3160 case UMAX:
3161 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3162 return op1;
3163 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3164 return op0;
3165 tem = simplify_associative_operation (code, mode, op0, op1);
3166 if (tem)
3167 return tem;
3168 break;
3169
3170 case SS_PLUS:
3171 case US_PLUS:
3172 case SS_MINUS:
3173 case US_MINUS:
3174 case SS_MULT:
3175 case US_MULT:
3176 case SS_DIV:
3177 case US_DIV:
3178 /* ??? There are simplifications that can be done. */
3179 return 0;
3180
3181 case VEC_SELECT:
3182 if (!VECTOR_MODE_P (mode))
3183 {
3184 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3185 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3186 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3187 gcc_assert (XVECLEN (trueop1, 0) == 1);
3188 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3189
3190 if (GET_CODE (trueop0) == CONST_VECTOR)
3191 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3192 (trueop1, 0, 0)));
3193
3194 /* Extract a scalar element from a nested VEC_SELECT expression
3195 (with an optional nested VEC_CONCAT expression).  Some targets
3196 (i386) extract a scalar element from a vector using a chain of
3197 nested VEC_SELECT expressions.  When the input operand is a
3198 memory operand, this operation can be simplified to a simple
3199 scalar load from an offset memory address.  */
3200 if (GET_CODE (trueop0) == VEC_SELECT)
3201 {
3202 rtx op0 = XEXP (trueop0, 0);
3203 rtx op1 = XEXP (trueop0, 1);
3204
3205 enum machine_mode opmode = GET_MODE (op0);
3206 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3207 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3208
3209 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3210 int elem;
3211
3212 rtvec vec;
3213 rtx tmp_op, tmp;
3214
3215 gcc_assert (GET_CODE (op1) == PARALLEL);
3216 gcc_assert (i < n_elts);
3217
3218 /* Select the element pointed to by the nested selector.  */
3219 elem = INTVAL (XVECEXP (op1, 0, i));
3220
3221 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3222 if (GET_CODE (op0) == VEC_CONCAT)
3223 {
3224 rtx op00 = XEXP (op0, 0);
3225 rtx op01 = XEXP (op0, 1);
3226
3227 enum machine_mode mode00, mode01;
3228 int n_elts00, n_elts01;
3229
3230 mode00 = GET_MODE (op00);
3231 mode01 = GET_MODE (op01);
3232
3233 /* Find out number of elements of each operand. */
3234 if (VECTOR_MODE_P (mode00))
3235 {
3236 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3237 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3238 }
3239 else
3240 n_elts00 = 1;
3241
3242 if (VECTOR_MODE_P (mode01))
3243 {
3244 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3245 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3246 }
3247 else
3248 n_elts01 = 1;
3249
3250 gcc_assert (n_elts == n_elts00 + n_elts01);
3251
3252 /* Select correct operand of VEC_CONCAT
3253 and adjust selector. */
3254 if (elem < n_elts01)
3255 tmp_op = op00;
3256 else
3257 {
3258 tmp_op = op01;
3259 elem -= n_elts00;
3260 }
3261 }
3262 else
3263 tmp_op = op0;
3264
3265 vec = rtvec_alloc (1);
3266 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3267
3268 tmp = gen_rtx_fmt_ee (code, mode,
3269 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3270 return tmp;
3271 }
3272 if (GET_CODE (trueop0) == VEC_DUPLICATE
3273 && GET_MODE (XEXP (trueop0, 0)) == mode)
3274 return XEXP (trueop0, 0);
3275 }
3276 else
3277 {
3278 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3279 gcc_assert (GET_MODE_INNER (mode)
3280 == GET_MODE_INNER (GET_MODE (trueop0)));
3281 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3282
3283 if (GET_CODE (trueop0) == CONST_VECTOR)
3284 {
3285 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3286 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3287 rtvec v = rtvec_alloc (n_elts);
3288 unsigned int i;
3289
3290 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3291 for (i = 0; i < n_elts; i++)
3292 {
3293 rtx x = XVECEXP (trueop1, 0, i);
3294
3295 gcc_assert (CONST_INT_P (x));
3296 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3297 INTVAL (x));
3298 }
3299
3300 return gen_rtx_CONST_VECTOR (mode, v);
3301 }
3302
3303 /* Recognize the identity. */
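/* E.g. (vec_select:V4SI x (parallel [0 1 2 3])) with x already in V4SImode
   selects every element in order and is just x.  */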
3304 if (GET_MODE (trueop0) == mode)
3305 {
3306 bool maybe_ident = true;
3307 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3308 {
3309 rtx j = XVECEXP (trueop1, 0, i);
3310 if (!CONST_INT_P (j) || INTVAL (j) != i)
3311 {
3312 maybe_ident = false;
3313 break;
3314 }
3315 }
3316 if (maybe_ident)
3317 return trueop0;
3318 }
3319
3320 /* If we build {a,b} then permute it, build the result directly. */
3321 if (XVECLEN (trueop1, 0) == 2
3322 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3323 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3324 && GET_CODE (trueop0) == VEC_CONCAT
3325 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3326 && GET_MODE (XEXP (trueop0, 0)) == mode
3327 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3328 && GET_MODE (XEXP (trueop0, 1)) == mode)
3329 {
3330 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3331 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3332 rtx subop0, subop1;
3333
3334 gcc_assert (i0 < 4 && i1 < 4);
3335 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3336 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3337
3338 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3339 }
3340
3341 if (XVECLEN (trueop1, 0) == 2
3342 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3343 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3344 && GET_CODE (trueop0) == VEC_CONCAT
3345 && GET_MODE (trueop0) == mode)
3346 {
3347 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3348 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3349 rtx subop0, subop1;
3350
3351 gcc_assert (i0 < 2 && i1 < 2);
3352 subop0 = XEXP (trueop0, i0);
3353 subop1 = XEXP (trueop0, i1);
3354
3355 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3356 }
3357 }
3358
3359 if (XVECLEN (trueop1, 0) == 1
3360 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3361 && GET_CODE (trueop0) == VEC_CONCAT)
3362 {
3363 rtx vec = trueop0;
3364 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3365
3366 /* Try to find the element in the VEC_CONCAT. */
3367 while (GET_MODE (vec) != mode
3368 && GET_CODE (vec) == VEC_CONCAT)
3369 {
3370 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3371 if (offset < vec_size)
3372 vec = XEXP (vec, 0);
3373 else
3374 {
3375 offset -= vec_size;
3376 vec = XEXP (vec, 1);
3377 }
3378 vec = avoid_constant_pool_reference (vec);
3379 }
3380
3381 if (GET_MODE (vec) == mode)
3382 return vec;
3383 }
3384
3385 /* If we select elements in a vec_merge that all come from the same
3386 operand, select from that operand directly. */
3387 if (GET_CODE (op0) == VEC_MERGE)
3388 {
3389 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3390 if (CONST_INT_P (trueop02))
3391 {
3392 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3393 bool all_operand0 = true;
3394 bool all_operand1 = true;
3395 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3396 {
3397 rtx j = XVECEXP (trueop1, 0, i);
3398 if (sel & (1 << UINTVAL (j)))
3399 all_operand1 = false;
3400 else
3401 all_operand0 = false;
3402 }
3403 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3404 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3405 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3406 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3407 }
3408 }
3409
3410 return 0;
3411 case VEC_CONCAT:
3412 {
3413 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3414 ? GET_MODE (trueop0)
3415 : GET_MODE_INNER (mode));
3416 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3417 ? GET_MODE (trueop1)
3418 : GET_MODE_INNER (mode));
3419
3420 gcc_assert (VECTOR_MODE_P (mode));
3421 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3422 == GET_MODE_SIZE (mode));
3423
3424 if (VECTOR_MODE_P (op0_mode))
3425 gcc_assert (GET_MODE_INNER (mode)
3426 == GET_MODE_INNER (op0_mode));
3427 else
3428 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3429
3430 if (VECTOR_MODE_P (op1_mode))
3431 gcc_assert (GET_MODE_INNER (mode)
3432 == GET_MODE_INNER (op1_mode));
3433 else
3434 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3435
3436 if ((GET_CODE (trueop0) == CONST_VECTOR
3437 || CONST_SCALAR_INT_P (trueop0)
3438 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3439 && (GET_CODE (trueop1) == CONST_VECTOR
3440 || CONST_SCALAR_INT_P (trueop1)
3441 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3442 {
3443 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3444 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3445 rtvec v = rtvec_alloc (n_elts);
3446 unsigned int i;
3447 unsigned in_n_elts = 1;
3448
3449 if (VECTOR_MODE_P (op0_mode))
3450 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3451 for (i = 0; i < n_elts; i++)
3452 {
3453 if (i < in_n_elts)
3454 {
3455 if (!VECTOR_MODE_P (op0_mode))
3456 RTVEC_ELT (v, i) = trueop0;
3457 else
3458 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3459 }
3460 else
3461 {
3462 if (!VECTOR_MODE_P (op1_mode))
3463 RTVEC_ELT (v, i) = trueop1;
3464 else
3465 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3466 i - in_n_elts);
3467 }
3468 }
3469
3470 return gen_rtx_CONST_VECTOR (mode, v);
3471 }
3472
3473 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3474 Restrict the transformation to avoid generating a VEC_SELECT with a
3475 mode unrelated to its operand. */
3476 if (GET_CODE (trueop0) == VEC_SELECT
3477 && GET_CODE (trueop1) == VEC_SELECT
3478 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3479 && GET_MODE (XEXP (trueop0, 0)) == mode)
3480 {
3481 rtx par0 = XEXP (trueop0, 1);
3482 rtx par1 = XEXP (trueop1, 1);
3483 int len0 = XVECLEN (par0, 0);
3484 int len1 = XVECLEN (par1, 0);
3485 rtvec vec = rtvec_alloc (len0 + len1);
3486 for (int i = 0; i < len0; i++)
3487 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3488 for (int i = 0; i < len1; i++)
3489 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3490 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3491 gen_rtx_PARALLEL (VOIDmode, vec));
3492 }
3493 }
3494 return 0;
3495
3496 default:
3497 gcc_unreachable ();
3498 }
3499
3500 return 0;
3501 }
3502
3503 rtx
3504 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3505 rtx op0, rtx op1)
3506 {
3507 unsigned int width = GET_MODE_PRECISION (mode);
3508
3509 if (VECTOR_MODE_P (mode)
3510 && code != VEC_CONCAT
3511 && GET_CODE (op0) == CONST_VECTOR
3512 && GET_CODE (op1) == CONST_VECTOR)
3513 {
3514 unsigned n_elts = GET_MODE_NUNITS (mode);
3515 enum machine_mode op0mode = GET_MODE (op0);
3516 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3517 enum machine_mode op1mode = GET_MODE (op1);
3518 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3519 rtvec v = rtvec_alloc (n_elts);
3520 unsigned int i;
3521
3522 gcc_assert (op0_n_elts == n_elts);
3523 gcc_assert (op1_n_elts == n_elts);
3524 for (i = 0; i < n_elts; i++)
3525 {
3526 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3527 CONST_VECTOR_ELT (op0, i),
3528 CONST_VECTOR_ELT (op1, i));
3529 if (!x)
3530 return 0;
3531 RTVEC_ELT (v, i) = x;
3532 }
3533
3534 return gen_rtx_CONST_VECTOR (mode, v);
3535 }
3536
3537 if (VECTOR_MODE_P (mode)
3538 && code == VEC_CONCAT
3539 && (CONST_SCALAR_INT_P (op0)
3540 || GET_CODE (op0) == CONST_FIXED
3541 || CONST_DOUBLE_AS_FLOAT_P (op0))
3542 && (CONST_SCALAR_INT_P (op1)
3543 || CONST_DOUBLE_AS_FLOAT_P (op1)
3544 || GET_CODE (op1) == CONST_FIXED))
3545 {
3546 unsigned n_elts = GET_MODE_NUNITS (mode);
3547 rtvec v = rtvec_alloc (n_elts);
3548
3549 gcc_assert (n_elts >= 2);
3550 if (n_elts == 2)
3551 {
3552 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3553 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3554
3555 RTVEC_ELT (v, 0) = op0;
3556 RTVEC_ELT (v, 1) = op1;
3557 }
3558 else
3559 {
3560 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3561 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3562 unsigned i;
3563
3564 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3565 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3566 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3567
3568 for (i = 0; i < op0_n_elts; ++i)
3569 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3570 for (i = 0; i < op1_n_elts; ++i)
3571 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3572 }
3573
3574 return gen_rtx_CONST_VECTOR (mode, v);
3575 }
3576
3577 if (SCALAR_FLOAT_MODE_P (mode)
3578 && CONST_DOUBLE_AS_FLOAT_P (op0)
3579 && CONST_DOUBLE_AS_FLOAT_P (op1)
3580 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3581 {
3582 if (code == AND
3583 || code == IOR
3584 || code == XOR)
3585 {
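/* These cases fold the bitwise operation on the target representation of
 the two float constants; e.g. XORing 1.0f with -0.0f flips the sign bit
 of the encoding and yields -1.0f. */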
3586 long tmp0[4];
3587 long tmp1[4];
3588 REAL_VALUE_TYPE r;
3589 int i;
3590
3591 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3592 GET_MODE (op0));
3593 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3594 GET_MODE (op1));
3595 for (i = 0; i < 4; i++)
3596 {
3597 switch (code)
3598 {
3599 case AND:
3600 tmp0[i] &= tmp1[i];
3601 break;
3602 case IOR:
3603 tmp0[i] |= tmp1[i];
3604 break;
3605 case XOR:
3606 tmp0[i] ^= tmp1[i];
3607 break;
3608 default:
3609 gcc_unreachable ();
3610 }
3611 }
3612 real_from_target (&r, tmp0, mode);
3613 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3614 }
3615 else
3616 {
3617 REAL_VALUE_TYPE f0, f1, value, result;
3618 bool inexact;
3619
3620 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3621 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3622 real_convert (&f0, mode, &f0);
3623 real_convert (&f1, mode, &f1);
3624
3625 if (HONOR_SNANS (mode)
3626 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3627 return 0;
3628
3629 if (code == DIV
3630 && REAL_VALUES_EQUAL (f1, dconst0)
3631 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3632 return 0;
3633
3634 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3635 && flag_trapping_math
3636 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3637 {
3638 int s0 = REAL_VALUE_NEGATIVE (f0);
3639 int s1 = REAL_VALUE_NEGATIVE (f1);
3640
3641 switch (code)
3642 {
3643 case PLUS:
3644 /* Inf + -Inf = NaN plus exception. */
3645 if (s0 != s1)
3646 return 0;
3647 break;
3648 case MINUS:
3649 /* Inf - Inf = NaN plus exception. */
3650 if (s0 == s1)
3651 return 0;
3652 break;
3653 case DIV:
3654 /* Inf / Inf = NaN plus exception. */
3655 return 0;
3656 default:
3657 break;
3658 }
3659 }
3660
3661 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3662 && flag_trapping_math
3663 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3664 || (REAL_VALUE_ISINF (f1)
3665 && REAL_VALUES_EQUAL (f0, dconst0))))
3666 /* Inf * 0 = NaN plus exception. */
3667 return 0;
3668
3669 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3670 &f0, &f1);
3671 real_convert (&result, mode, &value);
3672
3673 /* Don't constant fold this floating point operation if
3674 the result has overflowed and flag_trapping_math is set. */
3675
3676 if (flag_trapping_math
3677 && MODE_HAS_INFINITIES (mode)
3678 && REAL_VALUE_ISINF (result)
3679 && !REAL_VALUE_ISINF (f0)
3680 && !REAL_VALUE_ISINF (f1))
3681 /* Overflow plus exception. */
3682 return 0;
3683
3684 /* Don't constant fold this floating point operation if the
3685 result may depend upon the run-time rounding mode and
3686 flag_rounding_math is set, or if GCC's software emulation
3687 is unable to accurately represent the result. */
3688
3689 if ((flag_rounding_math
3690 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3691 && (inexact || !real_identical (&result, &value)))
3692 return NULL_RTX;
3693
3694 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3695 }
3696 }
3697
3698 /* We can fold some multi-word operations. */
3699 if (GET_MODE_CLASS (mode) == MODE_INT
3700 && CONST_SCALAR_INT_P (op0)
3701 && CONST_SCALAR_INT_P (op1))
3702 {
3703 wide_int result;
3704 bool overflow;
3705 unsigned int bitsize = GET_MODE_BITSIZE (mode);
3706 rtx_mode_t pop0 = std::make_pair (op0, mode);
3707 rtx_mode_t pop1 = std::make_pair (op1, mode);
3708
3709 #if TARGET_SUPPORTS_WIDE_INT == 0
3710 /* This assert keeps the simplification from producing a result
3711 that cannot be represented in a CONST_DOUBLE, but a lot of
3712 upstream callers expect that this function never fails to
3713 simplify something, so if you added this check to the test
3714 above, the code would die later anyway. If this assert
3715 triggers, you just need to make the port support wide int. */
3716 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3717 #endif
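/* Both operands are now known to be CONST_INT or CONST_WIDE_INT, so e.g.
 a DImode addition of two such constants folds below to a single constant
 rtx produced by wi::add and immed_wide_int_const. */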
3718 switch (code)
3719 {
3720 case MINUS:
3721 result = wi::sub (pop0, pop1);
3722 break;
3723
3724 case PLUS:
3725 result = wi::add (pop0, pop1);
3726 break;
3727
3728 case MULT:
3729 result = wi::mul (pop0, pop1);
3730 break;
3731
3732 case DIV:
3733 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3734 if (overflow)
3735 return NULL_RTX;
3736 break;
3737
3738 case MOD:
3739 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3740 if (overflow)
3741 return NULL_RTX;
3742 break;
3743
3744 case UDIV:
3745 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3746 if (overflow)
3747 return NULL_RTX;
3748 break;
3749
3750 case UMOD:
3751 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3752 if (overflow)
3753 return NULL_RTX;
3754 break;
3755
3756 case AND:
3757 result = wi::bit_and (pop0, pop1);
3758 break;
3759
3760 case IOR:
3761 result = wi::bit_or (pop0, pop1);
3762 break;
3763
3764 case XOR:
3765 result = wi::bit_xor (pop0, pop1);
3766 break;
3767
3768 case SMIN:
3769 result = wi::smin (pop0, pop1);
3770 break;
3771
3772 case SMAX:
3773 result = wi::smax (pop0, pop1);
3774 break;
3775
3776 case UMIN:
3777 result = wi::umin (pop0, pop1);
3778 break;
3779
3780 case UMAX:
3781 result = wi::umax (pop0, pop1);
3782 break;
3783
3784 case LSHIFTRT:
3785 case ASHIFTRT:
3786 case ASHIFT:
3787 case ROTATE:
3788 case ROTATERT:
3789 {
3790 wide_int wop1 = pop1;
3791 if (wi::neg_p (wop1))
3792 return NULL_RTX;
3793
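/* E.g. with SHIFT_COUNT_TRUNCATED on a 32-bit mode, a shift count of 33
 is reduced to 1 below before the shift is folded. */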
3794 if (SHIFT_COUNT_TRUNCATED)
3795 wop1 = wi::umod_trunc (wop1, width);
3796
3797 switch (code)
3798 {
3799 case LSHIFTRT:
3800 result = wi::lrshift (pop0, wop1, bitsize);
3801 break;
3802
3803 case ASHIFTRT:
3804 result = wi::arshift (pop0, wop1, bitsize);
3805 break;
3806
3807 case ASHIFT:
3808 result = wi::lshift (pop0, wop1, bitsize);
3809 break;
3810
3811 case ROTATE:
3812 result = wi::lrotate (pop0, wop1);
3813 break;
3814
3815 case ROTATERT:
3816 result = wi::rrotate (pop0, wop1);
3817 break;
3818
3819 default:
3820 gcc_unreachable ();
3821 }
3822 break;
3823 }
3824 default:
3825 return NULL_RTX;
3826 }
3827 return immed_wide_int_const (result, mode);
3828 }
3829
3830 return NULL_RTX;
3831 }
3832
3833
3834 \f
3835 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3836 PLUS or MINUS.
3837
3838 Rather than test for specific cases, we do this by a brute-force method
3839 and do all possible simplifications until no more changes occur. Then
3840 we rebuild the operation. */
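/* For example, simplifying (minus (plus A B) (neg C)) decomposes the
 operands into the array { A(+), B(+), C(+) }, combines any pairs that
 simplify, and rebuilds a left-associated chain such as
 (plus (plus A C) B), subject to canonical operand ordering. */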
3841
3842 struct simplify_plus_minus_op_data
3843 {
3844 rtx op;
3845 short neg;
3846 };
3847
3848 static bool
3849 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3850 {
3851 int result;
3852
3853 result = (commutative_operand_precedence (y)
3854 - commutative_operand_precedence (x));
3855 if (result)
3856 return result > 0;
3857
3858 /* Group together equal REGs to do more simplification. */
3859 if (REG_P (x) && REG_P (y))
3860 return REGNO (x) > REGNO (y);
3861 else
3862 return false;
3863 }
3864
3865 static rtx
3866 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3867 rtx op1)
3868 {
3869 struct simplify_plus_minus_op_data ops[8];
3870 rtx result, tem;
3871 int n_ops = 2, input_ops = 2;
3872 int changed, n_constants = 0, canonicalized = 0;
3873 int i, j;
3874
3875 memset (ops, 0, sizeof ops);
3876
3877 /* Set up the two operands and then expand them until nothing has been
3878 changed. If we run out of room in our array, give up; this should
3879 almost never happen. */
3880
3881 ops[0].op = op0;
3882 ops[0].neg = 0;
3883 ops[1].op = op1;
3884 ops[1].neg = (code == MINUS);
3885
3886 do
3887 {
3888 changed = 0;
3889
3890 for (i = 0; i < n_ops; i++)
3891 {
3892 rtx this_op = ops[i].op;
3893 int this_neg = ops[i].neg;
3894 enum rtx_code this_code = GET_CODE (this_op);
3895
3896 switch (this_code)
3897 {
3898 case PLUS:
3899 case MINUS:
3900 if (n_ops == 7)
3901 return NULL_RTX;
3902
3903 ops[n_ops].op = XEXP (this_op, 1);
3904 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3905 n_ops++;
3906
3907 ops[i].op = XEXP (this_op, 0);
3908 input_ops++;
3909 changed = 1;
3910 canonicalized |= this_neg;
3911 break;
3912
3913 case NEG:
3914 ops[i].op = XEXP (this_op, 0);
3915 ops[i].neg = ! this_neg;
3916 changed = 1;
3917 canonicalized = 1;
3918 break;
3919
3920 case CONST:
3921 if (n_ops < 7
3922 && GET_CODE (XEXP (this_op, 0)) == PLUS
3923 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3924 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3925 {
3926 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3927 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3928 ops[n_ops].neg = this_neg;
3929 n_ops++;
3930 changed = 1;
3931 canonicalized = 1;
3932 }
3933 break;
3934
3935 case NOT:
3936 /* ~a -> (-a - 1) */
3937 if (n_ops != 7)
3938 {
3939 ops[n_ops].op = CONSTM1_RTX (mode);
3940 ops[n_ops++].neg = this_neg;
3941 ops[i].op = XEXP (this_op, 0);
3942 ops[i].neg = !this_neg;
3943 changed = 1;
3944 canonicalized = 1;
3945 }
3946 break;
3947
3948 case CONST_INT:
3949 n_constants++;
3950 if (this_neg)
3951 {
3952 ops[i].op = neg_const_int (mode, this_op);
3953 ops[i].neg = 0;
3954 changed = 1;
3955 canonicalized = 1;
3956 }
3957 break;
3958
3959 default:
3960 break;
3961 }
3962 }
3963 }
3964 while (changed);
3965
3966 if (n_constants > 1)
3967 canonicalized = 1;
3968
3969 gcc_assert (n_ops >= 2);
3970
3971 /* If we only have two operands, we can avoid the loops. */
3972 if (n_ops == 2)
3973 {
3974 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3975 rtx lhs, rhs;
3976
3977 /* Get the two operands. Be careful with the order, especially for
3978 the cases where code == MINUS. */
3979 if (ops[0].neg && ops[1].neg)
3980 {
3981 lhs = gen_rtx_NEG (mode, ops[0].op);
3982 rhs = ops[1].op;
3983 }
3984 else if (ops[0].neg)
3985 {
3986 lhs = ops[1].op;
3987 rhs = ops[0].op;
3988 }
3989 else
3990 {
3991 lhs = ops[0].op;
3992 rhs = ops[1].op;
3993 }
3994
3995 return simplify_const_binary_operation (code, mode, lhs, rhs);
3996 }
3997
3998 /* Now simplify each pair of operands until nothing changes. */
3999 do
4000 {
4001 /* Insertion sort is good enough for an eight-element array. */
4002 for (i = 1; i < n_ops; i++)
4003 {
4004 struct simplify_plus_minus_op_data save;
4005 j = i - 1;
4006 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4007 continue;
4008
4009 canonicalized = 1;
4010 save = ops[i];
4011 do
4012 ops[j + 1] = ops[j];
4013 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4014 ops[j + 1] = save;
4015 }
4016
4017 changed = 0;
4018 for (i = n_ops - 1; i > 0; i--)
4019 for (j = i - 1; j >= 0; j--)
4020 {
4021 rtx lhs = ops[j].op, rhs = ops[i].op;
4022 int lneg = ops[j].neg, rneg = ops[i].neg;
4023
4024 if (lhs != 0 && rhs != 0)
4025 {
4026 enum rtx_code ncode = PLUS;
4027
4028 if (lneg != rneg)
4029 {
4030 ncode = MINUS;
4031 if (lneg)
4032 tem = lhs, lhs = rhs, rhs = tem;
4033 }
4034 else if (swap_commutative_operands_p (lhs, rhs))
4035 tem = lhs, lhs = rhs, rhs = tem;
4036
4037 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4038 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4039 {
4040 rtx tem_lhs, tem_rhs;
4041
4042 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4043 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4044 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4045
4046 if (tem && !CONSTANT_P (tem))
4047 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4048 }
4049 else
4050 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4051
4052 /* Reject "simplifications" that just wrap the two
4053 arguments in a CONST. Failure to do so can result
4054 in infinite recursion with simplify_binary_operation
4055 when it calls us to simplify CONST operations. */
4056 if (tem
4057 && ! (GET_CODE (tem) == CONST
4058 && GET_CODE (XEXP (tem, 0)) == ncode
4059 && XEXP (XEXP (tem, 0), 0) == lhs
4060 && XEXP (XEXP (tem, 0), 1) == rhs))
4061 {
4062 lneg &= rneg;
4063 if (GET_CODE (tem) == NEG)
4064 tem = XEXP (tem, 0), lneg = !lneg;
4065 if (CONST_INT_P (tem) && lneg)
4066 tem = neg_const_int (mode, tem), lneg = 0;
4067
4068 ops[i].op = tem;
4069 ops[i].neg = lneg;
4070 ops[j].op = NULL_RTX;
4071 changed = 1;
4072 canonicalized = 1;
4073 }
4074 }
4075 }
4076
4077 /* If nothing changed, fail. */
4078 if (!canonicalized)
4079 return NULL_RTX;
4080
4081 /* Pack all the operands to the lower-numbered entries. */
4082 for (i = 0, j = 0; j < n_ops; j++)
4083 if (ops[j].op)
4084 {
4085 ops[i] = ops[j];
4086 i++;
4087 }
4088 n_ops = i;
4089 }
4090 while (changed);
4091
4092 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4093 if (n_ops == 2
4094 && CONST_INT_P (ops[1].op)
4095 && CONSTANT_P (ops[0].op)
4096 && ops[0].neg)
4097 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4098
4099 /* We suppressed creation of trivial CONST expressions in the
4100 combination loop to avoid recursion. Create one manually now.
4101 The combination loop should have ensured that there is exactly
4102 one CONST_INT, and the sort will have ensured that it is last
4103 in the array and that any other constant will be next-to-last. */
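/* E.g. ops == { R(+), (symbol_ref S)(+), (const_int 4)(+) } is turned
 into { R(+), (const (plus (symbol_ref S) (const_int 4)))(+) } here. */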
4104
4105 if (n_ops > 1
4106 && CONST_INT_P (ops[n_ops - 1].op)
4107 && CONSTANT_P (ops[n_ops - 2].op))
4108 {
4109 rtx value = ops[n_ops - 1].op;
4110 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4111 value = neg_const_int (mode, value);
4112 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4113 INTVAL (value));
4114 n_ops--;
4115 }
4116
4117 /* Put a non-negated operand first, if possible. */
4118
4119 for (i = 0; i < n_ops && ops[i].neg; i++)
4120 continue;
4121 if (i == n_ops)
4122 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4123 else if (i != 0)
4124 {
4125 tem = ops[0].op;
4126 ops[0] = ops[i];
4127 ops[i].op = tem;
4128 ops[i].neg = 1;
4129 }
4130
4131 /* Now make the result by performing the requested operations. */
4132 result = ops[0].op;
4133 for (i = 1; i < n_ops; i++)
4134 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4135 mode, result, ops[i].op);
4136
4137 return result;
4138 }
4139
4140 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4141 static bool
4142 plus_minus_operand_p (const_rtx x)
4143 {
4144 return GET_CODE (x) == PLUS
4145 || GET_CODE (x) == MINUS
4146 || (GET_CODE (x) == CONST
4147 && GET_CODE (XEXP (x, 0)) == PLUS
4148 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4149 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4150 }
4151
4152 /* Like simplify_binary_operation except used for relational operators.
4153 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4154 not both be VOIDmode as well.
4155
4156 CMP_MODE specifies the mode in which the comparison is done, so it is
4157 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4158 the operands or, if both are VOIDmode, the operands are compared in
4159 "infinite precision". */
4160 rtx
4161 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4162 enum machine_mode cmp_mode, rtx op0, rtx op1)
4163 {
4164 rtx tem, trueop0, trueop1;
4165
4166 if (cmp_mode == VOIDmode)
4167 cmp_mode = GET_MODE (op0);
4168 if (cmp_mode == VOIDmode)
4169 cmp_mode = GET_MODE (op1);
4170
4171 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4172 if (tem)
4173 {
4174 if (SCALAR_FLOAT_MODE_P (mode))
4175 {
4176 if (tem == const0_rtx)
4177 return CONST0_RTX (mode);
4178 #ifdef FLOAT_STORE_FLAG_VALUE
4179 {
4180 REAL_VALUE_TYPE val;
4181 val = FLOAT_STORE_FLAG_VALUE (mode);
4182 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4183 }
4184 #else
4185 return NULL_RTX;
4186 #endif
4187 }
4188 if (VECTOR_MODE_P (mode))
4189 {
4190 if (tem == const0_rtx)
4191 return CONST0_RTX (mode);
4192 #ifdef VECTOR_STORE_FLAG_VALUE
4193 {
4194 int i, units;
4195 rtvec v;
4196
4197 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4198 if (val == NULL_RTX)
4199 return NULL_RTX;
4200 if (val == const1_rtx)
4201 return CONST1_RTX (mode);
4202
4203 units = GET_MODE_NUNITS (mode);
4204 v = rtvec_alloc (units);
4205 for (i = 0; i < units; i++)
4206 RTVEC_ELT (v, i) = val;
4207 return gen_rtx_raw_CONST_VECTOR (mode, v);
4208 }
4209 #else
4210 return NULL_RTX;
4211 #endif
4212 }
4213
4214 return tem;
4215 }
4216
4217 /* For the following tests, ensure const0_rtx is op1. */
4218 if (swap_commutative_operands_p (op0, op1)
4219 || (op0 == const0_rtx && op1 != const0_rtx))
4220 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4221
4222 /* If op0 is a compare, extract the comparison arguments from it. */
4223 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4224 return simplify_gen_relational (code, mode, VOIDmode,
4225 XEXP (op0, 0), XEXP (op0, 1));
4226
4227 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4228 || CC0_P (op0))
4229 return NULL_RTX;
4230
4231 trueop0 = avoid_constant_pool_reference (op0);
4232 trueop1 = avoid_constant_pool_reference (op1);
4233 return simplify_relational_operation_1 (code, mode, cmp_mode,
4234 trueop0, trueop1);
4235 }
4236
4237 /* This part of simplify_relational_operation is only used when CMP_MODE
4238 is not in class MODE_CC (i.e. it is a real comparison).
4239
4240 MODE is the mode of the result, while CMP_MODE specifies the mode
4241 in which the comparison is done, so it is the mode of the operands. */
4242
4243 static rtx
4244 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4245 enum machine_mode cmp_mode, rtx op0, rtx op1)
4246 {
4247 enum rtx_code op0code = GET_CODE (op0);
4248
4249 if (op1 == const0_rtx && COMPARISON_P (op0))
4250 {
4251 /* If op0 is a comparison, extract the comparison arguments
4252 from it. */
4253 if (code == NE)
4254 {
4255 if (GET_MODE (op0) == mode)
4256 return simplify_rtx (op0);
4257 else
4258 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4259 XEXP (op0, 0), XEXP (op0, 1));
4260 }
4261 else if (code == EQ)
4262 {
4263 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4264 if (new_code != UNKNOWN)
4265 return simplify_gen_relational (new_code, mode, VOIDmode,
4266 XEXP (op0, 0), XEXP (op0, 1));
4267 }
4268 }
4269
4270 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4271 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4272 if ((code == LTU || code == GEU)
4273 && GET_CODE (op0) == PLUS
4274 && CONST_INT_P (XEXP (op0, 1))
4275 && (rtx_equal_p (op1, XEXP (op0, 0))
4276 || rtx_equal_p (op1, XEXP (op0, 1)))
4277 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4278 && XEXP (op0, 1) != const0_rtx)
4279 {
4280 rtx new_cmp
4281 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4282 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4283 cmp_mode, XEXP (op0, 0), new_cmp);
4284 }
4285
4286 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4287 if ((code == LTU || code == GEU)
4288 && GET_CODE (op0) == PLUS
4289 && rtx_equal_p (op1, XEXP (op0, 1))
4290 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4291 && !rtx_equal_p (op1, XEXP (op0, 0)))
4292 return simplify_gen_relational (code, mode, cmp_mode, op0,
4293 copy_rtx (XEXP (op0, 0)));
4294
4295 if (op1 == const0_rtx)
4296 {
4297 /* Canonicalize (GTU x 0) as (NE x 0). */
4298 if (code == GTU)
4299 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4300 /* Canonicalize (LEU x 0) as (EQ x 0). */
4301 if (code == LEU)
4302 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4303 }
4304 else if (op1 == const1_rtx)
4305 {
4306 switch (code)
4307 {
4308 case GE:
4309 /* Canonicalize (GE x 1) as (GT x 0). */
4310 return simplify_gen_relational (GT, mode, cmp_mode,
4311 op0, const0_rtx);
4312 case GEU:
4313 /* Canonicalize (GEU x 1) as (NE x 0). */
4314 return simplify_gen_relational (NE, mode, cmp_mode,
4315 op0, const0_rtx);
4316 case LT:
4317 /* Canonicalize (LT x 1) as (LE x 0). */
4318 return simplify_gen_relational (LE, mode, cmp_mode,
4319 op0, const0_rtx);
4320 case LTU:
4321 /* Canonicalize (LTU x 1) as (EQ x 0). */
4322 return simplify_gen_relational (EQ, mode, cmp_mode,
4323 op0, const0_rtx);
4324 default:
4325 break;
4326 }
4327 }
4328 else if (op1 == constm1_rtx)
4329 {
4330 /* Canonicalize (LE x -1) as (LT x 0). */
4331 if (code == LE)
4332 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4333 /* Canonicalize (GT x -1) as (GE x 0). */
4334 if (code == GT)
4335 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4336 }
4337
4338 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4339 if ((code == EQ || code == NE)
4340 && (op0code == PLUS || op0code == MINUS)
4341 && CONSTANT_P (op1)
4342 && CONSTANT_P (XEXP (op0, 1))
4343 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4344 {
4345 rtx x = XEXP (op0, 0);
4346 rtx c = XEXP (op0, 1);
4347 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4348 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4349
4350 /* Detect an infinitely recursive condition, where this
4351 simplification oscillates between:
4352 A + B == C <---> C - B == A,
4353 where A, B, and C are all constants with non-simplifiable expressions,
4354 usually SYMBOL_REFs. */
4355 if (GET_CODE (tem) == invcode
4356 && CONSTANT_P (x)
4357 && rtx_equal_p (c, XEXP (tem, 1)))
4358 return NULL_RTX;
4359
4360 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4361 }
4362
4363 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4364 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4365 if (code == NE
4366 && op1 == const0_rtx
4367 && GET_MODE_CLASS (mode) == MODE_INT
4368 && cmp_mode != VOIDmode
4369 /* ??? Work-around BImode bugs in the ia64 backend. */
4370 && mode != BImode
4371 && cmp_mode != BImode
4372 && nonzero_bits (op0, cmp_mode) == 1
4373 && STORE_FLAG_VALUE == 1)
4374 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4375 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4376 : lowpart_subreg (mode, op0, cmp_mode);
4377
4378 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4379 if ((code == EQ || code == NE)
4380 && op1 == const0_rtx
4381 && op0code == XOR)
4382 return simplify_gen_relational (code, mode, cmp_mode,
4383 XEXP (op0, 0), XEXP (op0, 1));
4384
4385 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4386 if ((code == EQ || code == NE)
4387 && op0code == XOR
4388 && rtx_equal_p (XEXP (op0, 0), op1)
4389 && !side_effects_p (XEXP (op0, 0)))
4390 return simplify_gen_relational (code, mode, cmp_mode,
4391 XEXP (op0, 1), const0_rtx);
4392
4393 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4394 if ((code == EQ || code == NE)
4395 && op0code == XOR
4396 && rtx_equal_p (XEXP (op0, 1), op1)
4397 && !side_effects_p (XEXP (op0, 1)))
4398 return simplify_gen_relational (code, mode, cmp_mode,
4399 XEXP (op0, 0), const0_rtx);
4400
4401 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4402 if ((code == EQ || code == NE)
4403 && op0code == XOR
4404 && CONST_SCALAR_INT_P (op1)
4405 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4406 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4407 simplify_gen_binary (XOR, cmp_mode,
4408 XEXP (op0, 1), op1));
4409
4410 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4411 if ((code == EQ || code == NE)
4412 && GET_CODE (op0) == BSWAP
4413 && CONST_SCALAR_INT_P (op1))
4414 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4415 simplify_gen_unary (BSWAP, cmp_mode,
4416 op1, cmp_mode));
4417
4418 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4419 if ((code == EQ || code == NE)
4420 && GET_CODE (op0) == BSWAP
4421 && GET_CODE (op1) == BSWAP)
4422 return simplify_gen_relational (code, mode, cmp_mode,
4423 XEXP (op0, 0), XEXP (op1, 0));
4424
4425 if (op0code == POPCOUNT && op1 == const0_rtx)
4426 switch (code)
4427 {
4428 case EQ:
4429 case LE:
4430 case LEU:
4431 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4432 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4433 XEXP (op0, 0), const0_rtx);
4434
4435 case NE:
4436 case GT:
4437 case GTU:
4438 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4439 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4440 XEXP (op0, 0), const0_rtx);
4441
4442 default:
4443 break;
4444 }
4445
4446 return NULL_RTX;
4447 }
4448
4449 enum
4450 {
4451 CMP_EQ = 1,
4452 CMP_LT = 2,
4453 CMP_GT = 4,
4454 CMP_LTU = 8,
4455 CMP_GTU = 16
4456 };
4457
4458
4459 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4460 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4461 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4462 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4463 For floating-point comparisons, assume that the operands were ordered. */
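/* For example, known_results == (CMP_LT | CMP_LTU) yields const_true_rtx
 for LT, LE, LTU, LEU and NE, and const0_rtx for GT, GE, GTU, GEU and EQ. */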
4464
4465 static rtx
4466 comparison_result (enum rtx_code code, int known_results)
4467 {
4468 switch (code)
4469 {
4470 case EQ:
4471 case UNEQ:
4472 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4473 case NE:
4474 case LTGT:
4475 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4476
4477 case LT:
4478 case UNLT:
4479 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4480 case GE:
4481 case UNGE:
4482 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4483
4484 case GT:
4485 case UNGT:
4486 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4487 case LE:
4488 case UNLE:
4489 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4490
4491 case LTU:
4492 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4493 case GEU:
4494 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4495
4496 case GTU:
4497 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4498 case LEU:
4499 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4500
4501 case ORDERED:
4502 return const_true_rtx;
4503 case UNORDERED:
4504 return const0_rtx;
4505 default:
4506 gcc_unreachable ();
4507 }
4508 }
4509
4510 /* Check if the given comparison (done in the given MODE) is actually
4511 a tautology or a contradiction. If the mode is VOIDmode, the
4512 comparison is done in "infinite precision". If no simplification
4513 is possible, this function returns zero. Otherwise, it returns
4514 either const_true_rtx or const0_rtx. */
4515
4516 rtx
4517 simplify_const_relational_operation (enum rtx_code code,
4518 enum machine_mode mode,
4519 rtx op0, rtx op1)
4520 {
4521 rtx tem;
4522 rtx trueop0;
4523 rtx trueop1;
4524
4525 gcc_assert (mode != VOIDmode
4526 || (GET_MODE (op0) == VOIDmode
4527 && GET_MODE (op1) == VOIDmode));
4528
4529 /* If op0 is a compare, extract the comparison arguments from it. */
4530 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4531 {
4532 op1 = XEXP (op0, 1);
4533 op0 = XEXP (op0, 0);
4534
4535 if (GET_MODE (op0) != VOIDmode)
4536 mode = GET_MODE (op0);
4537 else if (GET_MODE (op1) != VOIDmode)
4538 mode = GET_MODE (op1);
4539 else
4540 return 0;
4541 }
4542
4543 /* We can't simplify MODE_CC values since we don't know what the
4544 actual comparison is. */
4545 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4546 return 0;
4547
4548 /* Make sure the constant is second. */
4549 if (swap_commutative_operands_p (op0, op1))
4550 {
4551 tem = op0, op0 = op1, op1 = tem;
4552 code = swap_condition (code);
4553 }
4554
4555 trueop0 = avoid_constant_pool_reference (op0);
4556 trueop1 = avoid_constant_pool_reference (op1);
4557
4558 /* For integer comparisons of A and B maybe we can simplify A - B and can
4559 then simplify a comparison of that with zero. If A and B are both either
4560 a register or a CONST_INT, this can't help; testing for these cases will
4561 prevent infinite recursion here and speed things up.
4562
4563 We can only do this for EQ and NE comparisons as otherwise we may
4564 lose or introduce overflow, which we cannot disregard as undefined
4565 because we do not know the signedness of the operation on either the
4566 left or the right hand side of the comparison. */
4567
4568 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4569 && (code == EQ || code == NE)
4570 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4571 && (REG_P (op1) || CONST_INT_P (trueop1)))
4572 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4573 /* We cannot do this if tem is a nonzero address. */
4574 && ! nonzero_address_p (tem))
4575 return simplify_const_relational_operation (signed_condition (code),
4576 mode, tem, const0_rtx);
4577
4578 if (! HONOR_NANS (mode) && code == ORDERED)
4579 return const_true_rtx;
4580
4581 if (! HONOR_NANS (mode) && code == UNORDERED)
4582 return const0_rtx;
4583
4584 /* For modes without NaNs, if the two operands are equal, we know the
4585 result except if they have side-effects. Even with NaNs we know
4586 the result of unordered comparisons and, if signaling NaNs are
4587 irrelevant, also the result of LT/GT/LTGT. */
4588 if ((! HONOR_NANS (GET_MODE (trueop0))
4589 || code == UNEQ || code == UNLE || code == UNGE
4590 || ((code == LT || code == GT || code == LTGT)
4591 && ! HONOR_SNANS (GET_MODE (trueop0))))
4592 && rtx_equal_p (trueop0, trueop1)
4593 && ! side_effects_p (trueop0))
4594 return comparison_result (code, CMP_EQ);
4595
4596 /* If the operands are floating-point constants, see if we can fold
4597 the result. */
4598 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4599 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4600 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4601 {
4602 REAL_VALUE_TYPE d0, d1;
4603
4604 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4605 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4606
4607 /* Comparisons are unordered iff at least one of the values is NaN. */
4608 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4609 switch (code)
4610 {
4611 case UNEQ:
4612 case UNLT:
4613 case UNGT:
4614 case UNLE:
4615 case UNGE:
4616 case NE:
4617 case UNORDERED:
4618 return const_true_rtx;
4619 case EQ:
4620 case LT:
4621 case GT:
4622 case LE:
4623 case GE:
4624 case LTGT:
4625 case ORDERED:
4626 return const0_rtx;
4627 default:
4628 return 0;
4629 }
4630
4631 return comparison_result (code,
4632 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4633 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4634 }
4635
4636 /* Otherwise, see if the operands are both integers. */
4637 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4638 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4639 {
4640 /* It would be nice if we really had a mode here. However, the
4641 largest int representable on the target is as good as
4642 infinite. */
4643 enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4644 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4645 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4646
4647 if (wi::eq_p (ptrueop0, ptrueop1))
4648 return comparison_result (code, CMP_EQ);
4649 else
4650 {
4651 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4652 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4653 return comparison_result (code, cr);
4654 }
4655 }
4656
4657 /* Optimize comparisons with upper and lower bounds. */
4658 if (HWI_COMPUTABLE_MODE_P (mode)
4659 && CONST_INT_P (trueop1))
4660 {
4661 int sign;
4662 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4663 HOST_WIDE_INT val = INTVAL (trueop1);
4664 HOST_WIDE_INT mmin, mmax;
4665
4666 if (code == GEU
4667 || code == LEU
4668 || code == GTU
4669 || code == LTU)
4670 sign = 0;
4671 else
4672 sign = 1;
4673
4674 /* Get a reduced range if the sign bit is zero. */
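/* E.g. if nonzero_bits shows that only the low 8 bits can be set, the
 value is known to lie in [0, 255] for both signed and unsigned
 comparisons. */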
4675 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4676 {
4677 mmin = 0;
4678 mmax = nonzero;
4679 }
4680 else
4681 {
4682 rtx mmin_rtx, mmax_rtx;
4683 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4684
4685 mmin = INTVAL (mmin_rtx);
4686 mmax = INTVAL (mmax_rtx);
4687 if (sign)
4688 {
4689 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4690
4691 mmin >>= (sign_copies - 1);
4692 mmax >>= (sign_copies - 1);
4693 }
4694 }
4695
4696 switch (code)
4697 {
4698 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4699 case GEU:
4700 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4701 return const_true_rtx;
4702 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4703 return const0_rtx;
4704 break;
4705 case GE:
4706 if (val <= mmin)
4707 return const_true_rtx;
4708 if (val > mmax)
4709 return const0_rtx;
4710 break;
4711
4712 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4713 case LEU:
4714 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4715 return const_true_rtx;
4716 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4717 return const0_rtx;
4718 break;
4719 case LE:
4720 if (val >= mmax)
4721 return const_true_rtx;
4722 if (val < mmin)
4723 return const0_rtx;
4724 break;
4725
4726 case EQ:
4727 /* x == y is always false for y out of range. */
4728 if (val < mmin || val > mmax)
4729 return const0_rtx;
4730 break;
4731
4732 /* x > y is always false for y >= mmax, always true for y < mmin. */
4733 case GTU:
4734 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4735 return const0_rtx;
4736 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4737 return const_true_rtx;
4738 break;
4739 case GT:
4740 if (val >= mmax)
4741 return const0_rtx;
4742 if (val < mmin)
4743 return const_true_rtx;
4744 break;
4745
4746 /* x < y is always false for y <= mmin, always true for y > mmax. */
4747 case LTU:
4748 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4749 return const0_rtx;
4750 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4751 return const_true_rtx;
4752 break;
4753 case LT:
4754 if (val <= mmin)
4755 return const0_rtx;
4756 if (val > mmax)
4757 return const_true_rtx;
4758 break;
4759
4760 case NE:
4761 /* x != y is always true for y out of range. */
4762 if (val < mmin || val > mmax)
4763 return const_true_rtx;
4764 break;
4765
4766 default:
4767 break;
4768 }
4769 }
4770
4771 /* Optimize integer comparisons with zero. */
4772 if (trueop1 == const0_rtx)
4773 {
4774 /* Some addresses are known to be nonzero. We don't know
4775 their sign, but equality comparisons are known. */
4776 if (nonzero_address_p (trueop0))
4777 {
4778 if (code == EQ || code == LEU)
4779 return const0_rtx;
4780 if (code == NE || code == GTU)
4781 return const_true_rtx;
4782 }
4783
4784 /* See if the first operand is an IOR with a constant. If so, we
4785 may be able to determine the result of this comparison. */
4786 if (GET_CODE (op0) == IOR)
4787 {
4788 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4789 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4790 {
4791 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4792 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4793 && (UINTVAL (inner_const)
4794 & ((unsigned HOST_WIDE_INT) 1
4795 << sign_bitnum)));
4796
4797 switch (code)
4798 {
4799 case EQ:
4800 case LEU:
4801 return const0_rtx;
4802 case NE:
4803 case GTU:
4804 return const_true_rtx;
4805 case LT:
4806 case LE:
4807 if (has_sign)
4808 return const_true_rtx;
4809 break;
4810 case GT:
4811 case GE:
4812 if (has_sign)
4813 return const0_rtx;
4814 break;
4815 default:
4816 break;
4817 }
4818 }
4819 }
4820 }
4821
4822 /* Optimize comparison of ABS with zero. */
4823 if (trueop1 == CONST0_RTX (mode)
4824 && (GET_CODE (trueop0) == ABS
4825 || (GET_CODE (trueop0) == FLOAT_EXTEND
4826 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4827 {
4828 switch (code)
4829 {
4830 case LT:
4831 /* Optimize abs(x) < 0.0. */
4832 if (!HONOR_SNANS (mode)
4833 && (!INTEGRAL_MODE_P (mode)
4834 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4835 {
4836 if (INTEGRAL_MODE_P (mode)
4837 && (issue_strict_overflow_warning
4838 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4839 warning (OPT_Wstrict_overflow,
4840 ("assuming signed overflow does not occur when "
4841 "assuming abs (x) < 0 is false"));
4842 return const0_rtx;
4843 }
4844 break;
4845
4846 case GE:
4847 /* Optimize abs(x) >= 0.0. */
4848 if (!HONOR_NANS (mode)
4849 && (!INTEGRAL_MODE_P (mode)
4850 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4851 {
4852 if (INTEGRAL_MODE_P (mode)
4853 && (issue_strict_overflow_warning
4854 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4855 warning (OPT_Wstrict_overflow,
4856 ("assuming signed overflow does not occur when "
4857 "assuming abs (x) >= 0 is true"));
4858 return const_true_rtx;
4859 }
4860 break;
4861
4862 case UNGE:
4863 /* Optimize ! (abs(x) < 0.0). */
4864 return const_true_rtx;
4865
4866 default:
4867 break;
4868 }
4869 }
4870
4871 return 0;
4872 }
4873 \f
4874 /* Simplify CODE, an operation with result mode MODE and three operands,
4875 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4876 a constant. Return 0 if no simplification is possible. */
4877
4878 rtx
4879 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4880 enum machine_mode op0_mode, rtx op0, rtx op1,
4881 rtx op2)
4882 {
4883 unsigned int width = GET_MODE_PRECISION (mode);
4884 bool any_change = false;
4885 rtx tem, trueop2;
4886
4887 /* VOIDmode means "infinite" precision. */
4888 if (width == 0)
4889 width = HOST_BITS_PER_WIDE_INT;
4890
4891 switch (code)
4892 {
4893 case FMA:
4894 /* Simplify negations around the multiplication. */
4895 /* -a * -b + c => a * b + c. */
4896 if (GET_CODE (op0) == NEG)
4897 {
4898 tem = simplify_unary_operation (NEG, mode, op1, mode);
4899 if (tem)
4900 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4901 }
4902 else if (GET_CODE (op1) == NEG)
4903 {
4904 tem = simplify_unary_operation (NEG, mode, op0, mode);
4905 if (tem)
4906 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4907 }
4908
4909 /* Canonicalize the two multiplication operands. */
4910 /* a * -b + c => -b * a + c. */
4911 if (swap_commutative_operands_p (op0, op1))
4912 tem = op0, op0 = op1, op1 = tem, any_change = true;
4913
4914 if (any_change)
4915 return gen_rtx_FMA (mode, op0, op1, op2);
4916 return NULL_RTX;
4917
4918 case SIGN_EXTRACT:
4919 case ZERO_EXTRACT:
4920 if (CONST_INT_P (op0)
4921 && CONST_INT_P (op1)
4922 && CONST_INT_P (op2)
4923 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4924 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4925 {
4926 /* Extracting a bit-field from a constant. */
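/* E.g. (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
 reads bits 4..7 of the constant and yields (const_int 0xa) when
 BITS_BIG_ENDIAN is zero. */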
4927 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4928 HOST_WIDE_INT op1val = INTVAL (op1);
4929 HOST_WIDE_INT op2val = INTVAL (op2);
4930 if (BITS_BIG_ENDIAN)
4931 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4932 else
4933 val >>= op2val;
4934
4935 if (HOST_BITS_PER_WIDE_INT != op1val)
4936 {
4937 /* First zero-extend. */
4938 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4939 /* If desired, propagate sign bit. */
4940 if (code == SIGN_EXTRACT
4941 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4942 != 0)
4943 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4944 }
4945
4946 return gen_int_mode (val, mode);
4947 }
4948 break;
4949
4950 case IF_THEN_ELSE:
4951 if (CONST_INT_P (op0))
4952 return op0 != const0_rtx ? op1 : op2;
4953
4954 /* Convert c ? a : a into "a". */
4955 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4956 return op1;
4957
4958 /* Convert a != b ? a : b into "a". */
4959 if (GET_CODE (op0) == NE
4960 && ! side_effects_p (op0)
4961 && ! HONOR_NANS (mode)
4962 && ! HONOR_SIGNED_ZEROS (mode)
4963 && ((rtx_equal_p (XEXP (op0, 0), op1)
4964 && rtx_equal_p (XEXP (op0, 1), op2))
4965 || (rtx_equal_p (XEXP (op0, 0), op2)
4966 && rtx_equal_p (XEXP (op0, 1), op1))))
4967 return op1;
4968
4969 /* Convert a == b ? a : b into "b". */
4970 if (GET_CODE (op0) == EQ
4971 && ! side_effects_p (op0)
4972 && ! HONOR_NANS (mode)
4973 && ! HONOR_SIGNED_ZEROS (mode)
4974 && ((rtx_equal_p (XEXP (op0, 0), op1)
4975 && rtx_equal_p (XEXP (op0, 1), op2))
4976 || (rtx_equal_p (XEXP (op0, 0), op2)
4977 && rtx_equal_p (XEXP (op0, 1), op1))))
4978 return op2;
4979
4980 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4981 {
4982 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4983 ? GET_MODE (XEXP (op0, 1))
4984 : GET_MODE (XEXP (op0, 0)));
4985 rtx temp;
4986
4987 /* Look for happy constants in op1 and op2. */
4988 if (CONST_INT_P (op1) && CONST_INT_P (op2))
4989 {
4990 HOST_WIDE_INT t = INTVAL (op1);
4991 HOST_WIDE_INT f = INTVAL (op2);
4992
4993 if (t == STORE_FLAG_VALUE && f == 0)
4994 code = GET_CODE (op0);
4995 else if (t == 0 && f == STORE_FLAG_VALUE)
4996 {
4997 enum rtx_code tmp;
4998 tmp = reversed_comparison_code (op0, NULL_RTX);
4999 if (tmp == UNKNOWN)
5000 break;
5001 code = tmp;
5002 }
5003 else
5004 break;
5005
5006 return simplify_gen_relational (code, mode, cmp_mode,
5007 XEXP (op0, 0), XEXP (op0, 1));
5008 }
5009
5010 if (cmp_mode == VOIDmode)
5011 cmp_mode = op0_mode;
5012 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5013 cmp_mode, XEXP (op0, 0),
5014 XEXP (op0, 1));
5015
5016 /* See if any simplifications were possible. */
5017 if (temp)
5018 {
5019 if (CONST_INT_P (temp))
5020 return temp == const0_rtx ? op2 : op1;
5021 else if (temp)
5022 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5023 }
5024 }
5025 break;
5026
5027 case VEC_MERGE:
5028 gcc_assert (GET_MODE (op0) == mode);
5029 gcc_assert (GET_MODE (op1) == mode);
5030 gcc_assert (VECTOR_MODE_P (mode));
5031 trueop2 = avoid_constant_pool_reference (op2);
5032 if (CONST_INT_P (trueop2))
5033 {
5034 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5035 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5036 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5037 unsigned HOST_WIDE_INT mask;
5038 if (n_elts == HOST_BITS_PER_WIDE_INT)
5039 mask = -1;
5040 else
5041 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5042
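/* Each set bit in SEL selects the corresponding element from OP0; a
 clear bit selects it from OP1. E.g. for a four-element merge with
 SEL == 0b0101, elements 0 and 2 come from OP0 and elements 1 and 3
 come from OP1. */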
5043 if (!(sel & mask) && !side_effects_p (op0))
5044 return op1;
5045 if ((sel & mask) == mask && !side_effects_p (op1))
5046 return op0;
5047
5048 rtx trueop0 = avoid_constant_pool_reference (op0);
5049 rtx trueop1 = avoid_constant_pool_reference (op1);
5050 if (GET_CODE (trueop0) == CONST_VECTOR
5051 && GET_CODE (trueop1) == CONST_VECTOR)
5052 {
5053 rtvec v = rtvec_alloc (n_elts);
5054 unsigned int i;
5055
5056 for (i = 0; i < n_elts; i++)
5057 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5058 ? CONST_VECTOR_ELT (trueop0, i)
5059 : CONST_VECTOR_ELT (trueop1, i));
5060 return gen_rtx_CONST_VECTOR (mode, v);
5061 }
5062
5063 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5064 if no element from a appears in the result. */
5065 if (GET_CODE (op0) == VEC_MERGE)
5066 {
5067 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5068 if (CONST_INT_P (tem))
5069 {
5070 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5071 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5072 return simplify_gen_ternary (code, mode, mode,
5073 XEXP (op0, 1), op1, op2);
5074 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5075 return simplify_gen_ternary (code, mode, mode,
5076 XEXP (op0, 0), op1, op2);
5077 }
5078 }
5079 if (GET_CODE (op1) == VEC_MERGE)
5080 {
5081 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5082 if (CONST_INT_P (tem))
5083 {
5084 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5085 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5086 return simplify_gen_ternary (code, mode, mode,
5087 op0, XEXP (op1, 1), op2);
5088 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5089 return simplify_gen_ternary (code, mode, mode,
5090 op0, XEXP (op1, 0), op2);
5091 }
5092 }
5093 }
5094
5095 if (rtx_equal_p (op0, op1)
5096 && !side_effects_p (op2) && !side_effects_p (op1))
5097 return op0;
5098
5099 break;
5100
5101 default:
5102 gcc_unreachable ();
5103 }
5104
5105 return 0;
5106 }
5107
5108 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5109 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5110 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5111
5112 Works by unpacking OP into a collection of 8-bit values
5113 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5114 and then repacking them again for OUTERMODE. */
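/* For example, on a little-endian 64-bit target, a SImode subreg at
 byte 4 of the DImode constant 0x123456789abcdef0 selects the four
 most significant bytes and yields (const_int 0x12345678). */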
5115
5116 static rtx
5117 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5118 enum machine_mode innermode, unsigned int byte)
5119 {
5120 enum {
5121 value_bit = 8,
5122 value_mask = (1 << value_bit) - 1
5123 };
5124 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE/value_bit];
5125 int value_start;
5126 int i;
5127 int elem;
5128
5129 int num_elem;
5130 rtx * elems;
5131 int elem_bitsize;
5132 rtx result_s;
5133 rtvec result_v = NULL;
5134 enum mode_class outer_class;
5135 enum machine_mode outer_submode;
5136 int max_bitsize;
5137
5138 /* Some ports misuse CCmode. */
5139 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5140 return op;
5141
5142 /* We have no way to represent a complex constant at the rtl level. */
5143 if (COMPLEX_MODE_P (outermode))
5144 return NULL_RTX;
5145
5146 /* We support any size mode. */
5147 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5148 GET_MODE_BITSIZE (innermode));
5149
5150 /* Unpack the value. */
5151
5152 if (GET_CODE (op) == CONST_VECTOR)
5153 {
5154 num_elem = CONST_VECTOR_NUNITS (op);
5155 elems = &CONST_VECTOR_ELT (op, 0);
5156 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5157 }
5158 else
5159 {
5160 num_elem = 1;
5161 elems = &op;
5162 elem_bitsize = max_bitsize;
5163 }
5164 /* If this asserts, it is too complicated; reducing value_bit may help. */
5165 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5166 /* I don't know how to handle endianness of sub-units. */
5167 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5168
5169 for (elem = 0; elem < num_elem; elem++)
5170 {
5171 unsigned char * vp;
5172 rtx el = elems[elem];
5173
5174 /* Vectors are kept in target memory order. (This is probably
5175 a mistake.) */
5176 {
5177 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5178 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5179 / BITS_PER_UNIT);
5180 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5181 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5182 unsigned bytele = (subword_byte % UNITS_PER_WORD
5183 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5184 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5185 }
5186
5187 switch (GET_CODE (el))
5188 {
5189 case CONST_INT:
5190 for (i = 0;
5191 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5192 i += value_bit)
5193 *vp++ = INTVAL (el) >> i;
5194 /* CONST_INTs are always logically sign-extended. */
5195 for (; i < elem_bitsize; i += value_bit)
5196 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5197 break;
5198
5199 case CONST_WIDE_INT:
5200 {
5201 rtx_mode_t val = std::make_pair (el, innermode);
5202 unsigned char extend = wi::sign_mask (val);
5203
5204 for (i = 0; i < elem_bitsize; i += value_bit)
5205 *vp++ = wi::extract_uhwi (val, i, value_bit);
5206 for (; i < elem_bitsize; i += value_bit)
5207 *vp++ = extend;
5208 }
5209 break;
5210
5211 case CONST_DOUBLE:
5212 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5213 {
5214 unsigned char extend = 0;
5215 /* If this triggers, someone should have generated a
5216 CONST_INT instead. */
5217 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5218
5219 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5220 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5221 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5222 {
5223 *vp++
5224 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5225 i += value_bit;
5226 }
5227
5228 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5229 extend = -1;
5230 for (; i < elem_bitsize; i += value_bit)
5231 *vp++ = extend;
5232 }
5233 else
5234 {
5235 /* This is big enough for anything on the platform. */
5236 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5237 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5238
5239 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5240 gcc_assert (bitsize <= elem_bitsize);
5241 gcc_assert (bitsize % value_bit == 0);
5242
5243 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5244 GET_MODE (el));
5245
5246 /* real_to_target produces its result in words affected by
5247 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5248 and use WORDS_BIG_ENDIAN instead; see the documentation
5249 of SUBREG in rtl.texi. */
5250 for (i = 0; i < bitsize; i += value_bit)
5251 {
5252 int ibase;
5253 if (WORDS_BIG_ENDIAN)
5254 ibase = bitsize - 1 - i;
5255 else
5256 ibase = i;
5257 *vp++ = tmp[ibase / 32] >> i % 32;
5258 }
5259
5260 /* It shouldn't matter what's done here, so fill it with
5261 zero. */
5262 for (; i < elem_bitsize; i += value_bit)
5263 *vp++ = 0;
5264 }
5265 break;
5266
5267 case CONST_FIXED:
5268 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5269 {
5270 for (i = 0; i < elem_bitsize; i += value_bit)
5271 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5272 }
5273 else
5274 {
5275 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5276 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5277 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5278 i += value_bit)
5279 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5280 >> (i - HOST_BITS_PER_WIDE_INT);
5281 for (; i < elem_bitsize; i += value_bit)
5282 *vp++ = 0;
5283 }
5284 break;
5285
5286 default:
5287 gcc_unreachable ();
5288 }
5289 }
5290
5291 /* Now, pick the right byte to start with. */
5292 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5293 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5294 will already have offset 0. */
5295 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5296 {
5297 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5298 - byte);
5299 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5300 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5301 byte = (subword_byte % UNITS_PER_WORD
5302 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5303 }
5304
5305 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5306 so if it's become negative it will instead be very large.) */
5307 gcc_assert (byte < GET_MODE_SIZE (innermode));
5308
5309 /* Convert from bytes to chunks of size value_bit. */
5310 value_start = byte * (BITS_PER_UNIT / value_bit);
5311
5312 /* Re-pack the value. */
5313
5314 if (VECTOR_MODE_P (outermode))
5315 {
5316 num_elem = GET_MODE_NUNITS (outermode);
5317 result_v = rtvec_alloc (num_elem);
5318 elems = &RTVEC_ELT (result_v, 0);
5319 outer_submode = GET_MODE_INNER (outermode);
5320 }
5321 else
5322 {
5323 num_elem = 1;
5324 elems = &result_s;
5325 outer_submode = outermode;
5326 }
5327
5328 outer_class = GET_MODE_CLASS (outer_submode);
5329 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5330
5331 gcc_assert (elem_bitsize % value_bit == 0);
5332 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5333
5334 for (elem = 0; elem < num_elem; elem++)
5335 {
5336 unsigned char *vp;
5337
5338 /* Vectors are stored in target memory order. (This is probably
5339 a mistake.) */
5340 {
5341 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5342 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5343 / BITS_PER_UNIT);
5344 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5345 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5346 unsigned bytele = (subword_byte % UNITS_PER_WORD
5347 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5348 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5349 }
5350
5351 switch (outer_class)
5352 {
5353 case MODE_INT:
5354 case MODE_PARTIAL_INT:
5355 {
5356 int u;
5357 int base = 0;
5358 int units
5359 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5360 / HOST_BITS_PER_WIDE_INT;
5361 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5362 wide_int r;
5363
5364 for (u = 0; u < units; u++)
5365 {
5366 unsigned HOST_WIDE_INT buf = 0;
5367 for (i = 0;
5368 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5369 i += value_bit)
5370 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5371
5372 tmp[u] = buf;
5373 base += HOST_BITS_PER_WIDE_INT;
5374 }
5375 r = wide_int::from_array (tmp, units,
5376 GET_MODE_PRECISION (outer_submode));
5377 elems[elem] = immed_wide_int_const (r, outer_submode);
5378 }
5379 break;
5380
5381 case MODE_FLOAT:
5382 case MODE_DECIMAL_FLOAT:
5383 {
5384 REAL_VALUE_TYPE r;
5385 long tmp[MAX_BITSIZE_MODE_ANY_INT / 32];
5386
5387 /* real_from_target wants its input in words affected by
5388 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5389 and use WORDS_BIG_ENDIAN instead; see the documentation
5390 of SUBREG in rtl.texi. */
5391 for (i = 0; i < max_bitsize / 32; i++)
5392 tmp[i] = 0;
5393 for (i = 0; i < elem_bitsize; i += value_bit)
5394 {
5395 int ibase;
5396 if (WORDS_BIG_ENDIAN)
5397 ibase = elem_bitsize - 1 - i;
5398 else
5399 ibase = i;
5400 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5401 }
5402
5403 real_from_target (&r, tmp, outer_submode);
5404 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5405 }
5406 break;
5407
5408 case MODE_FRACT:
5409 case MODE_UFRACT:
5410 case MODE_ACCUM:
5411 case MODE_UACCUM:
5412 {
5413 FIXED_VALUE_TYPE f;
5414 f.data.low = 0;
5415 f.data.high = 0;
5416 f.mode = outer_submode;
5417
5418 for (i = 0;
5419 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5420 i += value_bit)
5421 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5422 for (; i < elem_bitsize; i += value_bit)
5423 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5424 << (i - HOST_BITS_PER_WIDE_INT));
5425
5426 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5427 }
5428 break;
5429
5430 default:
5431 gcc_unreachable ();
5432 }
5433 }
5434 if (VECTOR_MODE_P (outermode))
5435 return gen_rtx_CONST_VECTOR (outermode, result_v);
5436 else
5437 return result_s;
5438 }
5439
5440 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5441 Return 0 if no simplifications are possible. */
5442 rtx
5443 simplify_subreg (enum machine_mode outermode, rtx op,
5444 enum machine_mode innermode, unsigned int byte)
5445 {
5446 /* Little bit of sanity checking. */
5447 gcc_assert (innermode != VOIDmode);
5448 gcc_assert (outermode != VOIDmode);
5449 gcc_assert (innermode != BLKmode);
5450 gcc_assert (outermode != BLKmode);
5451
5452 gcc_assert (GET_MODE (op) == innermode
5453 || GET_MODE (op) == VOIDmode);
5454
5455 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5456 return NULL_RTX;
5457
5458 if (byte >= GET_MODE_SIZE (innermode))
5459 return NULL_RTX;
5460
5461 if (outermode == innermode && !byte)
5462 return op;
5463
5464 if (CONST_SCALAR_INT_P (op)
5465 || CONST_DOUBLE_AS_FLOAT_P (op)
5466 || GET_CODE (op) == CONST_FIXED
5467 || GET_CODE (op) == CONST_VECTOR)
5468 return simplify_immed_subreg (outermode, op, innermode, byte);
5469
5470 /* Changing mode twice with SUBREG => just change it once,
5471 or not at all if changing back to the starting mode. */
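/* For example, (subreg:SI (subreg:DI (reg:SI R) 0) 0) folds back to
(reg:SI R), and on a little-endian target
(subreg:QI (subreg:SI (reg:DI R) 4) 0) can fold to
(subreg:QI (reg:DI R) 4). */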
5472 if (GET_CODE (op) == SUBREG)
5473 {
5474 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5475 int final_offset = byte + SUBREG_BYTE (op);
5476 rtx newx;
5477
5478 if (outermode == innermostmode
5479 && byte == 0 && SUBREG_BYTE (op) == 0)
5480 return SUBREG_REG (op);
5481
5482 /* The SUBREG_BYTE represents the offset, as if the value were stored
5483 in memory. An irritating exception is a paradoxical subreg, where
5484 we define SUBREG_BYTE to be 0. On big-endian machines, this
5485 value should be negative. For a moment, undo this exception. */
5486 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5487 {
5488 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5489 if (WORDS_BIG_ENDIAN)
5490 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5491 if (BYTES_BIG_ENDIAN)
5492 final_offset += difference % UNITS_PER_WORD;
5493 }
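/* For instance, for a paradoxical (subreg:DI (op:SI ...) 0) the
difference above is 4 - 8 = -4, so on a big-endian target with 4-byte
words FINAL_OFFSET is temporarily decreased by 4. */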
5494 if (SUBREG_BYTE (op) == 0
5495 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5496 {
5497 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5498 if (WORDS_BIG_ENDIAN)
5499 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5500 if (BYTES_BIG_ENDIAN)
5501 final_offset += difference % UNITS_PER_WORD;
5502 }
5503
5504 /* See whether resulting subreg will be paradoxical. */
5505 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5506 {
5507 /* In nonparadoxical subregs we can't handle negative offsets. */
5508 if (final_offset < 0)
5509 return NULL_RTX;
5510 /* Bail out in case resulting subreg would be incorrect. */
5511 if (final_offset % GET_MODE_SIZE (outermode)
5512 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5513 return NULL_RTX;
5514 }
5515 else
5516 {
5517 int offset = 0;
5518 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5519
5520 /* In a paradoxical subreg, see if we are still looking at the lower part.
5521 If so, our SUBREG_BYTE will be 0. */
5522 if (WORDS_BIG_ENDIAN)
5523 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5524 if (BYTES_BIG_ENDIAN)
5525 offset += difference % UNITS_PER_WORD;
5526 if (offset == final_offset)
5527 final_offset = 0;
5528 else
5529 return NULL_RTX;
5530 }
5531
5532 /* Recurse for further possible simplifications. */
5533 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5534 final_offset);
5535 if (newx)
5536 return newx;
5537 if (validate_subreg (outermode, innermostmode,
5538 SUBREG_REG (op), final_offset))
5539 {
5540 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5541 if (SUBREG_PROMOTED_VAR_P (op)
5542 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5543 && GET_MODE_CLASS (outermode) == MODE_INT
5544 && IN_RANGE (GET_MODE_SIZE (outermode),
5545 GET_MODE_SIZE (innermode),
5546 GET_MODE_SIZE (innermostmode))
5547 && subreg_lowpart_p (newx))
5548 {
5549 SUBREG_PROMOTED_VAR_P (newx) = 1;
5550 SUBREG_PROMOTED_UNSIGNED_SET
5551 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5552 }
5553 return newx;
5554 }
5555 return NULL_RTX;
5556 }
5557
5558 /* SUBREG of a hard register => just change the register number
5559 and/or mode. If the hard register is not valid in that mode,
5560 suppress this simplification. If the hard register is the stack,
5561 frame, or argument pointer, leave this as a SUBREG. */
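/* As an illustration, on a hypothetical 32-bit target where a DImode
value occupies two consecutive hard registers, (subreg:SI (reg:DI 0) 4)
could simplify to (reg:SI 1), provided SImode is valid for that hard
register. */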
5562
5563 if (REG_P (op) && HARD_REGISTER_P (op))
5564 {
5565 unsigned int regno, final_regno;
5566
5567 regno = REGNO (op);
5568 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5569 if (HARD_REGISTER_NUM_P (final_regno))
5570 {
5571 rtx x;
5572 int final_offset = byte;
5573
5574 /* Adjust offset for paradoxical subregs. */
5575 if (byte == 0
5576 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5577 {
5578 int difference = (GET_MODE_SIZE (innermode)
5579 - GET_MODE_SIZE (outermode));
5580 if (WORDS_BIG_ENDIAN)
5581 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5582 if (BYTES_BIG_ENDIAN)
5583 final_offset += difference % UNITS_PER_WORD;
5584 }
5585
5586 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5587
5588 /* Propagate the original regno. We don't have any way to specify
5589 the offset inside the original regno, so do so only for the lowpart.
5590 The information is used only by alias analysis, which cannot
5591 grok partial registers anyway. */
5592
5593 if (subreg_lowpart_offset (outermode, innermode) == byte)
5594 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5595 return x;
5596 }
5597 }
5598
5599 /* If we have a SUBREG of a register that we are replacing and we are
5600 replacing it with a MEM, make a new MEM and try replacing the
5601 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5602 or if we would be widening it. */
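/* For instance, (subreg:SI (mem:DI (reg:SI BASE)) 4) can become
(mem:SI (plus:SI (reg:SI BASE) (const_int 4))) via adjust_address_nv,
assuming a 32-bit address space and no mode-dependent address. */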
5603
5604 if (MEM_P (op)
5605 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5606 /* Allow splitting of volatile memory references in case we don't
5607 have an instruction to move the whole thing. */
5608 && (! MEM_VOLATILE_P (op)
5609 || ! have_insn_for (SET, innermode))
5610 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5611 return adjust_address_nv (op, outermode, byte);
5612
5613 /* Handle complex values represented as CONCAT
5614 of real and imaginary part. */
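/* For example, with 4-byte SFmode parts,
(subreg:SF (concat:SC (reg:SF RE) (reg:SF IM)) 4) simplifies to
(reg:SF IM). */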
5615 if (GET_CODE (op) == CONCAT)
5616 {
5617 unsigned int part_size, final_offset;
5618 rtx part, res;
5619
5620 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5621 if (byte < part_size)
5622 {
5623 part = XEXP (op, 0);
5624 final_offset = byte;
5625 }
5626 else
5627 {
5628 part = XEXP (op, 1);
5629 final_offset = byte - part_size;
5630 }
5631
5632 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5633 return NULL_RTX;
5634
5635 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5636 if (res)
5637 return res;
5638 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5639 return gen_rtx_SUBREG (outermode, part, final_offset);
5640 return NULL_RTX;
5641 }
5642
5643 /* A SUBREG resulting from a zero extension may fold to zero if
5644 it extracts higher bits than the ZERO_EXTEND's source provides. */
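/* E.g. on a little-endian target
(subreg:SI (zero_extend:DI (reg:SI X)) 4) reads only bits that the
extension filled with zeros and so becomes (const_int 0). */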
5645 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5646 {
5647 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5648 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5649 return CONST0_RTX (outermode);
5650 }
5651
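/* A lowpart SUBREG of a wider scalar integer acts as a truncation, so
try the TRUNCATE rules; e.g. (subreg:HI (zero_extend:SI (reg:HI X)) 0)
can fold to (reg:HI X) on a little-endian target. */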
5652 if (SCALAR_INT_MODE_P (outermode)
5653 && SCALAR_INT_MODE_P (innermode)
5654 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5655 && byte == subreg_lowpart_offset (outermode, innermode))
5656 {
5657 rtx tem = simplify_truncation (outermode, op, innermode);
5658 if (tem)
5659 return tem;
5660 }
5661
5662 return NULL_RTX;
5663 }
5664
5665 /* Make a SUBREG operation or equivalent if it folds. */
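/* A usage sketch, assuming X is a DImode value on a little-endian target:

rtx hi = simplify_gen_subreg (SImode, x, DImode, 4);

returns either a simplified rtx for the high word, a fresh
(subreg:SI X 4), or NULL_RTX if such a subreg would not be valid. */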
5666
5667 rtx
5668 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5669 enum machine_mode innermode, unsigned int byte)
5670 {
5671 rtx newx;
5672
5673 newx = simplify_subreg (outermode, op, innermode, byte);
5674 if (newx)
5675 return newx;
5676
5677 if (GET_CODE (op) == SUBREG
5678 || GET_CODE (op) == CONCAT
5679 || GET_MODE (op) == VOIDmode)
5680 return NULL_RTX;
5681
5682 if (validate_subreg (outermode, innermode, op, byte))
5683 return gen_rtx_SUBREG (outermode, op, byte);
5684
5685 return NULL_RTX;
5686 }
5687
5688 /* Simplify X, an rtx expression.
5689
5690 Return the simplified expression or NULL if no simplifications
5691 were possible.
5692
5693 This is the preferred entry point into the simplification routines;
5694 however, we still allow passes to call the more specific routines.
5695
5696 Right now GCC has three (yes, three) major bodies of RTL simplification
5697 code that need to be unified.
5698
5699 1. fold_rtx in cse.c. This code uses various CSE specific
5700 information to aid in RTL simplification.
5701
5702 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5703 it uses combine specific information to aid in RTL
5704 simplification.
5705
5706 3. The routines in this file.
5707
5708
5709 Long term we want to only have one body of simplification code; to
5710 get to that state I recommend the following steps:
5711
5712 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5713 which do not depend on pass-specific state into these routines.
5714
5715 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5716 use this routine whenever possible.
5717
5718 3. Allow for pass dependent state to be provided to these
5719 routines and add simplifications based on the pass dependent
5720 state. Remove code from cse.c & combine.c that becomes
5721 redundant/dead.
5722
5723 It will take time, but ultimately the compiler will be easier to
5724 maintain and improve. It's totally silly that when we add a
5725 simplification it needs to be added to 4 places (3 for RTL
5726 simplification and 1 for tree simplification). */
5727
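/* A typical call pattern (the caller keeps the original rtx when
nothing simplifies):

rtx tem = simplify_rtx (x);
if (tem)
x = tem; */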
5728 rtx
5729 simplify_rtx (const_rtx x)
5730 {
5731 const enum rtx_code code = GET_CODE (x);
5732 const enum machine_mode mode = GET_MODE (x);
5733
5734 switch (GET_RTX_CLASS (code))
5735 {
5736 case RTX_UNARY:
5737 return simplify_unary_operation (code, mode,
5738 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5739 case RTX_COMM_ARITH:
5740 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5741 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5742
5743 /* Fall through.... */
5744
5745 case RTX_BIN_ARITH:
5746 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5747
5748 case RTX_TERNARY:
5749 case RTX_BITFIELD_OPS:
5750 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5751 XEXP (x, 0), XEXP (x, 1),
5752 XEXP (x, 2));
5753
5754 case RTX_COMPARE:
5755 case RTX_COMM_COMPARE:
5756 return simplify_relational_operation (code, mode,
5757 ((GET_MODE (XEXP (x, 0))
5758 != VOIDmode)
5759 ? GET_MODE (XEXP (x, 0))
5760 : GET_MODE (XEXP (x, 1))),
5761 XEXP (x, 0),
5762 XEXP (x, 1));
5763
5764 case RTX_EXTRA:
5765 if (code == SUBREG)
5766 return simplify_subreg (mode, SUBREG_REG (x),
5767 GET_MODE (SUBREG_REG (x)),
5768 SUBREG_BYTE (x));
5769 break;
5770
5771 case RTX_OBJ:
5772 if (code == LO_SUM)
5773 {
5774 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5775 if (GET_CODE (XEXP (x, 0)) == HIGH
5776 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5777 return XEXP (x, 1);
5778 }
5779 break;
5780
5781 default:
5782 break;
5783 }
5784 return NULL;
5785 }