gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36
37 /* Simplification and canonicalization of RTL. */
38
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
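/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (0x8000000000000000)
   is HOST_WIDE_INT_M1 (all ones), because the low word is negative when viewed
   as signed, whereas HWI_SIGN_EXTEND (5) is HOST_WIDE_INT_0.  */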
45
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
58 \f
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
62 {
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
64
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
70 }
71
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
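/* For SImode, for instance, the only value accepted is the constant whose low
   32 bits are 0x80000000, regardless of how it is stored in the CONST_INT.  */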
74
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
77 {
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
83
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
87
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 #if TARGET_SUPPORTS_WIDE_INT
92 else if (CONST_WIDE_INT_P (x))
93 {
94 unsigned int i;
95 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
96 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
97 return false;
98 for (i = 0; i < elts - 1; i++)
99 if (CONST_WIDE_INT_ELT (x, i) != 0)
100 return false;
101 val = CONST_WIDE_INT_ELT (x, elts - 1);
102 width %= HOST_BITS_PER_WIDE_INT;
103 if (width == 0)
104 width = HOST_BITS_PER_WIDE_INT;
105 }
106 #else
107 else if (width <= HOST_BITS_PER_DOUBLE_INT
108 && CONST_DOUBLE_AS_INT_P (x)
109 && CONST_DOUBLE_LOW (x) == 0)
110 {
111 val = CONST_DOUBLE_HIGH (x);
112 width -= HOST_BITS_PER_WIDE_INT;
113 }
114 #endif
115 else
116 /* X is not an integer constant. */
117 return false;
118
119 if (width < HOST_BITS_PER_WIDE_INT)
120 val &= (HOST_WIDE_INT_1U << width) - 1;
121 return val == (HOST_WIDE_INT_1U << (width - 1));
122 }
123
124 /* Test whether VAL is equal to the most significant bit of mode MODE
125 (after masking with the mode mask of MODE). Returns false if the
126 precision of MODE is too large to handle. */
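/* E.g. val_signbit_p (SImode, 0x80000000) and val_signbit_p (SImode,
   0xffffffff80000000) both return true, since VAL is masked with
   GET_MODE_MASK (SImode) before the comparison.  */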
127
128 bool
129 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
130 {
131 unsigned int width;
132
133 if (GET_MODE_CLASS (mode) != MODE_INT)
134 return false;
135
136 width = GET_MODE_PRECISION (mode);
137 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
138 return false;
139
140 val &= GET_MODE_MASK (mode);
141 return val == (HOST_WIDE_INT_1U << (width - 1));
142 }
143
144 /* Test whether the most significant bit of mode MODE is set in VAL.
145 Returns false if the precision of MODE is too large to handle. */
146 bool
147 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
148 {
149 unsigned int width;
150
151 if (GET_MODE_CLASS (mode) != MODE_INT)
152 return false;
153
154 width = GET_MODE_PRECISION (mode);
155 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
156 return false;
157
158 val &= HOST_WIDE_INT_1U << (width - 1);
159 return val != 0;
160 }
161
162 /* Test whether the most significant bit of mode MODE is clear in VAL.
163 Returns false if the precision of MODE is too large to handle. */
164 bool
165 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
166 {
167 unsigned int width;
168
169 if (GET_MODE_CLASS (mode) != MODE_INT)
170 return false;
171
172 width = GET_MODE_PRECISION (mode);
173 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
174 return false;
175
176 val &= HOST_WIDE_INT_1U << (width - 1);
177 return val == 0;
178 }
179 \f
180 /* Make a binary operation by properly ordering the operands and
181 seeing if the expression folds. */
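/* For instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx) can return
   X directly instead of building (plus:SI X (const_int 0)), because the
   addition of zero folds away in simplify_binary_operation.  */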
182
183 rtx
184 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
185 rtx op1)
186 {
187 rtx tem;
188
189 /* If this simplifies, do it. */
190 tem = simplify_binary_operation (code, mode, op0, op1);
191 if (tem)
192 return tem;
193
194 /* Put complex operands first and constants second if commutative. */
195 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
196 && swap_commutative_operands_p (op0, op1))
197 std::swap (op0, op1);
198
199 return gen_rtx_fmt_ee (code, mode, op0, op1);
200 }
201 \f
202 /* If X is a MEM referencing the constant pool, return the real value.
203 Otherwise return X. */
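/* For example, if X is a DFmode MEM whose address is a SYMBOL_REF into the
   constant pool and the pool entry is a DFmode CONST_DOUBLE, that CONST_DOUBLE
   is returned in place of the memory reference.  */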
204 rtx
205 avoid_constant_pool_reference (rtx x)
206 {
207 rtx c, tmp, addr;
208 machine_mode cmode;
209 HOST_WIDE_INT offset = 0;
210
211 switch (GET_CODE (x))
212 {
213 case MEM:
214 break;
215
216 case FLOAT_EXTEND:
217 /* Handle float extensions of constant pool references. */
218 tmp = XEXP (x, 0);
219 c = avoid_constant_pool_reference (tmp);
220 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
221 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
222 GET_MODE (x));
223 return x;
224
225 default:
226 return x;
227 }
228
229 if (GET_MODE (x) == BLKmode)
230 return x;
231
232 addr = XEXP (x, 0);
233
234 /* Call target hook to avoid the effects of -fpic etc.... */
235 addr = targetm.delegitimize_address (addr);
236
237 /* Split the address into a base and integer offset. */
238 if (GET_CODE (addr) == CONST
239 && GET_CODE (XEXP (addr, 0)) == PLUS
240 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
241 {
242 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
243 addr = XEXP (XEXP (addr, 0), 0);
244 }
245
246 if (GET_CODE (addr) == LO_SUM)
247 addr = XEXP (addr, 1);
248
249 /* If this is a constant pool reference, we can turn it into its
250 constant and hope that simplifications happen. */
251 if (GET_CODE (addr) == SYMBOL_REF
252 && CONSTANT_POOL_ADDRESS_P (addr))
253 {
254 c = get_pool_constant (addr);
255 cmode = get_pool_mode (addr);
256
257 /* If we're accessing the constant in a different mode than it was
258 originally stored, attempt to fix that up via subreg simplifications.
259 If that fails we have no choice but to return the original memory. */
260 if (offset == 0 && cmode == GET_MODE (x))
261 return c;
262 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
263 {
264 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
265 if (tem && CONSTANT_P (tem))
266 return tem;
267 }
268 }
269
270 return x;
271 }
272 \f
273 /* Simplify a MEM based on its attributes. This is the default
274 delegitimize_address target hook, and it's recommended that every
275 overrider call it. */
276
277 rtx
278 delegitimize_mem_from_attrs (rtx x)
279 {
280 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
281 use their base addresses as equivalent. */
282 if (MEM_P (x)
283 && MEM_EXPR (x)
284 && MEM_OFFSET_KNOWN_P (x))
285 {
286 tree decl = MEM_EXPR (x);
287 machine_mode mode = GET_MODE (x);
288 HOST_WIDE_INT offset = 0;
289
290 switch (TREE_CODE (decl))
291 {
292 default:
293 decl = NULL;
294 break;
295
296 case VAR_DECL:
297 break;
298
299 case ARRAY_REF:
300 case ARRAY_RANGE_REF:
301 case COMPONENT_REF:
302 case BIT_FIELD_REF:
303 case REALPART_EXPR:
304 case IMAGPART_EXPR:
305 case VIEW_CONVERT_EXPR:
306 {
307 HOST_WIDE_INT bitsize, bitpos;
308 tree toffset;
309 int unsignedp, reversep, volatilep = 0;
310
311 decl
312 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
313 &unsignedp, &reversep, &volatilep);
314 if (bitsize != GET_MODE_BITSIZE (mode)
315 || (bitpos % BITS_PER_UNIT)
316 || (toffset && !tree_fits_shwi_p (toffset)))
317 decl = NULL;
318 else
319 {
320 offset += bitpos / BITS_PER_UNIT;
321 if (toffset)
322 offset += tree_to_shwi (toffset);
323 }
324 break;
325 }
326 }
327
328 if (decl
329 && mode == GET_MODE (x)
330 && VAR_P (decl)
331 && (TREE_STATIC (decl)
332 || DECL_THREAD_LOCAL_P (decl))
333 && DECL_RTL_SET_P (decl)
334 && MEM_P (DECL_RTL (decl)))
335 {
336 rtx newx;
337
338 offset += MEM_OFFSET (x);
339
340 newx = DECL_RTL (decl);
341
342 if (MEM_P (newx))
343 {
344 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
345
346 /* Avoid creating a new MEM needlessly if we already had
347 the same address. We do so if there's no OFFSET and the
348 old address X is identical to NEWX, or if X is of the
349 form (plus NEWX OFFSET), or the NEWX is of the form
350 (plus Y (const_int Z)) and X is that with the offset
351 added: (plus Y (const_int Z+OFFSET)). */
352 if (!((offset == 0
353 || (GET_CODE (o) == PLUS
354 && GET_CODE (XEXP (o, 1)) == CONST_INT
355 && (offset == INTVAL (XEXP (o, 1))
356 || (GET_CODE (n) == PLUS
357 && GET_CODE (XEXP (n, 1)) == CONST_INT
358 && (INTVAL (XEXP (n, 1)) + offset
359 == INTVAL (XEXP (o, 1)))
360 && (n = XEXP (n, 0))))
361 && (o = XEXP (o, 0))))
362 && rtx_equal_p (o, n)))
363 x = adjust_address_nv (newx, mode, offset);
364 }
365 else if (GET_MODE (x) == GET_MODE (newx)
366 && offset == 0)
367 x = newx;
368 }
369 }
370
371 return x;
372 }
373 \f
374 /* Make a unary operation by first seeing if it folds and otherwise making
375 the specified operation. */
376
377 rtx
378 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
379 machine_mode op_mode)
380 {
381 rtx tem;
382
383 /* If this simplifies, use it. */
384 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
385 return tem;
386
387 return gen_rtx_fmt_e (code, mode, op);
388 }
389
390 /* Likewise for ternary operations. */
391
392 rtx
393 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
394 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
395 {
396 rtx tem;
397
398 /* If this simplifies, use it. */
399 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
400 op0, op1, op2)))
401 return tem;
402
403 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
404 }
405
406 /* Likewise, for relational operations.
407 CMP_MODE specifies mode comparison is done in. */
408
409 rtx
410 simplify_gen_relational (enum rtx_code code, machine_mode mode,
411 machine_mode cmp_mode, rtx op0, rtx op1)
412 {
413 rtx tem;
414
415 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
416 op0, op1)))
417 return tem;
418
419 return gen_rtx_fmt_ee (code, mode, op0, op1);
420 }
421 \f
422 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
423 and simplify the result. If FN is non-NULL, call this callback on each
424 X, if it returns non-NULL, replace X with its return value and simplify the
425 result. */
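/* For instance, simplify_replace_rtx below uses the FN == NULL form:
   simplify_replace_rtx (x, reg, const0_rtx) substitutes (const_int 0) for
   each occurrence of REG in X and simplifies the result as it rebuilds.  */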
426
427 rtx
428 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
429 rtx (*fn) (rtx, const_rtx, void *), void *data)
430 {
431 enum rtx_code code = GET_CODE (x);
432 machine_mode mode = GET_MODE (x);
433 machine_mode op_mode;
434 const char *fmt;
435 rtx op0, op1, op2, newx, op;
436 rtvec vec, newvec;
437 int i, j;
438
439 if (__builtin_expect (fn != NULL, 0))
440 {
441 newx = fn (x, old_rtx, data);
442 if (newx)
443 return newx;
444 }
445 else if (rtx_equal_p (x, old_rtx))
446 return copy_rtx ((rtx) data);
447
448 switch (GET_RTX_CLASS (code))
449 {
450 case RTX_UNARY:
451 op0 = XEXP (x, 0);
452 op_mode = GET_MODE (op0);
453 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
454 if (op0 == XEXP (x, 0))
455 return x;
456 return simplify_gen_unary (code, mode, op0, op_mode);
457
458 case RTX_BIN_ARITH:
459 case RTX_COMM_ARITH:
460 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
461 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
462 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
463 return x;
464 return simplify_gen_binary (code, mode, op0, op1);
465
466 case RTX_COMPARE:
467 case RTX_COMM_COMPARE:
468 op0 = XEXP (x, 0);
469 op1 = XEXP (x, 1);
470 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
471 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
472 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
474 return x;
475 return simplify_gen_relational (code, mode, op_mode, op0, op1);
476
477 case RTX_TERNARY:
478 case RTX_BITFIELD_OPS:
479 op0 = XEXP (x, 0);
480 op_mode = GET_MODE (op0);
481 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
482 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
483 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
484 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
485 return x;
486 if (op_mode == VOIDmode)
487 op_mode = GET_MODE (op0);
488 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
489
490 case RTX_EXTRA:
491 if (code == SUBREG)
492 {
493 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
494 if (op0 == SUBREG_REG (x))
495 return x;
496 op0 = simplify_gen_subreg (GET_MODE (x), op0,
497 GET_MODE (SUBREG_REG (x)),
498 SUBREG_BYTE (x));
499 return op0 ? op0 : x;
500 }
501 break;
502
503 case RTX_OBJ:
504 if (code == MEM)
505 {
506 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
507 if (op0 == XEXP (x, 0))
508 return x;
509 return replace_equiv_address_nv (x, op0);
510 }
511 else if (code == LO_SUM)
512 {
513 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
514 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
515
516 /* (lo_sum (high x) y) -> y where x and y have the same base. */
517 if (GET_CODE (op0) == HIGH)
518 {
519 rtx base0, base1, offset0, offset1;
520 split_const (XEXP (op0, 0), &base0, &offset0);
521 split_const (op1, &base1, &offset1);
522 if (rtx_equal_p (base0, base1))
523 return op1;
524 }
525
526 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
527 return x;
528 return gen_rtx_LO_SUM (mode, op0, op1);
529 }
530 break;
531
532 default:
533 break;
534 }
535
536 newx = x;
537 fmt = GET_RTX_FORMAT (code);
538 for (i = 0; fmt[i]; i++)
539 switch (fmt[i])
540 {
541 case 'E':
542 vec = XVEC (x, i);
543 newvec = XVEC (newx, i);
544 for (j = 0; j < GET_NUM_ELEM (vec); j++)
545 {
546 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
547 old_rtx, fn, data);
548 if (op != RTVEC_ELT (vec, j))
549 {
550 if (newvec == vec)
551 {
552 newvec = shallow_copy_rtvec (vec);
553 if (x == newx)
554 newx = shallow_copy_rtx (x);
555 XVEC (newx, i) = newvec;
556 }
557 RTVEC_ELT (newvec, j) = op;
558 }
559 }
560 break;
561
562 case 'e':
563 if (XEXP (x, i))
564 {
565 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
566 if (op != XEXP (x, i))
567 {
568 if (x == newx)
569 newx = shallow_copy_rtx (x);
570 XEXP (newx, i) = op;
571 }
572 }
573 break;
574 }
575 return newx;
576 }
577
578 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
579 resulting RTX. Return a new RTX which is as simplified as possible. */
580
581 rtx
582 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
583 {
584 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
585 }
586 \f
587 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
588 Only handle cases where the truncated value is inherently an rvalue.
589
590 RTL provides two ways of truncating a value:
591
592 1. a lowpart subreg. This form is only a truncation when both
593 the outer and inner modes (here MODE and OP_MODE respectively)
594 are scalar integers, and only then when the subreg is used as
595 an rvalue.
596
597 It is only valid to form such truncating subregs if the
598 truncation requires no action by the target. The onus for
599 proving this is on the creator of the subreg -- e.g. the
600 caller to simplify_subreg or simplify_gen_subreg -- and typically
601 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
602
603 2. a TRUNCATE. This form handles both scalar and compound integers.
604
605 The first form is preferred where valid. However, the TRUNCATE
606 handling in simplify_unary_operation turns the second form into the
607 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
608 so it is generally safe to form rvalue truncations using:
609
610 simplify_gen_unary (TRUNCATE, ...)
611
612 and leave simplify_unary_operation to work out which representation
613 should be used.
614
615 Because of the proof requirements on (1), simplify_truncation must
616 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
617 regardless of whether the outer truncation came from a SUBREG or a
618 TRUNCATE. For example, if the caller has proven that an SImode
619 truncation of:
620
621 (and:DI X Y)
622
623 is a no-op and can be represented as a subreg, it does not follow
624 that SImode truncations of X and Y are also no-ops. On a target
625 like 64-bit MIPS that requires SImode values to be stored in
626 sign-extended form, an SImode truncation of:
627
628 (and:DI (reg:DI X) (const_int 63))
629
630 is trivially a no-op because only the lower 6 bits can be set.
631 However, X is still an arbitrary 64-bit number and so we cannot
632 assume that truncating it too is a no-op. */
633
634 static rtx
635 simplify_truncation (machine_mode mode, rtx op,
636 machine_mode op_mode)
637 {
638 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
639 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
640 gcc_assert (precision <= op_precision);
641
642 /* Optimize truncations of zero and sign extended values. */
643 if (GET_CODE (op) == ZERO_EXTEND
644 || GET_CODE (op) == SIGN_EXTEND)
645 {
646 /* There are three possibilities. If MODE is the same as the
647 origmode, we can omit both the extension and the subreg.
648 If MODE is not larger than the origmode, we can apply the
649 truncation without the extension. Finally, if the outermode
650 is larger than the origmode, we can just extend to the appropriate
651 mode. */
652 machine_mode origmode = GET_MODE (XEXP (op, 0));
653 if (mode == origmode)
654 return XEXP (op, 0);
655 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
656 return simplify_gen_unary (TRUNCATE, mode,
657 XEXP (op, 0), origmode);
658 else
659 return simplify_gen_unary (GET_CODE (op), mode,
660 XEXP (op, 0), origmode);
661 }
662
663 /* If the machine can perform operations in the truncated mode, distribute
664 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
665 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
666 if ((!WORD_REGISTER_OPERATIONS
667 || precision >= BITS_PER_WORD)
668 && (GET_CODE (op) == PLUS
669 || GET_CODE (op) == MINUS
670 || GET_CODE (op) == MULT))
671 {
672 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
673 if (op0)
674 {
675 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
676 if (op1)
677 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
678 }
679 }
680
681 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
682 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
683 the outer subreg is effectively a truncation to the original mode. */
684 if ((GET_CODE (op) == LSHIFTRT
685 || GET_CODE (op) == ASHIFTRT)
686 /* Ensure that OP_MODE is at least twice as wide as MODE
687 to avoid the possibility that an outer LSHIFTRT shifts by more
688 than the sign extension's sign_bit_copies and introduces zeros
689 into the high bits of the result. */
690 && 2 * precision <= op_precision
691 && CONST_INT_P (XEXP (op, 1))
692 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFTRT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
697
698 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
699 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
700 the outer subreg is effectively a truncation to the original mode. */
701 if ((GET_CODE (op) == LSHIFTRT
702 || GET_CODE (op) == ASHIFTRT)
703 && CONST_INT_P (XEXP (op, 1))
704 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
705 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
706 && UINTVAL (XEXP (op, 1)) < precision)
707 return simplify_gen_binary (LSHIFTRT, mode,
708 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
709
710 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
711 (ashift:QI (x:QI) C), where C is a suitable small constant and
712 the outer subreg is effectively a truncation to the original mode. */
713 if (GET_CODE (op) == ASHIFT
714 && CONST_INT_P (XEXP (op, 1))
715 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
716 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
718 && UINTVAL (XEXP (op, 1)) < precision)
719 return simplify_gen_binary (ASHIFT, mode,
720 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
721
722 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
723 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
724 and C2. */
725 if (GET_CODE (op) == AND
726 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
727 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
728 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
729 && CONST_INT_P (XEXP (op, 1)))
730 {
731 rtx op0 = (XEXP (XEXP (op, 0), 0));
732 rtx shift_op = XEXP (XEXP (op, 0), 1);
733 rtx mask_op = XEXP (op, 1);
734 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
735 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
736
737 if (shift < precision
738 /* If doing this transform works for an X with all bits set,
739 it works for any X. */
740 && ((GET_MODE_MASK (mode) >> shift) & mask)
741 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
742 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
743 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
744 {
745 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
746 return simplify_gen_binary (AND, mode, op0, mask_op);
747 }
748 }
749
750 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
751 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
752 changing len. */
753 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
754 && REG_P (XEXP (op, 0))
755 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
756 && CONST_INT_P (XEXP (op, 1))
757 && CONST_INT_P (XEXP (op, 2)))
758 {
759 rtx op0 = XEXP (op, 0);
760 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
761 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
762 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
763 {
764 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
765 if (op0)
766 {
767 pos -= op_precision - precision;
768 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
769 XEXP (op, 1), GEN_INT (pos));
770 }
771 }
772 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
773 {
774 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
775 if (op0)
776 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
777 XEXP (op, 1), XEXP (op, 2));
778 }
779 }
780
781 /* Recognize a word extraction from a multi-word subreg. */
782 if ((GET_CODE (op) == LSHIFTRT
783 || GET_CODE (op) == ASHIFTRT)
784 && SCALAR_INT_MODE_P (mode)
785 && SCALAR_INT_MODE_P (op_mode)
786 && precision >= BITS_PER_WORD
787 && 2 * precision <= op_precision
788 && CONST_INT_P (XEXP (op, 1))
789 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
790 && UINTVAL (XEXP (op, 1)) < op_precision)
791 {
792 int byte = subreg_lowpart_offset (mode, op_mode);
793 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
794 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
795 (WORDS_BIG_ENDIAN
796 ? byte - shifted_bytes
797 : byte + shifted_bytes));
798 }
799
800 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
801 and try replacing the TRUNCATE and shift with it. Don't do this
802 if the MEM has a mode-dependent address. */
803 if ((GET_CODE (op) == LSHIFTRT
804 || GET_CODE (op) == ASHIFTRT)
805 && SCALAR_INT_MODE_P (op_mode)
806 && MEM_P (XEXP (op, 0))
807 && CONST_INT_P (XEXP (op, 1))
808 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
809 && INTVAL (XEXP (op, 1)) > 0
810 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
811 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
812 MEM_ADDR_SPACE (XEXP (op, 0)))
813 && ! MEM_VOLATILE_P (XEXP (op, 0))
814 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
815 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
816 {
817 int byte = subreg_lowpart_offset (mode, op_mode);
818 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
819 return adjust_address_nv (XEXP (op, 0), mode,
820 (WORDS_BIG_ENDIAN
821 ? byte - shifted_bytes
822 : byte + shifted_bytes));
823 }
824
825 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
826 (OP:SI foo:SI) if OP is NEG or ABS. */
827 if ((GET_CODE (op) == ABS
828 || GET_CODE (op) == NEG)
829 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
830 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
831 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
832 return simplify_gen_unary (GET_CODE (op), mode,
833 XEXP (XEXP (op, 0), 0), mode);
834
835 /* (truncate:A (subreg:B (truncate:C X) 0)) is
836 (truncate:A X). */
837 if (GET_CODE (op) == SUBREG
838 && SCALAR_INT_MODE_P (mode)
839 && SCALAR_INT_MODE_P (op_mode)
840 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
841 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
842 && subreg_lowpart_p (op))
843 {
844 rtx inner = XEXP (SUBREG_REG (op), 0);
845 if (GET_MODE_PRECISION (mode)
846 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
847 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
848 else
849 /* If subreg above is paradoxical and C is narrower
850 than A, return (subreg:A (truncate:C X) 0). */
851 return simplify_gen_subreg (mode, SUBREG_REG (op),
852 GET_MODE (SUBREG_REG (op)), 0);
853 }
854
855 /* (truncate:A (truncate:B X)) is (truncate:A X). */
856 if (GET_CODE (op) == TRUNCATE)
857 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
858 GET_MODE (XEXP (op, 0)));
859
860 /* (truncate:A (ior X C)) is (const_int -1) if C, truncated to mode A,
861 is all ones. */
862 if (GET_CODE (op) == IOR
863 && SCALAR_INT_MODE_P (mode)
864 && SCALAR_INT_MODE_P (op_mode)
865 && CONST_INT_P (XEXP (op, 1))
866 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
867 return constm1_rtx;
868
869 return NULL_RTX;
870 }
871 \f
872 /* Try to simplify a unary operation CODE whose output mode is to be
873 MODE with input operand OP whose mode was originally OP_MODE.
874 Return zero if no simplification can be made. */
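/* E.g. simplify_unary_operation (NEG, SImode, const1_rtx, SImode) evaluates
   the negation and returns (const_int -1) rather than a NEG rtx.  */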
875 rtx
876 simplify_unary_operation (enum rtx_code code, machine_mode mode,
877 rtx op, machine_mode op_mode)
878 {
879 rtx trueop, tem;
880
881 trueop = avoid_constant_pool_reference (op);
882
883 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
884 if (tem)
885 return tem;
886
887 return simplify_unary_operation_1 (code, mode, op);
888 }
889
890 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
891 to be exact. */
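/* For example, (float:SF (reg:HI ...)) is always exact, since SFmode's 24-bit
   significand can represent every 16-bit integer, whereas (float:SF (reg:DI ...))
   is exact only if nonzero_bits/num_sign_bit_copies show that few enough
   significant bits are involved.  */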
892
893 static bool
894 exact_int_to_float_conversion_p (const_rtx op)
895 {
896 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
897 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
898 /* Constants shouldn't reach here. */
899 gcc_assert (op0_mode != VOIDmode);
900 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
901 int in_bits = in_prec;
902 if (HWI_COMPUTABLE_MODE_P (op0_mode))
903 {
904 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
905 if (GET_CODE (op) == FLOAT)
906 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
907 else if (GET_CODE (op) == UNSIGNED_FLOAT)
908 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
909 else
910 gcc_unreachable ();
911 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
912 }
913 return in_bits <= out_bits;
914 }
915
916 /* Perform some simplifications we can do even if the operands
917 aren't constant. */
918 static rtx
919 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
920 {
921 enum rtx_code reversed;
922 rtx temp;
923
924 switch (code)
925 {
926 case NOT:
927 /* (not (not X)) == X. */
928 if (GET_CODE (op) == NOT)
929 return XEXP (op, 0);
930
931 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
932 comparison is all ones. */
933 if (COMPARISON_P (op)
934 && (mode == BImode || STORE_FLAG_VALUE == -1)
935 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
936 return simplify_gen_relational (reversed, mode, VOIDmode,
937 XEXP (op, 0), XEXP (op, 1));
938
939 /* (not (plus X -1)) can become (neg X). */
940 if (GET_CODE (op) == PLUS
941 && XEXP (op, 1) == constm1_rtx)
942 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
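/* Both this and the next transformation follow from the two's-complement
   identity ~Y == -Y - 1: with Y == X - 1 we get ~(X - 1) == -X, and with
   Y == -X we get ~(-X) == X - 1.  */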
943
944 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
945 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
946 and MODE_VECTOR_INT. */
947 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
948 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
949 CONSTM1_RTX (mode));
950
951 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
952 if (GET_CODE (op) == XOR
953 && CONST_INT_P (XEXP (op, 1))
954 && (temp = simplify_unary_operation (NOT, mode,
955 XEXP (op, 1), mode)) != 0)
956 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
957
958 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
959 if (GET_CODE (op) == PLUS
960 && CONST_INT_P (XEXP (op, 1))
961 && mode_signbit_p (mode, XEXP (op, 1))
962 && (temp = simplify_unary_operation (NOT, mode,
963 XEXP (op, 1), mode)) != 0)
964 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
965
966
967 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
968 operands other than 1, but that is not valid. We could do a
969 similar simplification for (not (lshiftrt C X)) where C is
970 just the sign bit, but this doesn't seem common enough to
971 bother with. */
972 if (GET_CODE (op) == ASHIFT
973 && XEXP (op, 0) == const1_rtx)
974 {
975 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
976 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
977 }
978
979 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
980 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
981 so we can perform the above simplification. */
982 if (STORE_FLAG_VALUE == -1
983 && GET_CODE (op) == ASHIFTRT
984 && CONST_INT_P (XEXP (op, 1))
985 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
986 return simplify_gen_relational (GE, mode, VOIDmode,
987 XEXP (op, 0), const0_rtx);
988
989
990 if (GET_CODE (op) == SUBREG
991 && subreg_lowpart_p (op)
992 && (GET_MODE_SIZE (GET_MODE (op))
993 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
994 && GET_CODE (SUBREG_REG (op)) == ASHIFT
995 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
996 {
997 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
998 rtx x;
999
1000 x = gen_rtx_ROTATE (inner_mode,
1001 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1002 inner_mode),
1003 XEXP (SUBREG_REG (op), 1));
1004 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1005 if (temp)
1006 return temp;
1007 }
1008
1009 /* Apply De Morgan's laws to reduce the number of patterns for machines
1010 with negating logical insns (and-not, nand, etc.). If the result has
1011 only one NOT, put it first, since that is how the patterns are
1012 coded. */
1013 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1014 {
1015 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1016 machine_mode op_mode;
1017
1018 op_mode = GET_MODE (in1);
1019 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1020
1021 op_mode = GET_MODE (in2);
1022 if (op_mode == VOIDmode)
1023 op_mode = mode;
1024 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1025
1026 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1027 std::swap (in1, in2);
1028
1029 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1030 mode, in1, in2);
1031 }
1032
1033 /* (not (bswap x)) -> (bswap (not x)). */
1034 if (GET_CODE (op) == BSWAP)
1035 {
1036 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1037 return simplify_gen_unary (BSWAP, mode, x, mode);
1038 }
1039 break;
1040
1041 case NEG:
1042 /* (neg (neg X)) == X. */
1043 if (GET_CODE (op) == NEG)
1044 return XEXP (op, 0);
1045
1046 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1047 If the comparison is not reversible, use
1048 x ? y : (neg y). */
1049 if (GET_CODE (op) == IF_THEN_ELSE)
1050 {
1051 rtx cond = XEXP (op, 0);
1052 rtx true_rtx = XEXP (op, 1);
1053 rtx false_rtx = XEXP (op, 2);
1054
1055 if ((GET_CODE (true_rtx) == NEG
1056 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1057 || (GET_CODE (false_rtx) == NEG
1058 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1059 {
1060 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1061 temp = reversed_comparison (cond, mode);
1062 else
1063 {
1064 temp = cond;
1065 std::swap (true_rtx, false_rtx);
1066 }
1067 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1068 mode, temp, true_rtx, false_rtx);
1069 }
1070 }
1071
1072 /* (neg (plus X 1)) can become (not X). */
1073 if (GET_CODE (op) == PLUS
1074 && XEXP (op, 1) == const1_rtx)
1075 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1076
1077 /* Similarly, (neg (not X)) is (plus X 1). */
1078 if (GET_CODE (op) == NOT)
1079 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1080 CONST1_RTX (mode));
1081
1082 /* (neg (minus X Y)) can become (minus Y X). This transformation
1083 isn't safe for modes with signed zeros, since if X and Y are
1084 both +0, (minus Y X) is the same as (minus X Y). If the
1085 rounding mode is towards +infinity (or -infinity) then the two
1086 expressions will be rounded differently. */
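/* Concretely, if X and Y are both +0.0 then (minus X Y) and (minus Y X) are
   both +0.0, while (neg (minus X Y)) is -0.0, so the rewrite would flip the
   sign of zero.  */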
1087 if (GET_CODE (op) == MINUS
1088 && !HONOR_SIGNED_ZEROS (mode)
1089 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1090 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1091
1092 if (GET_CODE (op) == PLUS
1093 && !HONOR_SIGNED_ZEROS (mode)
1094 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1095 {
1096 /* (neg (plus A C)) is simplified to (minus -C A). */
1097 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1098 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1099 {
1100 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1101 if (temp)
1102 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1103 }
1104
1105 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1106 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1107 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1108 }
1109
1110 /* (neg (mult A B)) becomes (mult A (neg B)).
1111 This works even for floating-point values. */
1112 if (GET_CODE (op) == MULT
1113 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1114 {
1115 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1116 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1117 }
1118
1119 /* NEG commutes with ASHIFT since it is multiplication. Only do
1120 this if we can then eliminate the NEG (e.g., if the operand
1121 is a constant). */
1122 if (GET_CODE (op) == ASHIFT)
1123 {
1124 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1125 if (temp)
1126 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1127 }
1128
1129 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1130 C is equal to the width of MODE minus 1. */
1131 if (GET_CODE (op) == ASHIFTRT
1132 && CONST_INT_P (XEXP (op, 1))
1133 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1134 return simplify_gen_binary (LSHIFTRT, mode,
1135 XEXP (op, 0), XEXP (op, 1));
1136
1137 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1138 C is equal to the width of MODE minus 1. */
1139 if (GET_CODE (op) == LSHIFTRT
1140 && CONST_INT_P (XEXP (op, 1))
1141 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1142 return simplify_gen_binary (ASHIFTRT, mode,
1143 XEXP (op, 0), XEXP (op, 1));
1144
1145 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1146 if (GET_CODE (op) == XOR
1147 && XEXP (op, 1) == const1_rtx
1148 && nonzero_bits (XEXP (op, 0), mode) == 1)
1149 return plus_constant (mode, XEXP (op, 0), -1);
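/* Check: if A is 0, -(0 ^ 1) == -1 == 0 - 1; if A is 1, -(1 ^ 1) == 0 == 1 - 1,
   so the two forms agree on both possible values of A.  */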
1150
1151 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1152 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1153 if (GET_CODE (op) == LT
1154 && XEXP (op, 1) == const0_rtx
1155 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1156 {
1157 machine_mode inner = GET_MODE (XEXP (op, 0));
1158 int isize = GET_MODE_PRECISION (inner);
1159 if (STORE_FLAG_VALUE == 1)
1160 {
1161 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1162 GEN_INT (isize - 1));
1163 if (mode == inner)
1164 return temp;
1165 if (GET_MODE_PRECISION (mode) > isize)
1166 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1167 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1168 }
1169 else if (STORE_FLAG_VALUE == -1)
1170 {
1171 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1172 GEN_INT (isize - 1));
1173 if (mode == inner)
1174 return temp;
1175 if (GET_MODE_PRECISION (mode) > isize)
1176 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1177 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1178 }
1179 }
1180 break;
1181
1182 case TRUNCATE:
1183 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1184 with the umulXi3_highpart patterns. */
1185 if (GET_CODE (op) == LSHIFTRT
1186 && GET_CODE (XEXP (op, 0)) == MULT)
1187 break;
1188
1189 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1190 {
1191 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1192 {
1193 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1194 if (temp)
1195 return temp;
1196 }
1197 /* We can't handle truncation to a partial integer mode here
1198 because we don't know the real bitsize of the partial
1199 integer mode. */
1200 break;
1201 }
1202
1203 if (GET_MODE (op) != VOIDmode)
1204 {
1205 temp = simplify_truncation (mode, op, GET_MODE (op));
1206 if (temp)
1207 return temp;
1208 }
1209
1210 /* If we know that the value is already truncated, we can
1211 replace the TRUNCATE with a SUBREG. */
1212 if (GET_MODE_NUNITS (mode) == 1
1213 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1214 || truncated_to_mode (mode, op)))
1215 {
1216 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1217 if (temp)
1218 return temp;
1219 }
1220
1221 /* A truncate of a comparison can be replaced with a subreg if
1222 STORE_FLAG_VALUE permits. This is like the previous test,
1223 but it works even if the comparison is done in a mode larger
1224 than HOST_BITS_PER_WIDE_INT. */
1225 if (HWI_COMPUTABLE_MODE_P (mode)
1226 && COMPARISON_P (op)
1227 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1228 {
1229 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1230 if (temp)
1231 return temp;
1232 }
1233
1234 /* A truncate of a memory is just loading the low part of the memory
1235 if we are not changing the meaning of the address. */
1236 if (GET_CODE (op) == MEM
1237 && !VECTOR_MODE_P (mode)
1238 && !MEM_VOLATILE_P (op)
1239 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1240 {
1241 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1242 if (temp)
1243 return temp;
1244 }
1245
1246 break;
1247
1248 case FLOAT_TRUNCATE:
1249 if (DECIMAL_FLOAT_MODE_P (mode))
1250 break;
1251
1252 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1253 if (GET_CODE (op) == FLOAT_EXTEND
1254 && GET_MODE (XEXP (op, 0)) == mode)
1255 return XEXP (op, 0);
1256
1257 /* (float_truncate:SF (float_truncate:DF foo:XF))
1258 = (float_truncate:SF foo:XF).
1259 This may eliminate double rounding, so it is unsafe.
1260
1261 (float_truncate:SF (float_extend:XF foo:DF))
1262 = (float_truncate:SF foo:DF).
1263
1264 (float_truncate:DF (float_extend:XF foo:SF))
1265 = (float_extend:DF foo:SF). */
1266 if ((GET_CODE (op) == FLOAT_TRUNCATE
1267 && flag_unsafe_math_optimizations)
1268 || GET_CODE (op) == FLOAT_EXTEND)
1269 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1270 0)))
1271 > GET_MODE_SIZE (mode)
1272 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1273 mode,
1274 XEXP (op, 0), mode);
1275
1276 /* (float_truncate (float x)) is (float x) */
1277 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1278 && (flag_unsafe_math_optimizations
1279 || exact_int_to_float_conversion_p (op)))
1280 return simplify_gen_unary (GET_CODE (op), mode,
1281 XEXP (op, 0),
1282 GET_MODE (XEXP (op, 0)));
1283
1284 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1285 (OP:SF foo:SF) if OP is NEG or ABS. */
1286 if ((GET_CODE (op) == ABS
1287 || GET_CODE (op) == NEG)
1288 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1289 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1290 return simplify_gen_unary (GET_CODE (op), mode,
1291 XEXP (XEXP (op, 0), 0), mode);
1292
1293 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1294 is (float_truncate:SF x). */
1295 if (GET_CODE (op) == SUBREG
1296 && subreg_lowpart_p (op)
1297 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1298 return SUBREG_REG (op);
1299 break;
1300
1301 case FLOAT_EXTEND:
1302 if (DECIMAL_FLOAT_MODE_P (mode))
1303 break;
1304
1305 /* (float_extend (float_extend x)) is (float_extend x)
1306
1307 (float_extend (float x)) is (float x) assuming that double
1308 rounding can't happen.
1309 */
1310 if (GET_CODE (op) == FLOAT_EXTEND
1311 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1312 && exact_int_to_float_conversion_p (op)))
1313 return simplify_gen_unary (GET_CODE (op), mode,
1314 XEXP (op, 0),
1315 GET_MODE (XEXP (op, 0)));
1316
1317 break;
1318
1319 case ABS:
1320 /* (abs (neg <foo>)) -> (abs <foo>) */
1321 if (GET_CODE (op) == NEG)
1322 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1323 GET_MODE (XEXP (op, 0)));
1324
1325 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1326 do nothing. */
1327 if (GET_MODE (op) == VOIDmode)
1328 break;
1329
1330 /* If operand is something known to be positive, ignore the ABS. */
1331 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1332 || val_signbit_known_clear_p (GET_MODE (op),
1333 nonzero_bits (op, GET_MODE (op))))
1334 return op;
1335
1336 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1337 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1338 return gen_rtx_NEG (mode, op);
1339
1340 break;
1341
1342 case FFS:
1343 /* (ffs (*_extend <X>)) = (ffs <X>) */
1344 if (GET_CODE (op) == SIGN_EXTEND
1345 || GET_CODE (op) == ZERO_EXTEND)
1346 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1347 GET_MODE (XEXP (op, 0)));
1348 break;
1349
1350 case POPCOUNT:
1351 switch (GET_CODE (op))
1352 {
1353 case BSWAP:
1354 case ZERO_EXTEND:
1355 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1356 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1357 GET_MODE (XEXP (op, 0)));
1358
1359 case ROTATE:
1360 case ROTATERT:
1361 /* Rotations don't affect popcount. */
1362 if (!side_effects_p (XEXP (op, 1)))
1363 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1364 GET_MODE (XEXP (op, 0)));
1365 break;
1366
1367 default:
1368 break;
1369 }
1370 break;
1371
1372 case PARITY:
1373 switch (GET_CODE (op))
1374 {
1375 case NOT:
1376 case BSWAP:
1377 case ZERO_EXTEND:
1378 case SIGN_EXTEND:
1379 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1380 GET_MODE (XEXP (op, 0)));
1381
1382 case ROTATE:
1383 case ROTATERT:
1384 /* Rotations don't affect parity. */
1385 if (!side_effects_p (XEXP (op, 1)))
1386 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1387 GET_MODE (XEXP (op, 0)));
1388 break;
1389
1390 default:
1391 break;
1392 }
1393 break;
1394
1395 case BSWAP:
1396 /* (bswap (bswap x)) -> x. */
1397 if (GET_CODE (op) == BSWAP)
1398 return XEXP (op, 0);
1399 break;
1400
1401 case FLOAT:
1402 /* (float (sign_extend <X>)) = (float <X>). */
1403 if (GET_CODE (op) == SIGN_EXTEND)
1404 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1405 GET_MODE (XEXP (op, 0)));
1406 break;
1407
1408 case SIGN_EXTEND:
1409 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1410 becomes just the MINUS if its mode is MODE. This allows
1411 folding switch statements on machines using casesi (such as
1412 the VAX). */
1413 if (GET_CODE (op) == TRUNCATE
1414 && GET_MODE (XEXP (op, 0)) == mode
1415 && GET_CODE (XEXP (op, 0)) == MINUS
1416 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1417 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1418 return XEXP (op, 0);
1419
1420 /* Extending a widening multiplication should be canonicalized to
1421 a wider widening multiplication. */
1422 if (GET_CODE (op) == MULT)
1423 {
1424 rtx lhs = XEXP (op, 0);
1425 rtx rhs = XEXP (op, 1);
1426 enum rtx_code lcode = GET_CODE (lhs);
1427 enum rtx_code rcode = GET_CODE (rhs);
1428
1429 /* Widening multiplies usually extend both operands, but sometimes
1430 they use a shift to extract a portion of a register. */
1431 if ((lcode == SIGN_EXTEND
1432 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1433 && (rcode == SIGN_EXTEND
1434 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1435 {
1436 machine_mode lmode = GET_MODE (lhs);
1437 machine_mode rmode = GET_MODE (rhs);
1438 int bits;
1439
1440 if (lcode == ASHIFTRT)
1441 /* Number of bits not shifted off the end. */
1442 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1443 else /* lcode == SIGN_EXTEND */
1444 /* Size of inner mode. */
1445 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1446
1447 if (rcode == ASHIFTRT)
1448 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1449 else /* rcode == SIGN_EXTEND */
1450 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1451
1452 /* We can only widen multiplies if the result is mathematically
1453 equivalent. I.e. if overflow was impossible. */
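/* For example, a DImode sign_extend of an SImode multiply of two HImode
   values that were themselves sign_extended qualifies: each operand
   contributes 16 significant bits, so BITS == 32, which does not exceed
   SImode's precision, and the SImode product cannot overflow.  */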
1454 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1455 return simplify_gen_binary
1456 (MULT, mode,
1457 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1458 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1459 }
1460 }
1461
1462 /* Check for a sign extension of a subreg of a promoted
1463 variable, where the promotion is sign-extended, and the
1464 target mode is the same as the variable's promotion. */
1465 if (GET_CODE (op) == SUBREG
1466 && SUBREG_PROMOTED_VAR_P (op)
1467 && SUBREG_PROMOTED_SIGNED_P (op)
1468 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1469 {
1470 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1471 if (temp)
1472 return temp;
1473 }
1474
1475 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1476 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1477 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1478 {
1479 gcc_assert (GET_MODE_PRECISION (mode)
1480 > GET_MODE_PRECISION (GET_MODE (op)));
1481 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1482 GET_MODE (XEXP (op, 0)));
1483 }
1484
1485 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1486 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1487 GET_MODE_BITSIZE (N) - I bits.
1488 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1489 is similarly (zero_extend:M (subreg:O <X>)). */
1490 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1491 && GET_CODE (XEXP (op, 0)) == ASHIFT
1492 && CONST_INT_P (XEXP (op, 1))
1493 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1494 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1495 {
1496 machine_mode tmode
1497 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1498 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1499 gcc_assert (GET_MODE_BITSIZE (mode)
1500 > GET_MODE_BITSIZE (GET_MODE (op)));
1501 if (tmode != BLKmode)
1502 {
1503 rtx inner =
1504 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1505 if (inner)
1506 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1507 ? SIGN_EXTEND : ZERO_EXTEND,
1508 mode, inner, tmode);
1509 }
1510 }
1511
1512 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1513 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1514 if (GET_CODE (op) == LSHIFTRT
1515 && CONST_INT_P (XEXP (op, 1))
1516 && XEXP (op, 1) != const0_rtx)
1517 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1518
1519 #if defined(POINTERS_EXTEND_UNSIGNED)
1520 /* As we do not know which address space the pointer is referring to,
1521 we can do this only if the target does not support different pointer
1522 or address modes depending on the address space. */
1523 if (target_default_pointer_address_modes_p ()
1524 && ! POINTERS_EXTEND_UNSIGNED
1525 && mode == Pmode && GET_MODE (op) == ptr_mode
1526 && (CONSTANT_P (op)
1527 || (GET_CODE (op) == SUBREG
1528 && REG_P (SUBREG_REG (op))
1529 && REG_POINTER (SUBREG_REG (op))
1530 && GET_MODE (SUBREG_REG (op)) == Pmode))
1531 && !targetm.have_ptr_extend ())
1532 {
1533 temp
1534 = convert_memory_address_addr_space_1 (Pmode, op,
1535 ADDR_SPACE_GENERIC, false,
1536 true);
1537 if (temp)
1538 return temp;
1539 }
1540 #endif
1541 break;
1542
1543 case ZERO_EXTEND:
1544 /* Check for a zero extension of a subreg of a promoted
1545 variable, where the promotion is zero-extended, and the
1546 target mode is the same as the variable's promotion. */
1547 if (GET_CODE (op) == SUBREG
1548 && SUBREG_PROMOTED_VAR_P (op)
1549 && SUBREG_PROMOTED_UNSIGNED_P (op)
1550 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1551 {
1552 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1553 if (temp)
1554 return temp;
1555 }
1556
1557 /* Extending a widening multiplication should be canonicalized to
1558 a wider widening multiplication. */
1559 if (GET_CODE (op) == MULT)
1560 {
1561 rtx lhs = XEXP (op, 0);
1562 rtx rhs = XEXP (op, 1);
1563 enum rtx_code lcode = GET_CODE (lhs);
1564 enum rtx_code rcode = GET_CODE (rhs);
1565
1566 /* Widening multiplies usually extend both operands, but sometimes
1567 they use a shift to extract a portion of a register. */
1568 if ((lcode == ZERO_EXTEND
1569 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1570 && (rcode == ZERO_EXTEND
1571 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1572 {
1573 machine_mode lmode = GET_MODE (lhs);
1574 machine_mode rmode = GET_MODE (rhs);
1575 int bits;
1576
1577 if (lcode == LSHIFTRT)
1578 /* Number of bits not shifted off the end. */
1579 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1580 else /* lcode == ZERO_EXTEND */
1581 /* Size of inner mode. */
1582 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1583
1584 if (rcode == LSHIFTRT)
1585 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1586 else /* rcode == ZERO_EXTEND */
1587 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1588
1589 /* We can only widen multiplies if the result is mathematically
1590 equivalent. I.e. if overflow was impossible. */
1591 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1592 return simplify_gen_binary
1593 (MULT, mode,
1594 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1595 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1596 }
1597 }
1598
1599 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1600 if (GET_CODE (op) == ZERO_EXTEND)
1601 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1602 GET_MODE (XEXP (op, 0)));
1603
1604 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1605 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1606 GET_MODE_PRECISION (N) - I bits. */
1607 if (GET_CODE (op) == LSHIFTRT
1608 && GET_CODE (XEXP (op, 0)) == ASHIFT
1609 && CONST_INT_P (XEXP (op, 1))
1610 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1611 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1612 {
1613 machine_mode tmode
1614 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1615 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1616 if (tmode != BLKmode)
1617 {
1618 rtx inner =
1619 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1620 if (inner)
1621 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1622 }
1623 }
1624
1625 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1626 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1627 of mode N. E.g.
1628 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1629 (and:SI (reg:SI) (const_int 63)). */
1630 if (GET_CODE (op) == SUBREG
1631 && GET_MODE_PRECISION (GET_MODE (op))
1632 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1633 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1634 <= HOST_BITS_PER_WIDE_INT
1635 && GET_MODE_PRECISION (mode)
1636 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1637 && subreg_lowpart_p (op)
1638 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1639 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1640 {
1641 if (GET_MODE_PRECISION (mode)
1642 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1643 return SUBREG_REG (op);
1644 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1645 GET_MODE (SUBREG_REG (op)));
1646 }
1647
1648 #if defined(POINTERS_EXTEND_UNSIGNED)
1649 /* As we do not know which address space the pointer is referring to,
1650 we can do this only if the target does not support different pointer
1651 or address modes depending on the address space. */
1652 if (target_default_pointer_address_modes_p ()
1653 && POINTERS_EXTEND_UNSIGNED > 0
1654 && mode == Pmode && GET_MODE (op) == ptr_mode
1655 && (CONSTANT_P (op)
1656 || (GET_CODE (op) == SUBREG
1657 && REG_P (SUBREG_REG (op))
1658 && REG_POINTER (SUBREG_REG (op))
1659 && GET_MODE (SUBREG_REG (op)) == Pmode))
1660 && !targetm.have_ptr_extend ())
1661 {
1662 temp
1663 = convert_memory_address_addr_space_1 (Pmode, op,
1664 ADDR_SPACE_GENERIC, false,
1665 true);
1666 if (temp)
1667 return temp;
1668 }
1669 #endif
1670 break;
1671
1672 default:
1673 break;
1674 }
1675
1676 return 0;
1677 }
1678
1679 /* Try to compute the value of a unary operation CODE whose output mode is to
1680 be MODE with input operand OP whose mode was originally OP_MODE.
1681 Return zero if the value cannot be computed. */
1682 rtx
1683 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1684 rtx op, machine_mode op_mode)
1685 {
1686 unsigned int width = GET_MODE_PRECISION (mode);
1687
1688 if (code == VEC_DUPLICATE)
1689 {
1690 gcc_assert (VECTOR_MODE_P (mode));
1691 if (GET_MODE (op) != VOIDmode)
1692 {
1693 if (!VECTOR_MODE_P (GET_MODE (op)))
1694 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1695 else
1696 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1697 (GET_MODE (op)));
1698 }
1699 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1700 || GET_CODE (op) == CONST_VECTOR)
1701 {
1702 int elt_size = GET_MODE_UNIT_SIZE (mode);
1703 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1704 rtvec v = rtvec_alloc (n_elts);
1705 unsigned int i;
1706
1707 if (GET_CODE (op) != CONST_VECTOR)
1708 for (i = 0; i < n_elts; i++)
1709 RTVEC_ELT (v, i) = op;
1710 else
1711 {
1712 machine_mode inmode = GET_MODE (op);
1713 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1714 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1715
1716 gcc_assert (in_n_elts < n_elts);
1717 gcc_assert ((n_elts % in_n_elts) == 0);
1718 for (i = 0; i < n_elts; i++)
1719 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1720 }
1721 return gen_rtx_CONST_VECTOR (mode, v);
1722 }
1723 }
1724
1725 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1726 {
1727 int elt_size = GET_MODE_UNIT_SIZE (mode);
1728 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1729 machine_mode opmode = GET_MODE (op);
1730 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1731 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1732 rtvec v = rtvec_alloc (n_elts);
1733 unsigned int i;
1734
1735 gcc_assert (op_n_elts == n_elts);
1736 for (i = 0; i < n_elts; i++)
1737 {
1738 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1739 CONST_VECTOR_ELT (op, i),
1740 GET_MODE_INNER (opmode));
1741 if (!x)
1742 return 0;
1743 RTVEC_ELT (v, i) = x;
1744 }
1745 return gen_rtx_CONST_VECTOR (mode, v);
1746 }
1747
1748 /* The order of these tests is critical so that, for example, we don't
1749 check the wrong mode (input vs. output) for a conversion operation,
1750 such as FIX. At some point, this should be simplified. */
1751
1752 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1753 {
1754 REAL_VALUE_TYPE d;
1755
1756 if (op_mode == VOIDmode)
1757 {
1758 /* CONST_INTs have VOIDmode as their mode. We assume that all
1759 the bits of the constant are significant, though this is a
1760 dangerous assumption: CONST_INTs are often created and used
1761 with garbage in the bits outside of the precision of the
1762 implied mode of the const_int. */
1763 op_mode = MAX_MODE_INT;
1764 }
1765
1766 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1767
1768 /* Avoid the folding if flag_signaling_nans is on and
1769 operand is a signaling NaN. */
1770 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1771 return 0;
1772
1773 d = real_value_truncate (mode, d);
1774 return const_double_from_real_value (d, mode);
1775 }
1776 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1777 {
1778 REAL_VALUE_TYPE d;
1779
1780 if (op_mode == VOIDmode)
1781 {
1782 /* CONST_INTs have VOIDmode as their mode. We assume that all
1783 the bits of the constant are significant, though this is a
1784 dangerous assumption: CONST_INTs are often created and used
1785 with garbage in the bits outside of the precision of the
1786 implied mode of the const_int. */
1787 op_mode = MAX_MODE_INT;
1788 }
1789
1790 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1791
1792 /* Avoid the folding if flag_signaling_nans is on and
1793 operand is a signaling NaN. */
1794 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1795 return 0;
1796
1797 d = real_value_truncate (mode, d);
1798 return const_double_from_real_value (d, mode);
1799 }
1800
1801 if (CONST_SCALAR_INT_P (op) && width > 0)
1802 {
1803 wide_int result;
1804 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1805 rtx_mode_t op0 = rtx_mode_t (op, imode);
1806 int int_value;
1807
1808 #if TARGET_SUPPORTS_WIDE_INT == 0
1809 /* This assert keeps the simplification from producing a result
1810 that cannot be represented in a CONST_DOUBLE, but a lot of
1811 upstream callers expect that this function never fails to
1812 simplify something, so if you added this to the test above,
1813 the code would die later anyway. If this assert fires, you
1814 just need to make the port support wide int. */
1815 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1816 #endif
1817
1818 switch (code)
1819 {
1820 case NOT:
1821 result = wi::bit_not (op0);
1822 break;
1823
1824 case NEG:
1825 result = wi::neg (op0);
1826 break;
1827
1828 case ABS:
1829 result = wi::abs (op0);
1830 break;
1831
1832 case FFS:
1833 result = wi::shwi (wi::ffs (op0), mode);
1834 break;
1835
1836 case CLZ:
1837 if (wi::ne_p (op0, 0))
1838 int_value = wi::clz (op0);
1839 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1840 int_value = GET_MODE_PRECISION (mode);
1841 result = wi::shwi (int_value, mode);
1842 break;
1843
1844 case CLRSB:
1845 result = wi::shwi (wi::clrsb (op0), mode);
1846 break;
1847
1848 case CTZ:
1849 if (wi::ne_p (op0, 0))
1850 int_value = wi::ctz (op0);
1851 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1852 int_value = GET_MODE_PRECISION (mode);
1853 result = wi::shwi (int_value, mode);
1854 break;
1855
1856 case POPCOUNT:
1857 result = wi::shwi (wi::popcount (op0), mode);
1858 break;
1859
1860 case PARITY:
1861 result = wi::shwi (wi::parity (op0), mode);
1862 break;
1863
1864 case BSWAP:
1865 result = wide_int (op0).bswap ();
1866 break;
1867
1868 case TRUNCATE:
1869 case ZERO_EXTEND:
1870 result = wide_int::from (op0, width, UNSIGNED);
1871 break;
1872
1873 case SIGN_EXTEND:
1874 result = wide_int::from (op0, width, SIGNED);
1875 break;
1876
1877 case SQRT:
1878 default:
1879 return 0;
1880 }
1881
1882 return immed_wide_int_const (result, mode);
1883 }
1884
1885 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1886 && SCALAR_FLOAT_MODE_P (mode)
1887 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1888 {
1889 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1890 switch (code)
1891 {
1892 case SQRT:
1893 return 0;
1894 case ABS:
1895 d = real_value_abs (&d);
1896 break;
1897 case NEG:
1898 d = real_value_negate (&d);
1899 break;
1900 case FLOAT_TRUNCATE:
1901 /* Don't perform the operation if flag_signaling_nans is on
1902 and the operand is a signaling NaN. */
1903 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1904 return NULL_RTX;
1905 d = real_value_truncate (mode, d);
1906 break;
1907 case FLOAT_EXTEND:
1908 /* Don't perform the operation if flag_signaling_nans is on
1909 and the operand is a signaling NaN. */
1910 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1911 return NULL_RTX;
1912 /* All this does is change the mode, unless changing
1913 mode class. */
1914 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1915 real_convert (&d, mode, &d);
1916 break;
1917 case FIX:
1918 /* Don't perform the operation if flag_signaling_nans is on
1919 and the operand is a signaling NaN. */
1920 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1921 return NULL_RTX;
1922 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1923 break;
1924 case NOT:
1925 {
1926 long tmp[4];
1927 int i;
1928
1929 real_to_target (tmp, &d, GET_MODE (op));
1930 for (i = 0; i < 4; i++)
1931 tmp[i] = ~tmp[i];
1932 real_from_target (&d, tmp, mode);
1933 break;
1934 }
1935 default:
1936 gcc_unreachable ();
1937 }
1938 return const_double_from_real_value (d, mode);
1939 }
1940 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1941 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1942 && GET_MODE_CLASS (mode) == MODE_INT
1943 && width > 0)
1944 {
1945 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1946 operators are intentionally left unspecified (to ease implementation
1947 by target backends), for consistency, this routine implements the
1948 same semantics for constant folding as used by the middle-end. */
1949
1950 /* This was formerly used only for non-IEEE float.
1951 eggert@twinsun.com says it is safe for IEEE also. */
1952 REAL_VALUE_TYPE t;
1953 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1954 wide_int wmax, wmin;
1955 /* This is part of the ABI of real_to_integer, but we check
1956 the bounds ourselves before making this call. */
1957 bool fail;
1958
1959 switch (code)
1960 {
1961 case FIX:
1962 if (REAL_VALUE_ISNAN (*x))
1963 return const0_rtx;
1964
1965 /* Test against the signed upper bound. */
1966 wmax = wi::max_value (width, SIGNED);
1967 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1968 if (real_less (&t, x))
1969 return immed_wide_int_const (wmax, mode);
1970
1971 /* Test against the signed lower bound. */
1972 wmin = wi::min_value (width, SIGNED);
1973 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1974 if (real_less (x, &t))
1975 return immed_wide_int_const (wmin, mode);
1976
1977 return immed_wide_int_const (real_to_integer (x, &fail, width),
1978 mode);
1979
1980 case UNSIGNED_FIX:
1981 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1982 return const0_rtx;
1983
1984 /* Test against the unsigned upper bound. */
1985 wmax = wi::max_value (width, UNSIGNED);
1986 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1987 if (real_less (&t, x))
1988 return immed_wide_int_const (wmax, mode);
1989
1990 return immed_wide_int_const (real_to_integer (x, &fail, width),
1991 mode);
1992
1993 default:
1994 gcc_unreachable ();
1995 }
1996 }
1997
1998 return NULL_RTX;
1999 }
2000 \f
2001 /* Subroutine of simplify_binary_operation to simplify a binary operation
2002 CODE that can commute with byte swapping, with result mode MODE and
2003 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2004 Return zero if no simplification or canonicalization is possible. */
2005
2006 static rtx
2007 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2008 rtx op0, rtx op1)
2009 {
2010 rtx tem;
2011
2012 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
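     /* For instance, (and (bswap:SI X) (const_int 0xff)) can become
        (bswap:SI (and:SI X (const_int 0xff000000))), since byte-swapping
        the SImode mask 0xff yields 0xff000000.  */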
2013 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2014 {
2015 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2016 simplify_gen_unary (BSWAP, mode, op1, mode));
2017 return simplify_gen_unary (BSWAP, mode, tem, mode);
2018 }
2019
2020 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2021 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2022 {
2023 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2024 return simplify_gen_unary (BSWAP, mode, tem, mode);
2025 }
2026
2027 return NULL_RTX;
2028 }
2029
2030 /* Subroutine of simplify_binary_operation to simplify a commutative,
2031 associative binary operation CODE with result mode MODE, operating
2032 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2033 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2034 canonicalization is possible. */
2035
2036 static rtx
2037 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2038 rtx op0, rtx op1)
2039 {
2040 rtx tem;
2041
2042 /* Linearize the operator to the left. */
2043 if (GET_CODE (op1) == code)
2044 {
2045 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2046 if (GET_CODE (op0) == code)
2047 {
2048 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2049 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2050 }
2051
2052 /* "a op (b op c)" becomes "(b op c) op a". */
2053 if (! swap_commutative_operands_p (op1, op0))
2054 return simplify_gen_binary (code, mode, op1, op0);
2055
2056 std::swap (op0, op1);
2057 }
2058
2059 if (GET_CODE (op0) == code)
2060 {
2061 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2062 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2063 {
2064 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2065 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2066 }
2067
2068 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2069 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2070 if (tem != 0)
2071 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2072
2073 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2074 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2075 if (tem != 0)
2076 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2077 }
2078
2079 return 0;
2080 }
2081
2082
2083 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2084 and OP1. Return 0 if no simplification is possible.
2085
2086 Don't use this for relational operations such as EQ or LT.
2087 Use simplify_relational_operation instead. */
2088 rtx
2089 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2090 rtx op0, rtx op1)
2091 {
2092 rtx trueop0, trueop1;
2093 rtx tem;
2094
2095 /* Relational operations don't work here. We must know the mode
2096 of the operands in order to do the comparison correctly.
2097 Assuming a full word can give incorrect results.
2098 Consider comparing 128 with -128 in QImode. */
2099 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2100 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2101
2102 /* Make sure the constant is second. */
2103 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2104 && swap_commutative_operands_p (op0, op1))
2105 std::swap (op0, op1);
2106
2107 trueop0 = avoid_constant_pool_reference (op0);
2108 trueop1 = avoid_constant_pool_reference (op1);
2109
2110 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2111 if (tem)
2112 return tem;
2113 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2114
2115 if (tem)
2116 return tem;
2117
2118 /* If the above steps did not result in a simplification and op0 or op1
2119 were constant pool references, use the referenced constants directly. */
2120 if (trueop0 != op0 || trueop1 != op1)
2121 return simplify_gen_binary (code, mode, trueop0, trueop1);
2122
2123 return NULL_RTX;
2124 }
2125
2126 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2127 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2128 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2129 actual constants. */
2130
2131 static rtx
2132 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2133 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2134 {
2135 rtx tem, reversed, opleft, opright;
2136 HOST_WIDE_INT val;
2137 unsigned int width = GET_MODE_PRECISION (mode);
2138
2139 /* Even if we can't compute a constant result,
2140 there are some cases worth simplifying. */
2141
2142 switch (code)
2143 {
2144 case PLUS:
2145 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2146 when x is NaN, infinite, or finite and nonzero. They aren't
2147 when x is -0 and the rounding mode is not towards -infinity,
2148 since (-0) + 0 is then 0. */
2149 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2150 return op0;
2151
2152 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2153 transformations are safe even for IEEE. */
2154 if (GET_CODE (op0) == NEG)
2155 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2156 else if (GET_CODE (op1) == NEG)
2157 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2158
2159 /* (~a) + 1 -> -a */
2160 if (INTEGRAL_MODE_P (mode)
2161 && GET_CODE (op0) == NOT
2162 && trueop1 == const1_rtx)
2163 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2164
2165 /* Handle both-operands-constant cases. We can only add
2166 CONST_INTs to constants since the sum of relocatable symbols
2167 can't be handled by most assemblers. Don't add CONST_INT
2168 to CONST_INT since overflow won't be computed properly if wider
2169 than HOST_BITS_PER_WIDE_INT. */
2170
2171 if ((GET_CODE (op0) == CONST
2172 || GET_CODE (op0) == SYMBOL_REF
2173 || GET_CODE (op0) == LABEL_REF)
2174 && CONST_INT_P (op1))
2175 return plus_constant (mode, op0, INTVAL (op1));
2176 else if ((GET_CODE (op1) == CONST
2177 || GET_CODE (op1) == SYMBOL_REF
2178 || GET_CODE (op1) == LABEL_REF)
2179 && CONST_INT_P (op0))
2180 return plus_constant (mode, op1, INTVAL (op0));
2181
2182 /* See if this is something like X * C - X or vice versa or
2183 if the multiplication is written as a shift. If so, we can
2184 distribute and make a new multiply, shift, or maybe just
2185 have X (if C is 2 in the example above). But don't make
2186 something more expensive than we had before. */
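     /* For instance, (plus (mult X 3) X) can become (mult X 4), and
        (plus (ashift X 2) X) can become (mult X 5), as long as the new
        form is no more expensive than the original.  */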
2187
2188 if (SCALAR_INT_MODE_P (mode))
2189 {
2190 rtx lhs = op0, rhs = op1;
2191
2192 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2193 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2194
2195 if (GET_CODE (lhs) == NEG)
2196 {
2197 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2198 lhs = XEXP (lhs, 0);
2199 }
2200 else if (GET_CODE (lhs) == MULT
2201 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2202 {
2203 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2204 lhs = XEXP (lhs, 0);
2205 }
2206 else if (GET_CODE (lhs) == ASHIFT
2207 && CONST_INT_P (XEXP (lhs, 1))
2208 && INTVAL (XEXP (lhs, 1)) >= 0
2209 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2210 {
2211 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2212 GET_MODE_PRECISION (mode));
2213 lhs = XEXP (lhs, 0);
2214 }
2215
2216 if (GET_CODE (rhs) == NEG)
2217 {
2218 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2219 rhs = XEXP (rhs, 0);
2220 }
2221 else if (GET_CODE (rhs) == MULT
2222 && CONST_INT_P (XEXP (rhs, 1)))
2223 {
2224 coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
2225 rhs = XEXP (rhs, 0);
2226 }
2227 else if (GET_CODE (rhs) == ASHIFT
2228 && CONST_INT_P (XEXP (rhs, 1))
2229 && INTVAL (XEXP (rhs, 1)) >= 0
2230 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2231 {
2232 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2233 GET_MODE_PRECISION (mode));
2234 rhs = XEXP (rhs, 0);
2235 }
2236
2237 if (rtx_equal_p (lhs, rhs))
2238 {
2239 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2240 rtx coeff;
2241 bool speed = optimize_function_for_speed_p (cfun);
2242
2243 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2244
2245 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2246 return (set_src_cost (tem, mode, speed)
2247 <= set_src_cost (orig, mode, speed) ? tem : 0);
2248 }
2249 }
2250
2251 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2252 if (CONST_SCALAR_INT_P (op1)
2253 && GET_CODE (op0) == XOR
2254 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2255 && mode_signbit_p (mode, op1))
2256 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2257 simplify_gen_binary (XOR, mode, op1,
2258 XEXP (op0, 1)));
2259
2260 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2261 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2262 && GET_CODE (op0) == MULT
2263 && GET_CODE (XEXP (op0, 0)) == NEG)
2264 {
2265 rtx in1, in2;
2266
2267 in1 = XEXP (XEXP (op0, 0), 0);
2268 in2 = XEXP (op0, 1);
2269 return simplify_gen_binary (MINUS, mode, op1,
2270 simplify_gen_binary (MULT, mode,
2271 in1, in2));
2272 }
2273
2274 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2275 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2276 is 1. */
2277 if (COMPARISON_P (op0)
2278 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2279 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2280 && (reversed = reversed_comparison (op0, mode)))
2281 return
2282 simplify_gen_unary (NEG, mode, reversed, mode);
2283
2284 /* If one of the operands is a PLUS or a MINUS, see if we can
2285 simplify this by the associative law.
2286 Don't use the associative law for floating point.
2287 The inaccuracy makes it nonassociative,
2288 and subtle programs can break if operations are associated. */
2289
2290 if (INTEGRAL_MODE_P (mode)
2291 && (plus_minus_operand_p (op0)
2292 || plus_minus_operand_p (op1))
2293 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2294 return tem;
2295
2296 /* Reassociate floating point addition only when the user
2297 specifies associative math operations. */
2298 if (FLOAT_MODE_P (mode)
2299 && flag_associative_math)
2300 {
2301 tem = simplify_associative_operation (code, mode, op0, op1);
2302 if (tem)
2303 return tem;
2304 }
2305 break;
2306
2307 case COMPARE:
2308 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2309 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2310 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2311 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2312 {
2313 rtx xop00 = XEXP (op0, 0);
2314 rtx xop10 = XEXP (op1, 0);
2315
2316 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2317 return xop00;
2318
2319 if (REG_P (xop00) && REG_P (xop10)
2320 && REGNO (xop00) == REGNO (xop10)
2321 && GET_MODE (xop00) == mode
2322 && GET_MODE (xop10) == mode
2323 && GET_MODE_CLASS (mode) == MODE_CC)
2324 return xop00;
2325 }
2326 break;
2327
2328 case MINUS:
2329 /* We can't assume x-x is 0 even with non-IEEE floating point,
2330 but since it is zero except in very strange circumstances, we
2331 will treat it as zero with -ffinite-math-only. */
2332 if (rtx_equal_p (trueop0, trueop1)
2333 && ! side_effects_p (op0)
2334 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2335 return CONST0_RTX (mode);
2336
2337 /* Change subtraction from zero into negation. (0 - x) is the
2338 same as -x when x is NaN, infinite, or finite and nonzero.
2339 But if the mode has signed zeros, and does not round towards
2340 -infinity, then 0 - 0 is 0, not -0. */
2341 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2342 return simplify_gen_unary (NEG, mode, op1, mode);
2343
2344 /* (-1 - a) is ~a, unless the expression contains symbolic
2345 constants, in which case not retaining additions and
2346 subtractions could cause invalid assembly to be produced. */
2347 if (trueop0 == constm1_rtx
2348 && !contains_symbolic_reference_p (op1))
2349 return simplify_gen_unary (NOT, mode, op1, mode);
2350
2351 /* Subtracting 0 has no effect unless the mode has signed zeros
2352 and supports rounding towards -infinity. In such a case,
2353 0 - 0 is -0. */
2354 if (!(HONOR_SIGNED_ZEROS (mode)
2355 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2356 && trueop1 == CONST0_RTX (mode))
2357 return op0;
2358
2359 /* See if this is something like X * C - X or vice versa or
2360 if the multiplication is written as a shift. If so, we can
2361 distribute and make a new multiply, shift, or maybe just
2362 have X (if C is 2 in the example above). But don't make
2363 something more expensive than we had before. */
2364
2365 if (SCALAR_INT_MODE_P (mode))
2366 {
2367 rtx lhs = op0, rhs = op1;
2368
2369 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2370 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2371
2372 if (GET_CODE (lhs) == NEG)
2373 {
2374 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2375 lhs = XEXP (lhs, 0);
2376 }
2377 else if (GET_CODE (lhs) == MULT
2378 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2379 {
2380 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2381 lhs = XEXP (lhs, 0);
2382 }
2383 else if (GET_CODE (lhs) == ASHIFT
2384 && CONST_INT_P (XEXP (lhs, 1))
2385 && INTVAL (XEXP (lhs, 1)) >= 0
2386 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2387 {
2388 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2389 GET_MODE_PRECISION (mode));
2390 lhs = XEXP (lhs, 0);
2391 }
2392
2393 if (GET_CODE (rhs) == NEG)
2394 {
2395 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2396 rhs = XEXP (rhs, 0);
2397 }
2398 else if (GET_CODE (rhs) == MULT
2399 && CONST_INT_P (XEXP (rhs, 1)))
2400 {
2401 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
2402 rhs = XEXP (rhs, 0);
2403 }
2404 else if (GET_CODE (rhs) == ASHIFT
2405 && CONST_INT_P (XEXP (rhs, 1))
2406 && INTVAL (XEXP (rhs, 1)) >= 0
2407 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2408 {
2409 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2410 GET_MODE_PRECISION (mode));
2411 negcoeff1 = -negcoeff1;
2412 rhs = XEXP (rhs, 0);
2413 }
2414
2415 if (rtx_equal_p (lhs, rhs))
2416 {
2417 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2418 rtx coeff;
2419 bool speed = optimize_function_for_speed_p (cfun);
2420
2421 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2422
2423 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2424 return (set_src_cost (tem, mode, speed)
2425 <= set_src_cost (orig, mode, speed) ? tem : 0);
2426 }
2427 }
2428
2429 /* (a - (-b)) -> (a + b). True even for IEEE. */
2430 if (GET_CODE (op1) == NEG)
2431 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2432
2433 /* (-x - c) may be simplified as (-c - x). */
2434 if (GET_CODE (op0) == NEG
2435 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2436 {
2437 tem = simplify_unary_operation (NEG, mode, op1, mode);
2438 if (tem)
2439 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2440 }
2441
2442 /* Don't let a relocatable value get a negative coeff. */
2443 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2444 return simplify_gen_binary (PLUS, mode,
2445 op0,
2446 neg_const_int (mode, op1));
2447
2448 /* (x - (x & y)) -> (x & ~y) */
2449 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2450 {
2451 if (rtx_equal_p (op0, XEXP (op1, 0)))
2452 {
2453 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2454 GET_MODE (XEXP (op1, 1)));
2455 return simplify_gen_binary (AND, mode, op0, tem);
2456 }
2457 if (rtx_equal_p (op0, XEXP (op1, 1)))
2458 {
2459 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2460 GET_MODE (XEXP (op1, 0)));
2461 return simplify_gen_binary (AND, mode, op0, tem);
2462 }
2463 }
2464
2465 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2466 by reversing the comparison code if valid. */
2467 if (STORE_FLAG_VALUE == 1
2468 && trueop0 == const1_rtx
2469 && COMPARISON_P (op1)
2470 && (reversed = reversed_comparison (op1, mode)))
2471 return reversed;
2472
2473 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2474 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2475 && GET_CODE (op1) == MULT
2476 && GET_CODE (XEXP (op1, 0)) == NEG)
2477 {
2478 rtx in1, in2;
2479
2480 in1 = XEXP (XEXP (op1, 0), 0);
2481 in2 = XEXP (op1, 1);
2482 return simplify_gen_binary (PLUS, mode,
2483 simplify_gen_binary (MULT, mode,
2484 in1, in2),
2485 op0);
2486 }
2487
2488 /* Canonicalize (minus (neg A) (mult B C)) to
2489 (minus (mult (neg B) C) A). */
2490 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2491 && GET_CODE (op1) == MULT
2492 && GET_CODE (op0) == NEG)
2493 {
2494 rtx in1, in2;
2495
2496 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2497 in2 = XEXP (op1, 1);
2498 return simplify_gen_binary (MINUS, mode,
2499 simplify_gen_binary (MULT, mode,
2500 in1, in2),
2501 XEXP (op0, 0));
2502 }
2503
2504 /* If one of the operands is a PLUS or a MINUS, see if we can
2505 simplify this by the associative law. This will, for example,
2506 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2507 Don't use the associative law for floating point.
2508 The inaccuracy makes it nonassociative,
2509 and subtle programs can break if operations are associated. */
2510
2511 if (INTEGRAL_MODE_P (mode)
2512 && (plus_minus_operand_p (op0)
2513 || plus_minus_operand_p (op1))
2514 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2515 return tem;
2516 break;
2517
2518 case MULT:
2519 if (trueop1 == constm1_rtx)
2520 return simplify_gen_unary (NEG, mode, op0, mode);
2521
2522 if (GET_CODE (op0) == NEG)
2523 {
2524 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2525 /* If op1 is a MULT as well and simplify_unary_operation
2526 just moved the NEG to the second operand, simplify_gen_binary
2527 below could, through simplify_associative_operation, move
2528 the NEG around again and recurse endlessly. */
2529 if (temp
2530 && GET_CODE (op1) == MULT
2531 && GET_CODE (temp) == MULT
2532 && XEXP (op1, 0) == XEXP (temp, 0)
2533 && GET_CODE (XEXP (temp, 1)) == NEG
2534 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2535 temp = NULL_RTX;
2536 if (temp)
2537 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2538 }
2539 if (GET_CODE (op1) == NEG)
2540 {
2541 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2542 /* If op0 is a MULT as well and simplify_unary_operation
2543 just moved the NEG to the second operand, simplify_gen_binary
2544 below could, through simplify_associative_operation, move
2545 the NEG around again and recurse endlessly. */
2546 if (temp
2547 && GET_CODE (op0) == MULT
2548 && GET_CODE (temp) == MULT
2549 && XEXP (op0, 0) == XEXP (temp, 0)
2550 && GET_CODE (XEXP (temp, 1)) == NEG
2551 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2552 temp = NULL_RTX;
2553 if (temp)
2554 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2555 }
2556
2557 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2558 x is NaN, since x * 0 is then also NaN. Nor is it valid
2559 when the mode has signed zeros, since multiplying a negative
2560 number by 0 will give -0, not 0. */
2561 if (!HONOR_NANS (mode)
2562 && !HONOR_SIGNED_ZEROS (mode)
2563 && trueop1 == CONST0_RTX (mode)
2564 && ! side_effects_p (op0))
2565 return op1;
2566
2567 /* In IEEE floating point, x*1 is not equivalent to x for
2568 signalling NaNs. */
2569 if (!HONOR_SNANS (mode)
2570 && trueop1 == CONST1_RTX (mode))
2571 return op0;
2572
2573 /* Convert multiply by constant power of two into shift. */
2574 if (CONST_SCALAR_INT_P (trueop1))
2575 {
2576 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2577 if (val >= 0)
2578 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2579 }
2580
2581 /* x*2 is x+x and x*(-1) is -x */
2582 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2583 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2584 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2585 && GET_MODE (op0) == mode)
2586 {
2587 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2588
2589 if (real_equal (d1, &dconst2))
2590 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2591
2592 if (!HONOR_SNANS (mode)
2593 && real_equal (d1, &dconstm1))
2594 return simplify_gen_unary (NEG, mode, op0, mode);
2595 }
2596
2597 /* Optimize -x * -x as x * x. */
2598 if (FLOAT_MODE_P (mode)
2599 && GET_CODE (op0) == NEG
2600 && GET_CODE (op1) == NEG
2601 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2602 && !side_effects_p (XEXP (op0, 0)))
2603 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2604
2605 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2606 if (SCALAR_FLOAT_MODE_P (mode)
2607 && GET_CODE (op0) == ABS
2608 && GET_CODE (op1) == ABS
2609 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2610 && !side_effects_p (XEXP (op0, 0)))
2611 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2612
2613 /* Reassociate multiplication, but for floating point MULTs
2614 only when the user specifies unsafe math optimizations. */
2615 if (! FLOAT_MODE_P (mode)
2616 || flag_unsafe_math_optimizations)
2617 {
2618 tem = simplify_associative_operation (code, mode, op0, op1);
2619 if (tem)
2620 return tem;
2621 }
2622 break;
2623
2624 case IOR:
2625 if (trueop1 == CONST0_RTX (mode))
2626 return op0;
2627 if (INTEGRAL_MODE_P (mode)
2628 && trueop1 == CONSTM1_RTX (mode)
2629 && !side_effects_p (op0))
2630 return op1;
2631 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2632 return op0;
2633 /* A | (~A) -> -1 */
2634 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2635 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2636 && ! side_effects_p (op0)
2637 && SCALAR_INT_MODE_P (mode))
2638 return constm1_rtx;
2639
2640 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
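     /* E.g. if A is (and X (const_int 0x0f)), then
        (ior A (const_int 0xff)) is simply (const_int 0xff).  */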
2641 if (CONST_INT_P (op1)
2642 && HWI_COMPUTABLE_MODE_P (mode)
2643 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2644 && !side_effects_p (op0))
2645 return op1;
2646
2647 /* Canonicalize (X & C1) | C2. */
2648 if (GET_CODE (op0) == AND
2649 && CONST_INT_P (trueop1)
2650 && CONST_INT_P (XEXP (op0, 1)))
2651 {
2652 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2653 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2654 HOST_WIDE_INT c2 = INTVAL (trueop1);
2655
2656 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2657 if ((c1 & c2) == c1
2658 && !side_effects_p (XEXP (op0, 0)))
2659 return trueop1;
2660
2661 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2662 if (((c1|c2) & mask) == mask)
2663 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2664
2665 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2666 if (((c1 & ~c2) & mask) != (c1 & mask))
2667 {
2668 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2669 gen_int_mode (c1 & ~c2, mode));
2670 return simplify_gen_binary (IOR, mode, tem, op1);
2671 }
2672 }
2673
2674 /* Convert (A & B) | A to A. */
2675 if (GET_CODE (op0) == AND
2676 && (rtx_equal_p (XEXP (op0, 0), op1)
2677 || rtx_equal_p (XEXP (op0, 1), op1))
2678 && ! side_effects_p (XEXP (op0, 0))
2679 && ! side_effects_p (XEXP (op0, 1)))
2680 return op1;
2681
2682 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2683 mode size to (rotate A CX). */
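     /* E.g. in SImode, (ior (ashift A (const_int 8))
        (lshiftrt A (const_int 24))) becomes (rotate A (const_int 8)).  */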
2684
2685 if (GET_CODE (op1) == ASHIFT
2686 || GET_CODE (op1) == SUBREG)
2687 {
2688 opleft = op1;
2689 opright = op0;
2690 }
2691 else
2692 {
2693 opright = op1;
2694 opleft = op0;
2695 }
2696
2697 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2698 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2699 && CONST_INT_P (XEXP (opleft, 1))
2700 && CONST_INT_P (XEXP (opright, 1))
2701 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2702 == GET_MODE_PRECISION (mode)))
2703 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2704
2705 /* Same, but for ashift that has been "simplified" to a wider mode
2706 by simplify_shift_const. */
2707
2708 if (GET_CODE (opleft) == SUBREG
2709 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2710 && GET_CODE (opright) == LSHIFTRT
2711 && GET_CODE (XEXP (opright, 0)) == SUBREG
2712 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2713 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2714 && (GET_MODE_SIZE (GET_MODE (opleft))
2715 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2716 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2717 SUBREG_REG (XEXP (opright, 0)))
2718 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2719 && CONST_INT_P (XEXP (opright, 1))
2720 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2721 == GET_MODE_PRECISION (mode)))
2722 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2723 XEXP (SUBREG_REG (opleft), 1));
2724
2725 /* If we have (ior (and X C1) C2), simplify this by making
2726 C1 as small as possible if C1 actually changes. */
2727 if (CONST_INT_P (op1)
2728 && (HWI_COMPUTABLE_MODE_P (mode)
2729 || INTVAL (op1) > 0)
2730 && GET_CODE (op0) == AND
2731 && CONST_INT_P (XEXP (op0, 1))
2732 && CONST_INT_P (op1)
2733 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2734 {
2735 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2736 gen_int_mode (UINTVAL (XEXP (op0, 1))
2737 & ~UINTVAL (op1),
2738 mode));
2739 return simplify_gen_binary (IOR, mode, tmp, op1);
2740 }
2741
2742 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2743 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2744 the PLUS does not affect any of the bits in OP1: then we can do
2745 the IOR as a PLUS and we can associate. This is valid if OP1
2746 can be safely shifted left C bits. */
2747 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2748 && GET_CODE (XEXP (op0, 0)) == PLUS
2749 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2750 && CONST_INT_P (XEXP (op0, 1))
2751 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2752 {
2753 int count = INTVAL (XEXP (op0, 1));
2754 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2755
2756 if (mask >> count == INTVAL (trueop1)
2757 && trunc_int_for_mode (mask, mode) == mask
2758 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2759 return simplify_gen_binary (ASHIFTRT, mode,
2760 plus_constant (mode, XEXP (op0, 0),
2761 mask),
2762 XEXP (op0, 1));
2763 }
2764
2765 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2766 if (tem)
2767 return tem;
2768
2769 tem = simplify_associative_operation (code, mode, op0, op1);
2770 if (tem)
2771 return tem;
2772 break;
2773
2774 case XOR:
2775 if (trueop1 == CONST0_RTX (mode))
2776 return op0;
2777 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2778 return simplify_gen_unary (NOT, mode, op0, mode);
2779 if (rtx_equal_p (trueop0, trueop1)
2780 && ! side_effects_p (op0)
2781 && GET_MODE_CLASS (mode) != MODE_CC)
2782 return CONST0_RTX (mode);
2783
2784 /* Canonicalize XOR of the most significant bit to PLUS. */
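     /* E.g. in QImode, (xor X (const_int 0x80)) is equivalent to
        (plus X (const_int 0x80)), because adding the sign bit merely
        flips it; the carry out of the top bit is discarded.  */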
2785 if (CONST_SCALAR_INT_P (op1)
2786 && mode_signbit_p (mode, op1))
2787 return simplify_gen_binary (PLUS, mode, op0, op1);
2788 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2789 if (CONST_SCALAR_INT_P (op1)
2790 && GET_CODE (op0) == PLUS
2791 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2792 && mode_signbit_p (mode, XEXP (op0, 1)))
2793 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2794 simplify_gen_binary (XOR, mode, op1,
2795 XEXP (op0, 1)));
2796
2797 /* If we are XORing two things that have no bits in common,
2798 convert them into an IOR. This helps to detect rotation encoded
2799 using those methods and possibly other simplifications. */
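     /* E.g. (xor (and X (const_int 0xf0)) (and Y (const_int 0x0f)))
        becomes (ior (and X (const_int 0xf0)) (and Y (const_int 0x0f))),
        since the two operands have no nonzero bits in common.  */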
2800
2801 if (HWI_COMPUTABLE_MODE_P (mode)
2802 && (nonzero_bits (op0, mode)
2803 & nonzero_bits (op1, mode)) == 0)
2804 return (simplify_gen_binary (IOR, mode, op0, op1));
2805
2806 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2807 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2808 (NOT y). */
2809 {
2810 int num_negated = 0;
2811
2812 if (GET_CODE (op0) == NOT)
2813 num_negated++, op0 = XEXP (op0, 0);
2814 if (GET_CODE (op1) == NOT)
2815 num_negated++, op1 = XEXP (op1, 0);
2816
2817 if (num_negated == 2)
2818 return simplify_gen_binary (XOR, mode, op0, op1);
2819 else if (num_negated == 1)
2820 return simplify_gen_unary (NOT, mode,
2821 simplify_gen_binary (XOR, mode, op0, op1),
2822 mode);
2823 }
2824
2825 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2826 correspond to a machine insn or result in further simplifications
2827 if B is a constant. */
2828
2829 if (GET_CODE (op0) == AND
2830 && rtx_equal_p (XEXP (op0, 1), op1)
2831 && ! side_effects_p (op1))
2832 return simplify_gen_binary (AND, mode,
2833 simplify_gen_unary (NOT, mode,
2834 XEXP (op0, 0), mode),
2835 op1);
2836
2837 else if (GET_CODE (op0) == AND
2838 && rtx_equal_p (XEXP (op0, 0), op1)
2839 && ! side_effects_p (op1))
2840 return simplify_gen_binary (AND, mode,
2841 simplify_gen_unary (NOT, mode,
2842 XEXP (op0, 1), mode),
2843 op1);
2844
2845 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2846 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2847 out bits inverted twice and not set by C. Similarly, given
2848 (xor (and (xor A B) C) D), simplify without inverting C in
2849 the xor operand: (xor (and A C) (B&C)^D).
2850 */
2851 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2852 && GET_CODE (XEXP (op0, 0)) == XOR
2853 && CONST_INT_P (op1)
2854 && CONST_INT_P (XEXP (op0, 1))
2855 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2856 {
2857 enum rtx_code op = GET_CODE (op0);
2858 rtx a = XEXP (XEXP (op0, 0), 0);
2859 rtx b = XEXP (XEXP (op0, 0), 1);
2860 rtx c = XEXP (op0, 1);
2861 rtx d = op1;
2862 HOST_WIDE_INT bval = INTVAL (b);
2863 HOST_WIDE_INT cval = INTVAL (c);
2864 HOST_WIDE_INT dval = INTVAL (d);
2865 HOST_WIDE_INT xcval;
2866
2867 if (op == IOR)
2868 xcval = ~cval;
2869 else
2870 xcval = cval;
2871
2872 return simplify_gen_binary (XOR, mode,
2873 simplify_gen_binary (op, mode, a, c),
2874 gen_int_mode ((bval & xcval) ^ dval,
2875 mode));
2876 }
2877
2878 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2879 we can transform like this:
2880 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2881 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2882 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2883 Attempt a few simplifications when B and C are both constants. */
2884 if (GET_CODE (op0) == AND
2885 && CONST_INT_P (op1)
2886 && CONST_INT_P (XEXP (op0, 1)))
2887 {
2888 rtx a = XEXP (op0, 0);
2889 rtx b = XEXP (op0, 1);
2890 rtx c = op1;
2891 HOST_WIDE_INT bval = INTVAL (b);
2892 HOST_WIDE_INT cval = INTVAL (c);
2893
2894 /* Instead of computing ~A&C, we compute its negation,
2895 (A|~C). If it yields -1, ~A&C is zero, so we can
2896 optimize for sure. If it does not simplify, we still try
2897 to compute ~A&C below, but since that always allocates
2898 RTL, we don't try that before committing to returning a
2899 simplified expression. */
2900 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2901 GEN_INT (~cval));
2902
2903 if ((~cval & bval) == 0)
2904 {
2905 rtx na_c = NULL_RTX;
2906 if (n_na_c)
2907 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2908 else
2909 {
2910 /* If ~A does not simplify, don't bother: we don't
2911 want to simplify 2 operations into 3, and if na_c
2912 were to simplify with na, n_na_c would have
2913 simplified as well. */
2914 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2915 if (na)
2916 na_c = simplify_gen_binary (AND, mode, na, c);
2917 }
2918
2919 /* Try to simplify ~A&C | ~B&C. */
2920 if (na_c != NULL_RTX)
2921 return simplify_gen_binary (IOR, mode, na_c,
2922 gen_int_mode (~bval & cval, mode));
2923 }
2924 else
2925 {
2926 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2927 if (n_na_c == CONSTM1_RTX (mode))
2928 {
2929 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2930 gen_int_mode (~cval & bval,
2931 mode));
2932 return simplify_gen_binary (IOR, mode, a_nc_b,
2933 gen_int_mode (~bval & cval,
2934 mode));
2935 }
2936 }
2937 }
2938
2939 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
2940 do (ior (and A ~C) (and B C)) which is a machine instruction on some
2941 machines, and also has shorter instruction path length. */
2942 if (GET_CODE (op0) == AND
2943 && GET_CODE (XEXP (op0, 0)) == XOR
2944 && CONST_INT_P (XEXP (op0, 1))
2945 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2946 {
2947 rtx a = trueop1;
2948 rtx b = XEXP (XEXP (op0, 0), 1);
2949 rtx c = XEXP (op0, 1);
2950 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2951 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2952 rtx bc = simplify_gen_binary (AND, mode, b, c);
2953 return simplify_gen_binary (IOR, mode, a_nc, bc);
2954 }
2955 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2956 else if (GET_CODE (op0) == AND
2957 && GET_CODE (XEXP (op0, 0)) == XOR
2958 && CONST_INT_P (XEXP (op0, 1))
2959 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2960 {
2961 rtx a = XEXP (XEXP (op0, 0), 0);
2962 rtx b = trueop1;
2963 rtx c = XEXP (op0, 1);
2964 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2965 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2966 rtx ac = simplify_gen_binary (AND, mode, a, c);
2967 return simplify_gen_binary (IOR, mode, ac, b_nc);
2968 }
2969
2970 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2971 comparison if STORE_FLAG_VALUE is 1. */
2972 if (STORE_FLAG_VALUE == 1
2973 && trueop1 == const1_rtx
2974 && COMPARISON_P (op0)
2975 && (reversed = reversed_comparison (op0, mode)))
2976 return reversed;
2977
2978 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2979 is (lt foo (const_int 0)), so we can perform the above
2980 simplification if STORE_FLAG_VALUE is 1. */
2981
2982 if (STORE_FLAG_VALUE == 1
2983 && trueop1 == const1_rtx
2984 && GET_CODE (op0) == LSHIFTRT
2985 && CONST_INT_P (XEXP (op0, 1))
2986 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2987 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2988
2989 /* (xor (comparison foo bar) (const_int sign-bit))
2990 when STORE_FLAG_VALUE is the sign bit. */
2991 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2992 && trueop1 == const_true_rtx
2993 && COMPARISON_P (op0)
2994 && (reversed = reversed_comparison (op0, mode)))
2995 return reversed;
2996
2997 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2998 if (tem)
2999 return tem;
3000
3001 tem = simplify_associative_operation (code, mode, op0, op1);
3002 if (tem)
3003 return tem;
3004 break;
3005
3006 case AND:
3007 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3008 return trueop1;
3009 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3010 return op0;
3011 if (HWI_COMPUTABLE_MODE_P (mode))
3012 {
3013 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3014 HOST_WIDE_INT nzop1;
3015 if (CONST_INT_P (trueop1))
3016 {
3017 HOST_WIDE_INT val1 = INTVAL (trueop1);
3018 /* If we are turning off bits already known off in OP0, we need
3019 not do an AND. */
3020 if ((nzop0 & ~val1) == 0)
3021 return op0;
3022 }
3023 nzop1 = nonzero_bits (trueop1, mode);
3024 /* If we are clearing all the nonzero bits, the result is zero. */
3025 if ((nzop1 & nzop0) == 0
3026 && !side_effects_p (op0) && !side_effects_p (op1))
3027 return CONST0_RTX (mode);
3028 }
3029 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3030 && GET_MODE_CLASS (mode) != MODE_CC)
3031 return op0;
3032 /* A & (~A) -> 0 */
3033 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3034 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3035 && ! side_effects_p (op0)
3036 && GET_MODE_CLASS (mode) != MODE_CC)
3037 return CONST0_RTX (mode);
3038
3039 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3040 there are no nonzero bits of C outside of X's mode. */
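     /* For instance, (and (sign_extend:SI (reg:QI X)) (const_int 0x7f))
        can become (zero_extend:SI (and:QI (reg:QI X) (const_int 0x7f))),
        since the mask clears every bit that the extension could set.  */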
3041 if ((GET_CODE (op0) == SIGN_EXTEND
3042 || GET_CODE (op0) == ZERO_EXTEND)
3043 && CONST_INT_P (trueop1)
3044 && HWI_COMPUTABLE_MODE_P (mode)
3045 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3046 & UINTVAL (trueop1)) == 0)
3047 {
3048 machine_mode imode = GET_MODE (XEXP (op0, 0));
3049 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3050 gen_int_mode (INTVAL (trueop1),
3051 imode));
3052 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3053 }
3054
3055 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3056 we might be able to further simplify the AND with X and potentially
3057 remove the truncation altogether. */
3058 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3059 {
3060 rtx x = XEXP (op0, 0);
3061 machine_mode xmode = GET_MODE (x);
3062 tem = simplify_gen_binary (AND, xmode, x,
3063 gen_int_mode (INTVAL (trueop1), xmode));
3064 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3065 }
3066
3067 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3068 if (GET_CODE (op0) == IOR
3069 && CONST_INT_P (trueop1)
3070 && CONST_INT_P (XEXP (op0, 1)))
3071 {
3072 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3073 return simplify_gen_binary (IOR, mode,
3074 simplify_gen_binary (AND, mode,
3075 XEXP (op0, 0), op1),
3076 gen_int_mode (tmp, mode));
3077 }
3078
3079 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3080 insn (and may simplify more). */
3081 if (GET_CODE (op0) == XOR
3082 && rtx_equal_p (XEXP (op0, 0), op1)
3083 && ! side_effects_p (op1))
3084 return simplify_gen_binary (AND, mode,
3085 simplify_gen_unary (NOT, mode,
3086 XEXP (op0, 1), mode),
3087 op1);
3088
3089 if (GET_CODE (op0) == XOR
3090 && rtx_equal_p (XEXP (op0, 1), op1)
3091 && ! side_effects_p (op1))
3092 return simplify_gen_binary (AND, mode,
3093 simplify_gen_unary (NOT, mode,
3094 XEXP (op0, 0), mode),
3095 op1);
3096
3097 /* Similarly for (~(A ^ B)) & A. */
3098 if (GET_CODE (op0) == NOT
3099 && GET_CODE (XEXP (op0, 0)) == XOR
3100 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3101 && ! side_effects_p (op1))
3102 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3103
3104 if (GET_CODE (op0) == NOT
3105 && GET_CODE (XEXP (op0, 0)) == XOR
3106 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3107 && ! side_effects_p (op1))
3108 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3109
3110 /* Convert (A | B) & A to A. */
3111 if (GET_CODE (op0) == IOR
3112 && (rtx_equal_p (XEXP (op0, 0), op1)
3113 || rtx_equal_p (XEXP (op0, 1), op1))
3114 && ! side_effects_p (XEXP (op0, 0))
3115 && ! side_effects_p (XEXP (op0, 1)))
3116 return op1;
3117
3118 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3119 ((A & N) + B) & M -> (A + B) & M
3120 Similarly if (N & M) == 0,
3121 ((A | N) + B) & M -> (A + B) & M
3122 and for - instead of + and/or ^ instead of |.
3123 Also, if (N & M) == 0, then
3124 (A +- N) & M -> A & M. */
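     /* For instance, with M == 0xff and N == 0x1ff,
        (and (plus (and A (const_int 0x1ff)) B) (const_int 0xff)) can
        become (and (plus A B) (const_int 0xff)), because the low eight
        bits of the sum depend only on the low eight bits of the addends.  */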
3125 if (CONST_INT_P (trueop1)
3126 && HWI_COMPUTABLE_MODE_P (mode)
3127 && ~UINTVAL (trueop1)
3128 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3129 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3130 {
3131 rtx pmop[2];
3132 int which;
3133
3134 pmop[0] = XEXP (op0, 0);
3135 pmop[1] = XEXP (op0, 1);
3136
3137 if (CONST_INT_P (pmop[1])
3138 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3139 return simplify_gen_binary (AND, mode, pmop[0], op1);
3140
3141 for (which = 0; which < 2; which++)
3142 {
3143 tem = pmop[which];
3144 switch (GET_CODE (tem))
3145 {
3146 case AND:
3147 if (CONST_INT_P (XEXP (tem, 1))
3148 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3149 == UINTVAL (trueop1))
3150 pmop[which] = XEXP (tem, 0);
3151 break;
3152 case IOR:
3153 case XOR:
3154 if (CONST_INT_P (XEXP (tem, 1))
3155 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3156 pmop[which] = XEXP (tem, 0);
3157 break;
3158 default:
3159 break;
3160 }
3161 }
3162
3163 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3164 {
3165 tem = simplify_gen_binary (GET_CODE (op0), mode,
3166 pmop[0], pmop[1]);
3167 return simplify_gen_binary (code, mode, tem, op1);
3168 }
3169 }
3170
3171 /* (and X (ior (not X) Y)) -> (and X Y) */
3172 if (GET_CODE (op1) == IOR
3173 && GET_CODE (XEXP (op1, 0)) == NOT
3174 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3175 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3176
3177 /* (and (ior (not X) Y) X) -> (and X Y) */
3178 if (GET_CODE (op0) == IOR
3179 && GET_CODE (XEXP (op0, 0)) == NOT
3180 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3181 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3182
3183 /* (and X (ior Y (not X))) -> (and X Y) */
3184 if (GET_CODE (op1) == IOR
3185 && GET_CODE (XEXP (op1, 1)) == NOT
3186 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3187 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3188
3189 /* (and (ior Y (not X)) X) -> (and X Y) */
3190 if (GET_CODE (op0) == IOR
3191 && GET_CODE (XEXP (op0, 1)) == NOT
3192 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3193 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3194
3195 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3196 if (tem)
3197 return tem;
3198
3199 tem = simplify_associative_operation (code, mode, op0, op1);
3200 if (tem)
3201 return tem;
3202 break;
3203
3204 case UDIV:
3205 /* 0/x is 0 (or x&0 if x has side-effects). */
3206 if (trueop0 == CONST0_RTX (mode)
3207 && !cfun->can_throw_non_call_exceptions)
3208 {
3209 if (side_effects_p (op1))
3210 return simplify_gen_binary (AND, mode, op1, trueop0);
3211 return trueop0;
3212 }
3213 /* x/1 is x. */
3214 if (trueop1 == CONST1_RTX (mode))
3215 {
3216 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3217 if (tem)
3218 return tem;
3219 }
3220 /* Convert divide by power of two into shift. */
3221 if (CONST_INT_P (trueop1)
3222 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3223 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3224 break;
3225
3226 case DIV:
3227 /* Handle floating point and integers separately. */
3228 if (SCALAR_FLOAT_MODE_P (mode))
3229 {
3230 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3231 safe for modes with NaNs, since 0.0 / 0.0 will then be
3232 NaN rather than 0.0. Nor is it safe for modes with signed
3233 zeros, since dividing 0 by a negative number gives -0.0 */
3234 if (trueop0 == CONST0_RTX (mode)
3235 && !HONOR_NANS (mode)
3236 && !HONOR_SIGNED_ZEROS (mode)
3237 && ! side_effects_p (op1))
3238 return op0;
3239 /* x/1.0 is x. */
3240 if (trueop1 == CONST1_RTX (mode)
3241 && !HONOR_SNANS (mode))
3242 return op0;
3243
3244 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3245 && trueop1 != CONST0_RTX (mode))
3246 {
3247 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3248
3249 /* x/-1.0 is -x. */
3250 if (real_equal (d1, &dconstm1)
3251 && !HONOR_SNANS (mode))
3252 return simplify_gen_unary (NEG, mode, op0, mode);
3253
3254 /* Change FP division by a constant into multiplication.
3255 Only do this with -freciprocal-math. */
3256 if (flag_reciprocal_math
3257 && !real_equal (d1, &dconst0))
3258 {
3259 REAL_VALUE_TYPE d;
3260 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3261 tem = const_double_from_real_value (d, mode);
3262 return simplify_gen_binary (MULT, mode, op0, tem);
3263 }
3264 }
3265 }
3266 else if (SCALAR_INT_MODE_P (mode))
3267 {
3268 /* 0/x is 0 (or x&0 if x has side-effects). */
3269 if (trueop0 == CONST0_RTX (mode)
3270 && !cfun->can_throw_non_call_exceptions)
3271 {
3272 if (side_effects_p (op1))
3273 return simplify_gen_binary (AND, mode, op1, trueop0);
3274 return trueop0;
3275 }
3276 /* x/1 is x. */
3277 if (trueop1 == CONST1_RTX (mode))
3278 {
3279 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3280 if (tem)
3281 return tem;
3282 }
3283 /* x/-1 is -x. */
3284 if (trueop1 == constm1_rtx)
3285 {
3286 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3287 if (x)
3288 return simplify_gen_unary (NEG, mode, x, mode);
3289 }
3290 }
3291 break;
3292
3293 case UMOD:
3294 /* 0%x is 0 (or x&0 if x has side-effects). */
3295 if (trueop0 == CONST0_RTX (mode))
3296 {
3297 if (side_effects_p (op1))
3298 return simplify_gen_binary (AND, mode, op1, trueop0);
3299 return trueop0;
3300 }
3301 /* x%1 is 0 (or x&0 if x has side-effects). */
3302 if (trueop1 == CONST1_RTX (mode))
3303 {
3304 if (side_effects_p (op0))
3305 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3306 return CONST0_RTX (mode);
3307 }
3308 /* Implement modulus by power of two as AND. */
3309 if (CONST_INT_P (trueop1)
3310 && exact_log2 (UINTVAL (trueop1)) > 0)
3311 return simplify_gen_binary (AND, mode, op0,
3312 gen_int_mode (INTVAL (op1) - 1, mode));
3313 break;
3314
3315 case MOD:
3316 /* 0%x is 0 (or x&0 if x has side-effects). */
3317 if (trueop0 == CONST0_RTX (mode))
3318 {
3319 if (side_effects_p (op1))
3320 return simplify_gen_binary (AND, mode, op1, trueop0);
3321 return trueop0;
3322 }
3323 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3324 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3325 {
3326 if (side_effects_p (op0))
3327 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3328 return CONST0_RTX (mode);
3329 }
3330 break;
3331
3332 case ROTATERT:
3333 case ROTATE:
3334 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3335 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3336 bitsize - 1, use the other direction of rotate with a
3337 1 .. bitsize / 2 - 1 amount instead. */
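     /* E.g. in SImode, (rotatert X (const_int 24)) is canonicalized to
        (rotate X (const_int 8)), and (rotate X (const_int 20)) to
        (rotatert X (const_int 12)).  */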
3338 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3339 if (CONST_INT_P (trueop1)
3340 && IN_RANGE (INTVAL (trueop1),
3341 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3342 GET_MODE_PRECISION (mode) - 1))
3343 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3344 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3345 - INTVAL (trueop1)));
3346 #endif
3347 /* FALLTHRU */
3348 case ASHIFTRT:
3349 if (trueop1 == CONST0_RTX (mode))
3350 return op0;
3351 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3352 return op0;
3353 /* Rotating ~0 always results in ~0. */
3354 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3355 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3356 && ! side_effects_p (op1))
3357 return op0;
3358
3359 canonicalize_shift:
3360 /* Given:
3361 scalar modes M1, M2
3362 scalar constants c1, c2
3363 size (M2) > size (M1)
3364 c1 == size (M2) - size (M1)
3365 optimize:
3366 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3367 <low_part>)
3368 (const_int <c2>))
3369 to:
3370 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3371 <low_part>). */
3372 if ((code == ASHIFTRT || code == LSHIFTRT)
3373 && !VECTOR_MODE_P (mode)
3374 && SUBREG_P (op0)
3375 && CONST_INT_P (op1)
3376 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3377 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3378 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3379 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3380 > GET_MODE_BITSIZE (mode))
3381 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3382 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3383 - GET_MODE_BITSIZE (mode)))
3384 && subreg_lowpart_p (op0))
3385 {
3386 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3387 + INTVAL (op1));
3388 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3389 tmp = simplify_gen_binary (code,
3390 GET_MODE (SUBREG_REG (op0)),
3391 XEXP (SUBREG_REG (op0), 0),
3392 tmp);
3393 return lowpart_subreg (mode, tmp, inner_mode);
3394 }
3395
3396 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3397 {
3398 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3399 if (val != INTVAL (op1))
3400 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3401 }
3402 break;
3403
3404 case ASHIFT:
3405 case SS_ASHIFT:
3406 case US_ASHIFT:
3407 if (trueop1 == CONST0_RTX (mode))
3408 return op0;
3409 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3410 return op0;
3411 goto canonicalize_shift;
3412
3413 case LSHIFTRT:
3414 if (trueop1 == CONST0_RTX (mode))
3415 return op0;
3416 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3417 return op0;
3418 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
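     /* For instance, in SImode with CLZ_DEFINED_VALUE_AT_ZERO giving 32,
        (lshiftrt (clz X) 5) is nonzero exactly when (clz X) == 32, i.e.
        when X == 0, so with STORE_FLAG_VALUE == 1 it becomes (eq X 0).  */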
3419 if (GET_CODE (op0) == CLZ
3420 && CONST_INT_P (trueop1)
3421 && STORE_FLAG_VALUE == 1
3422 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3423 {
3424 machine_mode imode = GET_MODE (XEXP (op0, 0));
3425 unsigned HOST_WIDE_INT zero_val = 0;
3426
3427 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3428 && zero_val == GET_MODE_PRECISION (imode)
3429 && INTVAL (trueop1) == exact_log2 (zero_val))
3430 return simplify_gen_relational (EQ, mode, imode,
3431 XEXP (op0, 0), const0_rtx);
3432 }
3433 goto canonicalize_shift;
3434
3435 case SMIN:
3436 if (width <= HOST_BITS_PER_WIDE_INT
3437 && mode_signbit_p (mode, trueop1)
3438 && ! side_effects_p (op0))
3439 return op1;
3440 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3441 return op0;
3442 tem = simplify_associative_operation (code, mode, op0, op1);
3443 if (tem)
3444 return tem;
3445 break;
3446
3447 case SMAX:
3448 if (width <= HOST_BITS_PER_WIDE_INT
3449 && CONST_INT_P (trueop1)
3450 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3451 && ! side_effects_p (op0))
3452 return op1;
3453 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3454 return op0;
3455 tem = simplify_associative_operation (code, mode, op0, op1);
3456 if (tem)
3457 return tem;
3458 break;
3459
3460 case UMIN:
3461 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3462 return op1;
3463 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3464 return op0;
3465 tem = simplify_associative_operation (code, mode, op0, op1);
3466 if (tem)
3467 return tem;
3468 break;
3469
3470 case UMAX:
3471 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3472 return op1;
3473 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3474 return op0;
3475 tem = simplify_associative_operation (code, mode, op0, op1);
3476 if (tem)
3477 return tem;
3478 break;
3479
3480 case SS_PLUS:
3481 case US_PLUS:
3482 case SS_MINUS:
3483 case US_MINUS:
3484 case SS_MULT:
3485 case US_MULT:
3486 case SS_DIV:
3487 case US_DIV:
3488 /* ??? There are simplifications that can be done. */
3489 return 0;
3490
3491 case VEC_SELECT:
3492 if (!VECTOR_MODE_P (mode))
3493 {
3494 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3495 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3496 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3497 gcc_assert (XVECLEN (trueop1, 0) == 1);
3498 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3499
3500 if (GET_CODE (trueop0) == CONST_VECTOR)
3501 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3502 (trueop1, 0, 0)));
3503
3504 /* Extract a scalar element from a nested VEC_SELECT expression
3505 (with optional nested VEC_CONCAT expression). Some targets
3506 (i386) extract a scalar element from a vector using a chain of
3507 nested VEC_SELECT expressions. When the input operand is a memory
3508 operand, this operation can be simplified to a simple scalar
3509 load from an offset memory address. */
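/* For example,
(vec_select:SI (vec_select:V4SI (reg:V8SI x) (parallel [4 5 6 7]))
(parallel [1]))
collapses to (vec_select:SI (reg:V8SI x) (parallel [5])). */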
3510 if (GET_CODE (trueop0) == VEC_SELECT)
3511 {
3512 rtx op0 = XEXP (trueop0, 0);
3513 rtx op1 = XEXP (trueop0, 1);
3514
3515 machine_mode opmode = GET_MODE (op0);
3516 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3517 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3518
3519 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3520 int elem;
3521
3522 rtvec vec;
3523 rtx tmp_op, tmp;
3524
3525 gcc_assert (GET_CODE (op1) == PARALLEL);
3526 gcc_assert (i < n_elts);
3527
3528 /* Select the element pointed to by the nested selector. */
3529 elem = INTVAL (XVECEXP (op1, 0, i));
3530
3531 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3532 if (GET_CODE (op0) == VEC_CONCAT)
3533 {
3534 rtx op00 = XEXP (op0, 0);
3535 rtx op01 = XEXP (op0, 1);
3536
3537 machine_mode mode00, mode01;
3538 int n_elts00, n_elts01;
3539
3540 mode00 = GET_MODE (op00);
3541 mode01 = GET_MODE (op01);
3542
3543 /* Find out number of elements of each operand. */
3544 if (VECTOR_MODE_P (mode00))
3545 {
3546 elt_size = GET_MODE_UNIT_SIZE (mode00);
3547 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3548 }
3549 else
3550 n_elts00 = 1;
3551
3552 if (VECTOR_MODE_P (mode01))
3553 {
3554 elt_size = GET_MODE_UNIT_SIZE (mode01);
3555 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3556 }
3557 else
3558 n_elts01 = 1;
3559
3560 gcc_assert (n_elts == n_elts00 + n_elts01);
3561
3562 /* Select correct operand of VEC_CONCAT
3563 and adjust selector. */
3564 if (elem < n_elts01)
3565 tmp_op = op00;
3566 else
3567 {
3568 tmp_op = op01;
3569 elem -= n_elts00;
3570 }
3571 }
3572 else
3573 tmp_op = op0;
3574
3575 vec = rtvec_alloc (1);
3576 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3577
3578 tmp = gen_rtx_fmt_ee (code, mode,
3579 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3580 return tmp;
3581 }
3582 if (GET_CODE (trueop0) == VEC_DUPLICATE
3583 && GET_MODE (XEXP (trueop0, 0)) == mode)
3584 return XEXP (trueop0, 0);
3585 }
3586 else
3587 {
3588 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3589 gcc_assert (GET_MODE_INNER (mode)
3590 == GET_MODE_INNER (GET_MODE (trueop0)));
3591 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3592
3593 if (GET_CODE (trueop0) == CONST_VECTOR)
3594 {
3595 int elt_size = GET_MODE_UNIT_SIZE (mode);
3596 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3597 rtvec v = rtvec_alloc (n_elts);
3598 unsigned int i;
3599
3600 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3601 for (i = 0; i < n_elts; i++)
3602 {
3603 rtx x = XVECEXP (trueop1, 0, i);
3604
3605 gcc_assert (CONST_INT_P (x));
3606 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3607 INTVAL (x));
3608 }
3609
3610 return gen_rtx_CONST_VECTOR (mode, v);
3611 }
3612
3613 /* Recognize the identity. */
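/* e.g. (vec_select:V4SI x (parallel [0 1 2 3])), with x of mode V4SI,
is simply x. */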
3614 if (GET_MODE (trueop0) == mode)
3615 {
3616 bool maybe_ident = true;
3617 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3618 {
3619 rtx j = XVECEXP (trueop1, 0, i);
3620 if (!CONST_INT_P (j) || INTVAL (j) != i)
3621 {
3622 maybe_ident = false;
3623 break;
3624 }
3625 }
3626 if (maybe_ident)
3627 return trueop0;
3628 }
3629
3630 /* If we build {a,b} then permute it, build the result directly. */
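/* For instance,
(vec_select:V2DF (vec_concat:V4DF (vec_concat:V2DF a b)
(vec_concat:V2DF c d))
(parallel [1 2]))
becomes (vec_concat:V2DF b c). */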
3631 if (XVECLEN (trueop1, 0) == 2
3632 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3633 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3634 && GET_CODE (trueop0) == VEC_CONCAT
3635 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3636 && GET_MODE (XEXP (trueop0, 0)) == mode
3637 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3638 && GET_MODE (XEXP (trueop0, 1)) == mode)
3639 {
3640 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3641 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3642 rtx subop0, subop1;
3643
3644 gcc_assert (i0 < 4 && i1 < 4);
3645 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3646 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3647
3648 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3649 }
3650
3651 if (XVECLEN (trueop1, 0) == 2
3652 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3653 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3654 && GET_CODE (trueop0) == VEC_CONCAT
3655 && GET_MODE (trueop0) == mode)
3656 {
3657 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3658 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3659 rtx subop0, subop1;
3660
3661 gcc_assert (i0 < 2 && i1 < 2);
3662 subop0 = XEXP (trueop0, i0);
3663 subop1 = XEXP (trueop0, i1);
3664
3665 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3666 }
3667
3668 /* If we select one half of a vec_concat, return that. */
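/* e.g. (vec_select:V2SI (vec_concat:V4SI (reg:V2SI a) (reg:V2SI b))
(parallel [2 3]))
is simply (reg:V2SI b). */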
3669 if (GET_CODE (trueop0) == VEC_CONCAT
3670 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3671 {
3672 rtx subop0 = XEXP (trueop0, 0);
3673 rtx subop1 = XEXP (trueop0, 1);
3674 machine_mode mode0 = GET_MODE (subop0);
3675 machine_mode mode1 = GET_MODE (subop1);
3676 int li = GET_MODE_UNIT_SIZE (mode0);
3677 int l0 = GET_MODE_SIZE (mode0) / li;
3678 int l1 = GET_MODE_SIZE (mode1) / li;
3679 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3680 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3681 {
3682 bool success = true;
3683 for (int i = 1; i < l0; ++i)
3684 {
3685 rtx j = XVECEXP (trueop1, 0, i);
3686 if (!CONST_INT_P (j) || INTVAL (j) != i)
3687 {
3688 success = false;
3689 break;
3690 }
3691 }
3692 if (success)
3693 return subop0;
3694 }
3695 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3696 {
3697 bool success = true;
3698 for (int i = 1; i < l1; ++i)
3699 {
3700 rtx j = XVECEXP (trueop1, 0, i);
3701 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3702 {
3703 success = false;
3704 break;
3705 }
3706 }
3707 if (success)
3708 return subop1;
3709 }
3710 }
3711 }
3712
3713 if (XVECLEN (trueop1, 0) == 1
3714 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3715 && GET_CODE (trueop0) == VEC_CONCAT)
3716 {
3717 rtx vec = trueop0;
3718 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3719
3720 /* Try to find the element in the VEC_CONCAT. */
3721 while (GET_MODE (vec) != mode
3722 && GET_CODE (vec) == VEC_CONCAT)
3723 {
3724 HOST_WIDE_INT vec_size;
3725
3726 if (CONST_INT_P (XEXP (vec, 0)))
3727 {
3728 /* vec_concat of two const_ints doesn't make sense with
3729 respect to modes. */
3730 if (CONST_INT_P (XEXP (vec, 1)))
3731 return 0;
3732
3733 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3734 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3735 }
3736 else
3737 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3738
3739 if (offset < vec_size)
3740 vec = XEXP (vec, 0);
3741 else
3742 {
3743 offset -= vec_size;
3744 vec = XEXP (vec, 1);
3745 }
3746 vec = avoid_constant_pool_reference (vec);
3747 }
3748
3749 if (GET_MODE (vec) == mode)
3750 return vec;
3751 }
3752
3753 /* If we select elements in a vec_merge that all come from the same
3754 operand, select from that operand directly. */
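/* For example, (vec_select:V2SI (vec_merge:V4SI x y (const_int 5))
(parallel [0 2])) only reads elements that the mask takes from x
(bits 0 and 2 of the mask are set), so it becomes
(vec_select:V2SI x (parallel [0 2])). */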
3755 if (GET_CODE (op0) == VEC_MERGE)
3756 {
3757 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3758 if (CONST_INT_P (trueop02))
3759 {
3760 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3761 bool all_operand0 = true;
3762 bool all_operand1 = true;
3763 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3764 {
3765 rtx j = XVECEXP (trueop1, 0, i);
3766 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3767 all_operand1 = false;
3768 else
3769 all_operand0 = false;
3770 }
3771 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3772 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3773 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3774 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3775 }
3776 }
3777
3778 /* If we have two nested selects that are inverses of each
3779 other, replace them with the source operand. */
3780 if (GET_CODE (trueop0) == VEC_SELECT
3781 && GET_MODE (XEXP (trueop0, 0)) == mode)
3782 {
3783 rtx op0_subop1 = XEXP (trueop0, 1);
3784 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3785 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3786
3787 /* Apply the outer ordering vector to the inner one. (The inner
3788 ordering vector is expressly permitted to be of a different
3789 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3790 then the two VEC_SELECTs cancel. */
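/* e.g. (vec_select:V4SI (vec_select:V4SI x (parallel [2 3 0 1]))
(parallel [2 3 0 1]))
is simply x. */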
3791 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3792 {
3793 rtx x = XVECEXP (trueop1, 0, i);
3794 if (!CONST_INT_P (x))
3795 return 0;
3796 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3797 if (!CONST_INT_P (y) || i != INTVAL (y))
3798 return 0;
3799 }
3800 return XEXP (trueop0, 0);
3801 }
3802
3803 return 0;
3804 case VEC_CONCAT:
3805 {
3806 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3807 ? GET_MODE (trueop0)
3808 : GET_MODE_INNER (mode));
3809 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3810 ? GET_MODE (trueop1)
3811 : GET_MODE_INNER (mode));
3812
3813 gcc_assert (VECTOR_MODE_P (mode));
3814 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3815 == GET_MODE_SIZE (mode));
3816
3817 if (VECTOR_MODE_P (op0_mode))
3818 gcc_assert (GET_MODE_INNER (mode)
3819 == GET_MODE_INNER (op0_mode));
3820 else
3821 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3822
3823 if (VECTOR_MODE_P (op1_mode))
3824 gcc_assert (GET_MODE_INNER (mode)
3825 == GET_MODE_INNER (op1_mode));
3826 else
3827 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3828
3829 if ((GET_CODE (trueop0) == CONST_VECTOR
3830 || CONST_SCALAR_INT_P (trueop0)
3831 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3832 && (GET_CODE (trueop1) == CONST_VECTOR
3833 || CONST_SCALAR_INT_P (trueop1)
3834 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3835 {
3836 int elt_size = GET_MODE_UNIT_SIZE (mode);
3837 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3838 rtvec v = rtvec_alloc (n_elts);
3839 unsigned int i;
3840 unsigned in_n_elts = 1;
3841
3842 if (VECTOR_MODE_P (op0_mode))
3843 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3844 for (i = 0; i < n_elts; i++)
3845 {
3846 if (i < in_n_elts)
3847 {
3848 if (!VECTOR_MODE_P (op0_mode))
3849 RTVEC_ELT (v, i) = trueop0;
3850 else
3851 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3852 }
3853 else
3854 {
3855 if (!VECTOR_MODE_P (op1_mode))
3856 RTVEC_ELT (v, i) = trueop1;
3857 else
3858 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3859 i - in_n_elts);
3860 }
3861 }
3862
3863 return gen_rtx_CONST_VECTOR (mode, v);
3864 }
3865
3866 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3867 Restrict the transformation to avoid generating a VEC_SELECT with a
3868 mode unrelated to its operand. */
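/* e.g. (vec_concat:V4SI (vec_select:V2SI x (parallel [0 1]))
(vec_select:V2SI x (parallel [3 2])))
becomes (vec_select:V4SI x (parallel [0 1 3 2])) when x has mode V4SI. */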
3869 if (GET_CODE (trueop0) == VEC_SELECT
3870 && GET_CODE (trueop1) == VEC_SELECT
3871 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3872 && GET_MODE (XEXP (trueop0, 0)) == mode)
3873 {
3874 rtx par0 = XEXP (trueop0, 1);
3875 rtx par1 = XEXP (trueop1, 1);
3876 int len0 = XVECLEN (par0, 0);
3877 int len1 = XVECLEN (par1, 0);
3878 rtvec vec = rtvec_alloc (len0 + len1);
3879 for (int i = 0; i < len0; i++)
3880 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3881 for (int i = 0; i < len1; i++)
3882 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3883 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3884 gen_rtx_PARALLEL (VOIDmode, vec));
3885 }
3886 }
3887 return 0;
3888
3889 default:
3890 gcc_unreachable ();
3891 }
3892
3893 return 0;
3894 }
3895
3896 rtx
3897 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3898 rtx op0, rtx op1)
3899 {
3900 unsigned int width = GET_MODE_PRECISION (mode);
3901
3902 if (VECTOR_MODE_P (mode)
3903 && code != VEC_CONCAT
3904 && GET_CODE (op0) == CONST_VECTOR
3905 && GET_CODE (op1) == CONST_VECTOR)
3906 {
3907 unsigned n_elts = GET_MODE_NUNITS (mode);
3908 machine_mode op0mode = GET_MODE (op0);
3909 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3910 machine_mode op1mode = GET_MODE (op1);
3911 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3912 rtvec v = rtvec_alloc (n_elts);
3913 unsigned int i;
3914
3915 gcc_assert (op0_n_elts == n_elts);
3916 gcc_assert (op1_n_elts == n_elts);
3917 for (i = 0; i < n_elts; i++)
3918 {
3919 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3920 CONST_VECTOR_ELT (op0, i),
3921 CONST_VECTOR_ELT (op1, i));
3922 if (!x)
3923 return 0;
3924 RTVEC_ELT (v, i) = x;
3925 }
3926
3927 return gen_rtx_CONST_VECTOR (mode, v);
3928 }
3929
3930 if (VECTOR_MODE_P (mode)
3931 && code == VEC_CONCAT
3932 && (CONST_SCALAR_INT_P (op0)
3933 || GET_CODE (op0) == CONST_FIXED
3934 || CONST_DOUBLE_AS_FLOAT_P (op0))
3935 && (CONST_SCALAR_INT_P (op1)
3936 || CONST_DOUBLE_AS_FLOAT_P (op1)
3937 || GET_CODE (op1) == CONST_FIXED))
3938 {
3939 unsigned n_elts = GET_MODE_NUNITS (mode);
3940 rtvec v = rtvec_alloc (n_elts);
3941
3942 gcc_assert (n_elts >= 2);
3943 if (n_elts == 2)
3944 {
3945 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3946 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3947
3948 RTVEC_ELT (v, 0) = op0;
3949 RTVEC_ELT (v, 1) = op1;
3950 }
3951 else
3952 {
3953 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3954 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3955 unsigned i;
3956
3957 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3958 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3959 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3960
3961 for (i = 0; i < op0_n_elts; ++i)
3962 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3963 for (i = 0; i < op1_n_elts; ++i)
3964 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3965 }
3966
3967 return gen_rtx_CONST_VECTOR (mode, v);
3968 }
3969
3970 if (SCALAR_FLOAT_MODE_P (mode)
3971 && CONST_DOUBLE_AS_FLOAT_P (op0)
3972 && CONST_DOUBLE_AS_FLOAT_P (op1)
3973 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3974 {
3975 if (code == AND
3976 || code == IOR
3977 || code == XOR)
3978 {
3979 long tmp0[4];
3980 long tmp1[4];
3981 REAL_VALUE_TYPE r;
3982 int i;
3983
3984 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3985 GET_MODE (op0));
3986 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3987 GET_MODE (op1));
3988 for (i = 0; i < 4; i++)
3989 {
3990 switch (code)
3991 {
3992 case AND:
3993 tmp0[i] &= tmp1[i];
3994 break;
3995 case IOR:
3996 tmp0[i] |= tmp1[i];
3997 break;
3998 case XOR:
3999 tmp0[i] ^= tmp1[i];
4000 break;
4001 default:
4002 gcc_unreachable ();
4003 }
4004 }
4005 real_from_target (&r, tmp0, mode);
4006 return const_double_from_real_value (r, mode);
4007 }
4008 else
4009 {
4010 REAL_VALUE_TYPE f0, f1, value, result;
4011 const REAL_VALUE_TYPE *opr0, *opr1;
4012 bool inexact;
4013
4014 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4015 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4016
4017 if (HONOR_SNANS (mode)
4018 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4019 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4020 return 0;
4021
4022 real_convert (&f0, mode, opr0);
4023 real_convert (&f1, mode, opr1);
4024
4025 if (code == DIV
4026 && real_equal (&f1, &dconst0)
4027 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4028 return 0;
4029
4030 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4031 && flag_trapping_math
4032 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4033 {
4034 int s0 = REAL_VALUE_NEGATIVE (f0);
4035 int s1 = REAL_VALUE_NEGATIVE (f1);
4036
4037 switch (code)
4038 {
4039 case PLUS:
4040 /* Inf + -Inf = NaN plus exception. */
4041 if (s0 != s1)
4042 return 0;
4043 break;
4044 case MINUS:
4045 /* Inf - Inf = NaN plus exception. */
4046 if (s0 == s1)
4047 return 0;
4048 break;
4049 case DIV:
4050 /* Inf / Inf = NaN plus exception. */
4051 return 0;
4052 default:
4053 break;
4054 }
4055 }
4056
4057 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4058 && flag_trapping_math
4059 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4060 || (REAL_VALUE_ISINF (f1)
4061 && real_equal (&f0, &dconst0))))
4062 /* Inf * 0 = NaN plus exception. */
4063 return 0;
4064
4065 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4066 &f0, &f1);
4067 real_convert (&result, mode, &value);
4068
4069 /* Don't constant fold this floating point operation if
4070 the result has overflowed and flag_trapping_math. */
4071
4072 if (flag_trapping_math
4073 && MODE_HAS_INFINITIES (mode)
4074 && REAL_VALUE_ISINF (result)
4075 && !REAL_VALUE_ISINF (f0)
4076 && !REAL_VALUE_ISINF (f1))
4077 /* Overflow plus exception. */
4078 return 0;
4079
4080 /* Don't constant fold this floating point operation if the
4081 result may depend upon the run-time rounding mode and
4082 flag_rounding_math is set, or if GCC's software emulation
4083 is unable to accurately represent the result. */
4084
4085 if ((flag_rounding_math
4086 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4087 && (inexact || !real_identical (&result, &value)))
4088 return NULL_RTX;
4089
4090 return const_double_from_real_value (result, mode);
4091 }
4092 }
4093
4094 /* We can fold some multi-word operations. */
4095 if ((GET_MODE_CLASS (mode) == MODE_INT
4096 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4097 && CONST_SCALAR_INT_P (op0)
4098 && CONST_SCALAR_INT_P (op1))
4099 {
4100 wide_int result;
4101 bool overflow;
4102 rtx_mode_t pop0 = rtx_mode_t (op0, mode);
4103 rtx_mode_t pop1 = rtx_mode_t (op1, mode);
4104
4105 #if TARGET_SUPPORTS_WIDE_INT == 0
4106 /* This assert keeps the simplification from producing a result
4107 that cannot be represented in a CONST_DOUBLE, but a lot of
4108 upstream callers expect that this function never fails to
4109 simplify something, so if you added this to the test
4110 above, the code would die later anyway. If this assert
4111 happens, you just need to make the port support wide int. */
4112 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4113 #endif
4114 switch (code)
4115 {
4116 case MINUS:
4117 result = wi::sub (pop0, pop1);
4118 break;
4119
4120 case PLUS:
4121 result = wi::add (pop0, pop1);
4122 break;
4123
4124 case MULT:
4125 result = wi::mul (pop0, pop1);
4126 break;
4127
4128 case DIV:
4129 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4130 if (overflow)
4131 return NULL_RTX;
4132 break;
4133
4134 case MOD:
4135 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4136 if (overflow)
4137 return NULL_RTX;
4138 break;
4139
4140 case UDIV:
4141 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4142 if (overflow)
4143 return NULL_RTX;
4144 break;
4145
4146 case UMOD:
4147 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4148 if (overflow)
4149 return NULL_RTX;
4150 break;
4151
4152 case AND:
4153 result = wi::bit_and (pop0, pop1);
4154 break;
4155
4156 case IOR:
4157 result = wi::bit_or (pop0, pop1);
4158 break;
4159
4160 case XOR:
4161 result = wi::bit_xor (pop0, pop1);
4162 break;
4163
4164 case SMIN:
4165 result = wi::smin (pop0, pop1);
4166 break;
4167
4168 case SMAX:
4169 result = wi::smax (pop0, pop1);
4170 break;
4171
4172 case UMIN:
4173 result = wi::umin (pop0, pop1);
4174 break;
4175
4176 case UMAX:
4177 result = wi::umax (pop0, pop1);
4178 break;
4179
4180 case LSHIFTRT:
4181 case ASHIFTRT:
4182 case ASHIFT:
4183 {
4184 wide_int wop1 = pop1;
4185 if (SHIFT_COUNT_TRUNCATED)
4186 wop1 = wi::umod_trunc (wop1, width);
4187 else if (wi::geu_p (wop1, width))
4188 return NULL_RTX;
4189
4190 switch (code)
4191 {
4192 case LSHIFTRT:
4193 result = wi::lrshift (pop0, wop1);
4194 break;
4195
4196 case ASHIFTRT:
4197 result = wi::arshift (pop0, wop1);
4198 break;
4199
4200 case ASHIFT:
4201 result = wi::lshift (pop0, wop1);
4202 break;
4203
4204 default:
4205 gcc_unreachable ();
4206 }
4207 break;
4208 }
4209 case ROTATE:
4210 case ROTATERT:
4211 {
4212 if (wi::neg_p (pop1))
4213 return NULL_RTX;
4214
4215 switch (code)
4216 {
4217 case ROTATE:
4218 result = wi::lrotate (pop0, pop1);
4219 break;
4220
4221 case ROTATERT:
4222 result = wi::rrotate (pop0, pop1);
4223 break;
4224
4225 default:
4226 gcc_unreachable ();
4227 }
4228 break;
4229 }
4230 default:
4231 return NULL_RTX;
4232 }
4233 return immed_wide_int_const (result, mode);
4234 }
4235
4236 return NULL_RTX;
4237 }
4238
4239
4240 \f
4241 /* Return a positive integer if X should sort after Y. The value
4242 returned is 1 if and only if X and Y are both regs. */
4243
4244 static int
4245 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4246 {
4247 int result;
4248
4249 result = (commutative_operand_precedence (y)
4250 - commutative_operand_precedence (x));
4251 if (result)
4252 return result + result;
4253
4254 /* Group together equal REGs to do more simplification. */
4255 if (REG_P (x) && REG_P (y))
4256 return REGNO (x) > REGNO (y);
4257
4258 return 0;
4259 }
4260
4261 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4262 operands may be another PLUS or MINUS.
4263
4264 Rather than testing for specific cases, we do this by a brute-force method
4265 and do all possible simplifications until no more changes occur. Then
4266 we rebuild the operation.
4267
4268 May return NULL_RTX when no changes were made. */
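/* For example, (minus (plus (reg A) (const_int 4)) (plus (reg A) (reg B)))
is expanded into the operand list { A, +4, -A, -B }; A and -A cancel, and
the result is rebuilt as (minus (const_int 4) (reg B)). */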
4269
4270 static rtx
4271 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4272 rtx op1)
4273 {
4274 struct simplify_plus_minus_op_data
4275 {
4276 rtx op;
4277 short neg;
4278 } ops[16];
4279 rtx result, tem;
4280 int n_ops = 2;
4281 int changed, n_constants, canonicalized = 0;
4282 int i, j;
4283
4284 memset (ops, 0, sizeof ops);
4285
4286 /* Set up the two operands and then expand them until nothing has been
4287 changed. If we run out of room in our array, give up; this should
4288 almost never happen. */
4289
4290 ops[0].op = op0;
4291 ops[0].neg = 0;
4292 ops[1].op = op1;
4293 ops[1].neg = (code == MINUS);
4294
4295 do
4296 {
4297 changed = 0;
4298 n_constants = 0;
4299
4300 for (i = 0; i < n_ops; i++)
4301 {
4302 rtx this_op = ops[i].op;
4303 int this_neg = ops[i].neg;
4304 enum rtx_code this_code = GET_CODE (this_op);
4305
4306 switch (this_code)
4307 {
4308 case PLUS:
4309 case MINUS:
4310 if (n_ops == ARRAY_SIZE (ops))
4311 return NULL_RTX;
4312
4313 ops[n_ops].op = XEXP (this_op, 1);
4314 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4315 n_ops++;
4316
4317 ops[i].op = XEXP (this_op, 0);
4318 changed = 1;
4319 /* If this operand was negated then we will potentially
4320 canonicalize the expression. Similarly if we don't
4321 place the operands adjacent we're re-ordering the
4322 expression and thus might be performing a
4323 canonicalization. Ignore register re-ordering.
4324 ??? It might be better to shuffle the ops array here,
4325 but then (plus (plus (A, B), plus (C, D))) wouldn't
4326 be seen as non-canonical. */
4327 if (this_neg
4328 || (i != n_ops - 2
4329 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4330 canonicalized = 1;
4331 break;
4332
4333 case NEG:
4334 ops[i].op = XEXP (this_op, 0);
4335 ops[i].neg = ! this_neg;
4336 changed = 1;
4337 canonicalized = 1;
4338 break;
4339
4340 case CONST:
4341 if (n_ops != ARRAY_SIZE (ops)
4342 && GET_CODE (XEXP (this_op, 0)) == PLUS
4343 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4344 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4345 {
4346 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4347 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4348 ops[n_ops].neg = this_neg;
4349 n_ops++;
4350 changed = 1;
4351 canonicalized = 1;
4352 }
4353 break;
4354
4355 case NOT:
4356 /* ~a -> (-a - 1) */
4357 if (n_ops != ARRAY_SIZE (ops))
4358 {
4359 ops[n_ops].op = CONSTM1_RTX (mode);
4360 ops[n_ops++].neg = this_neg;
4361 ops[i].op = XEXP (this_op, 0);
4362 ops[i].neg = !this_neg;
4363 changed = 1;
4364 canonicalized = 1;
4365 }
4366 break;
4367
4368 case CONST_INT:
4369 n_constants++;
4370 if (this_neg)
4371 {
4372 ops[i].op = neg_const_int (mode, this_op);
4373 ops[i].neg = 0;
4374 changed = 1;
4375 canonicalized = 1;
4376 }
4377 break;
4378
4379 default:
4380 break;
4381 }
4382 }
4383 }
4384 while (changed);
4385
4386 if (n_constants > 1)
4387 canonicalized = 1;
4388
4389 gcc_assert (n_ops >= 2);
4390
4391 /* If we only have two operands, we can avoid the loops. */
4392 if (n_ops == 2)
4393 {
4394 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4395 rtx lhs, rhs;
4396
4397 /* Get the two operands. Be careful with the order, especially for
4398 the cases where code == MINUS. */
4399 if (ops[0].neg && ops[1].neg)
4400 {
4401 lhs = gen_rtx_NEG (mode, ops[0].op);
4402 rhs = ops[1].op;
4403 }
4404 else if (ops[0].neg)
4405 {
4406 lhs = ops[1].op;
4407 rhs = ops[0].op;
4408 }
4409 else
4410 {
4411 lhs = ops[0].op;
4412 rhs = ops[1].op;
4413 }
4414
4415 return simplify_const_binary_operation (code, mode, lhs, rhs);
4416 }
4417
4418 /* Now simplify each pair of operands until nothing changes. */
4419 while (1)
4420 {
4421 /* Insertion sort is good enough for a small array. */
4422 for (i = 1; i < n_ops; i++)
4423 {
4424 struct simplify_plus_minus_op_data save;
4425 int cmp;
4426
4427 j = i - 1;
4428 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4429 if (cmp <= 0)
4430 continue;
4431 /* Just swapping registers doesn't count as canonicalization. */
4432 if (cmp != 1)
4433 canonicalized = 1;
4434
4435 save = ops[i];
4436 do
4437 ops[j + 1] = ops[j];
4438 while (j--
4439 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4440 ops[j + 1] = save;
4441 }
4442
4443 changed = 0;
4444 for (i = n_ops - 1; i > 0; i--)
4445 for (j = i - 1; j >= 0; j--)
4446 {
4447 rtx lhs = ops[j].op, rhs = ops[i].op;
4448 int lneg = ops[j].neg, rneg = ops[i].neg;
4449
4450 if (lhs != 0 && rhs != 0)
4451 {
4452 enum rtx_code ncode = PLUS;
4453
4454 if (lneg != rneg)
4455 {
4456 ncode = MINUS;
4457 if (lneg)
4458 std::swap (lhs, rhs);
4459 }
4460 else if (swap_commutative_operands_p (lhs, rhs))
4461 std::swap (lhs, rhs);
4462
4463 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4464 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4465 {
4466 rtx tem_lhs, tem_rhs;
4467
4468 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4469 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4470 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4471 tem_rhs);
4472
4473 if (tem && !CONSTANT_P (tem))
4474 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4475 }
4476 else
4477 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4478
4479 if (tem)
4480 {
4481 /* Reject "simplifications" that just wrap the two
4482 arguments in a CONST. Failure to do so can result
4483 in infinite recursion with simplify_binary_operation
4484 when it calls us to simplify CONST operations.
4485 Also, if we find such a simplification, don't try
4486 any more combinations with this rhs: We must have
4487 something like symbol+offset, i.e. one of the
4488 trivial CONST expressions we handle later. */
4489 if (GET_CODE (tem) == CONST
4490 && GET_CODE (XEXP (tem, 0)) == ncode
4491 && XEXP (XEXP (tem, 0), 0) == lhs
4492 && XEXP (XEXP (tem, 0), 1) == rhs)
4493 break;
4494 lneg &= rneg;
4495 if (GET_CODE (tem) == NEG)
4496 tem = XEXP (tem, 0), lneg = !lneg;
4497 if (CONST_INT_P (tem) && lneg)
4498 tem = neg_const_int (mode, tem), lneg = 0;
4499
4500 ops[i].op = tem;
4501 ops[i].neg = lneg;
4502 ops[j].op = NULL_RTX;
4503 changed = 1;
4504 canonicalized = 1;
4505 }
4506 }
4507 }
4508
4509 if (!changed)
4510 break;
4511
4512 /* Pack all the operands to the lower-numbered entries. */
4513 for (i = 0, j = 0; j < n_ops; j++)
4514 if (ops[j].op)
4515 {
4516 ops[i] = ops[j];
4517 i++;
4518 }
4519 n_ops = i;
4520 }
4521
4522 /* If nothing changed, check that rematerialization of rtl instructions
4523 is still required. */
4524 if (!canonicalized)
4525 {
4526 /* Perform rematerialization only if all operands are registers and
4527 all operations are PLUS. */
4528 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4529 around rs6000 and how it uses the CA register. See PR67145. */
4530 for (i = 0; i < n_ops; i++)
4531 if (ops[i].neg
4532 || !REG_P (ops[i].op)
4533 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4534 && fixed_regs[REGNO (ops[i].op)]
4535 && !global_regs[REGNO (ops[i].op)]
4536 && ops[i].op != frame_pointer_rtx
4537 && ops[i].op != arg_pointer_rtx
4538 && ops[i].op != stack_pointer_rtx))
4539 return NULL_RTX;
4540 goto gen_result;
4541 }
4542
4543 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4544 if (n_ops == 2
4545 && CONST_INT_P (ops[1].op)
4546 && CONSTANT_P (ops[0].op)
4547 && ops[0].neg)
4548 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4549
4550 /* We suppressed creation of trivial CONST expressions in the
4551 combination loop to avoid recursion. Create one manually now.
4552 The combination loop should have ensured that there is exactly
4553 one CONST_INT, and the sort will have ensured that it is last
4554 in the array and that any other constant will be next-to-last. */
4555
4556 if (n_ops > 1
4557 && CONST_INT_P (ops[n_ops - 1].op)
4558 && CONSTANT_P (ops[n_ops - 2].op))
4559 {
4560 rtx value = ops[n_ops - 1].op;
4561 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4562 value = neg_const_int (mode, value);
4563 if (CONST_INT_P (value))
4564 {
4565 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4566 INTVAL (value));
4567 n_ops--;
4568 }
4569 }
4570
4571 /* Put a non-negated operand first, if possible. */
4572
4573 for (i = 0; i < n_ops && ops[i].neg; i++)
4574 continue;
4575 if (i == n_ops)
4576 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4577 else if (i != 0)
4578 {
4579 tem = ops[0].op;
4580 ops[0] = ops[i];
4581 ops[i].op = tem;
4582 ops[i].neg = 1;
4583 }
4584
4585 /* Now make the result by performing the requested operations. */
4586 gen_result:
4587 result = ops[0].op;
4588 for (i = 1; i < n_ops; i++)
4589 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4590 mode, result, ops[i].op);
4591
4592 return result;
4593 }
4594
4595 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4596 static bool
4597 plus_minus_operand_p (const_rtx x)
4598 {
4599 return GET_CODE (x) == PLUS
4600 || GET_CODE (x) == MINUS
4601 || (GET_CODE (x) == CONST
4602 && GET_CODE (XEXP (x, 0)) == PLUS
4603 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4604 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4605 }
4606
4607 /* Like simplify_binary_operation except used for relational operators.
4608 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4609 not both also be VOIDmode.
4610
4611 CMP_MODE specifies the mode in which the comparison is done, so it is
4612 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4613 the operands or, if both are VOIDmode, the operands are compared in
4614 "infinite precision". */
4615 rtx
4616 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4617 machine_mode cmp_mode, rtx op0, rtx op1)
4618 {
4619 rtx tem, trueop0, trueop1;
4620
4621 if (cmp_mode == VOIDmode)
4622 cmp_mode = GET_MODE (op0);
4623 if (cmp_mode == VOIDmode)
4624 cmp_mode = GET_MODE (op1);
4625
4626 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4627 if (tem)
4628 {
4629 if (SCALAR_FLOAT_MODE_P (mode))
4630 {
4631 if (tem == const0_rtx)
4632 return CONST0_RTX (mode);
4633 #ifdef FLOAT_STORE_FLAG_VALUE
4634 {
4635 REAL_VALUE_TYPE val;
4636 val = FLOAT_STORE_FLAG_VALUE (mode);
4637 return const_double_from_real_value (val, mode);
4638 }
4639 #else
4640 return NULL_RTX;
4641 #endif
4642 }
4643 if (VECTOR_MODE_P (mode))
4644 {
4645 if (tem == const0_rtx)
4646 return CONST0_RTX (mode);
4647 #ifdef VECTOR_STORE_FLAG_VALUE
4648 {
4649 int i, units;
4650 rtvec v;
4651
4652 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4653 if (val == NULL_RTX)
4654 return NULL_RTX;
4655 if (val == const1_rtx)
4656 return CONST1_RTX (mode);
4657
4658 units = GET_MODE_NUNITS (mode);
4659 v = rtvec_alloc (units);
4660 for (i = 0; i < units; i++)
4661 RTVEC_ELT (v, i) = val;
4662 return gen_rtx_raw_CONST_VECTOR (mode, v);
4663 }
4664 #else
4665 return NULL_RTX;
4666 #endif
4667 }
4668
4669 return tem;
4670 }
4671
4672 /* For the following tests, ensure const0_rtx is op1. */
4673 if (swap_commutative_operands_p (op0, op1)
4674 || (op0 == const0_rtx && op1 != const0_rtx))
4675 std::swap (op0, op1), code = swap_condition (code);
4676
4677 /* If op0 is a compare, extract the comparison arguments from it. */
4678 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4679 return simplify_gen_relational (code, mode, VOIDmode,
4680 XEXP (op0, 0), XEXP (op0, 1));
4681
4682 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4683 || CC0_P (op0))
4684 return NULL_RTX;
4685
4686 trueop0 = avoid_constant_pool_reference (op0);
4687 trueop1 = avoid_constant_pool_reference (op1);
4688 return simplify_relational_operation_1 (code, mode, cmp_mode,
4689 trueop0, trueop1);
4690 }
4691
4692 /* This part of simplify_relational_operation is only used when CMP_MODE
4693 is not in class MODE_CC (i.e. it is a real comparison).
4694
4695 MODE is the mode of the result, while CMP_MODE is the mode in which
4696 the comparison is done, so it is the mode of the operands. */
4697
4698 static rtx
4699 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4700 machine_mode cmp_mode, rtx op0, rtx op1)
4701 {
4702 enum rtx_code op0code = GET_CODE (op0);
4703
4704 if (op1 == const0_rtx && COMPARISON_P (op0))
4705 {
4706 /* If op0 is a comparison, extract the comparison arguments
4707 from it. */
4708 if (code == NE)
4709 {
4710 if (GET_MODE (op0) == mode)
4711 return simplify_rtx (op0);
4712 else
4713 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4714 XEXP (op0, 0), XEXP (op0, 1));
4715 }
4716 else if (code == EQ)
4717 {
4718 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4719 if (new_code != UNKNOWN)
4720 return simplify_gen_relational (new_code, mode, VOIDmode,
4721 XEXP (op0, 0), XEXP (op0, 1));
4722 }
4723 }
4724
4725 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4726 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
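/* e.g. (ltu:SI (plus:SI a (const_int 4)) (const_int 4)) tests whether
a + 4 wrapped around and becomes (geu:SI a (const_int -4)). */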
4727 if ((code == LTU || code == GEU)
4728 && GET_CODE (op0) == PLUS
4729 && CONST_INT_P (XEXP (op0, 1))
4730 && (rtx_equal_p (op1, XEXP (op0, 0))
4731 || rtx_equal_p (op1, XEXP (op0, 1)))
4732 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4733 && XEXP (op0, 1) != const0_rtx)
4734 {
4735 rtx new_cmp
4736 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4737 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4738 cmp_mode, XEXP (op0, 0), new_cmp);
4739 }
4740
4741 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4742 transformed into (LTU a -C). */
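/* e.g. (gtu:SI (plus:SI a (const_int 8)) (const_int 7)) becomes
(ltu:SI a (const_int -8)). */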
4743 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4744 && CONST_INT_P (XEXP (op0, 1))
4745 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4746 && XEXP (op0, 1) != const0_rtx)
4747 {
4748 rtx new_cmp
4749 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4750 return simplify_gen_relational (LTU, mode, cmp_mode,
4751 XEXP (op0, 0), new_cmp);
4752 }
4753
4754 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4755 if ((code == LTU || code == GEU)
4756 && GET_CODE (op0) == PLUS
4757 && rtx_equal_p (op1, XEXP (op0, 1))
4758 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4759 && !rtx_equal_p (op1, XEXP (op0, 0)))
4760 return simplify_gen_relational (code, mode, cmp_mode, op0,
4761 copy_rtx (XEXP (op0, 0)));
4762
4763 if (op1 == const0_rtx)
4764 {
4765 /* Canonicalize (GTU x 0) as (NE x 0). */
4766 if (code == GTU)
4767 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4768 /* Canonicalize (LEU x 0) as (EQ x 0). */
4769 if (code == LEU)
4770 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4771 }
4772 else if (op1 == const1_rtx)
4773 {
4774 switch (code)
4775 {
4776 case GE:
4777 /* Canonicalize (GE x 1) as (GT x 0). */
4778 return simplify_gen_relational (GT, mode, cmp_mode,
4779 op0, const0_rtx);
4780 case GEU:
4781 /* Canonicalize (GEU x 1) as (NE x 0). */
4782 return simplify_gen_relational (NE, mode, cmp_mode,
4783 op0, const0_rtx);
4784 case LT:
4785 /* Canonicalize (LT x 1) as (LE x 0). */
4786 return simplify_gen_relational (LE, mode, cmp_mode,
4787 op0, const0_rtx);
4788 case LTU:
4789 /* Canonicalize (LTU x 1) as (EQ x 0). */
4790 return simplify_gen_relational (EQ, mode, cmp_mode,
4791 op0, const0_rtx);
4792 default:
4793 break;
4794 }
4795 }
4796 else if (op1 == constm1_rtx)
4797 {
4798 /* Canonicalize (LE x -1) as (LT x 0). */
4799 if (code == LE)
4800 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4801 /* Canonicalize (GT x -1) as (GE x 0). */
4802 if (code == GT)
4803 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4804 }
4805
4806 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
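/* e.g. (eq (plus x (const_int 3)) (const_int 7)) becomes
(eq x (const_int 4)). */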
4807 if ((code == EQ || code == NE)
4808 && (op0code == PLUS || op0code == MINUS)
4809 && CONSTANT_P (op1)
4810 && CONSTANT_P (XEXP (op0, 1))
4811 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4812 {
4813 rtx x = XEXP (op0, 0);
4814 rtx c = XEXP (op0, 1);
4815 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4816 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4817
4818 /* Detect an infinite recursive condition, where we oscillate at this
4819 simplification case between:
4820 A + B == C <---> C - B == A,
4821 where A, B, and C are all constants with non-simplifiable expressions,
4822 usually SYMBOL_REFs. */
4823 if (GET_CODE (tem) == invcode
4824 && CONSTANT_P (x)
4825 && rtx_equal_p (c, XEXP (tem, 1)))
4826 return NULL_RTX;
4827
4828 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4829 }
4830
4831 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4832 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4833 if (code == NE
4834 && op1 == const0_rtx
4835 && GET_MODE_CLASS (mode) == MODE_INT
4836 && cmp_mode != VOIDmode
4837 /* ??? Work-around BImode bugs in the ia64 backend. */
4838 && mode != BImode
4839 && cmp_mode != BImode
4840 && nonzero_bits (op0, cmp_mode) == 1
4841 && STORE_FLAG_VALUE == 1)
4842 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4843 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4844 : lowpart_subreg (mode, op0, cmp_mode);
4845
4846 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4847 if ((code == EQ || code == NE)
4848 && op1 == const0_rtx
4849 && op0code == XOR)
4850 return simplify_gen_relational (code, mode, cmp_mode,
4851 XEXP (op0, 0), XEXP (op0, 1));
4852
4853 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4854 if ((code == EQ || code == NE)
4855 && op0code == XOR
4856 && rtx_equal_p (XEXP (op0, 0), op1)
4857 && !side_effects_p (XEXP (op0, 0)))
4858 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4859 CONST0_RTX (mode));
4860
4861 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4862 if ((code == EQ || code == NE)
4863 && op0code == XOR
4864 && rtx_equal_p (XEXP (op0, 1), op1)
4865 && !side_effects_p (XEXP (op0, 1)))
4866 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4867 CONST0_RTX (mode));
4868
4869 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4870 if ((code == EQ || code == NE)
4871 && op0code == XOR
4872 && CONST_SCALAR_INT_P (op1)
4873 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4874 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4875 simplify_gen_binary (XOR, cmp_mode,
4876 XEXP (op0, 1), op1));
4877
4878 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4879 can be implemented with a BICS instruction on some targets, or
4880 constant-folded if y is a constant. */
4881 if ((code == EQ || code == NE)
4882 && op0code == AND
4883 && rtx_equal_p (XEXP (op0, 0), op1)
4884 && !side_effects_p (op1)
4885 && op1 != CONST0_RTX (cmp_mode))
4886 {
4887 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4888 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4889
4890 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4891 CONST0_RTX (cmp_mode));
4892 }
4893
4894 /* Likewise for (eq/ne (and x y) y). */
4895 if ((code == EQ || code == NE)
4896 && op0code == AND
4897 && rtx_equal_p (XEXP (op0, 1), op1)
4898 && !side_effects_p (op1)
4899 && op1 != CONST0_RTX (cmp_mode))
4900 {
4901 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4902 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4903
4904 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4905 CONST0_RTX (cmp_mode));
4906 }
4907
4908 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
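/* e.g. in SImode (eq (bswap x) (const_int 0x12345678)) becomes
(eq x (const_int 0x78563412)). */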
4909 if ((code == EQ || code == NE)
4910 && GET_CODE (op0) == BSWAP
4911 && CONST_SCALAR_INT_P (op1))
4912 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4913 simplify_gen_unary (BSWAP, cmp_mode,
4914 op1, cmp_mode));
4915
4916 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4917 if ((code == EQ || code == NE)
4918 && GET_CODE (op0) == BSWAP
4919 && GET_CODE (op1) == BSWAP)
4920 return simplify_gen_relational (code, mode, cmp_mode,
4921 XEXP (op0, 0), XEXP (op1, 0));
4922
4923 if (op0code == POPCOUNT && op1 == const0_rtx)
4924 switch (code)
4925 {
4926 case EQ:
4927 case LE:
4928 case LEU:
4929 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4930 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4931 XEXP (op0, 0), const0_rtx);
4932
4933 case NE:
4934 case GT:
4935 case GTU:
4936 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4937 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4938 XEXP (op0, 0), const0_rtx);
4939
4940 default:
4941 break;
4942 }
4943
4944 return NULL_RTX;
4945 }
4946
4947 enum
4948 {
4949 CMP_EQ = 1,
4950 CMP_LT = 2,
4951 CMP_GT = 4,
4952 CMP_LTU = 8,
4953 CMP_GTU = 16
4954 };
4955
4956
4957 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4958 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4959 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4960 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4961 For floating-point comparisons, assume that the operands were ordered. */
4962
4963 static rtx
4964 comparison_result (enum rtx_code code, int known_results)
4965 {
4966 switch (code)
4967 {
4968 case EQ:
4969 case UNEQ:
4970 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4971 case NE:
4972 case LTGT:
4973 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4974
4975 case LT:
4976 case UNLT:
4977 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4978 case GE:
4979 case UNGE:
4980 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4981
4982 case GT:
4983 case UNGT:
4984 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4985 case LE:
4986 case UNLE:
4987 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4988
4989 case LTU:
4990 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4991 case GEU:
4992 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4993
4994 case GTU:
4995 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4996 case LEU:
4997 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4998
4999 case ORDERED:
5000 return const_true_rtx;
5001 case UNORDERED:
5002 return const0_rtx;
5003 default:
5004 gcc_unreachable ();
5005 }
5006 }
5007
5008 /* Check if the given comparison (done in the given MODE) is actually
5009 a tautology or a contradiction. If the mode is VOIDmode, the
5010 comparison is done in "infinite precision". If no simplification
5011 is possible, this function returns zero. Otherwise, it returns
5012 either const_true_rtx or const0_rtx. */
5013
5014 rtx
5015 simplify_const_relational_operation (enum rtx_code code,
5016 machine_mode mode,
5017 rtx op0, rtx op1)
5018 {
5019 rtx tem;
5020 rtx trueop0;
5021 rtx trueop1;
5022
5023 gcc_assert (mode != VOIDmode
5024 || (GET_MODE (op0) == VOIDmode
5025 && GET_MODE (op1) == VOIDmode));
5026
5027 /* If op0 is a compare, extract the comparison arguments from it. */
5028 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5029 {
5030 op1 = XEXP (op0, 1);
5031 op0 = XEXP (op0, 0);
5032
5033 if (GET_MODE (op0) != VOIDmode)
5034 mode = GET_MODE (op0);
5035 else if (GET_MODE (op1) != VOIDmode)
5036 mode = GET_MODE (op1);
5037 else
5038 return 0;
5039 }
5040
5041 /* We can't simplify MODE_CC values since we don't know what the
5042 actual comparison is. */
5043 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5044 return 0;
5045
5046 /* Make sure the constant is second. */
5047 if (swap_commutative_operands_p (op0, op1))
5048 {
5049 std::swap (op0, op1);
5050 code = swap_condition (code);
5051 }
5052
5053 trueop0 = avoid_constant_pool_reference (op0);
5054 trueop1 = avoid_constant_pool_reference (op1);
5055
5056 /* For integer comparisons of A and B maybe we can simplify A - B and can
5057 then simplify a comparison of that with zero. If A and B are both either
5058 a register or a CONST_INT, this can't help; testing for these cases will
5059 prevent infinite recursion here and speed things up.
5060
5061 We can only do this for EQ and NE comparisons as otherwise we may
5062 lose or introduce overflow which we cannot disregard as undefined as
5063 we do not know the signedness of the operation on either the left or
5064 the right hand side of the comparison. */
5065
5066 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5067 && (code == EQ || code == NE)
5068 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5069 && (REG_P (op1) || CONST_INT_P (trueop1)))
5070 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5071 /* We cannot do this if tem is a nonzero address. */
5072 && ! nonzero_address_p (tem))
5073 return simplify_const_relational_operation (signed_condition (code),
5074 mode, tem, const0_rtx);
5075
5076 if (! HONOR_NANS (mode) && code == ORDERED)
5077 return const_true_rtx;
5078
5079 if (! HONOR_NANS (mode) && code == UNORDERED)
5080 return const0_rtx;
5081
5082 /* For modes without NaNs, if the two operands are equal, we know the
5083 result except if they have side-effects. Even with NaNs we know
5084 the result of unordered comparisons and, if signaling NaNs are
5085 irrelevant, also the result of LT/GT/LTGT. */
5086 if ((! HONOR_NANS (trueop0)
5087 || code == UNEQ || code == UNLE || code == UNGE
5088 || ((code == LT || code == GT || code == LTGT)
5089 && ! HONOR_SNANS (trueop0)))
5090 && rtx_equal_p (trueop0, trueop1)
5091 && ! side_effects_p (trueop0))
5092 return comparison_result (code, CMP_EQ);
5093
5094 /* If the operands are floating-point constants, see if we can fold
5095 the result. */
5096 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5097 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5098 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5099 {
5100 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5101 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5102
5103 /* Comparisons are unordered iff at least one of the values is NaN. */
5104 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5105 switch (code)
5106 {
5107 case UNEQ:
5108 case UNLT:
5109 case UNGT:
5110 case UNLE:
5111 case UNGE:
5112 case NE:
5113 case UNORDERED:
5114 return const_true_rtx;
5115 case EQ:
5116 case LT:
5117 case GT:
5118 case LE:
5119 case GE:
5120 case LTGT:
5121 case ORDERED:
5122 return const0_rtx;
5123 default:
5124 return 0;
5125 }
5126
5127 return comparison_result (code,
5128 (real_equal (d0, d1) ? CMP_EQ :
5129 real_less (d0, d1) ? CMP_LT : CMP_GT));
5130 }
5131
5132 /* Otherwise, see if the operands are both integers. */
5133 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5134 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5135 {
5136 /* It would be nice if we really had a mode here. However, the
5137 largest int representable on the target is as good as
5138 infinite. */
5139 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5140 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5141 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5142
5143 if (wi::eq_p (ptrueop0, ptrueop1))
5144 return comparison_result (code, CMP_EQ);
5145 else
5146 {
5147 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5148 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5149 return comparison_result (code, cr);
5150 }
5151 }
5152
5153 /* Optimize comparisons with upper and lower bounds. */
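/* For example, if nonzero_bits shows that trueop0 fits in 8 bits
(mmax == 255), then (gtu trueop0 (const_int 255)) folds to const0_rtx
and (leu trueop0 (const_int 255)) folds to const_true_rtx. */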
5154 if (HWI_COMPUTABLE_MODE_P (mode)
5155 && CONST_INT_P (trueop1)
5156 && !side_effects_p (trueop0))
5157 {
5158 int sign;
5159 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5160 HOST_WIDE_INT val = INTVAL (trueop1);
5161 HOST_WIDE_INT mmin, mmax;
5162
5163 if (code == GEU
5164 || code == LEU
5165 || code == GTU
5166 || code == LTU)
5167 sign = 0;
5168 else
5169 sign = 1;
5170
5171 /* Get a reduced range if the sign bit is zero. */
5172 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5173 {
5174 mmin = 0;
5175 mmax = nonzero;
5176 }
5177 else
5178 {
5179 rtx mmin_rtx, mmax_rtx;
5180 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5181
5182 mmin = INTVAL (mmin_rtx);
5183 mmax = INTVAL (mmax_rtx);
5184 if (sign)
5185 {
5186 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5187
5188 mmin >>= (sign_copies - 1);
5189 mmax >>= (sign_copies - 1);
5190 }
5191 }
5192
5193 switch (code)
5194 {
5195 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5196 case GEU:
5197 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5198 return const_true_rtx;
5199 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5200 return const0_rtx;
5201 break;
5202 case GE:
5203 if (val <= mmin)
5204 return const_true_rtx;
5205 if (val > mmax)
5206 return const0_rtx;
5207 break;
5208
5209 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5210 case LEU:
5211 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5212 return const_true_rtx;
5213 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5214 return const0_rtx;
5215 break;
5216 case LE:
5217 if (val >= mmax)
5218 return const_true_rtx;
5219 if (val < mmin)
5220 return const0_rtx;
5221 break;
5222
5223 case EQ:
5224 /* x == y is always false for y out of range. */
5225 if (val < mmin || val > mmax)
5226 return const0_rtx;
5227 break;
5228
5229 /* x > y is always false for y >= mmax, always true for y < mmin. */
5230 case GTU:
5231 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5232 return const0_rtx;
5233 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5234 return const_true_rtx;
5235 break;
5236 case GT:
5237 if (val >= mmax)
5238 return const0_rtx;
5239 if (val < mmin)
5240 return const_true_rtx;
5241 break;
5242
5243 /* x < y is always false for y <= mmin, always true for y > mmax. */
5244 case LTU:
5245 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5246 return const0_rtx;
5247 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5248 return const_true_rtx;
5249 break;
5250 case LT:
5251 if (val <= mmin)
5252 return const0_rtx;
5253 if (val > mmax)
5254 return const_true_rtx;
5255 break;
5256
5257 case NE:
5258 /* x != y is always true for y out of range. */
5259 if (val < mmin || val > mmax)
5260 return const_true_rtx;
5261 break;
5262
5263 default:
5264 break;
5265 }
5266 }
5267
5268 /* Optimize integer comparisons with zero. */
5269 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5270 {
5271 /* Some addresses are known to be nonzero. We don't know
5272 their sign, but equality comparisons are known. */
5273 if (nonzero_address_p (trueop0))
5274 {
5275 if (code == EQ || code == LEU)
5276 return const0_rtx;
5277 if (code == NE || code == GTU)
5278 return const_true_rtx;
5279 }
5280
5281 /* See if the first operand is an IOR with a constant. If so, we
5282 may be able to determine the result of this comparison. */
5283 if (GET_CODE (op0) == IOR)
5284 {
5285 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5286 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5287 {
5288 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5289 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5290 && (UINTVAL (inner_const)
5291 & (HOST_WIDE_INT_1U
5292 << sign_bitnum)));
5293
5294 switch (code)
5295 {
5296 case EQ:
5297 case LEU:
5298 return const0_rtx;
5299 case NE:
5300 case GTU:
5301 return const_true_rtx;
5302 case LT:
5303 case LE:
5304 if (has_sign)
5305 return const_true_rtx;
5306 break;
5307 case GT:
5308 case GE:
5309 if (has_sign)
5310 return const0_rtx;
5311 break;
5312 default:
5313 break;
5314 }
5315 }
5316 }
5317 }
5318
5319 /* Optimize comparison of ABS with zero. */
5320 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5321 && (GET_CODE (trueop0) == ABS
5322 || (GET_CODE (trueop0) == FLOAT_EXTEND
5323 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5324 {
5325 switch (code)
5326 {
5327 case LT:
5328 /* Optimize abs(x) < 0.0. */
5329 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5330 return const0_rtx;
5331 break;
5332
5333 case GE:
5334 /* Optimize abs(x) >= 0.0. */
5335 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5336 return const_true_rtx;
5337 break;
5338
5339 case UNGE:
5340 /* Optimize ! (abs(x) < 0.0). */
5341 return const_true_rtx;
5342
5343 default:
5344 break;
5345 }
5346 }
5347
5348 return 0;
5349 }
5350
5351 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5352 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5353 or CTZ_DEFINED_VALUE_AT_ZERO, respectively. Return OP (X) if the expression
5354 can be simplified to that, or NULL_RTX if not.
5355 Assume X is compared against zero with CMP_CODE and the true
5356 arm is TRUE_VAL and the false arm is FALSE_VAL. */
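/* For example, if CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode, then
(X == 0) ? 32 : (clz:SI X) simplifies to (clz:SI X). */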
5357
5358 static rtx
5359 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5360 {
5361 if (cmp_code != EQ && cmp_code != NE)
5362 return NULL_RTX;
5363
5364 /* Results on X == 0 and X != 0, respectively. */
5365 rtx on_zero, on_nonzero;
5366 if (cmp_code == EQ)
5367 {
5368 on_zero = true_val;
5369 on_nonzero = false_val;
5370 }
5371 else
5372 {
5373 on_zero = false_val;
5374 on_nonzero = true_val;
5375 }
5376
5377 rtx_code op_code = GET_CODE (on_nonzero);
5378 if ((op_code != CLZ && op_code != CTZ)
5379 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5380 || !CONST_INT_P (on_zero))
5381 return NULL_RTX;
5382
5383 HOST_WIDE_INT op_val;
5384 if (((op_code == CLZ
5385 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5386 || (op_code == CTZ
5387 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5388 && op_val == INTVAL (on_zero))
5389 return on_nonzero;
5390
5391 return NULL_RTX;
5392 }
5393
5394 \f
5395 /* Simplify CODE, an operation with result mode MODE and three operands,
5396 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5397 a constant. Return 0 if no simplification is possible. */
5398
5399 rtx
5400 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5401 machine_mode op0_mode, rtx op0, rtx op1,
5402 rtx op2)
5403 {
5404 unsigned int width = GET_MODE_PRECISION (mode);
5405 bool any_change = false;
5406 rtx tem, trueop2;
5407
5408 /* VOIDmode means "infinite" precision. */
5409 if (width == 0)
5410 width = HOST_BITS_PER_WIDE_INT;
5411
5412 switch (code)
5413 {
5414 case FMA:
5415 /* Simplify negations around the multiplication. */
5416 /* -a * -b + c => a * b + c. */
5417 if (GET_CODE (op0) == NEG)
5418 {
5419 tem = simplify_unary_operation (NEG, mode, op1, mode);
5420 if (tem)
5421 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5422 }
5423 else if (GET_CODE (op1) == NEG)
5424 {
5425 tem = simplify_unary_operation (NEG, mode, op0, mode);
5426 if (tem)
5427 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5428 }
5429
5430 /* Canonicalize the two multiplication operands. */
5431 /* a * -b + c => -b * a + c. */
5432 if (swap_commutative_operands_p (op0, op1))
5433 std::swap (op0, op1), any_change = true;
5434
5435 if (any_change)
5436 return gen_rtx_FMA (mode, op0, op1, op2);
5437 return NULL_RTX;
5438
5439 case SIGN_EXTRACT:
5440 case ZERO_EXTRACT:
5441 if (CONST_INT_P (op0)
5442 && CONST_INT_P (op1)
5443 && CONST_INT_P (op2)
5444 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5445 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5446 {
5447 /* Extracting a bit-field from a constant. */
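	  /* E.g. with !BITS_BIG_ENDIAN,
	       (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
	     evaluates to (const_int 0xa), while the SIGN_EXTRACT form gives
	     (const_int -6) because the top bit of the extracted field is set
	     (a sketch; the exact shift depends on the target's bit numbering).  */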
5448 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5449 HOST_WIDE_INT op1val = INTVAL (op1);
5450 HOST_WIDE_INT op2val = INTVAL (op2);
5451 if (BITS_BIG_ENDIAN)
5452 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5453 else
5454 val >>= op2val;
5455
5456 if (HOST_BITS_PER_WIDE_INT != op1val)
5457 {
5458 /* First zero-extend. */
5459 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5460 /* If desired, propagate sign bit. */
5461 if (code == SIGN_EXTRACT
5462 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5463 != 0)
5464 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5465 }
5466
5467 return gen_int_mode (val, mode);
5468 }
5469 break;
5470
5471 case IF_THEN_ELSE:
5472 if (CONST_INT_P (op0))
5473 return op0 != const0_rtx ? op1 : op2;
5474
5475 /* Convert c ? a : a into "a". */
5476 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5477 return op1;
5478
5479 /* Convert a != b ? a : b into "a". */
5480 if (GET_CODE (op0) == NE
5481 && ! side_effects_p (op0)
5482 && ! HONOR_NANS (mode)
5483 && ! HONOR_SIGNED_ZEROS (mode)
5484 && ((rtx_equal_p (XEXP (op0, 0), op1)
5485 && rtx_equal_p (XEXP (op0, 1), op2))
5486 || (rtx_equal_p (XEXP (op0, 0), op2)
5487 && rtx_equal_p (XEXP (op0, 1), op1))))
5488 return op1;
5489
5490 /* Convert a == b ? a : b into "b". */
5491 if (GET_CODE (op0) == EQ
5492 && ! side_effects_p (op0)
5493 && ! HONOR_NANS (mode)
5494 && ! HONOR_SIGNED_ZEROS (mode)
5495 && ((rtx_equal_p (XEXP (op0, 0), op1)
5496 && rtx_equal_p (XEXP (op0, 1), op2))
5497 || (rtx_equal_p (XEXP (op0, 0), op2)
5498 && rtx_equal_p (XEXP (op0, 1), op1))))
5499 return op2;
5500
5501 /* Convert (!c) != {0,...,0} ? a : b into
5502 c != {0,...,0} ? b : a for vector modes. */
5503 if (VECTOR_MODE_P (GET_MODE (op1))
5504 && GET_CODE (op0) == NE
5505 && GET_CODE (XEXP (op0, 0)) == NOT
5506 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5507 {
5508 rtx cv = XEXP (op0, 1);
5509 int nunits = CONST_VECTOR_NUNITS (cv);
5510 bool ok = true;
5511 for (int i = 0; i < nunits; ++i)
5512 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5513 {
5514 ok = false;
5515 break;
5516 }
5517 if (ok)
5518 {
5519 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5520 XEXP (XEXP (op0, 0), 0),
5521 XEXP (op0, 1));
5522 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5523 return retval;
5524 }
5525 }
5526
5527 /* Convert x == 0 ? N : clz (x) into clz (x) when
5528 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5529 Similarly for ctz (x). */
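	  /* Concretely (a sketch, assuming CLZ_DEFINED_VALUE_AT_ZERO yields 32
	     for SImode on the target):
	       (if_then_else (eq (reg:SI x) (const_int 0))
			     (const_int 32)
			     (clz:SI (reg:SI x)))
	     simplifies to (clz:SI (reg:SI x)).  */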
5530 if (COMPARISON_P (op0) && !side_effects_p (op0)
5531 && XEXP (op0, 1) == const0_rtx)
5532 {
5533 rtx simplified
5534 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5535 op1, op2);
5536 if (simplified)
5537 return simplified;
5538 }
5539
5540 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5541 {
5542 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5543 ? GET_MODE (XEXP (op0, 1))
5544 : GET_MODE (XEXP (op0, 0)));
5545 rtx temp;
5546
5547 /* Look for happy constants in op1 and op2. */
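	  /* E.g. (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes
	     (lt x y) outright when STORE_FLAG_VALUE is 1, and the reversed
	     comparison is used when the two arms are swapped and the comparison
	     is reversible (a sketch; STORE_FLAG_VALUE is target-specific).  */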
5548 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5549 {
5550 HOST_WIDE_INT t = INTVAL (op1);
5551 HOST_WIDE_INT f = INTVAL (op2);
5552
5553 if (t == STORE_FLAG_VALUE && f == 0)
5554 code = GET_CODE (op0);
5555 else if (t == 0 && f == STORE_FLAG_VALUE)
5556 {
5557 enum rtx_code tmp;
5558 tmp = reversed_comparison_code (op0, NULL);
5559 if (tmp == UNKNOWN)
5560 break;
5561 code = tmp;
5562 }
5563 else
5564 break;
5565
5566 return simplify_gen_relational (code, mode, cmp_mode,
5567 XEXP (op0, 0), XEXP (op0, 1));
5568 }
5569
5570 if (cmp_mode == VOIDmode)
5571 cmp_mode = op0_mode;
5572 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5573 cmp_mode, XEXP (op0, 0),
5574 XEXP (op0, 1));
5575
5576 /* See if any simplifications were possible. */
5577 if (temp)
5578 {
5579 if (CONST_INT_P (temp))
5580 return temp == const0_rtx ? op2 : op1;
5581 else if (temp)
5582 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5583 }
5584 }
5585 break;
5586
5587 case VEC_MERGE:
5588 gcc_assert (GET_MODE (op0) == mode);
5589 gcc_assert (GET_MODE (op1) == mode);
5590 gcc_assert (VECTOR_MODE_P (mode));
5591 trueop2 = avoid_constant_pool_reference (op2);
5592 if (CONST_INT_P (trueop2))
5593 {
5594 int elt_size = GET_MODE_UNIT_SIZE (mode);
5595 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5596 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5597 unsigned HOST_WIDE_INT mask;
5598 if (n_elts == HOST_BITS_PER_WIDE_INT)
5599 mask = -1;
5600 else
5601 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5602
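	  /* In a VEC_MERGE a set selector bit picks the element from op0 and a
	     clear bit picks it from op1, so an all-zero selector is just op1
	     and an all-ones selector is just op0.  */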
5603 if (!(sel & mask) && !side_effects_p (op0))
5604 return op1;
5605 if ((sel & mask) == mask && !side_effects_p (op1))
5606 return op0;
5607
5608 rtx trueop0 = avoid_constant_pool_reference (op0);
5609 rtx trueop1 = avoid_constant_pool_reference (op1);
5610 if (GET_CODE (trueop0) == CONST_VECTOR
5611 && GET_CODE (trueop1) == CONST_VECTOR)
5612 {
5613 rtvec v = rtvec_alloc (n_elts);
5614 unsigned int i;
5615
5616 for (i = 0; i < n_elts; i++)
5617 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5618 ? CONST_VECTOR_ELT (trueop0, i)
5619 : CONST_VECTOR_ELT (trueop1, i));
5620 return gen_rtx_CONST_VECTOR (mode, v);
5621 }
5622
5623 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5624 if no element from a appears in the result. */
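	      /* For example, with four elements, m == 0xc and n == 0x3: every
		 bit set in n is clear in m, so no element of a survives and the
		 inner merge can be replaced by b; symmetrically it is replaced
		 by a when every element n takes from the inner merge comes
		 from a (a sketch).  */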
5625 if (GET_CODE (op0) == VEC_MERGE)
5626 {
5627 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5628 if (CONST_INT_P (tem))
5629 {
5630 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5631 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5632 return simplify_gen_ternary (code, mode, mode,
5633 XEXP (op0, 1), op1, op2);
5634 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5635 return simplify_gen_ternary (code, mode, mode,
5636 XEXP (op0, 0), op1, op2);
5637 }
5638 }
5639 if (GET_CODE (op1) == VEC_MERGE)
5640 {
5641 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5642 if (CONST_INT_P (tem))
5643 {
5644 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5645 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5646 return simplify_gen_ternary (code, mode, mode,
5647 op0, XEXP (op1, 1), op2);
5648 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5649 return simplify_gen_ternary (code, mode, mode,
5650 op0, XEXP (op1, 0), op2);
5651 }
5652 }
5653
5654 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5655 with a. */
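	  /* E.g. for i == 2 the merge writes a's own element 2 back into a, so
	     the whole expression is just a (a sketch; the vec_select result
	     mode must have a single element for this case to apply).  */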
5656 if (GET_CODE (op0) == VEC_DUPLICATE
5657 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5658 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5659 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5660 {
5661 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5662 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5663 {
5664 if (XEXP (XEXP (op0, 0), 0) == op1
5665 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5666 return op1;
5667 }
5668 }
5669 }
5670
5671 if (rtx_equal_p (op0, op1)
5672 && !side_effects_p (op2) && !side_effects_p (op1))
5673 return op0;
5674
5675 break;
5676
5677 default:
5678 gcc_unreachable ();
5679 }
5680
5681 return 0;
5682 }
5683
5684 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5685 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5686 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5687
5688 Works by unpacking OP into a collection of 8-bit values
5689 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5690 and then repacking them again for OUTERMODE. */
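   For instance, on a little-endian target
     (subreg:HI (const_int 0x12345678) 0)
   taken from SImode unpacks to the bytes 78 56 34 12, selects the two
   lowest, and repacks them as (const_int 0x5678) (a sketch; the byte
   selection follows the target's endianness).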
5691
5692 static rtx
5693 simplify_immed_subreg (machine_mode outermode, rtx op,
5694 machine_mode innermode, unsigned int byte)
5695 {
5696 enum {
5697 value_bit = 8,
5698 value_mask = (1 << value_bit) - 1
5699 };
5700 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5701 int value_start;
5702 int i;
5703 int elem;
5704
5705 int num_elem;
5706 rtx * elems;
5707 int elem_bitsize;
5708 rtx result_s = NULL;
5709 rtvec result_v = NULL;
5710 enum mode_class outer_class;
5711 machine_mode outer_submode;
5712 int max_bitsize;
5713
5714 /* Some ports misuse CCmode. */
5715 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5716 return op;
5717
5718 /* We have no way to represent a complex constant at the rtl level. */
5719 if (COMPLEX_MODE_P (outermode))
5720 return NULL_RTX;
5721
5722 /* We support any size mode. */
5723 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5724 GET_MODE_BITSIZE (innermode));
5725
5726 /* Unpack the value. */
5727
5728 if (GET_CODE (op) == CONST_VECTOR)
5729 {
5730 num_elem = CONST_VECTOR_NUNITS (op);
5731 elems = &CONST_VECTOR_ELT (op, 0);
5732 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5733 }
5734 else
5735 {
5736 num_elem = 1;
5737 elems = &op;
5738 elem_bitsize = max_bitsize;
5739 }
5740 /* If this asserts, it is too complicated; reducing value_bit may help. */
5741 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5742 /* I don't know how to handle endianness of sub-units. */
5743 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5744
5745 for (elem = 0; elem < num_elem; elem++)
5746 {
5747 unsigned char * vp;
5748 rtx el = elems[elem];
5749
5750 /* Vectors are kept in target memory order. (This is probably
5751 a mistake.) */
5752 {
5753 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5754 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5755 / BITS_PER_UNIT);
5756 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5757 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5758 unsigned bytele = (subword_byte % UNITS_PER_WORD
5759 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5760 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5761 }
5762
5763 switch (GET_CODE (el))
5764 {
5765 case CONST_INT:
5766 for (i = 0;
5767 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5768 i += value_bit)
5769 *vp++ = INTVAL (el) >> i;
5770 /* CONST_INTs are always logically sign-extended. */
5771 for (; i < elem_bitsize; i += value_bit)
5772 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5773 break;
5774
5775 case CONST_WIDE_INT:
5776 {
5777 rtx_mode_t val = rtx_mode_t (el, innermode);
5778 unsigned char extend = wi::sign_mask (val);
5779 int prec = wi::get_precision (val);
5780
5781 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5782 *vp++ = wi::extract_uhwi (val, i, value_bit);
5783 for (; i < elem_bitsize; i += value_bit)
5784 *vp++ = extend;
5785 }
5786 break;
5787
5788 case CONST_DOUBLE:
5789 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5790 {
5791 unsigned char extend = 0;
5792 /* If this triggers, someone should have generated a
5793 CONST_INT instead. */
5794 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5795
5796 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5797 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5798 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5799 {
5800 *vp++
5801 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5802 i += value_bit;
5803 }
5804
5805 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5806 extend = -1;
5807 for (; i < elem_bitsize; i += value_bit)
5808 *vp++ = extend;
5809 }
5810 else
5811 {
5812 /* This is big enough for anything on the platform. */
5813 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5814 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5815
5816 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5817 gcc_assert (bitsize <= elem_bitsize);
5818 gcc_assert (bitsize % value_bit == 0);
5819
5820 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5821 GET_MODE (el));
5822
5823 /* real_to_target produces its result in words affected by
5824 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5825 and use WORDS_BIG_ENDIAN instead; see the documentation
5826 of SUBREG in rtl.texi. */
5827 for (i = 0; i < bitsize; i += value_bit)
5828 {
5829 int ibase;
5830 if (WORDS_BIG_ENDIAN)
5831 ibase = bitsize - 1 - i;
5832 else
5833 ibase = i;
5834 *vp++ = tmp[ibase / 32] >> i % 32;
5835 }
5836
5837 /* It shouldn't matter what's done here, so fill it with
5838 zero. */
5839 for (; i < elem_bitsize; i += value_bit)
5840 *vp++ = 0;
5841 }
5842 break;
5843
5844 case CONST_FIXED:
5845 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5846 {
5847 for (i = 0; i < elem_bitsize; i += value_bit)
5848 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5849 }
5850 else
5851 {
5852 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5853 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5854 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5855 i += value_bit)
5856 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5857 >> (i - HOST_BITS_PER_WIDE_INT);
5858 for (; i < elem_bitsize; i += value_bit)
5859 *vp++ = 0;
5860 }
5861 break;
5862
5863 default:
5864 gcc_unreachable ();
5865 }
5866 }
5867
5868 /* Now, pick the right byte to start with. */
5869 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5870 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5871 will already have offset 0. */
5872 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5873 {
5874 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5875 - byte);
5876 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5877 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5878 byte = (subword_byte % UNITS_PER_WORD
5879 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5880 }
5881
5882 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5883 so if it's become negative it will instead be very large.) */
5884 gcc_assert (byte < GET_MODE_SIZE (innermode));
5885
5886 /* Convert from bytes to chunks of size value_bit. */
5887 value_start = byte * (BITS_PER_UNIT / value_bit);
5888
5889 /* Re-pack the value. */
5890 num_elem = GET_MODE_NUNITS (outermode);
5891
5892 if (VECTOR_MODE_P (outermode))
5893 {
5894 result_v = rtvec_alloc (num_elem);
5895 elems = &RTVEC_ELT (result_v, 0);
5896 }
5897 else
5898 elems = &result_s;
5899
5900 outer_submode = GET_MODE_INNER (outermode);
5901 outer_class = GET_MODE_CLASS (outer_submode);
5902 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5903
5904 gcc_assert (elem_bitsize % value_bit == 0);
5905 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5906
5907 for (elem = 0; elem < num_elem; elem++)
5908 {
5909 unsigned char *vp;
5910
5911 /* Vectors are stored in target memory order. (This is probably
5912 a mistake.) */
5913 {
5914 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5915 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5916 / BITS_PER_UNIT);
5917 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5918 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5919 unsigned bytele = (subword_byte % UNITS_PER_WORD
5920 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5921 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5922 }
5923
5924 switch (outer_class)
5925 {
5926 case MODE_INT:
5927 case MODE_PARTIAL_INT:
5928 {
5929 int u;
5930 int base = 0;
5931 int units
5932 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5933 / HOST_BITS_PER_WIDE_INT;
5934 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5935 wide_int r;
5936
5937 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5938 return NULL_RTX;
5939 for (u = 0; u < units; u++)
5940 {
5941 unsigned HOST_WIDE_INT buf = 0;
5942 for (i = 0;
5943 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5944 i += value_bit)
5945 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5946
5947 tmp[u] = buf;
5948 base += HOST_BITS_PER_WIDE_INT;
5949 }
5950 r = wide_int::from_array (tmp, units,
5951 GET_MODE_PRECISION (outer_submode));
5952 #if TARGET_SUPPORTS_WIDE_INT == 0
5953 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5954 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5955 return NULL_RTX;
5956 #endif
5957 elems[elem] = immed_wide_int_const (r, outer_submode);
5958 }
5959 break;
5960
5961 case MODE_FLOAT:
5962 case MODE_DECIMAL_FLOAT:
5963 {
5964 REAL_VALUE_TYPE r;
5965 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5966
5967 /* real_from_target wants its input in words affected by
5968 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5969 and use WORDS_BIG_ENDIAN instead; see the documentation
5970 of SUBREG in rtl.texi. */
5971 for (i = 0; i < elem_bitsize; i += value_bit)
5972 {
5973 int ibase;
5974 if (WORDS_BIG_ENDIAN)
5975 ibase = elem_bitsize - 1 - i;
5976 else
5977 ibase = i;
5978 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5979 }
5980
5981 real_from_target (&r, tmp, outer_submode);
5982 elems[elem] = const_double_from_real_value (r, outer_submode);
5983 }
5984 break;
5985
5986 case MODE_FRACT:
5987 case MODE_UFRACT:
5988 case MODE_ACCUM:
5989 case MODE_UACCUM:
5990 {
5991 FIXED_VALUE_TYPE f;
5992 f.data.low = 0;
5993 f.data.high = 0;
5994 f.mode = outer_submode;
5995
5996 for (i = 0;
5997 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5998 i += value_bit)
5999 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6000 for (; i < elem_bitsize; i += value_bit)
6001 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6002 << (i - HOST_BITS_PER_WIDE_INT));
6003
6004 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6005 }
6006 break;
6007
6008 default:
6009 gcc_unreachable ();
6010 }
6011 }
6012 if (VECTOR_MODE_P (outermode))
6013 return gen_rtx_CONST_VECTOR (outermode, result_v);
6014 else
6015 return result_s;
6016 }
6017
6018 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6019 Return 0 if no simplifications are possible. */
6020 rtx
6021 simplify_subreg (machine_mode outermode, rtx op,
6022 machine_mode innermode, unsigned int byte)
6023 {
6024 /* Little bit of sanity checking. */
6025 gcc_assert (innermode != VOIDmode);
6026 gcc_assert (outermode != VOIDmode);
6027 gcc_assert (innermode != BLKmode);
6028 gcc_assert (outermode != BLKmode);
6029
6030 gcc_assert (GET_MODE (op) == innermode
6031 || GET_MODE (op) == VOIDmode);
6032
6033 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6034 return NULL_RTX;
6035
6036 if (byte >= GET_MODE_SIZE (innermode))
6037 return NULL_RTX;
6038
6039 if (outermode == innermode && !byte)
6040 return op;
6041
6042 if (CONST_SCALAR_INT_P (op)
6043 || CONST_DOUBLE_AS_FLOAT_P (op)
6044 || GET_CODE (op) == CONST_FIXED
6045 || GET_CODE (op) == CONST_VECTOR)
6046 return simplify_immed_subreg (outermode, op, innermode, byte);
6047
6048 /* Changing mode twice with SUBREG => just change it once,
6049 or not at all if changing back to op's starting mode. */
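  /* A sketch, assuming a little-endian, little-word target:
       (subreg:SI (subreg:DI (reg:TI r) 8) 0)
     folds to (subreg:SI (reg:TI r) 8); the two byte offsets are simply
     combined, with endian corrections applied below where needed.  */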
6050 if (GET_CODE (op) == SUBREG)
6051 {
6052 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6053 int final_offset = byte + SUBREG_BYTE (op);
6054 rtx newx;
6055
6056 if (outermode == innermostmode
6057 && byte == 0 && SUBREG_BYTE (op) == 0)
6058 return SUBREG_REG (op);
6059
6060 /* The SUBREG_BYTE represents an offset, as if the value were stored
6061 in memory. An irritating exception is the paradoxical subreg, where
6062 we define SUBREG_BYTE to be 0; on big-endian machines this value
6063 would otherwise be negative. For a moment, undo this exception. */
6064 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6065 {
6066 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6067 if (WORDS_BIG_ENDIAN)
6068 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6069 if (BYTES_BIG_ENDIAN)
6070 final_offset += difference % UNITS_PER_WORD;
6071 }
6072 if (SUBREG_BYTE (op) == 0
6073 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6074 {
6075 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6076 if (WORDS_BIG_ENDIAN)
6077 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6078 if (BYTES_BIG_ENDIAN)
6079 final_offset += difference % UNITS_PER_WORD;
6080 }
6081
6082 /* See whether resulting subreg will be paradoxical. */
6083 if (!paradoxical_subreg_p (outermode, innermostmode))
6084 {
6085 /* In nonparadoxical subregs we can't handle negative offsets. */
6086 if (final_offset < 0)
6087 return NULL_RTX;
6088 /* Bail out in case resulting subreg would be incorrect. */
6089 if (final_offset % GET_MODE_SIZE (outermode)
6090 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6091 return NULL_RTX;
6092 }
6093 else
6094 {
6095 int offset = 0;
6096 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6097
6098 /* In a paradoxical subreg, see if we are still looking at the lower
6099 part. If so, our SUBREG_BYTE will be 0. */
6100 if (WORDS_BIG_ENDIAN)
6101 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6102 if (BYTES_BIG_ENDIAN)
6103 offset += difference % UNITS_PER_WORD;
6104 if (offset == final_offset)
6105 final_offset = 0;
6106 else
6107 return NULL_RTX;
6108 }
6109
6110 /* Recurse for further possible simplifications. */
6111 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6112 final_offset);
6113 if (newx)
6114 return newx;
6115 if (validate_subreg (outermode, innermostmode,
6116 SUBREG_REG (op), final_offset))
6117 {
6118 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6119 if (SUBREG_PROMOTED_VAR_P (op)
6120 && SUBREG_PROMOTED_SIGN (op) >= 0
6121 && GET_MODE_CLASS (outermode) == MODE_INT
6122 && IN_RANGE (GET_MODE_SIZE (outermode),
6123 GET_MODE_SIZE (innermode),
6124 GET_MODE_SIZE (innermostmode))
6125 && subreg_lowpart_p (newx))
6126 {
6127 SUBREG_PROMOTED_VAR_P (newx) = 1;
6128 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6129 }
6130 return newx;
6131 }
6132 return NULL_RTX;
6133 }
6134
6135 /* SUBREG of a hard register => just change the register number
6136 and/or mode. If the hard register is not valid in that mode,
6137 suppress this simplification. If the hard register is the stack,
6138 frame, or argument pointer, leave this as a SUBREG. */
6139
6140 if (REG_P (op) && HARD_REGISTER_P (op))
6141 {
6142 unsigned int regno, final_regno;
6143
6144 regno = REGNO (op);
6145 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6146 if (HARD_REGISTER_NUM_P (final_regno))
6147 {
6148 rtx x;
6149 int final_offset = byte;
6150
6151 /* Adjust offset for paradoxical subregs. */
6152 if (byte == 0
6153 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6154 {
6155 int difference = (GET_MODE_SIZE (innermode)
6156 - GET_MODE_SIZE (outermode));
6157 if (WORDS_BIG_ENDIAN)
6158 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6159 if (BYTES_BIG_ENDIAN)
6160 final_offset += difference % UNITS_PER_WORD;
6161 }
6162
6163 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6164
6165 /* Propagate the original regno. We don't have any way to specify
6166 the offset inside the original regno, so do so only for the lowpart.
6167 The information is used only by alias analysis, which cannot
6168 grok a partial register anyway. */
6169
6170 if (subreg_lowpart_offset (outermode, innermode) == byte)
6171 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6172 return x;
6173 }
6174 }
6175
6176 /* If we have a SUBREG of a register that we are replacing and we are
6177 replacing it with a MEM, make a new MEM and try replacing the
6178 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6179 or if we would be widening it. */
6180
6181 if (MEM_P (op)
6182 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6183 /* Allow splitting of volatile memory references in case we don't
6184 have an instruction to move the whole thing. */
6185 && (! MEM_VOLATILE_P (op)
6186 || ! have_insn_for (SET, innermode))
6187 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6188 return adjust_address_nv (op, outermode, byte);
6189
6190 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6191 of two parts. */
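  /* E.g. (subreg:SF (concat:SC re im) 4) picks out the imaginary part IM,
     assuming 4-byte SFmode, because the byte offset lands entirely inside
     the second part (a sketch).  */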
6192 if (GET_CODE (op) == CONCAT
6193 || GET_CODE (op) == VEC_CONCAT)
6194 {
6195 unsigned int part_size, final_offset;
6196 rtx part, res;
6197
6198 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6199 if (part_mode == VOIDmode)
6200 part_mode = GET_MODE_INNER (GET_MODE (op));
6201 part_size = GET_MODE_SIZE (part_mode);
6202 if (byte < part_size)
6203 {
6204 part = XEXP (op, 0);
6205 final_offset = byte;
6206 }
6207 else
6208 {
6209 part = XEXP (op, 1);
6210 final_offset = byte - part_size;
6211 }
6212
6213 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6214 return NULL_RTX;
6215
6216 part_mode = GET_MODE (part);
6217 if (part_mode == VOIDmode)
6218 part_mode = GET_MODE_INNER (GET_MODE (op));
6219 res = simplify_subreg (outermode, part, part_mode, final_offset);
6220 if (res)
6221 return res;
6222 if (validate_subreg (outermode, part_mode, part, final_offset))
6223 return gen_rtx_SUBREG (outermode, part, final_offset);
6224 return NULL_RTX;
6225 }
6226
6227 /* A SUBREG resulting from a zero extension may fold to zero if
6228 it extracts higher bits than the ZERO_EXTEND's source provides. */
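  /* On a little-endian target, for example,
       (subreg:SI (zero_extend:DI (reg:HI x)) 4)
     reads only bits 32..63 of the DImode value, all of which are known to
     be zero, so it folds to (const_int 0) (a sketch).  */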
6229 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6230 {
6231 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6232 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6233 return CONST0_RTX (outermode);
6234 }
6235
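  /* A lowpart SUBREG to a narrower scalar integer mode behaves like a
     truncation, so try the truncation simplifications as a last resort.  */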
6236 if (SCALAR_INT_MODE_P (outermode)
6237 && SCALAR_INT_MODE_P (innermode)
6238 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6239 && byte == subreg_lowpart_offset (outermode, innermode))
6240 {
6241 rtx tem = simplify_truncation (outermode, op, innermode);
6242 if (tem)
6243 return tem;
6244 }
6245
6246 return NULL_RTX;
6247 }
6248
6249 /* Make a SUBREG operation or equivalent if it folds. */
6250
6251 rtx
6252 simplify_gen_subreg (machine_mode outermode, rtx op,
6253 machine_mode innermode, unsigned int byte)
6254 {
6255 rtx newx;
6256
6257 newx = simplify_subreg (outermode, op, innermode, byte);
6258 if (newx)
6259 return newx;
6260
6261 if (GET_CODE (op) == SUBREG
6262 || GET_CODE (op) == CONCAT
6263 || GET_MODE (op) == VOIDmode)
6264 return NULL_RTX;
6265
6266 if (validate_subreg (outermode, innermode, op, byte))
6267 return gen_rtx_SUBREG (outermode, op, byte);
6268
6269 return NULL_RTX;
6270 }
6271
6272 /* Generates a subreg to get the least significant part of EXPR (in mode
6273 INNER_MODE) to OUTER_MODE. */
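/* For example, lowpart_subreg (QImode, x, SImode) yields
   (subreg:QI (reg:SI x) 0) on a little-endian target and
   (subreg:QI (reg:SI x) 3) on a big-endian one, since
   subreg_lowpart_offset supplies the endianness-dependent byte offset
   (a sketch; simplification may replace the SUBREG entirely, e.g. for
   constant operands).  */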
6274
6275 rtx
6276 lowpart_subreg (machine_mode outer_mode, rtx expr,
6277 machine_mode inner_mode)
6278 {
6279 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6280 subreg_lowpart_offset (outer_mode, inner_mode));
6281 }
6282
6283 /* Simplify X, an rtx expression.
6284
6285 Return the simplified expression or NULL if no simplifications
6286 were possible.
6287
6288 This is the preferred entry point into the simplification routines;
6289 however, we still allow passes to call the more specific routines.
6290
6291 Right now GCC has three (yes, three) major bodies of RTL simplification
6292 code that need to be unified.
6293
6294 1. fold_rtx in cse.c. This code uses various CSE specific
6295 information to aid in RTL simplification.
6296
6297 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6298 it uses combine specific information to aid in RTL
6299 simplification.
6300
6301 3. The routines in this file.
6302
6303
6304 Long term we want to only have one body of simplification code; to
6305 get to that state I recommend the following steps:
6306
6307 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6308 which do not depend on pass-specific state into these routines.
6309
6310 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6311 use this routine whenever possible.
6312
6313 3. Allow for pass dependent state to be provided to these
6314 routines and add simplifications based on the pass dependent
6315 state. Remove code from cse.c & combine.c that becomes
6316 redundant/dead.
6317
6318 It will take time, but ultimately the compiler will be easier to
6319 maintain and improve. It's totally silly that when we add a
6320 simplification it needs to be added to 4 places (3 for RTL
6321 simplification and 1 for tree simplification). */
6322
6323 rtx
6324 simplify_rtx (const_rtx x)
6325 {
6326 const enum rtx_code code = GET_CODE (x);
6327 const machine_mode mode = GET_MODE (x);
6328
6329 switch (GET_RTX_CLASS (code))
6330 {
6331 case RTX_UNARY:
6332 return simplify_unary_operation (code, mode,
6333 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6334 case RTX_COMM_ARITH:
6335 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6336 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6337
6338 /* Fall through. */
6339
6340 case RTX_BIN_ARITH:
6341 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6342
6343 case RTX_TERNARY:
6344 case RTX_BITFIELD_OPS:
6345 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6346 XEXP (x, 0), XEXP (x, 1),
6347 XEXP (x, 2));
6348
6349 case RTX_COMPARE:
6350 case RTX_COMM_COMPARE:
6351 return simplify_relational_operation (code, mode,
6352 ((GET_MODE (XEXP (x, 0))
6353 != VOIDmode)
6354 ? GET_MODE (XEXP (x, 0))
6355 : GET_MODE (XEXP (x, 1))),
6356 XEXP (x, 0),
6357 XEXP (x, 1));
6358
6359 case RTX_EXTRA:
6360 if (code == SUBREG)
6361 return simplify_subreg (mode, SUBREG_REG (x),
6362 GET_MODE (SUBREG_REG (x)),
6363 SUBREG_BYTE (x));
6364 break;
6365
6366 case RTX_OBJ:
6367 if (code == LO_SUM)
6368 {
6369 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6370 if (GET_CODE (XEXP (x, 0)) == HIGH
6371 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6372 return XEXP (x, 1);
6373 }
6374 break;
6375
6376 default:
6377 break;
6378 }
6379 return NULL;
6380 }