1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28 #include "hard-reg-set.h"
29 #include "rtl.h"
30 #include "insn-config.h"
31 #include "recog.h"
32 #include "target.h"
33 #include "output.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "regs.h"
37 #include "function.h"
38 #include "df.h"
39 #include "tree.h"
40 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
41
42 /* Forward declarations */
43 static void set_of_1 (rtx, const_rtx, void *);
44 static bool covers_regno_p (const_rtx, unsigned int);
45 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
46 static int rtx_referenced_p_1 (rtx *, void *);
47 static int computed_jump_p_1 (const_rtx);
48 static void parms_set (rtx, const_rtx, void *);
49
50 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
51 const_rtx, enum machine_mode,
52 unsigned HOST_WIDE_INT);
53 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
54 const_rtx, enum machine_mode,
55 unsigned HOST_WIDE_INT);
56 static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
57 enum machine_mode,
58 unsigned int);
59 static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
60 enum machine_mode, unsigned int);
61
62 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
63 -1 if a code has no such operand. */
64 static int non_rtx_starting_operands[NUM_RTX_CODE];
65
66 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
67 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
68 SIGN_EXTEND then while narrowing we also have to enforce the
69 representation and sign-extend the value to mode DESTINATION_REP.
70
71 If the value is already sign-extended to DESTINATION_REP mode we
72 can just switch to DESTINATION mode on it. For each pair of
73 integral modes SOURCE and DESTINATION, when truncating from SOURCE
74 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
75 contains the number of high-order bits in SOURCE that have to be
76 copies of the sign-bit so that we can do this mode-switch to
77 DESTINATION. */
78
79 static unsigned int
80 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
81 \f
82 /* Return 1 if the value of X is unstable
83 (would be different at a different point in the program).
84 The frame pointer, arg pointer, etc. are considered stable
85 (within one function) and so is anything marked `unchanging'. */
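/* For example, a non-read-only MEM such as (mem:SI (reg:SI 100)) is
   unstable, while (symbol_ref ("x")) and (const_int 4) are stable.
   (Illustrative RTL only; pseudo 100 is a made-up register number.)  */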
86
87 int
88 rtx_unstable_p (const_rtx x)
89 {
90 const RTX_CODE code = GET_CODE (x);
91 int i;
92 const char *fmt;
93
94 switch (code)
95 {
96 case MEM:
97 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
98
99 case CONST:
100 CASE_CONST_ANY:
101 case SYMBOL_REF:
102 case LABEL_REF:
103 return 0;
104
105 case REG:
106 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
107 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
108 /* The arg pointer varies if it is not a fixed register. */
109 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
110 return 0;
111 /* ??? When call-clobbered, the value is stable modulo the restore
112 that must happen after a call. This currently screws up local-alloc
113 into believing that the restore is not needed. */
114 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
115 return 0;
116 return 1;
117
118 case ASM_OPERANDS:
119 if (MEM_VOLATILE_P (x))
120 return 1;
121
122 /* Fall through. */
123
124 default:
125 break;
126 }
127
128 fmt = GET_RTX_FORMAT (code);
129 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
130 if (fmt[i] == 'e')
131 {
132 if (rtx_unstable_p (XEXP (x, i)))
133 return 1;
134 }
135 else if (fmt[i] == 'E')
136 {
137 int j;
138 for (j = 0; j < XVECLEN (x, i); j++)
139 if (rtx_unstable_p (XVECEXP (x, i, j)))
140 return 1;
141 }
142
143 return 0;
144 }
145
146 /* Return 1 if X has a value that can vary even between two
147 executions of the program. 0 means X can be compared reliably
148 against certain constants or near-constants.
149 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
150 zero, we are slightly more conservative.
151 The frame pointer and the arg pointer are considered constant. */
152
153 bool
154 rtx_varies_p (const_rtx x, bool for_alias)
155 {
156 RTX_CODE code;
157 int i;
158 const char *fmt;
159
160 if (!x)
161 return 0;
162
163 code = GET_CODE (x);
164 switch (code)
165 {
166 case MEM:
167 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
168
169 case CONST:
170 CASE_CONST_ANY:
171 case SYMBOL_REF:
172 case LABEL_REF:
173 return 0;
174
175 case REG:
176 /* Note that we have to test for the actual rtx used for the frame
177 and arg pointers and not just the register number in case we have
178 eliminated the frame and/or arg pointer and are using it
179 for pseudos. */
180 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
181 /* The arg pointer varies if it is not a fixed register. */
182 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
183 return 0;
184 if (x == pic_offset_table_rtx
185 /* ??? When call-clobbered, the value is stable modulo the restore
186 that must happen after a call. This currently screws up
187 local-alloc into believing that the restore is not needed, so we
188 must return 0 only if we are called from alias analysis. */
189 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
190 return 0;
191 return 1;
192
193 case LO_SUM:
194 /* The operand 0 of a LO_SUM is considered constant
195 (in fact it is related specifically to operand 1)
196 during alias analysis. */
197 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
198 || rtx_varies_p (XEXP (x, 1), for_alias);
199
200 case ASM_OPERANDS:
201 if (MEM_VOLATILE_P (x))
202 return 1;
203
204 /* Fall through. */
205
206 default:
207 break;
208 }
209
210 fmt = GET_RTX_FORMAT (code);
211 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
212 if (fmt[i] == 'e')
213 {
214 if (rtx_varies_p (XEXP (x, i), for_alias))
215 return 1;
216 }
217 else if (fmt[i] == 'E')
218 {
219 int j;
220 for (j = 0; j < XVECLEN (x, i); j++)
221 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
222 return 1;
223 }
224
225 return 0;
226 }
227
228 /* Return nonzero if the use of X as an address in a MEM can cause a trap.
229 MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
230 whether nonzero is returned for unaligned memory accesses on strict
231 alignment machines. */
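/* For example, (plus:SI (symbol_ref ("x")) (const_int 4)) is considered
   safe as long as the access is known to stay within the bounds of `x',
   whereas an address held in an arbitrary pseudo register is assumed to
   be able to trap.  (Illustrative; the exact behavior follows the checks
   below.)  */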
232
233 static int
234 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
235 enum machine_mode mode, bool unaligned_mems)
236 {
237 enum rtx_code code = GET_CODE (x);
238
239 if (STRICT_ALIGNMENT
240 && unaligned_mems
241 && GET_MODE_SIZE (mode) != 0)
242 {
243 HOST_WIDE_INT actual_offset = offset;
244 #ifdef SPARC_STACK_BOUNDARY_HACK
245 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
246 the real alignment of %sp. However, when it does this, the
247 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
248 if (SPARC_STACK_BOUNDARY_HACK
249 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
250 actual_offset -= STACK_POINTER_OFFSET;
251 #endif
252
253 if (actual_offset % GET_MODE_SIZE (mode) != 0)
254 return 1;
255 }
256
257 switch (code)
258 {
259 case SYMBOL_REF:
260 if (SYMBOL_REF_WEAK (x))
261 return 1;
262 if (!CONSTANT_POOL_ADDRESS_P (x))
263 {
264 tree decl;
265 HOST_WIDE_INT decl_size;
266
267 if (offset < 0)
268 return 1;
269 if (size == 0)
270 size = GET_MODE_SIZE (mode);
271 if (size == 0)
272 return offset != 0;
273
274 /* If the size of the access or of the symbol is unknown,
275 assume the worst. */
276 decl = SYMBOL_REF_DECL (x);
277
278 /* Else check that the access is in bounds. TODO: restructure
279 expr_size/tree_expr_size/int_expr_size and just use the latter. */
280 if (!decl)
281 decl_size = -1;
282 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
283 decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
284 ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
285 : -1);
286 else if (TREE_CODE (decl) == STRING_CST)
287 decl_size = TREE_STRING_LENGTH (decl);
288 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
289 decl_size = int_size_in_bytes (TREE_TYPE (decl));
290 else
291 decl_size = -1;
292
293 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
294 }
295
296 return 0;
297
298 case LABEL_REF:
299 return 0;
300
301 case REG:
302 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
303 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
304 || x == stack_pointer_rtx
305 /* The arg pointer varies if it is not a fixed register. */
306 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
307 return 0;
308 /* All of the virtual frame registers are stack references. */
309 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
310 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
311 return 0;
312 return 1;
313
314 case CONST:
315 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
316 mode, unaligned_mems);
317
318 case PLUS:
319 /* An address is assumed not to trap if:
320 - it is the pic register plus a constant. */
321 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
322 return 0;
323
324 /* - or it is an address that can't trap plus a constant integer,
325 with the proper remainder modulo the mode size if we are
326 considering unaligned memory references. */
327 if (CONST_INT_P (XEXP (x, 1))
328 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
329 size, mode, unaligned_mems))
330 return 0;
331
332 return 1;
333
334 case LO_SUM:
335 case PRE_MODIFY:
336 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
337 mode, unaligned_mems);
338
339 case PRE_DEC:
340 case PRE_INC:
341 case POST_DEC:
342 case POST_INC:
343 case POST_MODIFY:
344 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
345 mode, unaligned_mems);
346
347 default:
348 break;
349 }
350
351   /* If it isn't one of the cases above, it can cause a trap.  */
352 return 1;
353 }
354
355 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
356
357 int
358 rtx_addr_can_trap_p (const_rtx x)
359 {
360 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
361 }
362
363 /* Return true if X is an address that is known to not be zero. */
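/* For example, (symbol_ref ("x")) is known to be nonzero unless the symbol
   is weak, and the stack and frame pointers are always treated as nonzero.  */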
364
365 bool
366 nonzero_address_p (const_rtx x)
367 {
368 const enum rtx_code code = GET_CODE (x);
369
370 switch (code)
371 {
372 case SYMBOL_REF:
373 return !SYMBOL_REF_WEAK (x);
374
375 case LABEL_REF:
376 return true;
377
378 case REG:
379 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
380 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
381 || x == stack_pointer_rtx
382 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
383 return true;
384 /* All of the virtual frame registers are stack references. */
385 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
386 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
387 return true;
388 return false;
389
390 case CONST:
391 return nonzero_address_p (XEXP (x, 0));
392
393 case PLUS:
394 if (CONST_INT_P (XEXP (x, 1)))
395 return nonzero_address_p (XEXP (x, 0));
396 /* Handle PIC references. */
397 else if (XEXP (x, 0) == pic_offset_table_rtx
398 && CONSTANT_P (XEXP (x, 1)))
399 return true;
400 return false;
401
402 case PRE_MODIFY:
403 /* Similar to the above; allow positive offsets. Further, since
404 auto-inc is only allowed in memories, the register must be a
405 pointer. */
406 if (CONST_INT_P (XEXP (x, 1))
407 && INTVAL (XEXP (x, 1)) > 0)
408 return true;
409 return nonzero_address_p (XEXP (x, 0));
410
411 case PRE_INC:
412 /* Similarly. Further, the offset is always positive. */
413 return true;
414
415 case PRE_DEC:
416 case POST_DEC:
417 case POST_INC:
418 case POST_MODIFY:
419 return nonzero_address_p (XEXP (x, 0));
420
421 case LO_SUM:
422 return nonzero_address_p (XEXP (x, 1));
423
424 default:
425 break;
426 }
427
428   /* If it isn't one of the cases above, it might be zero.  */
429 return false;
430 }
431
432 /* Return 1 if X refers to a memory location whose address
433 cannot be compared reliably with constant addresses,
434 or if X refers to a BLKmode memory object.
435 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
436 zero, we are slightly more conservative. */
437
438 bool
439 rtx_addr_varies_p (const_rtx x, bool for_alias)
440 {
441 enum rtx_code code;
442 int i;
443 const char *fmt;
444
445 if (x == 0)
446 return 0;
447
448 code = GET_CODE (x);
449 if (code == MEM)
450 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
451
452 fmt = GET_RTX_FORMAT (code);
453 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
454 if (fmt[i] == 'e')
455 {
456 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
457 return 1;
458 }
459 else if (fmt[i] == 'E')
460 {
461 int j;
462 for (j = 0; j < XVECLEN (x, i); j++)
463 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
464 return 1;
465 }
466 return 0;
467 }
468 \f
469 /* Return the CALL in X if there is one. */
470
471 rtx
472 get_call_rtx_from (rtx x)
473 {
474 if (INSN_P (x))
475 x = PATTERN (x);
476 if (GET_CODE (x) == PARALLEL)
477 x = XVECEXP (x, 0, 0);
478 if (GET_CODE (x) == SET)
479 x = SET_SRC (x);
480 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
481 return x;
482 return NULL_RTX;
483 }
484 \f
485 /* Return the value of the integer term in X, if one is apparent;
486 otherwise return 0.
487 Only obvious integer terms are detected.
488    This is used in cse.c with the `related_value' field.  */
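/* For example, for (const (plus (symbol_ref ("x")) (const_int 12))) this
   returns 12, and get_related_value below returns the (symbol_ref ("x"))
   part of the same expression.  (Illustrative RTL only.)  */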
489
490 HOST_WIDE_INT
491 get_integer_term (const_rtx x)
492 {
493 if (GET_CODE (x) == CONST)
494 x = XEXP (x, 0);
495
496 if (GET_CODE (x) == MINUS
497 && CONST_INT_P (XEXP (x, 1)))
498 return - INTVAL (XEXP (x, 1));
499 if (GET_CODE (x) == PLUS
500 && CONST_INT_P (XEXP (x, 1)))
501 return INTVAL (XEXP (x, 1));
502 return 0;
503 }
504
505 /* If X is a constant, return the value sans apparent integer term;
506 otherwise return 0.
507 Only obvious integer terms are detected. */
508
509 rtx
510 get_related_value (const_rtx x)
511 {
512 if (GET_CODE (x) != CONST)
513 return 0;
514 x = XEXP (x, 0);
515 if (GET_CODE (x) == PLUS
516 && CONST_INT_P (XEXP (x, 1)))
517 return XEXP (x, 0);
518 else if (GET_CODE (x) == MINUS
519 && CONST_INT_P (XEXP (x, 1)))
520 return XEXP (x, 0);
521 return 0;
522 }
523 \f
524 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
525 to somewhere in the same object or object_block as SYMBOL. */
526
527 bool
528 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
529 {
530 tree decl;
531
532 if (GET_CODE (symbol) != SYMBOL_REF)
533 return false;
534
535 if (offset == 0)
536 return true;
537
538 if (offset > 0)
539 {
540 if (CONSTANT_POOL_ADDRESS_P (symbol)
541 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
542 return true;
543
544 decl = SYMBOL_REF_DECL (symbol);
545 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
546 return true;
547 }
548
549 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
550 && SYMBOL_REF_BLOCK (symbol)
551 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
552 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
553 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
554 return true;
555
556 return false;
557 }
558
559 /* Split X into a base and a constant offset, storing them in *BASE_OUT
560 and *OFFSET_OUT respectively. */
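/* For example, (const (plus (symbol_ref ("x")) (const_int 8))) is split into
   *BASE_OUT = (symbol_ref ("x")) and *OFFSET_OUT = (const_int 8); any other
   rtx is returned unchanged in *BASE_OUT with *OFFSET_OUT = const0_rtx.  */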
561
562 void
563 split_const (rtx x, rtx *base_out, rtx *offset_out)
564 {
565 if (GET_CODE (x) == CONST)
566 {
567 x = XEXP (x, 0);
568 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
569 {
570 *base_out = XEXP (x, 0);
571 *offset_out = XEXP (x, 1);
572 return;
573 }
574 }
575 *base_out = x;
576 *offset_out = const0_rtx;
577 }
578 \f
579 /* Return the number of places FIND appears within X. If COUNT_DEST is
580 zero, we do not count occurrences inside the destination of a SET. */
581
582 int
583 count_occurrences (const_rtx x, const_rtx find, int count_dest)
584 {
585 int i, j;
586 enum rtx_code code;
587 const char *format_ptr;
588 int count;
589
590 if (x == find)
591 return 1;
592
593 code = GET_CODE (x);
594
595 switch (code)
596 {
597 case REG:
598 CASE_CONST_ANY:
599 case SYMBOL_REF:
600 case CODE_LABEL:
601 case PC:
602 case CC0:
603 return 0;
604
605 case EXPR_LIST:
606 count = count_occurrences (XEXP (x, 0), find, count_dest);
607 if (XEXP (x, 1))
608 count += count_occurrences (XEXP (x, 1), find, count_dest);
609 return count;
610
611 case MEM:
612 if (MEM_P (find) && rtx_equal_p (x, find))
613 return 1;
614 break;
615
616 case SET:
617 if (SET_DEST (x) == find && ! count_dest)
618 return count_occurrences (SET_SRC (x), find, count_dest);
619 break;
620
621 default:
622 break;
623 }
624
625 format_ptr = GET_RTX_FORMAT (code);
626 count = 0;
627
628 for (i = 0; i < GET_RTX_LENGTH (code); i++)
629 {
630 switch (*format_ptr++)
631 {
632 case 'e':
633 count += count_occurrences (XEXP (x, i), find, count_dest);
634 break;
635
636 case 'E':
637 for (j = 0; j < XVECLEN (x, i); j++)
638 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
639 break;
640 }
641 }
642 return count;
643 }
644
645 \f
646 /* Return TRUE if OP is a register or subreg of a register that
647 holds an unsigned quantity. Otherwise, return FALSE. */
648
649 bool
650 unsigned_reg_p (rtx op)
651 {
652 if (REG_P (op)
653 && REG_EXPR (op)
654 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
655 return true;
656
657 if (GET_CODE (op) == SUBREG
658 && SUBREG_PROMOTED_UNSIGNED_P (op))
659 return true;
660
661 return false;
662 }
663
664 \f
665 /* Nonzero if register REG appears somewhere within IN.
666 Also works if REG is not a register; in this case it checks
667 for a subexpression of IN that is Lisp "equal" to REG. */
668
669 int
670 reg_mentioned_p (const_rtx reg, const_rtx in)
671 {
672 const char *fmt;
673 int i;
674 enum rtx_code code;
675
676 if (in == 0)
677 return 0;
678
679 if (reg == in)
680 return 1;
681
682 if (GET_CODE (in) == LABEL_REF)
683 return reg == XEXP (in, 0);
684
685 code = GET_CODE (in);
686
687 switch (code)
688 {
689 /* Compare registers by number. */
690 case REG:
691 return REG_P (reg) && REGNO (in) == REGNO (reg);
692
693 /* These codes have no constituent expressions
694 and are unique. */
695 case SCRATCH:
696 case CC0:
697 case PC:
698 return 0;
699
700 CASE_CONST_ANY:
701 /* These are kept unique for a given value. */
702 return 0;
703
704 default:
705 break;
706 }
707
708 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
709 return 1;
710
711 fmt = GET_RTX_FORMAT (code);
712
713 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
714 {
715 if (fmt[i] == 'E')
716 {
717 int j;
718 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
719 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
720 return 1;
721 }
722 else if (fmt[i] == 'e'
723 && reg_mentioned_p (reg, XEXP (in, i)))
724 return 1;
725 }
726 return 0;
727 }
728 \f
729 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
730 no CODE_LABEL insn. */
731
732 int
733 no_labels_between_p (const_rtx beg, const_rtx end)
734 {
735 rtx p;
736 if (beg == end)
737 return 0;
738 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
739 if (LABEL_P (p))
740 return 0;
741 return 1;
742 }
743
744 /* Nonzero if register REG is used in an insn between
745 FROM_INSN and TO_INSN (exclusive of those two). */
746
747 int
748 reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
749 {
750 rtx insn;
751
752 if (from_insn == to_insn)
753 return 0;
754
755 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
756 if (NONDEBUG_INSN_P (insn)
757 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
758 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
759 return 1;
760 return 0;
761 }
762 \f
763 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
764 is entirely replaced by a new value and the only use is as a SET_DEST,
765 we do not consider it a reference. */
766
767 int
768 reg_referenced_p (const_rtx x, const_rtx body)
769 {
770 int i;
771
772 switch (GET_CODE (body))
773 {
774 case SET:
775 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
776 return 1;
777
778 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
779 of a REG that occupies all of the REG, the insn references X if
780 it is mentioned in the destination. */
781 if (GET_CODE (SET_DEST (body)) != CC0
782 && GET_CODE (SET_DEST (body)) != PC
783 && !REG_P (SET_DEST (body))
784 && ! (GET_CODE (SET_DEST (body)) == SUBREG
785 && REG_P (SUBREG_REG (SET_DEST (body)))
786 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
787 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
788 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
789 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
790 && reg_overlap_mentioned_p (x, SET_DEST (body)))
791 return 1;
792 return 0;
793
794 case ASM_OPERANDS:
795 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
796 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
797 return 1;
798 return 0;
799
800 case CALL:
801 case USE:
802 case IF_THEN_ELSE:
803 return reg_overlap_mentioned_p (x, body);
804
805 case TRAP_IF:
806 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
807
808 case PREFETCH:
809 return reg_overlap_mentioned_p (x, XEXP (body, 0));
810
811 case UNSPEC:
812 case UNSPEC_VOLATILE:
813 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
814 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
815 return 1;
816 return 0;
817
818 case PARALLEL:
819 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
820 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
821 return 1;
822 return 0;
823
824 case CLOBBER:
825 if (MEM_P (XEXP (body, 0)))
826 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
827 return 1;
828 return 0;
829
830 case COND_EXEC:
831 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
832 return 1;
833 return reg_referenced_p (x, COND_EXEC_CODE (body));
834
835 default:
836 return 0;
837 }
838 }
839 \f
840 /* Nonzero if register REG is set or clobbered in an insn between
841 FROM_INSN and TO_INSN (exclusive of those two). */
842
843 int
844 reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
845 {
846 const_rtx insn;
847
848 if (from_insn == to_insn)
849 return 0;
850
851 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
852 if (INSN_P (insn) && reg_set_p (reg, insn))
853 return 1;
854 return 0;
855 }
856
857 /* Internals of reg_set_between_p. */
858 int
859 reg_set_p (const_rtx reg, const_rtx insn)
860 {
861 /* We can be passed an insn or part of one. If we are passed an insn,
862 check if a side-effect of the insn clobbers REG. */
863 if (INSN_P (insn)
864 && (FIND_REG_INC_NOTE (insn, reg)
865 || (CALL_P (insn)
866 && ((REG_P (reg)
867 && REGNO (reg) < FIRST_PSEUDO_REGISTER
868 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
869 GET_MODE (reg), REGNO (reg)))
870 || MEM_P (reg)
871 || find_reg_fusage (insn, CLOBBER, reg)))))
872 return 1;
873
874 return set_of (reg, insn) != NULL_RTX;
875 }
876
877 /* Similar to reg_set_between_p, but check all registers in X. Return 0
878 only if none of them are modified between START and END. Return 1 if
879 X contains a MEM; this routine does use memory aliasing. */
880
881 int
882 modified_between_p (const_rtx x, const_rtx start, const_rtx end)
883 {
884 const enum rtx_code code = GET_CODE (x);
885 const char *fmt;
886 int i, j;
887 rtx insn;
888
889 if (start == end)
890 return 0;
891
892 switch (code)
893 {
894 CASE_CONST_ANY:
895 case CONST:
896 case SYMBOL_REF:
897 case LABEL_REF:
898 return 0;
899
900 case PC:
901 case CC0:
902 return 1;
903
904 case MEM:
905 if (modified_between_p (XEXP (x, 0), start, end))
906 return 1;
907 if (MEM_READONLY_P (x))
908 return 0;
909 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
910 if (memory_modified_in_insn_p (x, insn))
911 return 1;
912 return 0;
913 break;
914
915 case REG:
916 return reg_set_between_p (x, start, end);
917
918 default:
919 break;
920 }
921
922 fmt = GET_RTX_FORMAT (code);
923 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
924 {
925 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
926 return 1;
927
928 else if (fmt[i] == 'E')
929 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
930 if (modified_between_p (XVECEXP (x, i, j), start, end))
931 return 1;
932 }
933
934 return 0;
935 }
936
937 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
938 of them are modified in INSN. Return 1 if X contains a MEM; this routine
939 does use memory aliasing. */
940
941 int
942 modified_in_p (const_rtx x, const_rtx insn)
943 {
944 const enum rtx_code code = GET_CODE (x);
945 const char *fmt;
946 int i, j;
947
948 switch (code)
949 {
950 CASE_CONST_ANY:
951 case CONST:
952 case SYMBOL_REF:
953 case LABEL_REF:
954 return 0;
955
956 case PC:
957 case CC0:
958 return 1;
959
960 case MEM:
961 if (modified_in_p (XEXP (x, 0), insn))
962 return 1;
963 if (MEM_READONLY_P (x))
964 return 0;
965 if (memory_modified_in_insn_p (x, insn))
966 return 1;
967 return 0;
968 break;
969
970 case REG:
971 return reg_set_p (x, insn);
972
973 default:
974 break;
975 }
976
977 fmt = GET_RTX_FORMAT (code);
978 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
979 {
980 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
981 return 1;
982
983 else if (fmt[i] == 'E')
984 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
985 if (modified_in_p (XVECEXP (x, i, j), insn))
986 return 1;
987 }
988
989 return 0;
990 }
991 \f
992 /* Helper function for set_of. */
993 struct set_of_data
994 {
995 const_rtx found;
996 const_rtx pat;
997 };
998
999 static void
1000 set_of_1 (rtx x, const_rtx pat, void *data1)
1001 {
1002 struct set_of_data *const data = (struct set_of_data *) (data1);
1003 if (rtx_equal_p (x, data->pat)
1004 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1005 data->found = pat;
1006 }
1007
1008 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1009 (either directly or via STRICT_LOW_PART and similar modifiers). */
1010 const_rtx
1011 set_of (const_rtx pat, const_rtx insn)
1012 {
1013 struct set_of_data data;
1014 data.found = NULL_RTX;
1015 data.pat = pat;
1016 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1017 return data.found;
1018 }
1019
1020 /* This function, called through note_stores, collects sets and
1021 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1022 by DATA. */
1023 void
1024 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1025 {
1026 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1027 if (REG_P (x) && HARD_REGISTER_P (x))
1028 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1029 }
1030
1031 /* Examine INSN, and compute the set of hard registers written by it.
1032 Store it in *PSET. Should only be called after reload. */
1033 void
1034 find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
1035 {
1036 rtx link;
1037
1038 CLEAR_HARD_REG_SET (*pset);
1039 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1040 if (CALL_P (insn))
1041 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1042 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1043 if (REG_NOTE_KIND (link) == REG_INC)
1044 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1045 }
1046
1047 /* A for_each_rtx subroutine of record_hard_reg_uses. */
1048 static int
1049 record_hard_reg_uses_1 (rtx *px, void *data)
1050 {
1051 rtx x = *px;
1052 HARD_REG_SET *pused = (HARD_REG_SET *)data;
1053
1054 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1055 {
1056 int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
1057 while (nregs-- > 0)
1058 SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
1059 }
1060 return 0;
1061 }
1062
1063 /* Like record_hard_reg_sets, but called through note_uses. */
1064 void
1065 record_hard_reg_uses (rtx *px, void *data)
1066 {
1067 for_each_rtx (px, record_hard_reg_uses_1, data);
1068 }
1069 \f
1070 /* Given an INSN, return a SET expression if this insn has only a single SET.
1071    It may also have CLOBBERs, USEs, or SETs whose outputs
1072 will not be used, which we ignore. */
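/* For example, a pattern such as
     (parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
                (clobber (reg:CC 17))])
   is treated as a single set of (reg:SI 100).  (Illustrative; register 17
   stands in for a target's condition-code register.)  */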
1073
1074 rtx
1075 single_set_2 (const_rtx insn, const_rtx pat)
1076 {
1077 rtx set = NULL;
1078 int set_verified = 1;
1079 int i;
1080
1081 if (GET_CODE (pat) == PARALLEL)
1082 {
1083 for (i = 0; i < XVECLEN (pat, 0); i++)
1084 {
1085 rtx sub = XVECEXP (pat, 0, i);
1086 switch (GET_CODE (sub))
1087 {
1088 case USE:
1089 case CLOBBER:
1090 break;
1091
1092 case SET:
1093 /* We can consider insns having multiple sets, where all
1094    but one are dead, as single set insns.  In the common case
1095    only a single set is present in the pattern, so we want
1096    to avoid checking for REG_UNUSED notes unless necessary.
1097
1098    When we reach a set for the first time, we just expect it to be
1099    the single set we are looking for; only when more
1100    sets are found in the insn do we check them.  */
1101 if (!set_verified)
1102 {
1103 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1104 && !side_effects_p (set))
1105 set = NULL;
1106 else
1107 set_verified = 1;
1108 }
1109 if (!set)
1110 set = sub, set_verified = 0;
1111 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1112 || side_effects_p (sub))
1113 return NULL_RTX;
1114 break;
1115
1116 default:
1117 return NULL_RTX;
1118 }
1119 }
1120 }
1121 return set;
1122 }
1123
1124 /* Given an INSN, return nonzero if it has more than one SET, else return
1125 zero. */
1126
1127 int
1128 multiple_sets (const_rtx insn)
1129 {
1130 int found;
1131 int i;
1132
1133 /* INSN must be an insn. */
1134 if (! INSN_P (insn))
1135 return 0;
1136
1137 /* Only a PARALLEL can have multiple SETs. */
1138 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1139 {
1140 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1141 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1142 {
1143 /* If we have already found a SET, then return now. */
1144 if (found)
1145 return 1;
1146 else
1147 found = 1;
1148 }
1149 }
1150
1151 /* Either zero or one SET. */
1152 return 0;
1153 }
1154 \f
1155 /* Return nonzero if the destination of SET equals the source
1156 and there are no side effects. */
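/* For example, (set (reg:SI 100) (reg:SI 100)) is a no-op, as is a copy of
   a MEM onto an rtx_equal_p MEM with no side effects.  */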
1157
1158 int
1159 set_noop_p (const_rtx set)
1160 {
1161 rtx src = SET_SRC (set);
1162 rtx dst = SET_DEST (set);
1163
1164 if (dst == pc_rtx && src == pc_rtx)
1165 return 1;
1166
1167 if (MEM_P (dst) && MEM_P (src))
1168 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1169
1170 if (GET_CODE (dst) == ZERO_EXTRACT)
1171 return rtx_equal_p (XEXP (dst, 0), src)
1172 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1173 && !side_effects_p (src);
1174
1175 if (GET_CODE (dst) == STRICT_LOW_PART)
1176 dst = XEXP (dst, 0);
1177
1178 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1179 {
1180 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1181 return 0;
1182 src = SUBREG_REG (src);
1183 dst = SUBREG_REG (dst);
1184 }
1185
1186 return (REG_P (src) && REG_P (dst)
1187 && REGNO (src) == REGNO (dst));
1188 }
1189 \f
1190 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1191 value to itself. */
1192
1193 int
1194 noop_move_p (const_rtx insn)
1195 {
1196 rtx pat = PATTERN (insn);
1197
1198 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1199 return 1;
1200
1201 /* Insns carrying these notes are useful later on. */
1202 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1203 return 0;
1204
1205 if (GET_CODE (pat) == SET && set_noop_p (pat))
1206 return 1;
1207
1208 if (GET_CODE (pat) == PARALLEL)
1209 {
1210 int i;
1211 /* If nothing but SETs of registers to themselves,
1212 this insn can also be deleted. */
1213 for (i = 0; i < XVECLEN (pat, 0); i++)
1214 {
1215 rtx tem = XVECEXP (pat, 0, i);
1216
1217 if (GET_CODE (tem) == USE
1218 || GET_CODE (tem) == CLOBBER)
1219 continue;
1220
1221 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1222 return 0;
1223 }
1224
1225 return 1;
1226 }
1227 return 0;
1228 }
1229 \f
1230
1231 /* Return the last thing that X was assigned from before *PINSN. If VALID_TO
1232 is not NULL_RTX then verify that the object is not modified up to VALID_TO.
1233 If the object was modified, if we hit a partial assignment to X, or hit a
1234 CODE_LABEL first, return X. If we found an assignment, update *PINSN to
1235 point to it. ALLOW_HWREG is set to 1 if hardware registers are allowed to
1236 be the src. */
1237
1238 rtx
1239 find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
1240 {
1241 rtx p;
1242
1243 for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
1244 p = PREV_INSN (p))
1245 if (INSN_P (p))
1246 {
1247 rtx set = single_set (p);
1248 rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
1249
1250 if (set && rtx_equal_p (x, SET_DEST (set)))
1251 {
1252 rtx src = SET_SRC (set);
1253
1254 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
1255 src = XEXP (note, 0);
1256
1257 if ((valid_to == NULL_RTX
1258 || ! modified_between_p (src, PREV_INSN (p), valid_to))
1259 /* Reject hard registers because we don't usually want
1260 to use them; we'd rather use a pseudo. */
1261 && (! (REG_P (src)
1262 && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
1263 {
1264 *pinsn = p;
1265 return src;
1266 }
1267 }
1268
1269       /* If set in a non-simple way, we don't have a value.  */
1270 if (reg_set_p (x, p))
1271 break;
1272 }
1273
1274 return x;
1275 }
1276 \f
1277 /* Return nonzero if a register in the range [REGNO, ENDREGNO)
1278 appears either explicitly or implicitly in X
1279 other than being stored into.
1280
1281 References contained within the substructure at LOC do not count.
1282 LOC may be zero, meaning don't ignore anything. */
1283
1284 int
1285 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1286 rtx *loc)
1287 {
1288 int i;
1289 unsigned int x_regno;
1290 RTX_CODE code;
1291 const char *fmt;
1292
1293 repeat:
1294   /* The contents of a REG_NONNEG note are always zero, so we must come here
1295 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1296 if (x == 0)
1297 return 0;
1298
1299 code = GET_CODE (x);
1300
1301 switch (code)
1302 {
1303 case REG:
1304 x_regno = REGNO (x);
1305
1306       /* If we are modifying the stack, frame, or argument pointer, it will
1307 clobber a virtual register. In fact, we could be more precise,
1308 but it isn't worth it. */
1309 if ((x_regno == STACK_POINTER_REGNUM
1310 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1311 || x_regno == ARG_POINTER_REGNUM
1312 #endif
1313 || x_regno == FRAME_POINTER_REGNUM)
1314 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1315 return 1;
1316
1317 return endregno > x_regno && regno < END_REGNO (x);
1318
1319 case SUBREG:
1320 /* If this is a SUBREG of a hard reg, we can see exactly which
1321 registers are being modified. Otherwise, handle normally. */
1322 if (REG_P (SUBREG_REG (x))
1323 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1324 {
1325 unsigned int inner_regno = subreg_regno (x);
1326 unsigned int inner_endregno
1327 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1328 ? subreg_nregs (x) : 1);
1329
1330 return endregno > inner_regno && regno < inner_endregno;
1331 }
1332 break;
1333
1334 case CLOBBER:
1335 case SET:
1336 if (&SET_DEST (x) != loc
1337 /* Note setting a SUBREG counts as referring to the REG it is in for
1338 a pseudo but not for hard registers since we can
1339 treat each word individually. */
1340 && ((GET_CODE (SET_DEST (x)) == SUBREG
1341 && loc != &SUBREG_REG (SET_DEST (x))
1342 && REG_P (SUBREG_REG (SET_DEST (x)))
1343 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1344 && refers_to_regno_p (regno, endregno,
1345 SUBREG_REG (SET_DEST (x)), loc))
1346 || (!REG_P (SET_DEST (x))
1347 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1348 return 1;
1349
1350 if (code == CLOBBER || loc == &SET_SRC (x))
1351 return 0;
1352 x = SET_SRC (x);
1353 goto repeat;
1354
1355 default:
1356 break;
1357 }
1358
1359 /* X does not match, so try its subexpressions. */
1360
1361 fmt = GET_RTX_FORMAT (code);
1362 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1363 {
1364 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1365 {
1366 if (i == 0)
1367 {
1368 x = XEXP (x, 0);
1369 goto repeat;
1370 }
1371 else
1372 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1373 return 1;
1374 }
1375 else if (fmt[i] == 'E')
1376 {
1377 int j;
1378 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1379 if (loc != &XVECEXP (x, i, j)
1380 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1381 return 1;
1382 }
1383 }
1384 return 0;
1385 }
1386
1387 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1388 we check if any register number in X conflicts with the relevant register
1389 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1390    contains a MEM (we don't bother checking for memory addresses that can't
1391    conflict because we expect this to be a rare case).  */
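/* For example, on a target where (reg:DI 0) occupies hard registers 0 and 1,
   modifying it affects a use of (reg:SI 1).  (Illustrative; the layout of
   hard registers is target-dependent.)  */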
1392
1393 int
1394 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1395 {
1396 unsigned int regno, endregno;
1397
1398 /* If either argument is a constant, then modifying X can not
1399      affect IN.  Here we look at IN; we could profitably combine
1400 CONSTANT_P (x) with the switch statement below. */
1401 if (CONSTANT_P (in))
1402 return 0;
1403
1404 recurse:
1405 switch (GET_CODE (x))
1406 {
1407 case STRICT_LOW_PART:
1408 case ZERO_EXTRACT:
1409 case SIGN_EXTRACT:
1410 /* Overly conservative. */
1411 x = XEXP (x, 0);
1412 goto recurse;
1413
1414 case SUBREG:
1415 regno = REGNO (SUBREG_REG (x));
1416 if (regno < FIRST_PSEUDO_REGISTER)
1417 regno = subreg_regno (x);
1418 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1419 ? subreg_nregs (x) : 1);
1420 goto do_reg;
1421
1422 case REG:
1423 regno = REGNO (x);
1424 endregno = END_REGNO (x);
1425 do_reg:
1426 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1427
1428 case MEM:
1429 {
1430 const char *fmt;
1431 int i;
1432
1433 if (MEM_P (in))
1434 return 1;
1435
1436 fmt = GET_RTX_FORMAT (GET_CODE (in));
1437 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1438 if (fmt[i] == 'e')
1439 {
1440 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1441 return 1;
1442 }
1443 else if (fmt[i] == 'E')
1444 {
1445 int j;
1446 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1447 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1448 return 1;
1449 }
1450
1451 return 0;
1452 }
1453
1454 case SCRATCH:
1455 case PC:
1456 case CC0:
1457 return reg_mentioned_p (x, in);
1458
1459 case PARALLEL:
1460 {
1461 int i;
1462
1463 	/* If any register in here refers to it, we return true.  */
1464 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1465 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1466 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1467 return 1;
1468 return 0;
1469 }
1470
1471 default:
1472 gcc_assert (CONSTANT_P (x));
1473 return 0;
1474 }
1475 }
1476 \f
1477 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1478 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1479 ignored by note_stores, but passed to FUN.
1480
1481 FUN receives three arguments:
1482 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1483 2. the SET or CLOBBER rtx that does the store,
1484 3. the pointer DATA provided to note_stores.
1485
1486 If the item being stored in or clobbered is a SUBREG of a hard register,
1487 the SUBREG will be passed. */
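/* A typical use is
     note_stores (PATTERN (insn), record_hard_reg_sets, &hard_regs_set);
   as in find_all_hard_reg_sets above, where hard_regs_set is a HARD_REG_SET
   declared by the caller and the callback accumulates the hard registers
   written by the insn.  (Usage sketch only.)  */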
1488
1489 void
1490 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1491 {
1492 int i;
1493
1494 if (GET_CODE (x) == COND_EXEC)
1495 x = COND_EXEC_CODE (x);
1496
1497 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1498 {
1499 rtx dest = SET_DEST (x);
1500
1501 while ((GET_CODE (dest) == SUBREG
1502 && (!REG_P (SUBREG_REG (dest))
1503 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1504 || GET_CODE (dest) == ZERO_EXTRACT
1505 || GET_CODE (dest) == STRICT_LOW_PART)
1506 dest = XEXP (dest, 0);
1507
1508 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1509 each of whose first operand is a register. */
1510 if (GET_CODE (dest) == PARALLEL)
1511 {
1512 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1513 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1514 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1515 }
1516 else
1517 (*fun) (dest, x, data);
1518 }
1519
1520 else if (GET_CODE (x) == PARALLEL)
1521 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1522 note_stores (XVECEXP (x, 0, i), fun, data);
1523 }
1524 \f
1525 /* Like note_stores, but call FUN for each expression that is being
1526 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1527 FUN for each expression, not any interior subexpressions. FUN receives a
1528 pointer to the expression and the DATA passed to this function.
1529
1530 Note that this is not quite the same test as that done in reg_referenced_p
1531 since that considers something as being referenced if it is being
1532 partially set, while we do not. */
1533
1534 void
1535 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1536 {
1537 rtx body = *pbody;
1538 int i;
1539
1540 switch (GET_CODE (body))
1541 {
1542 case COND_EXEC:
1543 (*fun) (&COND_EXEC_TEST (body), data);
1544 note_uses (&COND_EXEC_CODE (body), fun, data);
1545 return;
1546
1547 case PARALLEL:
1548 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1549 note_uses (&XVECEXP (body, 0, i), fun, data);
1550 return;
1551
1552 case SEQUENCE:
1553 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1554 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1555 return;
1556
1557 case USE:
1558 (*fun) (&XEXP (body, 0), data);
1559 return;
1560
1561 case ASM_OPERANDS:
1562 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1563 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1564 return;
1565
1566 case TRAP_IF:
1567 (*fun) (&TRAP_CONDITION (body), data);
1568 return;
1569
1570 case PREFETCH:
1571 (*fun) (&XEXP (body, 0), data);
1572 return;
1573
1574 case UNSPEC:
1575 case UNSPEC_VOLATILE:
1576 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1577 (*fun) (&XVECEXP (body, 0, i), data);
1578 return;
1579
1580 case CLOBBER:
1581 if (MEM_P (XEXP (body, 0)))
1582 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1583 return;
1584
1585 case SET:
1586 {
1587 rtx dest = SET_DEST (body);
1588
1589 	/* For sets we process everything in the source, plus registers in the
1590 	   memory expression of the store and the operands of a ZERO_EXTRACT.  */
1591 (*fun) (&SET_SRC (body), data);
1592
1593 if (GET_CODE (dest) == ZERO_EXTRACT)
1594 {
1595 (*fun) (&XEXP (dest, 1), data);
1596 (*fun) (&XEXP (dest, 2), data);
1597 }
1598
1599 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1600 dest = XEXP (dest, 0);
1601
1602 if (MEM_P (dest))
1603 (*fun) (&XEXP (dest, 0), data);
1604 }
1605 return;
1606
1607 default:
1608 /* All the other possibilities never store. */
1609 (*fun) (pbody, data);
1610 return;
1611 }
1612 }
1613 \f
1614 /* Return nonzero if X's old contents don't survive after INSN.
1615 This will be true if X is (cc0) or if X is a register and
1616 X dies in INSN or because INSN entirely sets X.
1617
1618 "Entirely set" means set directly and not through a SUBREG, or
1619 ZERO_EXTRACT, so no trace of the old contents remains.
1620 Likewise, REG_INC does not count.
1621
1622 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1623 but for this use that makes no difference, since regs don't overlap
1624 during their lifetimes. Therefore, this function may be used
1625 at any time after deaths have been computed.
1626
1627 If REG is a hard reg that occupies multiple machine registers, this
1628 function will only return 1 if each of those registers will be replaced
1629 by INSN. */
1630
1631 int
1632 dead_or_set_p (const_rtx insn, const_rtx x)
1633 {
1634 unsigned int regno, end_regno;
1635 unsigned int i;
1636
1637 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1638 if (GET_CODE (x) == CC0)
1639 return 1;
1640
1641 gcc_assert (REG_P (x));
1642
1643 regno = REGNO (x);
1644 end_regno = END_REGNO (x);
1645 for (i = regno; i < end_regno; i++)
1646 if (! dead_or_set_regno_p (insn, i))
1647 return 0;
1648
1649 return 1;
1650 }
1651
1652 /* Return TRUE iff DEST is a register, or a subreg of a register that
1653    doesn't change the number of words of the inner register, and some
1654    part of the register is TEST_REGNO.  */
1655
1656 static bool
1657 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1658 {
1659 unsigned int regno, endregno;
1660
1661 if (GET_CODE (dest) == SUBREG
1662 && (((GET_MODE_SIZE (GET_MODE (dest))
1663 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1664 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1665 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1666 dest = SUBREG_REG (dest);
1667
1668 if (!REG_P (dest))
1669 return false;
1670
1671 regno = REGNO (dest);
1672 endregno = END_REGNO (dest);
1673 return (test_regno >= regno && test_regno < endregno);
1674 }
1675
1676 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1677 any member matches the covers_regno_no_parallel_p criteria. */
1678
1679 static bool
1680 covers_regno_p (const_rtx dest, unsigned int test_regno)
1681 {
1682 if (GET_CODE (dest) == PARALLEL)
1683 {
1684 /* Some targets place small structures in registers for return
1685 values of functions, and those registers are wrapped in
1686 PARALLELs that we may see as the destination of a SET. */
1687 int i;
1688
1689 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1690 {
1691 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1692 if (inner != NULL_RTX
1693 && covers_regno_no_parallel_p (inner, test_regno))
1694 return true;
1695 }
1696
1697 return false;
1698 }
1699 else
1700 return covers_regno_no_parallel_p (dest, test_regno);
1701 }
1702
1703 /* Utility function for dead_or_set_p to check an individual register. */
1704
1705 int
1706 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
1707 {
1708 const_rtx pattern;
1709
1710 /* See if there is a death note for something that includes TEST_REGNO. */
1711 if (find_regno_note (insn, REG_DEAD, test_regno))
1712 return 1;
1713
1714 if (CALL_P (insn)
1715 && find_regno_fusage (insn, CLOBBER, test_regno))
1716 return 1;
1717
1718 pattern = PATTERN (insn);
1719
1720 /* If a COND_EXEC is not executed, the value survives. */
1721 if (GET_CODE (pattern) == COND_EXEC)
1722 return 0;
1723
1724 if (GET_CODE (pattern) == SET)
1725 return covers_regno_p (SET_DEST (pattern), test_regno);
1726 else if (GET_CODE (pattern) == PARALLEL)
1727 {
1728 int i;
1729
1730 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1731 {
1732 rtx body = XVECEXP (pattern, 0, i);
1733
1734 if (GET_CODE (body) == COND_EXEC)
1735 body = COND_EXEC_CODE (body);
1736
1737 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1738 && covers_regno_p (SET_DEST (body), test_regno))
1739 return 1;
1740 }
1741 }
1742
1743 return 0;
1744 }
1745
1746 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1747 If DATUM is nonzero, look for one whose datum is DATUM. */
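/* For example, find_reg_note (insn, REG_EQUAL, NULL_RTX) returns INSN's
   REG_EQUAL note, if any, regardless of the note's datum.  */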
1748
1749 rtx
1750 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
1751 {
1752 rtx link;
1753
1754 gcc_checking_assert (insn);
1755
1756 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1757 if (! INSN_P (insn))
1758 return 0;
1759 if (datum == 0)
1760 {
1761 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1762 if (REG_NOTE_KIND (link) == kind)
1763 return link;
1764 return 0;
1765 }
1766
1767 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1768 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1769 return link;
1770 return 0;
1771 }
1772
1773 /* Return the reg-note of kind KIND in insn INSN which applies to register
1774 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1775 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1776 it might be the case that the note overlaps REGNO. */
1777
1778 rtx
1779 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
1780 {
1781 rtx link;
1782
1783 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1784 if (! INSN_P (insn))
1785 return 0;
1786
1787 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1788 if (REG_NOTE_KIND (link) == kind
1789 /* Verify that it is a register, so that scratch and MEM won't cause a
1790 problem here. */
1791 && REG_P (XEXP (link, 0))
1792 && REGNO (XEXP (link, 0)) <= regno
1793 && END_REGNO (XEXP (link, 0)) > regno)
1794 return link;
1795 return 0;
1796 }
1797
1798 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1799 has such a note. */
1800
1801 rtx
1802 find_reg_equal_equiv_note (const_rtx insn)
1803 {
1804 rtx link;
1805
1806 if (!INSN_P (insn))
1807 return 0;
1808
1809 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1810 if (REG_NOTE_KIND (link) == REG_EQUAL
1811 || REG_NOTE_KIND (link) == REG_EQUIV)
1812 {
1813 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
1814 insns that have multiple sets. Checking single_set to
1815 make sure of this is not the proper check, as explained
1816 in the comment in set_unique_reg_note.
1817
1818 This should be changed into an assert. */
1819 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1820 return 0;
1821 return link;
1822 }
1823 return NULL;
1824 }
1825
1826 /* Check whether INSN is a single_set whose source is known to be
1827 equivalent to a constant. Return that constant if so, otherwise
1828 return null. */
1829
1830 rtx
1831 find_constant_src (const_rtx insn)
1832 {
1833 rtx note, set, x;
1834
1835 set = single_set (insn);
1836 if (set)
1837 {
1838 x = avoid_constant_pool_reference (SET_SRC (set));
1839 if (CONSTANT_P (x))
1840 return x;
1841 }
1842
1843 note = find_reg_equal_equiv_note (insn);
1844 if (note && CONSTANT_P (XEXP (note, 0)))
1845 return XEXP (note, 0);
1846
1847 return NULL_RTX;
1848 }
1849
1850 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1851 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1852
1853 int
1854 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
1855 {
1856 /* If it's not a CALL_INSN, it can't possibly have a
1857 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1858 if (!CALL_P (insn))
1859 return 0;
1860
1861 gcc_assert (datum);
1862
1863 if (!REG_P (datum))
1864 {
1865 rtx link;
1866
1867 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1868 link;
1869 link = XEXP (link, 1))
1870 if (GET_CODE (XEXP (link, 0)) == code
1871 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1872 return 1;
1873 }
1874 else
1875 {
1876 unsigned int regno = REGNO (datum);
1877
1878 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1879 to pseudo registers, so don't bother checking. */
1880
1881 if (regno < FIRST_PSEUDO_REGISTER)
1882 {
1883 unsigned int end_regno = END_HARD_REGNO (datum);
1884 unsigned int i;
1885
1886 for (i = regno; i < end_regno; i++)
1887 if (find_regno_fusage (insn, code, i))
1888 return 1;
1889 }
1890 }
1891
1892 return 0;
1893 }
1894
1895 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1896 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1897
1898 int
1899 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
1900 {
1901 rtx link;
1902
1903 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1904 to pseudo registers, so don't bother checking. */
1905
1906 if (regno >= FIRST_PSEUDO_REGISTER
1907 || !CALL_P (insn) )
1908 return 0;
1909
1910 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1911 {
1912 rtx op, reg;
1913
1914 if (GET_CODE (op = XEXP (link, 0)) == code
1915 && REG_P (reg = XEXP (op, 0))
1916 && REGNO (reg) <= regno
1917 && END_HARD_REGNO (reg) > regno)
1918 return 1;
1919 }
1920
1921 return 0;
1922 }
1923
1924 \f
1925 /* Allocate a register note with kind KIND and datum DATUM. LIST is
1926 stored as the pointer to the next register note. */
1927
1928 rtx
1929 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
1930 {
1931 rtx note;
1932
1933 switch (kind)
1934 {
1935 case REG_CC_SETTER:
1936 case REG_CC_USER:
1937 case REG_LABEL_TARGET:
1938 case REG_LABEL_OPERAND:
1939 case REG_TM:
1940 /* These types of register notes use an INSN_LIST rather than an
1941 EXPR_LIST, so that copying is done right and dumps look
1942 better. */
1943 note = alloc_INSN_LIST (datum, list);
1944 PUT_REG_NOTE_KIND (note, kind);
1945 break;
1946
1947 default:
1948 note = alloc_EXPR_LIST (kind, datum, list);
1949 break;
1950 }
1951
1952 return note;
1953 }
1954
1955 /* Add register note with kind KIND and datum DATUM to INSN. */
1956
1957 void
1958 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
1959 {
1960 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
1961 }
1962
1963 /* Remove register note NOTE from the REG_NOTES of INSN. */
1964
1965 void
1966 remove_note (rtx insn, const_rtx note)
1967 {
1968 rtx link;
1969
1970 if (note == NULL_RTX)
1971 return;
1972
1973 if (REG_NOTES (insn) == note)
1974 REG_NOTES (insn) = XEXP (note, 1);
1975 else
1976 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1977 if (XEXP (link, 1) == note)
1978 {
1979 XEXP (link, 1) = XEXP (note, 1);
1980 break;
1981 }
1982
1983 switch (REG_NOTE_KIND (note))
1984 {
1985 case REG_EQUAL:
1986 case REG_EQUIV:
1987 df_notes_rescan (insn);
1988 break;
1989 default:
1990 break;
1991 }
1992 }
1993
1994 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
1995
1996 void
1997 remove_reg_equal_equiv_notes (rtx insn)
1998 {
1999 rtx *loc;
2000
2001 loc = &REG_NOTES (insn);
2002 while (*loc)
2003 {
2004 enum reg_note kind = REG_NOTE_KIND (*loc);
2005 if (kind == REG_EQUAL || kind == REG_EQUIV)
2006 *loc = XEXP (*loc, 1);
2007 else
2008 loc = &XEXP (*loc, 1);
2009 }
2010 }
2011
2012 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2013
2014 void
2015 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2016 {
2017 df_ref eq_use;
2018
2019 if (!df)
2020 return;
2021
2022 /* This loop is a little tricky. We cannot just go down the chain because
2023 it is being modified by some actions in the loop. So we just iterate
2024 over the head. We plan to drain the list anyway. */
2025 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2026 {
2027 rtx insn = DF_REF_INSN (eq_use);
2028 rtx note = find_reg_equal_equiv_note (insn);
2029
2030 /* This assert is generally triggered when someone deletes a REG_EQUAL
2031 or REG_EQUIV note by hacking the list manually rather than calling
2032 remove_note. */
2033 gcc_assert (note);
2034
2035 remove_note (insn, note);
2036 }
2037 }
2038
2039 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2040 return 1 if it is found. A simple equality test is used to determine if
2041 NODE matches. */
2042
2043 int
2044 in_expr_list_p (const_rtx listp, const_rtx node)
2045 {
2046 const_rtx x;
2047
2048 for (x = listp; x; x = XEXP (x, 1))
2049 if (node == XEXP (x, 0))
2050 return 1;
2051
2052 return 0;
2053 }
2054
2055 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2056 remove that entry from the list if it is found.
2057
2058 A simple equality test is used to determine if NODE matches. */
2059
2060 void
2061 remove_node_from_expr_list (const_rtx node, rtx *listp)
2062 {
2063 rtx temp = *listp;
2064 rtx prev = NULL_RTX;
2065
2066 while (temp)
2067 {
2068 if (node == XEXP (temp, 0))
2069 {
2070 /* Splice the node out of the list. */
2071 if (prev)
2072 XEXP (prev, 1) = XEXP (temp, 1);
2073 else
2074 *listp = XEXP (temp, 1);
2075
2076 return;
2077 }
2078
2079 prev = temp;
2080 temp = XEXP (temp, 1);
2081 }
2082 }
2083 \f
2084 /* Nonzero if X contains any volatile instructions. These are instructions
2085 which may cause unpredictable machine state, and thus no
2086 instructions should be moved or combined across them. This includes
2087 only volatile asms and UNSPEC_VOLATILE instructions. */
2088
2089 int
2090 volatile_insn_p (const_rtx x)
2091 {
2092 const RTX_CODE code = GET_CODE (x);
2093 switch (code)
2094 {
2095 case LABEL_REF:
2096 case SYMBOL_REF:
2097 case CONST:
2098 CASE_CONST_ANY:
2099 case CC0:
2100 case PC:
2101 case REG:
2102 case SCRATCH:
2103 case CLOBBER:
2104 case ADDR_VEC:
2105 case ADDR_DIFF_VEC:
2106 case CALL:
2107 case MEM:
2108 return 0;
2109
2110 case UNSPEC_VOLATILE:
2111 /* case TRAP_IF: This isn't clear yet. */
2112 return 1;
2113
2114 case ASM_INPUT:
2115 case ASM_OPERANDS:
2116 if (MEM_VOLATILE_P (x))
2117 return 1;
2118
2119 default:
2120 break;
2121 }
2122
2123 /* Recursively scan the operands of this expression. */
2124
2125 {
2126 const char *const fmt = GET_RTX_FORMAT (code);
2127 int i;
2128
2129 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2130 {
2131 if (fmt[i] == 'e')
2132 {
2133 if (volatile_insn_p (XEXP (x, i)))
2134 return 1;
2135 }
2136 else if (fmt[i] == 'E')
2137 {
2138 int j;
2139 for (j = 0; j < XVECLEN (x, i); j++)
2140 if (volatile_insn_p (XVECEXP (x, i, j)))
2141 return 1;
2142 }
2143 }
2144 }
2145 return 0;
2146 }
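/* Editor's illustrative sketch (not GCC code; the types and names below
   are hypothetical): volatile_insn_p above, and volatile_refs_p and
   side_effects_p below, all share the same skeleton -- handle the
   interesting codes explicitly, then recurse over the operands as
   described by the code's format string, where 'e' marks a single
   subexpression and 'E' marks a vector of subexpressions.  A toy
   standalone version of that skeleton looks like this.  */

struct sketch_expr
{
  int code;                        /* Index into sketch_format.  */
  int volatil;                     /* The "interesting" property.  */
  struct sketch_expr *op[2];       /* 'e' operands.  */
  struct sketch_expr **vec;        /* 'E' operand, NULL-terminated.  */
};

/* Per-code operand formats: code 0 has two subexpressions, code 1 has a
   vector of subexpressions, code 2 is a leaf.  */
static const char *const sketch_format[] = { "ee", "E", "" };

static int
sketch_volatile_p (const struct sketch_expr *x)
{
  const char *fmt = sketch_format[x->code];
  int i;

  if (x->volatil)
    return 1;

  for (i = 0; fmt[i] != '\0'; i++)
    {
      if (fmt[i] == 'e')
        {
          if (x->op[i] && sketch_volatile_p (x->op[i]))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          struct sketch_expr **p;
          for (p = x->vec; p && *p; p++)
            if (sketch_volatile_p (*p))
              return 1;
        }
    }
  return 0;
}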
2147
2148 /* Nonzero if X contains any volatile memory references,
2149 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */
2150
2151 int
2152 volatile_refs_p (const_rtx x)
2153 {
2154 const RTX_CODE code = GET_CODE (x);
2155 switch (code)
2156 {
2157 case LABEL_REF:
2158 case SYMBOL_REF:
2159 case CONST:
2160 CASE_CONST_ANY:
2161 case CC0:
2162 case PC:
2163 case REG:
2164 case SCRATCH:
2165 case CLOBBER:
2166 case ADDR_VEC:
2167 case ADDR_DIFF_VEC:
2168 return 0;
2169
2170 case UNSPEC_VOLATILE:
2171 return 1;
2172
2173 case MEM:
2174 case ASM_INPUT:
2175 case ASM_OPERANDS:
2176 if (MEM_VOLATILE_P (x))
2177 return 1;
2178
2179 default:
2180 break;
2181 }
2182
2183 /* Recursively scan the operands of this expression. */
2184
2185 {
2186 const char *const fmt = GET_RTX_FORMAT (code);
2187 int i;
2188
2189 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2190 {
2191 if (fmt[i] == 'e')
2192 {
2193 if (volatile_refs_p (XEXP (x, i)))
2194 return 1;
2195 }
2196 else if (fmt[i] == 'E')
2197 {
2198 int j;
2199 for (j = 0; j < XVECLEN (x, i); j++)
2200 if (volatile_refs_p (XVECEXP (x, i, j)))
2201 return 1;
2202 }
2203 }
2204 }
2205 return 0;
2206 }
2207
2208 /* Similar to above, except that it also rejects register pre- and post-
2209 incrementing. */
2210
2211 int
2212 side_effects_p (const_rtx x)
2213 {
2214 const RTX_CODE code = GET_CODE (x);
2215 switch (code)
2216 {
2217 case LABEL_REF:
2218 case SYMBOL_REF:
2219 case CONST:
2220 CASE_CONST_ANY:
2221 case CC0:
2222 case PC:
2223 case REG:
2224 case SCRATCH:
2225 case ADDR_VEC:
2226 case ADDR_DIFF_VEC:
2227 case VAR_LOCATION:
2228 return 0;
2229
2230 case CLOBBER:
2231 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2232 when some combination can't be done. If we see one, don't think
2233 that we can simplify the expression. */
2234 return (GET_MODE (x) != VOIDmode);
2235
2236 case PRE_INC:
2237 case PRE_DEC:
2238 case POST_INC:
2239 case POST_DEC:
2240 case PRE_MODIFY:
2241 case POST_MODIFY:
2242 case CALL:
2243 case UNSPEC_VOLATILE:
2244 /* case TRAP_IF: This isn't clear yet. */
2245 return 1;
2246
2247 case MEM:
2248 case ASM_INPUT:
2249 case ASM_OPERANDS:
2250 if (MEM_VOLATILE_P (x))
2251 return 1;
2252
2253 default:
2254 break;
2255 }
2256
2257 /* Recursively scan the operands of this expression. */
2258
2259 {
2260 const char *fmt = GET_RTX_FORMAT (code);
2261 int i;
2262
2263 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2264 {
2265 if (fmt[i] == 'e')
2266 {
2267 if (side_effects_p (XEXP (x, i)))
2268 return 1;
2269 }
2270 else if (fmt[i] == 'E')
2271 {
2272 int j;
2273 for (j = 0; j < XVECLEN (x, i); j++)
2274 if (side_effects_p (XVECEXP (x, i, j)))
2275 return 1;
2276 }
2277 }
2278 }
2279 return 0;
2280 }
2281 \f
2282 /* Return nonzero if evaluating rtx X might cause a trap.
2283 FLAGS controls how to consider MEMs. A nonzero value means the context
2284 of the access may have changed from the original, such that the
2285 address may have become invalid. */
2286
2287 int
2288 may_trap_p_1 (const_rtx x, unsigned flags)
2289 {
2290 int i;
2291 enum rtx_code code;
2292 const char *fmt;
2293
2294 /* We make no distinction currently, but this function is part of
2295 the internal target-hooks ABI so we keep the parameter as
2296 "unsigned flags". */
2297 bool code_changed = flags != 0;
2298
2299 if (x == 0)
2300 return 0;
2301 code = GET_CODE (x);
2302 switch (code)
2303 {
2304 /* Handle these cases quickly. */
2305 CASE_CONST_ANY:
2306 case SYMBOL_REF:
2307 case LABEL_REF:
2308 case CONST:
2309 case PC:
2310 case CC0:
2311 case REG:
2312 case SCRATCH:
2313 return 0;
2314
2315 case UNSPEC:
2316 case UNSPEC_VOLATILE:
2317 return targetm.unspec_may_trap_p (x, flags);
2318
2319 case ASM_INPUT:
2320 case TRAP_IF:
2321 return 1;
2322
2323 case ASM_OPERANDS:
2324 return MEM_VOLATILE_P (x);
2325
2326 /* Memory ref can trap unless it's a static var or a stack slot. */
2327 case MEM:
2328 /* Recognize specific pattern of stack checking probes. */
2329 if (flag_stack_check
2330 && MEM_VOLATILE_P (x)
2331 && XEXP (x, 0) == stack_pointer_rtx)
2332 return 1;
2333 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2334 reference; moving it out of context such as when moving code
2335 when optimizing, might cause its address to become invalid. */
2336 code_changed
2337 || !MEM_NOTRAP_P (x))
2338 {
2339 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2340 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2341 GET_MODE (x), code_changed);
2342 }
2343
2344 return 0;
2345
2346 /* Division by a non-constant might trap. */
2347 case DIV:
2348 case MOD:
2349 case UDIV:
2350 case UMOD:
2351 if (HONOR_SNANS (GET_MODE (x)))
2352 return 1;
2353 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2354 return flag_trapping_math;
2355 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2356 return 1;
2357 break;
2358
2359 case EXPR_LIST:
2360 /* An EXPR_LIST is used to represent a function call. This
2361 certainly may trap. */
2362 return 1;
2363
2364 case GE:
2365 case GT:
2366 case LE:
2367 case LT:
2368 case LTGT:
2369 case COMPARE:
2370 /* Some floating point comparisons may trap. */
2371 if (!flag_trapping_math)
2372 break;
2373 /* ??? There is no machine independent way to check for tests that trap
2374 when COMPARE is used, though many targets do make this distinction.
2375 For instance, sparc uses CCFPE for compares which generate exceptions
2376 and CCFP for compares which do not generate exceptions. */
2377 if (HONOR_NANS (GET_MODE (x)))
2378 return 1;
2379 /* But often the compare has some CC mode, so check operand
2380 modes as well. */
2381 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2382 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2383 return 1;
2384 break;
2385
2386 case EQ:
2387 case NE:
2388 if (HONOR_SNANS (GET_MODE (x)))
2389 return 1;
2390 /* Often comparison is CC mode, so check operand modes. */
2391 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2392 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2393 return 1;
2394 break;
2395
2396 case FIX:
2397 /* Conversion of floating point might trap. */
2398 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2399 return 1;
2400 break;
2401
2402 case NEG:
2403 case ABS:
2404 case SUBREG:
2405 /* These operations don't trap even with floating point. */
2406 break;
2407
2408 default:
2409 /* Any floating arithmetic may trap. */
2410 if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
2411 && flag_trapping_math)
2412 return 1;
2413 }
2414
2415 fmt = GET_RTX_FORMAT (code);
2416 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2417 {
2418 if (fmt[i] == 'e')
2419 {
2420 if (may_trap_p_1 (XEXP (x, i), flags))
2421 return 1;
2422 }
2423 else if (fmt[i] == 'E')
2424 {
2425 int j;
2426 for (j = 0; j < XVECLEN (x, i); j++)
2427 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2428 return 1;
2429 }
2430 }
2431 return 0;
2432 }
2433
2434 /* Return nonzero if evaluating rtx X might cause a trap. */
2435
2436 int
2437 may_trap_p (const_rtx x)
2438 {
2439 return may_trap_p_1 (x, 0);
2440 }
2441
2442 /* Same as above, but additionally return nonzero if evaluating rtx X might
2443 cause a fault. We define a fault for the purpose of this function as an
2444 erroneous execution condition that cannot be encountered during the normal
2445 execution of a valid program; the typical example is an unaligned memory
2446 access on a strict alignment machine. The compiler guarantees that it
2447 doesn't generate code that will fault from a valid program, but this
2448 guarantee doesn't mean anything for individual instructions. Consider
2449 the following example:
2450
2451 struct S { int d; union { char *cp; int *ip; }; };
2452
2453 int foo(struct S *s)
2454 {
2455 if (s->d == 1)
2456 return *s->ip;
2457 else
2458 return *s->cp;
2459 }
2460
2461 on a strict alignment machine. In a valid program, foo will never be
2462 invoked on a structure for which d is equal to 1 and the underlying
2463 unique field of the union not aligned on a 4-byte boundary, but the
2464 expression *s->ip might cause a fault if considered individually.
2465
2466 At the RTL level, potentially problematic expressions will almost always
2467 verify may_trap_p; for example, the above dereference can be emitted as
2468 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2469 However, suppose that foo is inlined in a caller that causes s->cp to
2470 point to a local character variable and guarantees that s->d is not set
2471 to 1; foo may have been effectively translated into pseudo-RTL as:
2472
2473 if ((reg:SI) == 1)
2474 (set (reg:SI) (mem:SI (%fp - 7)))
2475 else
2476 (set (reg:QI) (mem:QI (%fp - 7)))
2477
2478 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2479 memory reference to a stack slot, but it will certainly cause a fault
2480 on a strict alignment machine. */
2481
2482 int
2483 may_trap_or_fault_p (const_rtx x)
2484 {
2485 return may_trap_p_1 (x, 1);
2486 }
2487 \f
2488 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2489 i.e., an inequality. */
2490
2491 int
2492 inequality_comparisons_p (const_rtx x)
2493 {
2494 const char *fmt;
2495 int len, i;
2496 const enum rtx_code code = GET_CODE (x);
2497
2498 switch (code)
2499 {
2500 case REG:
2501 case SCRATCH:
2502 case PC:
2503 case CC0:
2504 CASE_CONST_ANY:
2505 case CONST:
2506 case LABEL_REF:
2507 case SYMBOL_REF:
2508 return 0;
2509
2510 case LT:
2511 case LTU:
2512 case GT:
2513 case GTU:
2514 case LE:
2515 case LEU:
2516 case GE:
2517 case GEU:
2518 return 1;
2519
2520 default:
2521 break;
2522 }
2523
2524 len = GET_RTX_LENGTH (code);
2525 fmt = GET_RTX_FORMAT (code);
2526
2527 for (i = 0; i < len; i++)
2528 {
2529 if (fmt[i] == 'e')
2530 {
2531 if (inequality_comparisons_p (XEXP (x, i)))
2532 return 1;
2533 }
2534 else if (fmt[i] == 'E')
2535 {
2536 int j;
2537 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2538 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2539 return 1;
2540 }
2541 }
2542
2543 return 0;
2544 }
2545 \f
2546 /* Replace any occurrence of FROM in X with TO. The function does
2547 not recurse into CONST_DOUBLE when doing the replacement.
2548
2549 Note that copying is not done so X must not be shared unless all copies
2550 are to be modified. */
2551
2552 rtx
2553 replace_rtx (rtx x, rtx from, rtx to)
2554 {
2555 int i, j;
2556 const char *fmt;
2557
2558 if (x == from)
2559 return to;
2560
2561 /* Allow this function to make replacements in EXPR_LISTs. */
2562 if (x == 0)
2563 return 0;
2564
2565 if (GET_CODE (x) == SUBREG)
2566 {
2567 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2568
2569 if (CONST_INT_P (new_rtx))
2570 {
2571 x = simplify_subreg (GET_MODE (x), new_rtx,
2572 GET_MODE (SUBREG_REG (x)),
2573 SUBREG_BYTE (x));
2574 gcc_assert (x);
2575 }
2576 else
2577 SUBREG_REG (x) = new_rtx;
2578
2579 return x;
2580 }
2581 else if (GET_CODE (x) == ZERO_EXTEND)
2582 {
2583 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2584
2585 if (CONST_INT_P (new_rtx))
2586 {
2587 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2588 new_rtx, GET_MODE (XEXP (x, 0)));
2589 gcc_assert (x);
2590 }
2591 else
2592 XEXP (x, 0) = new_rtx;
2593
2594 return x;
2595 }
2596
2597 fmt = GET_RTX_FORMAT (GET_CODE (x));
2598 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2599 {
2600 if (fmt[i] == 'e')
2601 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2602 else if (fmt[i] == 'E')
2603 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2604 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2605 }
2606
2607 return x;
2608 }
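/* Editor's usage note (illustrative; the variable names are
   hypothetical): because replace_rtx rewrites X in place, a caller that
   still holds other references to the same RTL should unshare it
   first, e.g.

     new_pat = replace_rtx (copy_rtx (old_pat), old_reg, new_reg);

   Without the copy_rtx, the replacement would also appear in every
   shared use of OLD_PAT.  */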
2609 \f
2610 /* Replace occurrences of the old label in *X with the new one.
2611 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2612
2613 int
2614 replace_label (rtx *x, void *data)
2615 {
2616 rtx l = *x;
2617 rtx old_label = ((replace_label_data *) data)->r1;
2618 rtx new_label = ((replace_label_data *) data)->r2;
2619 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2620
2621 if (l == NULL_RTX)
2622 return 0;
2623
2624 if (GET_CODE (l) == SYMBOL_REF
2625 && CONSTANT_POOL_ADDRESS_P (l))
2626 {
2627 rtx c = get_pool_constant (l);
2628 if (rtx_referenced_p (old_label, c))
2629 {
2630 rtx new_c, new_l;
2631 replace_label_data *d = (replace_label_data *) data;
2632
2633 /* Create a copy of constant C; replace the label inside
2634 but do not update LABEL_NUSES because uses in constant pool
2635 are not counted. */
2636 new_c = copy_rtx (c);
2637 d->update_label_nuses = false;
2638 for_each_rtx (&new_c, replace_label, data);
2639 d->update_label_nuses = update_label_nuses;
2640
2641 /* Add the new constant NEW_C to constant pool and replace
2642 the old reference to constant by new reference. */
2643 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2644 *x = replace_rtx (l, l, new_l);
2645 }
2646 return 0;
2647 }
2648
2649 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2650 field. This is not handled by for_each_rtx because it doesn't
2651 handle unprinted ('0') fields. */
2652 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2653 JUMP_LABEL (l) = new_label;
2654
2655 if ((GET_CODE (l) == LABEL_REF
2656 || GET_CODE (l) == INSN_LIST)
2657 && XEXP (l, 0) == old_label)
2658 {
2659 XEXP (l, 0) = new_label;
2660 if (update_label_nuses)
2661 {
2662 ++LABEL_NUSES (new_label);
2663 --LABEL_NUSES (old_label);
2664 }
2665 return 0;
2666 }
2667
2668 return 0;
2669 }
2670
2671 /* When *BODY is equal to X or X is directly referenced by *BODY,
2672 return nonzero; FOR_EACH_RTX then stops traversing and returns nonzero
2673 too. Otherwise FOR_EACH_RTX continues traversing *BODY. */
2674
2675 static int
2676 rtx_referenced_p_1 (rtx *body, void *x)
2677 {
2678 rtx y = (rtx) x;
2679
2680 if (*body == NULL_RTX)
2681 return y == NULL_RTX;
2682
2683 /* Return true if a label_ref *BODY refers to label Y. */
2684 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2685 return XEXP (*body, 0) == y;
2686
2687 /* If *BODY is a reference to pool constant traverse the constant. */
2688 if (GET_CODE (*body) == SYMBOL_REF
2689 && CONSTANT_POOL_ADDRESS_P (*body))
2690 return rtx_referenced_p (y, get_pool_constant (*body));
2691
2692 /* By default, compare the RTL expressions. */
2693 return rtx_equal_p (*body, y);
2694 }
2695
2696 /* Return true if X is referenced in BODY. */
2697
2698 int
2699 rtx_referenced_p (rtx x, rtx body)
2700 {
2701 return for_each_rtx (&body, rtx_referenced_p_1, x);
2702 }
2703
2704 /* If INSN is a tablejump return true and store the label (before jump table) to
2705 *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */
2706
2707 bool
2708 tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
2709 {
2710 rtx label, table;
2711
2712 if (!JUMP_P (insn))
2713 return false;
2714
2715 label = JUMP_LABEL (insn);
2716 if (label != NULL_RTX && !ANY_RETURN_P (label)
2717 && (table = next_active_insn (label)) != NULL_RTX
2718 && JUMP_TABLE_DATA_P (table))
2719 {
2720 if (labelp)
2721 *labelp = label;
2722 if (tablep)
2723 *tablep = table;
2724 return true;
2725 }
2726 return false;
2727 }
2728
2729 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2730 constant that is not in the constant pool and not in the condition
2731 of an IF_THEN_ELSE. */
2732
2733 static int
2734 computed_jump_p_1 (const_rtx x)
2735 {
2736 const enum rtx_code code = GET_CODE (x);
2737 int i, j;
2738 const char *fmt;
2739
2740 switch (code)
2741 {
2742 case LABEL_REF:
2743 case PC:
2744 return 0;
2745
2746 case CONST:
2747 CASE_CONST_ANY:
2748 case SYMBOL_REF:
2749 case REG:
2750 return 1;
2751
2752 case MEM:
2753 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2754 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2755
2756 case IF_THEN_ELSE:
2757 return (computed_jump_p_1 (XEXP (x, 1))
2758 || computed_jump_p_1 (XEXP (x, 2)));
2759
2760 default:
2761 break;
2762 }
2763
2764 fmt = GET_RTX_FORMAT (code);
2765 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2766 {
2767 if (fmt[i] == 'e'
2768 && computed_jump_p_1 (XEXP (x, i)))
2769 return 1;
2770
2771 else if (fmt[i] == 'E')
2772 for (j = 0; j < XVECLEN (x, i); j++)
2773 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2774 return 1;
2775 }
2776
2777 return 0;
2778 }
2779
2780 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2781
2782 Tablejumps and casesi insns are not considered indirect jumps;
2783 we can recognize them by a (use (label_ref)). */
2784
2785 int
2786 computed_jump_p (const_rtx insn)
2787 {
2788 int i;
2789 if (JUMP_P (insn))
2790 {
2791 rtx pat = PATTERN (insn);
2792
2793 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2794 if (JUMP_LABEL (insn) != NULL)
2795 return 0;
2796
2797 if (GET_CODE (pat) == PARALLEL)
2798 {
2799 int len = XVECLEN (pat, 0);
2800 int has_use_labelref = 0;
2801
2802 for (i = len - 1; i >= 0; i--)
2803 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2804 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2805 == LABEL_REF))
2806 has_use_labelref = 1;
2807
2808 if (! has_use_labelref)
2809 for (i = len - 1; i >= 0; i--)
2810 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2811 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2812 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2813 return 1;
2814 }
2815 else if (GET_CODE (pat) == SET
2816 && SET_DEST (pat) == pc_rtx
2817 && computed_jump_p_1 (SET_SRC (pat)))
2818 return 1;
2819 }
2820 return 0;
2821 }
2822
2823 /* Optimized worker loop for for_each_rtx, trying to avoid useless recursive
2824 calls. Processes the subexpressions of EXP and passes them to F. */
2825 static int
2826 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2827 {
2828 int result, i, j;
2829 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2830 rtx *x;
2831
2832 for (; format[n] != '\0'; n++)
2833 {
2834 switch (format[n])
2835 {
2836 case 'e':
2837 /* Call F on X. */
2838 x = &XEXP (exp, n);
2839 result = (*f) (x, data);
2840 if (result == -1)
2841 /* Do not traverse sub-expressions. */
2842 continue;
2843 else if (result != 0)
2844 /* Stop the traversal. */
2845 return result;
2846
2847 if (*x == NULL_RTX)
2848 /* There are no sub-expressions. */
2849 continue;
2850
2851 i = non_rtx_starting_operands[GET_CODE (*x)];
2852 if (i >= 0)
2853 {
2854 result = for_each_rtx_1 (*x, i, f, data);
2855 if (result != 0)
2856 return result;
2857 }
2858 break;
2859
2860 case 'V':
2861 case 'E':
2862 if (XVEC (exp, n) == 0)
2863 continue;
2864 for (j = 0; j < XVECLEN (exp, n); ++j)
2865 {
2866 /* Call F on X. */
2867 x = &XVECEXP (exp, n, j);
2868 result = (*f) (x, data);
2869 if (result == -1)
2870 /* Do not traverse sub-expressions. */
2871 continue;
2872 else if (result != 0)
2873 /* Stop the traversal. */
2874 return result;
2875
2876 if (*x == NULL_RTX)
2877 /* There are no sub-expressions. */
2878 continue;
2879
2880 i = non_rtx_starting_operands[GET_CODE (*x)];
2881 if (i >= 0)
2882 {
2883 result = for_each_rtx_1 (*x, i, f, data);
2884 if (result != 0)
2885 return result;
2886 }
2887 }
2888 break;
2889
2890 default:
2891 /* Nothing to do. */
2892 break;
2893 }
2894 }
2895
2896 return 0;
2897 }
2898
2899 /* Traverse X via depth-first search, calling F for each
2900 sub-expression (including X itself). F is also passed the DATA.
2901 If F returns -1, do not traverse sub-expressions, but continue
2902 traversing the rest of the tree. If F ever returns any other
2903 nonzero value, stop the traversal, and return the value returned
2904 by F. Otherwise, return 0. This function does not traverse inside
2905 tree structure that contains RTX_EXPRs, or into sub-expressions
2906 whose format code is `0' since it is not known whether or not those
2907 codes are actually RTL.
2908
2909 This routine is very general, and could (should?) be used to
2910 implement many of the other routines in this file. */
2911
2912 int
2913 for_each_rtx (rtx *x, rtx_function f, void *data)
2914 {
2915 int result;
2916 int i;
2917
2918 /* Call F on X. */
2919 result = (*f) (x, data);
2920 if (result == -1)
2921 /* Do not traverse sub-expressions. */
2922 return 0;
2923 else if (result != 0)
2924 /* Stop the traversal. */
2925 return result;
2926
2927 if (*x == NULL_RTX)
2928 /* There are no sub-expressions. */
2929 return 0;
2930
2931 i = non_rtx_starting_operands[GET_CODE (*x)];
2932 if (i < 0)
2933 return 0;
2934
2935 return for_each_rtx_1 (*x, i, f, data);
2936 }
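/* Editor's sketch (standalone; hypothetical types and names): the
   traversal contract documented above -- the callback returns -1 to
   skip a node's sub-expressions, 0 to keep going, and any other value
   to abort the walk and have that value propagated to the caller -- in
   its generic form over a toy binary tree.  */

struct sketch_tree
{
  int value;
  struct sketch_tree *left, *right;
};

static int
sketch_walk (struct sketch_tree *t,
             int (*f) (struct sketch_tree *, void *), void *data)
{
  int result;

  if (t == NULL)
    return 0;

  result = f (t, data);
  if (result == -1)
    return 0;               /* Skip this node's children, keep walking.  */
  if (result != 0)
    return result;          /* Abort the whole walk.  */

  result = sketch_walk (t->left, f, data);
  if (result != 0)
    return result;
  return sketch_walk (t->right, f, data);
}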
2937
2938 \f
2939
2940 /* Data structure that holds the internal state communicated between
2941 for_each_inc_dec, for_each_inc_dec_find_mem and
2942 for_each_inc_dec_find_inc_dec. */
2943
2944 struct for_each_inc_dec_ops {
2945 /* The function to be called for each autoinc operation found. */
2946 for_each_inc_dec_fn fn;
2947 /* The opaque argument to be passed to it. */
2948 void *arg;
2949 /* The MEM we're visiting, if any. */
2950 rtx mem;
2951 };
2952
2953 static int for_each_inc_dec_find_mem (rtx *r, void *d);
2954
2955 /* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
2956 operands of the equivalent add insn and pass the result to the
2957 operator specified by *D. */
2958
2959 static int
2960 for_each_inc_dec_find_inc_dec (rtx *r, void *d)
2961 {
2962 rtx x = *r;
2963 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;
2964
2965 switch (GET_CODE (x))
2966 {
2967 case PRE_INC:
2968 case POST_INC:
2969 {
2970 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2971 rtx r1 = XEXP (x, 0);
2972 rtx c = gen_int_mode (size, GET_MODE (r1));
2973 return data->fn (data->mem, x, r1, r1, c, data->arg);
2974 }
2975
2976 case PRE_DEC:
2977 case POST_DEC:
2978 {
2979 int size = GET_MODE_SIZE (GET_MODE (data->mem));
2980 rtx r1 = XEXP (x, 0);
2981 rtx c = gen_int_mode (-size, GET_MODE (r1));
2982 return data->fn (data->mem, x, r1, r1, c, data->arg);
2983 }
2984
2985 case PRE_MODIFY:
2986 case POST_MODIFY:
2987 {
2988 rtx r1 = XEXP (x, 0);
2989 rtx add = XEXP (x, 1);
2990 return data->fn (data->mem, x, r1, add, NULL, data->arg);
2991 }
2992
2993 case MEM:
2994 {
2995 rtx save = data->mem;
2996 int ret = for_each_inc_dec_find_mem (r, d);
2997 data->mem = save;
2998 return ret;
2999 }
3000
3001 default:
3002 return 0;
3003 }
3004 }
3005
3006 /* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
3007 address, extract the operands of the equivalent add insn and pass
3008 the result to the operator specified by *D. */
3009
3010 static int
3011 for_each_inc_dec_find_mem (rtx *r, void *d)
3012 {
3013 rtx x = *r;
3014 if (x != NULL_RTX && MEM_P (x))
3015 {
3016 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
3017 int result;
3018
3019 data->mem = x;
3020
3021 result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
3022 data);
3023 if (result)
3024 return result;
3025
3026 return -1;
3027 }
3028 return 0;
3029 }
3030
3031 /* Traverse *X looking for MEMs, and for autoinc operations within
3032 them. For each such autoinc operation found, call FN, passing it
3033 the innermost enclosing MEM, the operation itself, the RTX modified
3034 by the operation, two RTXs (the second may be NULL) that, once
3035 added, represent the value to be held by the modified RTX
3036 afterwards, and ARG. FN is to return -1 to skip looking for other
3037 autoinc operations within the visited operation, 0 to continue the
3038 traversal, or any other value to have it returned to the caller of
3039 for_each_inc_dec. */
3040
3041 int
3042 for_each_inc_dec (rtx *x,
3043 for_each_inc_dec_fn fn,
3044 void *arg)
3045 {
3046 struct for_each_inc_dec_ops data;
3047
3048 data.fn = fn;
3049 data.arg = arg;
3050 data.mem = NULL;
3051
3052 return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
3053 }
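/* Editor's sketch of a client callback for for_each_inc_dec, based on
   the calls made above (a hypothetical helper, not an existing GCC
   function): it simply counts the autoinc operations found.  */

static int
sketch_count_inc_dec (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                      rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                      rtx srcoff ATTRIBUTE_UNUSED, void *arg)
{
  ++*(int *) arg;
  return 0;                 /* 0: keep looking for more autoincs.  */
}

/* It would be used roughly as

     int n = 0;
     for_each_inc_dec (&pat, sketch_count_inc_dec, &n);

   where PAT is an rtx variable holding the pattern of interest.  */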
3054
3055 \f
3056 /* Searches X for any reference to REGNO, returning the rtx of the
3057 reference found if any. Otherwise, returns NULL_RTX. */
3058
3059 rtx
3060 regno_use_in (unsigned int regno, rtx x)
3061 {
3062 const char *fmt;
3063 int i, j;
3064 rtx tem;
3065
3066 if (REG_P (x) && REGNO (x) == regno)
3067 return x;
3068
3069 fmt = GET_RTX_FORMAT (GET_CODE (x));
3070 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3071 {
3072 if (fmt[i] == 'e')
3073 {
3074 if ((tem = regno_use_in (regno, XEXP (x, i))))
3075 return tem;
3076 }
3077 else if (fmt[i] == 'E')
3078 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3079 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3080 return tem;
3081 }
3082
3083 return NULL_RTX;
3084 }
3085
3086 /* Return a value indicating whether OP, an operand of a commutative
3087 operation, is preferred as the first or second operand. The higher
3088 the value, the stronger the preference for being the first operand.
3089 We use negative values to indicate a preference for the first operand
3090 and positive values for the second operand. */
3091
3092 int
3093 commutative_operand_precedence (rtx op)
3094 {
3095 enum rtx_code code = GET_CODE (op);
3096
3097 /* Constants always become the second operand. Prefer "nice" constants. */
3098 if (code == CONST_INT)
3099 return -8;
3100 if (code == CONST_DOUBLE)
3101 return -7;
3102 if (code == CONST_FIXED)
3103 return -7;
3104 op = avoid_constant_pool_reference (op);
3105 code = GET_CODE (op);
3106
3107 switch (GET_RTX_CLASS (code))
3108 {
3109 case RTX_CONST_OBJ:
3110 if (code == CONST_INT)
3111 return -6;
3112 if (code == CONST_DOUBLE)
3113 return -5;
3114 if (code == CONST_FIXED)
3115 return -5;
3116 return -4;
3117
3118 case RTX_EXTRA:
3119 /* SUBREGs of objects should come second. */
3120 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3121 return -3;
3122 return 0;
3123
3124 case RTX_OBJ:
3125 /* Complex expressions should come first, so decrease the priority
3126 of objects. Prefer pointer objects over non-pointer objects. */
3127 if ((REG_P (op) && REG_POINTER (op))
3128 || (MEM_P (op) && MEM_POINTER (op)))
3129 return -1;
3130 return -2;
3131
3132 case RTX_COMM_ARITH:
3133 /* Prefer operands that are themselves commutative to be first.
3134 This helps to make things linear. In particular,
3135 (and (and (reg) (reg)) (not (reg))) is canonical. */
3136 return 4;
3137
3138 case RTX_BIN_ARITH:
3139 /* If only one operand is a binary expression, it will be the first
3140 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3141 is canonical, although it will usually be further simplified. */
3142 return 2;
3143
3144 case RTX_UNARY:
3145 /* Then prefer NEG and NOT. */
3146 if (code == NEG || code == NOT)
3147 return 1;
3148
3149 default:
3150 return 0;
3151 }
3152 }
3153
3154 /* Return 1 iff it is necessary to swap operands of commutative operation
3155 in order to canonicalize expression. */
3156
3157 bool
3158 swap_commutative_operands_p (rtx x, rtx y)
3159 {
3160 return (commutative_operand_precedence (x)
3161 < commutative_operand_precedence (y));
3162 }
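/* Editor's sketch (hypothetical helper): canonicalizing a commutative
   operation simply means swapping the operands whenever
   swap_commutative_operands_p says the first one has the lower
   precedence, which is how callers typically use it.  */

static void
sketch_canonicalize_commutative (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    {
      rtx tem = *op0;
      *op0 = *op1;
      *op1 = tem;
    }
}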
3163
3164 /* Return 1 if X is an autoincrement side effect and the register is
3165 not the stack pointer. */
3166 int
3167 auto_inc_p (const_rtx x)
3168 {
3169 switch (GET_CODE (x))
3170 {
3171 case PRE_INC:
3172 case POST_INC:
3173 case PRE_DEC:
3174 case POST_DEC:
3175 case PRE_MODIFY:
3176 case POST_MODIFY:
3177 /* There are no REG_INC notes for SP. */
3178 if (XEXP (x, 0) != stack_pointer_rtx)
3179 return 1;
3180 default:
3181 break;
3182 }
3183 return 0;
3184 }
3185
3186 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3187 int
3188 loc_mentioned_in_p (rtx *loc, const_rtx in)
3189 {
3190 enum rtx_code code;
3191 const char *fmt;
3192 int i, j;
3193
3194 if (!in)
3195 return 0;
3196
3197 code = GET_CODE (in);
3198 fmt = GET_RTX_FORMAT (code);
3199 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3200 {
3201 if (fmt[i] == 'e')
3202 {
3203 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3204 return 1;
3205 }
3206 else if (fmt[i] == 'E')
3207 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3208 if (loc == &XVECEXP (in, i, j)
3209 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3210 return 1;
3211 }
3212 return 0;
3213 }
3214
3215 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3216 and SUBREG_BYTE, return the bit offset where the subreg begins
3217 (counting from the least significant bit of the operand). */
3218
3219 unsigned int
3220 subreg_lsb_1 (enum machine_mode outer_mode,
3221 enum machine_mode inner_mode,
3222 unsigned int subreg_byte)
3223 {
3224 unsigned int bitpos;
3225 unsigned int byte;
3226 unsigned int word;
3227
3228 /* A paradoxical subreg begins at bit position 0. */
3229 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3230 return 0;
3231
3232 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3233 /* If the subreg crosses a word boundary ensure that
3234 it also begins and ends on a word boundary. */
3235 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3236 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3237 && (subreg_byte % UNITS_PER_WORD
3238 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3239
3240 if (WORDS_BIG_ENDIAN)
3241 word = (GET_MODE_SIZE (inner_mode)
3242 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3243 else
3244 word = subreg_byte / UNITS_PER_WORD;
3245 bitpos = word * BITS_PER_WORD;
3246
3247 if (BYTES_BIG_ENDIAN)
3248 byte = (GET_MODE_SIZE (inner_mode)
3249 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3250 else
3251 byte = subreg_byte % UNITS_PER_WORD;
3252 bitpos += byte * BITS_PER_UNIT;
3253
3254 return bitpos;
3255 }
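/* Editor's standalone sketch of the arithmetic above, with the mode
   sizes and target parameters passed in as plain numbers (hypothetical
   helper; byte sizes stand in for mode precisions):  */

static unsigned int
sketch_subreg_lsb (unsigned int inner_bytes, unsigned int outer_bytes,
                   unsigned int subreg_byte, unsigned int units_per_word,
                   int words_big_endian, int bytes_big_endian)
{
  unsigned int word, byte;

  /* A paradoxical subreg (outer wider than inner) begins at bit 0.  */
  if (outer_bytes > inner_bytes)
    return 0;

  if (words_big_endian)
    word = (inner_bytes - (subreg_byte + outer_bytes)) / units_per_word;
  else
    word = subreg_byte / units_per_word;

  if (bytes_big_endian)
    byte = (inner_bytes - (subreg_byte + outer_bytes)) % units_per_word;
  else
    byte = subreg_byte % units_per_word;

  /* 8 stands in for BITS_PER_UNIT.  */
  return word * units_per_word * 8 + byte * 8;
}

/* Example: a 4-byte subreg at byte offset 4 of an 8-byte value on a
   little-endian target with 4-byte words starts at bit 32.  */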
3256
3257 /* Given a subreg X, return the bit offset where the subreg begins
3258 (counting from the least significant bit of the reg). */
3259
3260 unsigned int
3261 subreg_lsb (const_rtx x)
3262 {
3263 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3264 SUBREG_BYTE (x));
3265 }
3266
3267 /* Fill in information about a subreg of a hard register.
3268 xregno - A regno of an inner hard subreg_reg (or what will become one).
3269 xmode - The mode of xregno.
3270 offset - The byte offset.
3271 ymode - The mode of a top level SUBREG (or what may become one).
3272 info - Pointer to structure to fill in. */
3273 void
3274 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3275 unsigned int offset, enum machine_mode ymode,
3276 struct subreg_info *info)
3277 {
3278 int nregs_xmode, nregs_ymode;
3279 int mode_multiple, nregs_multiple;
3280 int offset_adj, y_offset, y_offset_adj;
3281 int regsize_xmode, regsize_ymode;
3282 bool rknown;
3283
3284 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3285
3286 rknown = false;
3287
3288 /* If there are holes in a non-scalar mode in registers, we expect
3289 that it is made up of its units concatenated together. */
3290 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3291 {
3292 enum machine_mode xmode_unit;
3293
3294 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3295 if (GET_MODE_INNER (xmode) == VOIDmode)
3296 xmode_unit = xmode;
3297 else
3298 xmode_unit = GET_MODE_INNER (xmode);
3299 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3300 gcc_assert (nregs_xmode
3301 == (GET_MODE_NUNITS (xmode)
3302 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3303 gcc_assert (hard_regno_nregs[xregno][xmode]
3304 == (hard_regno_nregs[xregno][xmode_unit]
3305 * GET_MODE_NUNITS (xmode)));
3306
3307 /* You can only ask for a SUBREG of a value with holes in the middle
3308 if you don't cross the holes. (Such a SUBREG should be done by
3309 picking a different register class, or doing it in memory if
3310 necessary.) An example of a value with holes is XCmode on 32-bit
3311 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3312 3 for each part, but in memory it's two 128-bit parts.
3313 Padding is assumed to be at the end (not necessarily the 'high part')
3314 of each unit. */
3315 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3316 < GET_MODE_NUNITS (xmode))
3317 && (offset / GET_MODE_SIZE (xmode_unit)
3318 != ((offset + GET_MODE_SIZE (ymode) - 1)
3319 / GET_MODE_SIZE (xmode_unit))))
3320 {
3321 info->representable_p = false;
3322 rknown = true;
3323 }
3324 }
3325 else
3326 nregs_xmode = hard_regno_nregs[xregno][xmode];
3327
3328 nregs_ymode = hard_regno_nregs[xregno][ymode];
3329
3330 /* Paradoxical subregs are otherwise valid. */
3331 if (!rknown
3332 && offset == 0
3333 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3334 {
3335 info->representable_p = true;
3336 /* If this is a big endian paradoxical subreg, which uses more
3337 actual hard registers than the original register, we must
3338 return a negative offset so that we find the proper highpart
3339 of the register. */
3340 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3341 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3342 info->offset = nregs_xmode - nregs_ymode;
3343 else
3344 info->offset = 0;
3345 info->nregs = nregs_ymode;
3346 return;
3347 }
3348
3349 /* If registers store different numbers of bits in the different
3350 modes, we cannot generally form this subreg. */
3351 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3352 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3353 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3354 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3355 {
3356 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3357 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3358 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3359 {
3360 info->representable_p = false;
3361 info->nregs
3362 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3363 info->offset = offset / regsize_xmode;
3364 return;
3365 }
3366 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3367 {
3368 info->representable_p = false;
3369 info->nregs
3370 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3371 info->offset = offset / regsize_xmode;
3372 return;
3373 }
3374 }
3375
3376 /* Lowpart subregs are otherwise valid. */
3377 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3378 {
3379 info->representable_p = true;
3380 rknown = true;
3381
3382 if (offset == 0 || nregs_xmode == nregs_ymode)
3383 {
3384 info->offset = 0;
3385 info->nregs = nregs_ymode;
3386 return;
3387 }
3388 }
3389
3390 /* This should always pass, otherwise we don't know how to verify
3391 the constraint. These conditions may be relaxed but
3392 subreg_regno_offset would need to be redesigned. */
3393 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3394 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3395
3396 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3397 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3398 {
3399 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3400 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3401 HOST_WIDE_INT off_low = offset & (ysize - 1);
3402 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3403 offset = (xsize - ysize - off_high) | off_low;
3404 }
3405 /* The XMODE value can be seen as a vector of NREGS_XMODE
3406 values. The subreg must represent a lowpart of given field.
3407 Compute what field it is. */
3408 offset_adj = offset;
3409 offset_adj -= subreg_lowpart_offset (ymode,
3410 mode_for_size (GET_MODE_BITSIZE (xmode)
3411 / nregs_xmode,
3412 MODE_INT, 0));
3413
3414 /* Size of ymode must not be greater than the size of xmode. */
3415 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3416 gcc_assert (mode_multiple != 0);
3417
3418 y_offset = offset / GET_MODE_SIZE (ymode);
3419 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3420 nregs_multiple = nregs_xmode / nregs_ymode;
3421
3422 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3423 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3424
3425 if (!rknown)
3426 {
3427 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3428 rknown = true;
3429 }
3430 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3431 info->nregs = nregs_ymode;
3432 }
3433
3434 /* This function returns the regno offset of a subreg expression.
3435 xregno - A regno of an inner hard subreg_reg (or what will become one).
3436 xmode - The mode of xregno.
3437 offset - The byte offset.
3438 ymode - The mode of a top level SUBREG (or what may become one).
3439 RETURN - The regno offset which would be used. */
3440 unsigned int
3441 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3442 unsigned int offset, enum machine_mode ymode)
3443 {
3444 struct subreg_info info;
3445 subreg_get_info (xregno, xmode, offset, ymode, &info);
3446 return info.offset;
3447 }
3448
3449 /* This function returns true when the offset is representable via
3450 subreg_offset in the given regno.
3451 xregno - A regno of an inner hard subreg_reg (or what will become one).
3452 xmode - The mode of xregno.
3453 offset - The byte offset.
3454 ymode - The mode of a top level SUBREG (or what may become one).
3455 RETURN - Whether the offset is representable. */
3456 bool
3457 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3458 unsigned int offset, enum machine_mode ymode)
3459 {
3460 struct subreg_info info;
3461 subreg_get_info (xregno, xmode, offset, ymode, &info);
3462 return info.representable_p;
3463 }
3464
3465 /* Return the number of a YMODE register to which
3466
3467 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3468
3469 can be simplified. Return -1 if the subreg can't be simplified.
3470
3471 XREGNO is a hard register number. */
3472
3473 int
3474 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3475 unsigned int offset, enum machine_mode ymode)
3476 {
3477 struct subreg_info info;
3478 unsigned int yregno;
3479
3480 #ifdef CANNOT_CHANGE_MODE_CLASS
3481 /* Give the backend a chance to disallow the mode change. */
3482 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3483 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3484 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode))
3485 return -1;
3486 #endif
3487
3488 /* We shouldn't simplify stack-related registers. */
3489 if ((!reload_completed || frame_pointer_needed)
3490 && xregno == FRAME_POINTER_REGNUM)
3491 return -1;
3492
3493 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3494 && xregno == ARG_POINTER_REGNUM)
3495 return -1;
3496
3497 if (xregno == STACK_POINTER_REGNUM)
3498 return -1;
3499
3500 /* Try to get the register offset. */
3501 subreg_get_info (xregno, xmode, offset, ymode, &info);
3502 if (!info.representable_p)
3503 return -1;
3504
3505 /* Make sure that the offsetted register value is in range. */
3506 yregno = xregno + info.offset;
3507 if (!HARD_REGISTER_NUM_P (yregno))
3508 return -1;
3509
3510 /* See whether (reg:YMODE YREGNO) is valid.
3511
3512 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3513 This is a kludge to work around how complex FP arguments are passed
3514 on IA-64 and should be fixed. See PR target/49226. */
3515 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3516 && HARD_REGNO_MODE_OK (xregno, xmode))
3517 return -1;
3518
3519 return (int) yregno;
3520 }
3521
3522 /* Return the final regno that a subreg expression refers to. */
3523 unsigned int
3524 subreg_regno (const_rtx x)
3525 {
3526 unsigned int ret;
3527 rtx subreg = SUBREG_REG (x);
3528 int regno = REGNO (subreg);
3529
3530 ret = regno + subreg_regno_offset (regno,
3531 GET_MODE (subreg),
3532 SUBREG_BYTE (x),
3533 GET_MODE (x));
3534 return ret;
3535
3536 }
3537
3538 /* Return the number of registers that a subreg expression refers
3539 to. */
3540 unsigned int
3541 subreg_nregs (const_rtx x)
3542 {
3543 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3544 }
3545
3546 /* Return the number of registers that a subreg expression with register
3547 number REGNO refers to. This is a copy of rtlanal.c:subreg_nregs,
3548 changed so that the regno can be passed in.
3549
3550 unsigned int
3551 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3552 {
3553 struct subreg_info info;
3554 rtx subreg = SUBREG_REG (x);
3555
3556 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3557 &info);
3558 return info.nregs;
3559 }
3560
3561
3562 struct parms_set_data
3563 {
3564 int nregs;
3565 HARD_REG_SET regs;
3566 };
3567
3568 /* Helper function for noticing stores to parameter registers. */
3569 static void
3570 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3571 {
3572 struct parms_set_data *const d = (struct parms_set_data *) data;
3573 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3574 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3575 {
3576 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3577 d->nregs--;
3578 }
3579 }
3580
3581 /* Look backward for first parameter to be loaded.
3582 Note that loads of all parameters will not necessarily be
3583 found if CSE has eliminated some of them (e.g., an argument
3584 to the outer function is passed down as a parameter).
3585 Do not skip BOUNDARY. */
3586 rtx
3587 find_first_parameter_load (rtx call_insn, rtx boundary)
3588 {
3589 struct parms_set_data parm;
3590 rtx p, before, first_set;
3591
3592 /* Since different machines initialize their parameter registers
3593 in different orders, assume nothing. Collect the set of all
3594 parameter registers. */
3595 CLEAR_HARD_REG_SET (parm.regs);
3596 parm.nregs = 0;
3597 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3598 if (GET_CODE (XEXP (p, 0)) == USE
3599 && REG_P (XEXP (XEXP (p, 0), 0)))
3600 {
3601 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3602
3603 /* We only care about registers which can hold function
3604 arguments. */
3605 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3606 continue;
3607
3608 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3609 parm.nregs++;
3610 }
3611 before = call_insn;
3612 first_set = call_insn;
3613
3614 /* Search backward for the first set of a register in this set. */
3615 while (parm.nregs && before != boundary)
3616 {
3617 before = PREV_INSN (before);
3618
3619 /* It is possible that some loads got CSEed from one call to
3620 another. Stop in that case. */
3621 if (CALL_P (before))
3622 break;
3623
3624 /* Our caller needs to either ensure that we will find all sets
3625 (in case code has not been optimized yet), or take care of
3626 possible labels by setting BOUNDARY to the preceding
3627 CODE_LABEL. */
3628 if (LABEL_P (before))
3629 {
3630 gcc_assert (before == boundary);
3631 break;
3632 }
3633
3634 if (INSN_P (before))
3635 {
3636 int nregs_old = parm.nregs;
3637 note_stores (PATTERN (before), parms_set, &parm);
3638 /* If we found something that did not set a parameter reg,
3639 we're done. Do not keep going, as that might result
3640 in hoisting an insn before the setting of a pseudo
3641 that is used by the hoisted insn. */
3642 if (nregs_old != parm.nregs)
3643 first_set = before;
3644 else
3645 break;
3646 }
3647 }
3648 return first_set;
3649 }
3650
3651 /* Return true if we should avoid inserting code between INSN and preceding
3652 call instruction. */
3653
3654 bool
3655 keep_with_call_p (const_rtx insn)
3656 {
3657 rtx set;
3658
3659 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3660 {
3661 if (REG_P (SET_DEST (set))
3662 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3663 && fixed_regs[REGNO (SET_DEST (set))]
3664 && general_operand (SET_SRC (set), VOIDmode))
3665 return true;
3666 if (REG_P (SET_SRC (set))
3667 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3668 && REG_P (SET_DEST (set))
3669 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3670 return true;
3671 /* There may be a stack pop just after the call and before the store
3672 of the return register. Search for the actual store when deciding
3673 if we can break or not. */
3674 if (SET_DEST (set) == stack_pointer_rtx)
3675 {
3676 /* This CONST_CAST is okay because next_nonnote_insn just
3677 returns its argument and we assign it to a const_rtx
3678 variable. */
3679 const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
3680 if (i2 && keep_with_call_p (i2))
3681 return true;
3682 }
3683 }
3684 return false;
3685 }
3686
3687 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3688 to non-complex jumps. That is, direct unconditional, conditional,
3689 and tablejumps, but not computed jumps or returns. It also does
3690 not apply to the fallthru case of a conditional jump. */
3691
3692 bool
3693 label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
3694 {
3695 rtx tmp = JUMP_LABEL (jump_insn);
3696
3697 if (label == tmp)
3698 return true;
3699
3700 if (tablejump_p (jump_insn, NULL, &tmp))
3701 {
3702 rtvec vec = XVEC (PATTERN (tmp),
3703 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3704 int i, veclen = GET_NUM_ELEM (vec);
3705
3706 for (i = 0; i < veclen; ++i)
3707 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3708 return true;
3709 }
3710
3711 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3712 return true;
3713
3714 return false;
3715 }
3716
3717 \f
3718 /* Return an estimate of the cost of computing rtx X.
3719 One use is in cse, to decide which expression to keep in the hash table.
3720 Another is in rtl generation, to pick the cheapest way to multiply.
3721 Other uses like the latter are expected in the future.
3722
3723 X appears as operand OPNO in an expression with code OUTER_CODE.
3724 SPEED specifies whether costs optimized for speed or size should
3725 be returned. */
3726
3727 int
3728 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3729 {
3730 int i, j;
3731 enum rtx_code code;
3732 const char *fmt;
3733 int total;
3734 int factor;
3735
3736 if (x == 0)
3737 return 0;
3738
3739 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3740 many insns, taking N times as long. */
3741 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3742 if (factor == 0)
3743 factor = 1;
3744
3745 /* Compute the default costs of certain things.
3746 Note that targetm.rtx_costs can override the defaults. */
3747
3748 code = GET_CODE (x);
3749 switch (code)
3750 {
3751 case MULT:
3752 /* Multiplication has time-complexity O(N*N), where N is the
3753 number of units (translated from digits) when using
3754 schoolbook long multiplication. */
3755 total = factor * factor * COSTS_N_INSNS (5);
3756 break;
3757 case DIV:
3758 case UDIV:
3759 case MOD:
3760 case UMOD:
3761 /* Similarly, complexity for schoolbook long division. */
3762 total = factor * factor * COSTS_N_INSNS (7);
3763 break;
3764 case USE:
3765 /* Used in combine.c as a marker. */
3766 total = 0;
3767 break;
3768 case SET:
3769 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3770 the mode for the factor. */
3771 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3772 if (factor == 0)
3773 factor = 1;
3774 /* Pass through. */
3775 default:
3776 total = factor * COSTS_N_INSNS (1);
3777 }
3778
3779 switch (code)
3780 {
3781 case REG:
3782 return 0;
3783
3784 case SUBREG:
3785 total = 0;
3786 /* If we can't tie these modes, make this expensive. The larger
3787 the mode, the more expensive it is. */
3788 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3789 return COSTS_N_INSNS (2 + factor);
3790 break;
3791
3792 default:
3793 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3794 return total;
3795 break;
3796 }
3797
3798 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3799 which is already in total. */
3800
3801 fmt = GET_RTX_FORMAT (code);
3802 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3803 if (fmt[i] == 'e')
3804 total += rtx_cost (XEXP (x, i), code, i, speed);
3805 else if (fmt[i] == 'E')
3806 for (j = 0; j < XVECLEN (x, i); j++)
3807 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3808
3809 return total;
3810 }
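/* Editor's sketch of the default size-factor scaling above
   (hypothetical standalone helper; COSTS_N_INSNS (N) expands to N * 4
   in GCC's rtl.h).  A mode FACTOR times wider than a word gets FACTOR
   times the base cost for simple operations and FACTOR*FACTOR times
   for MULT, matching schoolbook multiplication.  */

static int
sketch_default_mult_cost (unsigned int mode_bytes, unsigned int units_per_word)
{
  unsigned int factor = mode_bytes / units_per_word;

  if (factor == 0)
    factor = 1;
  return factor * factor * (5 * 4);   /* factor^2 * COSTS_N_INSNS (5).  */
}

/* For example, a DImode (8-byte) MULT on a 32-bit target has factor 2,
   so its default cost is 4 * COSTS_N_INSNS (5) = COSTS_N_INSNS (20).  */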
3811
3812 /* Fill in the structure C with information about both speed and size rtx
3813 costs for X, which is operand OPNO in an expression with code OUTER. */
3814
3815 void
3816 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3817 struct full_rtx_costs *c)
3818 {
3819 c->speed = rtx_cost (x, outer, opno, true);
3820 c->size = rtx_cost (x, outer, opno, false);
3821 }
3822
3823 \f
3824 /* Return cost of address expression X.
3825 Expect that X is a properly formed address reference.
3826
3827 The SPEED parameter specifies whether costs optimized for speed or size
3828 should be returned. */
3829
3830 int
3831 address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
3832 {
3833 /* We may be asked for the cost of various unusual addresses, such as the
3834 operands of a push instruction. It is not worthwhile to complicate the
3835 target hook with such cases. */
3836
3837 if (!memory_address_addr_space_p (mode, x, as))
3838 return 1000;
3839
3840 return targetm.address_cost (x, mode, as, speed);
3841 }
3842
3843 /* If the target doesn't override, compute the cost as with arithmetic. */
3844
3845 int
3846 default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
3847 {
3848 return rtx_cost (x, MEM, 0, speed);
3849 }
3850 \f
3851
3852 unsigned HOST_WIDE_INT
3853 nonzero_bits (const_rtx x, enum machine_mode mode)
3854 {
3855 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3856 }
3857
3858 unsigned int
3859 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
3860 {
3861 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3862 }
3863
3864 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3865 It avoids exponential behavior in nonzero_bits1 when X has
3866 identical subexpressions on the first or the second level. */
3867
3868 static unsigned HOST_WIDE_INT
3869 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
3870 enum machine_mode known_mode,
3871 unsigned HOST_WIDE_INT known_ret)
3872 {
3873 if (x == known_x && mode == known_mode)
3874 return known_ret;
3875
3876 /* Try to find identical subexpressions. If found call
3877 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3878 precomputed value for the subexpression as KNOWN_RET. */
3879
3880 if (ARITHMETIC_P (x))
3881 {
3882 rtx x0 = XEXP (x, 0);
3883 rtx x1 = XEXP (x, 1);
3884
3885 /* Check the first level. */
3886 if (x0 == x1)
3887 return nonzero_bits1 (x, mode, x0, mode,
3888 cached_nonzero_bits (x0, mode, known_x,
3889 known_mode, known_ret));
3890
3891 /* Check the second level. */
3892 if (ARITHMETIC_P (x0)
3893 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3894 return nonzero_bits1 (x, mode, x1, mode,
3895 cached_nonzero_bits (x1, mode, known_x,
3896 known_mode, known_ret));
3897
3898 if (ARITHMETIC_P (x1)
3899 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3900 return nonzero_bits1 (x, mode, x0, mode,
3901 cached_nonzero_bits (x0, mode, known_x,
3902 known_mode, known_ret));
3903 }
3904
3905 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3906 }
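/* Editor's sketch of why the one-entry memo above matters (standalone,
   hypothetical types and names).  An expression such as
   (plus (reg A) (reg A)) shares its operand; a chain of N such nodes
   would make a naive recursion visit the shared operand about 2**N
   times, while carrying one precomputed (node, value) pair down the
   recursion collapses that to a linear walk.  */

struct sketch_node
{
  struct sketch_node *op0, *op1;   /* Both NULL for a leaf.  */
  unsigned int leaf_bits;
};

static unsigned int sketch_bits_1 (const struct sketch_node *,
                                   const struct sketch_node *, unsigned int);

/* Wrapper: when both operands are the same node, compute that node's
   value once and hand it to the worker as a one-entry memo.  */
static unsigned int
sketch_bits (const struct sketch_node *x,
             const struct sketch_node *known_x, unsigned int known_ret)
{
  if (x == known_x)
    return known_ret;
  if (x->op0 != NULL && x->op0 == x->op1)
    return sketch_bits_1 (x, x->op0,
                          sketch_bits (x->op0, known_x, known_ret));
  return sketch_bits_1 (x, known_x, known_ret);
}

/* Worker: leaves carry their own bits; interior nodes combine the
   operand values.  */
static unsigned int
sketch_bits_1 (const struct sketch_node *x,
               const struct sketch_node *known_x, unsigned int known_ret)
{
  if (x->op0 == NULL)
    return x->leaf_bits;
  return sketch_bits (x->op0, known_x, known_ret)
         | sketch_bits (x->op1, known_x, known_ret);
}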
3907
3908 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3909 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3910 is less useful. We can't allow both, because that results in exponential
3911 run time recursion. There is a nullstone testcase that triggered
3912 this. This macro avoids accidental uses of num_sign_bit_copies. */
3913 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3914
3915 /* Given an expression, X, compute which bits in X can be nonzero.
3916 We don't care about bits outside of those defined in MODE.
3917
3918 For most X this is simply GET_MODE_MASK (MODE), but if X is
3919 an arithmetic operation, we can do better. */
3920
3921 static unsigned HOST_WIDE_INT
3922 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
3923 enum machine_mode known_mode,
3924 unsigned HOST_WIDE_INT known_ret)
3925 {
3926 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3927 unsigned HOST_WIDE_INT inner_nz;
3928 enum rtx_code code;
3929 enum machine_mode inner_mode;
3930 unsigned int mode_width = GET_MODE_PRECISION (mode);
3931
3932 /* For floating-point and vector values, assume all bits are needed. */
3933 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
3934 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
3935 return nonzero;
3936
3937 /* If X is wider than MODE, use its mode instead. */
3938 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
3939 {
3940 mode = GET_MODE (x);
3941 nonzero = GET_MODE_MASK (mode);
3942 mode_width = GET_MODE_PRECISION (mode);
3943 }
3944
3945 if (mode_width > HOST_BITS_PER_WIDE_INT)
3946 /* Our only callers in this case look for single bit values. So
3947 just return the mode mask. Those tests will then be false. */
3948 return nonzero;
3949
3950 #ifndef WORD_REGISTER_OPERATIONS
3951 /* If MODE is wider than X, but both are a single word for both the host
3952 and target machines, we can compute this from which bits of the
3953 object might be nonzero in its own mode, taking into account the fact
3954 that on many CISC machines, accessing an object in a wider mode
3955 causes the high-order bits to become undefined. So they are
3956 not known to be zero. */
3957
3958 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3959 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
3960 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3961 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
3962 {
3963 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3964 known_x, known_mode, known_ret);
3965 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3966 return nonzero;
3967 }
3968 #endif
3969
3970 code = GET_CODE (x);
3971 switch (code)
3972 {
3973 case REG:
3974 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3975 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3976 all the bits above ptr_mode are known to be zero. */
3977 /* As we do not know which address space the pointer is referring to,
3978 we can do this only if the target does not support different pointer
3979 or address modes depending on the address space. */
3980 if (target_default_pointer_address_modes_p ()
3981 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3982 && REG_POINTER (x))
3983 nonzero &= GET_MODE_MASK (ptr_mode);
3984 #endif
3985
3986 /* Include declared information about alignment of pointers. */
3987 /* ??? We don't properly preserve REG_POINTER changes across
3988 pointer-to-integer casts, so we can't trust it except for
3989 things that we know must be pointers. See execute/960116-1.c. */
3990 if ((x == stack_pointer_rtx
3991 || x == frame_pointer_rtx
3992 || x == arg_pointer_rtx)
3993 && REGNO_POINTER_ALIGN (REGNO (x)))
3994 {
3995 unsigned HOST_WIDE_INT alignment
3996 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
3997
3998 #ifdef PUSH_ROUNDING
3999 /* If PUSH_ROUNDING is defined, it is possible for the
4000 stack to be momentarily aligned only to that amount,
4001 so we pick the least alignment. */
4002 if (x == stack_pointer_rtx && PUSH_ARGS)
4003 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4004 alignment);
4005 #endif
4006
4007 nonzero &= ~(alignment - 1);
4008 }
4009
4010 {
4011 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4012 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4013 known_mode, known_ret,
4014 &nonzero_for_hook);
4015
4016 if (new_rtx)
4017 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4018 known_mode, known_ret);
4019
4020 return nonzero_for_hook;
4021 }
4022
4023 case CONST_INT:
4024 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4025 /* If X is negative in MODE, sign-extend the value. */
4026 if (INTVAL (x) > 0
4027 && mode_width < BITS_PER_WORD
4028 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4029 != 0)
4030 return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
4031 #endif
4032
4033 return UINTVAL (x);
4034
4035 case MEM:
4036 #ifdef LOAD_EXTEND_OP
4037 /* In many, if not most, RISC machines, reading a byte from memory
4038 zeros the rest of the register. Noticing that fact saves a lot
4039 of extra zero-extends. */
4040 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4041 nonzero &= GET_MODE_MASK (GET_MODE (x));
4042 #endif
4043 break;
4044
4045 case EQ: case NE:
4046 case UNEQ: case LTGT:
4047 case GT: case GTU: case UNGT:
4048 case LT: case LTU: case UNLT:
4049 case GE: case GEU: case UNGE:
4050 case LE: case LEU: case UNLE:
4051 case UNORDERED: case ORDERED:
4052 /* If this produces an integer result, we know which bits are set.
4053 Code here used to clear bits outside the mode of X, but that is
4054 now done above. */
4055 /* Mind that MODE is the mode the caller wants to look at this
4056 operation in, and not the actual operation mode. We can wind
4057 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4058 that describes the results of a vector compare. */
4059 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4060 && mode_width <= HOST_BITS_PER_WIDE_INT)
4061 nonzero = STORE_FLAG_VALUE;
4062 break;
4063
4064 case NEG:
4065 #if 0
4066 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4067 and num_sign_bit_copies. */
4068 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4069 == GET_MODE_PRECISION (GET_MODE (x)))
4070 nonzero = 1;
4071 #endif
4072
4073 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4074 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4075 break;
4076
4077 case ABS:
4078 #if 0
4079 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4080 and num_sign_bit_copies. */
4081 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4082 == GET_MODE_PRECISION (GET_MODE (x)))
4083 nonzero = 1;
4084 #endif
4085 break;
4086
4087 case TRUNCATE:
4088 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4089 known_x, known_mode, known_ret)
4090 & GET_MODE_MASK (mode));
4091 break;
4092
4093 case ZERO_EXTEND:
4094 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4095 known_x, known_mode, known_ret);
4096 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4097 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4098 break;
4099
4100 case SIGN_EXTEND:
4101 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4102 Otherwise, show that all the bits that are in the outer mode but
4103 not in the inner mode may be nonzero. */
4104 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4105 known_x, known_mode, known_ret);
4106 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4107 {
4108 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4109 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4110 inner_nz |= (GET_MODE_MASK (mode)
4111 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4112 }
4113
4114 nonzero &= inner_nz;
4115 break;
4116
4117 case AND:
4118 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4119 known_x, known_mode, known_ret)
4120 & cached_nonzero_bits (XEXP (x, 1), mode,
4121 known_x, known_mode, known_ret);
4122 break;
4123
4124 case XOR: case IOR:
4125 case UMIN: case UMAX: case SMIN: case SMAX:
4126 {
4127 unsigned HOST_WIDE_INT nonzero0
4128 = cached_nonzero_bits (XEXP (x, 0), mode,
4129 known_x, known_mode, known_ret);
4130
4131 /* Don't call nonzero_bits a second time if it cannot change
4132 anything. */
4133 if ((nonzero & nonzero0) != nonzero)
4134 nonzero &= nonzero0
4135 | cached_nonzero_bits (XEXP (x, 1), mode,
4136 known_x, known_mode, known_ret);
4137 }
4138 break;
4139
4140 case PLUS: case MINUS:
4141 case MULT:
4142 case DIV: case UDIV:
4143 case MOD: case UMOD:
4144 /* We can apply the rules of arithmetic to compute the number of
4145 high- and low-order zero bits of these operations. We start by
4146 computing the width (position of the highest-order nonzero bit)
4147 and the number of low-order zero bits for each value. */
4148 {
4149 unsigned HOST_WIDE_INT nz0
4150 = cached_nonzero_bits (XEXP (x, 0), mode,
4151 known_x, known_mode, known_ret);
4152 unsigned HOST_WIDE_INT nz1
4153 = cached_nonzero_bits (XEXP (x, 1), mode,
4154 known_x, known_mode, known_ret);
4155 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4156 int width0 = floor_log2 (nz0) + 1;
4157 int width1 = floor_log2 (nz1) + 1;
4158 int low0 = floor_log2 (nz0 & -nz0);
4159 int low1 = floor_log2 (nz1 & -nz1);
4160 unsigned HOST_WIDE_INT op0_maybe_minusp
4161 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4162 unsigned HOST_WIDE_INT op1_maybe_minusp
4163 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4164 unsigned int result_width = mode_width;
4165 int result_low = 0;
4166
4167 switch (code)
4168 {
4169 case PLUS:
4170 result_width = MAX (width0, width1) + 1;
4171 result_low = MIN (low0, low1);
4172 break;
4173 case MINUS:
4174 result_low = MIN (low0, low1);
4175 break;
4176 case MULT:
4177 result_width = width0 + width1;
4178 result_low = low0 + low1;
4179 break;
4180 case DIV:
4181 if (width1 == 0)
4182 break;
4183 if (!op0_maybe_minusp && !op1_maybe_minusp)
4184 result_width = width0;
4185 break;
4186 case UDIV:
4187 if (width1 == 0)
4188 break;
4189 result_width = width0;
4190 break;
4191 case MOD:
4192 if (width1 == 0)
4193 break;
4194 if (!op0_maybe_minusp && !op1_maybe_minusp)
4195 result_width = MIN (width0, width1);
4196 result_low = MIN (low0, low1);
4197 break;
4198 case UMOD:
4199 if (width1 == 0)
4200 break;
4201 result_width = MIN (width0, width1);
4202 result_low = MIN (low0, low1);
4203 break;
4204 default:
4205 gcc_unreachable ();
4206 }
4207
4208 if (result_width < mode_width)
4209 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4210
4211 if (result_low > 0)
4212 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4213 }
4214 break;
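/* Worked example: for (plus X Y) with NZ0 == 0x1f (WIDTH0 == 5, LOW0 == 0)
   and NZ1 == 0x0c (WIDTH1 == 4, LOW1 == 2), the sum fits in
   MAX (5, 4) + 1 == 6 bits with no known low-order zeros, so NONZERO is
   masked down to 0x3f.  For (mult X Y) with the same operands the product
   fits in 5 + 4 == 9 bits and has at least 0 + 2 == 2 low-order zero bits,
   so NONZERO becomes 0x1fc (assuming MODE is wide enough).  */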
4215
4216 case ZERO_EXTRACT:
4217 if (CONST_INT_P (XEXP (x, 1))
4218 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4219 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4220 break;
4221
4222 case SUBREG:
4223 /* If this is a SUBREG formed for a promoted variable that has
4224 been zero-extended, we know that at least the high-order bits
4225 are zero, though others might be too. */
4226
4227 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
4228 nonzero = GET_MODE_MASK (GET_MODE (x))
4229 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4230 known_x, known_mode, known_ret);
4231
4232 inner_mode = GET_MODE (SUBREG_REG (x));
4233 /* If the inner mode is a single word for both the host and target
4234 machines, we can compute this from which bits of the inner
4235 object might be nonzero. */
4236 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4237 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4238 {
4239 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4240 known_x, known_mode, known_ret);
4241
4242 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4243 /* If this is a typical RISC machine, we only have to worry
4244 about the way loads are extended. */
4245 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4246 ? val_signbit_known_set_p (inner_mode, nonzero)
4247 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4248 || !MEM_P (SUBREG_REG (x)))
4249 #endif
4250 {
4251 /* On many CISC machines, accessing an object in a wider mode
4252 causes the high-order bits to become undefined. So they are
4253 not known to be zero. */
4254 if (GET_MODE_PRECISION (GET_MODE (x))
4255 > GET_MODE_PRECISION (inner_mode))
4256 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4257 & ~GET_MODE_MASK (inner_mode));
4258 }
4259 }
4260 break;
4261
4262 case ASHIFTRT:
4263 case LSHIFTRT:
4264 case ASHIFT:
4265 case ROTATE:
4266 /* The nonzero bits are in two classes: any bits within MODE
4267 that aren't in GET_MODE (x) are always significant. The rest of the
4268 nonzero bits are those that are significant in the operand of
4269 the shift when shifted the appropriate number of bits. This
4270 shows that high-order bits are cleared by the right shift and
4271 low-order bits by left shifts. */
4272 if (CONST_INT_P (XEXP (x, 1))
4273 && INTVAL (XEXP (x, 1)) >= 0
4274 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4275 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4276 {
4277 enum machine_mode inner_mode = GET_MODE (x);
4278 unsigned int width = GET_MODE_PRECISION (inner_mode);
4279 int count = INTVAL (XEXP (x, 1));
4280 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4281 unsigned HOST_WIDE_INT op_nonzero
4282 = cached_nonzero_bits (XEXP (x, 0), mode,
4283 known_x, known_mode, known_ret);
4284 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4285 unsigned HOST_WIDE_INT outer = 0;
4286
4287 if (mode_width > width)
4288 outer = (op_nonzero & nonzero & ~mode_mask);
4289
4290 if (code == LSHIFTRT)
4291 inner >>= count;
4292 else if (code == ASHIFTRT)
4293 {
4294 inner >>= count;
4295
4296 /* If the sign bit may have been nonzero before the shift, we
4297 need to mark all the places it could have been copied to
4298 by the shift as possibly nonzero. */
4299 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4300 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4301 << (width - count);
4302 }
4303 else if (code == ASHIFT)
4304 inner <<= count;
4305 else
4306 inner = ((inner << (count % width)
4307 | (inner >> (width - (count % width)))) & mode_mask);
4308
4309 nonzero &= (outer | inner);
4310 }
4311 break;
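/* Worked example: for (lshiftrt:SI X (const_int 8)) with OP_NONZERO ==
   0xffffffff, INNER becomes 0x00ffffff, i.e. the top eight bits are now
   known zero.  For (ashiftrt:SI X (const_int 8)) the same shift first
   gives 0x00ffffff and then, because bit 23 (the shifted copy of the
   sign bit) may be nonzero, ORs back 0xff000000 for the possible
   sign-bit copies.  */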
4312
4313 case FFS:
4314 case POPCOUNT:
4315 /* This is at most the number of bits in the mode. */
4316 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4317 break;
4318
4319 case CLZ:
4320 /* If CLZ has a known value at zero, then the nonzero bits are
4321 that value, plus the number of bits in the mode minus one. */
4322 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4323 nonzero
4324 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4325 else
4326 nonzero = -1;
4327 break;
4328
4329 case CTZ:
4330 /* If CTZ has a known value at zero, then the nonzero bits are
4331 that value, plus the number of bits in the mode minus one. */
4332 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4333 nonzero
4334 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4335 else
4336 nonzero = -1;
4337 break;
4338
4339 case CLRSB:
4340 /* This is at most the number of bits in the mode minus 1. */
4341 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4342 break;
4343
4344 case PARITY:
4345 nonzero = 1;
4346 break;
4347
4348 case IF_THEN_ELSE:
4349 {
4350 unsigned HOST_WIDE_INT nonzero_true
4351 = cached_nonzero_bits (XEXP (x, 1), mode,
4352 known_x, known_mode, known_ret);
4353
4354 /* Don't call nonzero_bits a second time if it cannot change
4355 anything. */
4356 if ((nonzero & nonzero_true) != nonzero)
4357 nonzero &= nonzero_true
4358 | cached_nonzero_bits (XEXP (x, 2), mode,
4359 known_x, known_mode, known_ret);
4360 }
4361 break;
4362
4363 default:
4364 break;
4365 }
4366
4367 return nonzero;
4368 }
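/* Illustrative sketch only (not from the original source): a pass that
   wants to know whether a value already fits in its low eight bits can
   ask nonzero_bits directly.  The helper name is hypothetical.  */
#if 0
static bool
fits_in_low_byte_p (const_rtx x, enum machine_mode mode)
{
  /* True when no bit above bit 7 can possibly be nonzero.  */
  return (nonzero_bits (x, mode) & ~(unsigned HOST_WIDE_INT) 0xff) == 0;
}
#endif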
4369
4370 /* See the macro definition above. */
4371 #undef cached_num_sign_bit_copies
4372
4373 \f
4374 /* The function cached_num_sign_bit_copies is a wrapper around
4375 num_sign_bit_copies1. It avoids exponential behavior in
4376 num_sign_bit_copies1 when X has identical subexpressions on the
4377 first or the second level. */
4378
4379 static unsigned int
4380 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4381 enum machine_mode known_mode,
4382 unsigned int known_ret)
4383 {
4384 if (x == known_x && mode == known_mode)
4385 return known_ret;
4386
4387 /* Try to find identical subexpressions. If found call
4388 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4389 the precomputed value for the subexpression as KNOWN_RET. */
4390
4391 if (ARITHMETIC_P (x))
4392 {
4393 rtx x0 = XEXP (x, 0);
4394 rtx x1 = XEXP (x, 1);
4395
4396 /* Check the first level. */
4397 if (x0 == x1)
4398 return
4399 num_sign_bit_copies1 (x, mode, x0, mode,
4400 cached_num_sign_bit_copies (x0, mode, known_x,
4401 known_mode,
4402 known_ret));
4403
4404 /* Check the second level. */
4405 if (ARITHMETIC_P (x0)
4406 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4407 return
4408 num_sign_bit_copies1 (x, mode, x1, mode,
4409 cached_num_sign_bit_copies (x1, mode, known_x,
4410 known_mode,
4411 known_ret));
4412
4413 if (ARITHMETIC_P (x1)
4414 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4415 return
4416 num_sign_bit_copies1 (x, mode, x0, mode,
4417 cached_num_sign_bit_copies (x0, mode, known_x,
4418 known_mode,
4419 known_ret));
4420 }
4421
4422 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4423 }
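/* For instance, for X == (plus (reg A) (reg A)) the two operands are the
   same rtx, so the wrapper computes the count for (reg A) once and hands
   it down as KNOWN_X/KNOWN_RET instead of letting num_sign_bit_copies1
   recurse into both operands separately.  */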
4424
4425 /* Return the number of bits at the high-order end of X that are known to
4426 be equal to the sign bit. X will be used in mode MODE; if MODE is
4427 VOIDmode, X will be used in its own mode. The returned value will always
4428 be between 1 and the number of bits in MODE. */
4429
4430 static unsigned int
4431 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4432 enum machine_mode known_mode,
4433 unsigned int known_ret)
4434 {
4435 enum rtx_code code = GET_CODE (x);
4436 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4437 int num0, num1, result;
4438 unsigned HOST_WIDE_INT nonzero;
4439
4440 /* If we weren't given a mode, use the mode of X. If the mode is still
4441 VOIDmode, we don't know anything. Likewise if one of the modes is
4442 floating-point. */
4443
4444 if (mode == VOIDmode)
4445 mode = GET_MODE (x);
4446
4447 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4448 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4449 return 1;
4450
4451 /* For a smaller object, just ignore the high bits. */
4452 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4453 {
4454 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4455 known_x, known_mode, known_ret);
4456 return MAX (1,
4457 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4458 }
4459
4460 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4461 {
4462 #ifndef WORD_REGISTER_OPERATIONS
4463 /* If this machine does not do all register operations on the entire
4464 register and MODE is wider than the mode of X, we can say nothing
4465 at all about the high-order bits. */
4466 return 1;
4467 #else
4468 /* Likewise on machines that do, if the mode of the object is smaller
4469 than a word and loads of that size don't sign extend, we can say
4470 nothing about the high order bits. */
4471 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4472 #ifdef LOAD_EXTEND_OP
4473 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4474 #endif
4475 )
4476 return 1;
4477 #endif
4478 }
4479
4480 switch (code)
4481 {
4482 case REG:
4483
4484 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4485 /* If pointers extend signed and this is a pointer in Pmode, say that
4486 all the bits above ptr_mode are known to be sign bit copies. */
4487 /* As we do not know which address space the pointer is referring to,
4488 we can do this only if the target does not support different pointer
4489 or address modes depending on the address space. */
4490 if (target_default_pointer_address_modes_p ()
4491 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4492 && mode == Pmode && REG_POINTER (x))
4493 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4494 #endif
4495
4496 {
4497 unsigned int copies_for_hook = 1, copies = 1;
4498 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4499 known_mode, known_ret,
4500 &copies_for_hook);
4501
4502 if (new_rtx)
4503 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4504 known_mode, known_ret);
4505
4506 if (copies > 1 || copies_for_hook > 1)
4507 return MAX (copies, copies_for_hook);
4508
4509 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4510 }
4511 break;
4512
4513 case MEM:
4514 #ifdef LOAD_EXTEND_OP
4515 /* Some RISC machines sign-extend all loads of smaller than a word. */
4516 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4517 return MAX (1, ((int) bitwidth
4518 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4519 #endif
4520 break;
4521
4522 case CONST_INT:
4523 /* If the constant is negative, take its 1's complement and remask.
4524 Then see how many zero bits we have. */
4525 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4526 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4527 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4528 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4529
4530 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
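/* Worked example: for (const_int -4) in an 8-bit mode, NONZERO is 0xfc;
   the sign bit is set, so it is complemented to 0x03 and the result is
   8 - floor_log2 (3) - 1 == 6, matching the six leading 1-bits of
   11111100.  */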
4531
4532 case SUBREG:
4533 /* If this is a SUBREG for a promoted object that is sign-extended
4534 and we are looking at it in a wider mode, we know that at least the
4535 high-order bits are known to be sign bit copies. */
4536
4537 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4538 {
4539 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4540 known_x, known_mode, known_ret);
4541 return MAX ((int) bitwidth
4542 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4543 num0);
4544 }
4545
4546 /* For a smaller object, just ignore the high bits. */
4547 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4548 {
4549 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4550 known_x, known_mode, known_ret);
4551 return MAX (1, (num0
4552 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4553 - bitwidth)));
4554 }
4555
4556 #ifdef WORD_REGISTER_OPERATIONS
4557 #ifdef LOAD_EXTEND_OP
4558 /* For paradoxical SUBREGs on machines where all register operations
4559 affect the entire register, just look inside. Note that we are
4560 passing MODE to the recursive call, so the number of sign bit copies
4561 will remain relative to that mode, not the inner mode. */
4562
4563 /* This works only if loads sign extend. Otherwise, if we get a
4564 reload for the inner part, it may be loaded from the stack, and
4565 then we lose all sign bit copies that existed before the store
4566 to the stack. */
4567
4568 if (paradoxical_subreg_p (x)
4569 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4570 && MEM_P (SUBREG_REG (x)))
4571 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4572 known_x, known_mode, known_ret);
4573 #endif
4574 #endif
4575 break;
4576
4577 case SIGN_EXTRACT:
4578 if (CONST_INT_P (XEXP (x, 1)))
4579 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4580 break;
4581
4582 case SIGN_EXTEND:
4583 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4584 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4585 known_x, known_mode, known_ret));
4586
4587 case TRUNCATE:
4588 /* For a smaller object, just ignore the high bits. */
4589 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4590 known_x, known_mode, known_ret);
4591 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4592 - bitwidth)));
4593
4594 case NOT:
4595 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4596 known_x, known_mode, known_ret);
4597
4598 case ROTATE: case ROTATERT:
4599 /* If we are rotating left by a number of bits less than the number
4600 of sign bit copies, we can just subtract that amount from the
4601 number. */
4602 if (CONST_INT_P (XEXP (x, 1))
4603 && INTVAL (XEXP (x, 1)) >= 0
4604 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4605 {
4606 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4607 known_x, known_mode, known_ret);
4608 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4609 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4610 }
4611 break;
4612
4613 case NEG:
4614 /* In general, this subtracts one sign bit copy. But if the value
4615 is known to be positive, the number of sign bit copies is the
4616 same as that of the input. Finally, if the input has just one bit
4617 that might be nonzero, all the bits are copies of the sign bit. */
4618 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4619 known_x, known_mode, known_ret);
4620 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4621 return num0 > 1 ? num0 - 1 : 1;
4622
4623 nonzero = nonzero_bits (XEXP (x, 0), mode);
4624 if (nonzero == 1)
4625 return bitwidth;
4626
4627 if (num0 > 1
4628 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4629 num0--;
4630
4631 return num0;
4632
4633 case IOR: case AND: case XOR:
4634 case SMIN: case SMAX: case UMIN: case UMAX:
4635 /* Logical operations will preserve the number of sign-bit copies.
4636 MIN and MAX operations always return one of the operands. */
4637 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4638 known_x, known_mode, known_ret);
4639 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4640 known_x, known_mode, known_ret);
4641
4642 /* If num1 is clearing some of the top bits then regardless of
4643 the other term, we are guaranteed to have at least that many
4644 high-order zero bits. */
4645 if (code == AND
4646 && num1 > 1
4647 && bitwidth <= HOST_BITS_PER_WIDE_INT
4648 && CONST_INT_P (XEXP (x, 1))
4649 && (UINTVAL (XEXP (x, 1))
4650 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4651 return num1;
4652
4653 /* Similarly for IOR when setting high-order bits. */
4654 if (code == IOR
4655 && num1 > 1
4656 && bitwidth <= HOST_BITS_PER_WIDE_INT
4657 && CONST_INT_P (XEXP (x, 1))
4658 && (UINTVAL (XEXP (x, 1))
4659 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4660 return num1;
4661
4662 return MIN (num0, num1);
4663
4664 case PLUS: case MINUS:
4665 /* For addition and subtraction, we can have a 1-bit carry. However,
4666 if we are subtracting 1 from a positive number, there will not
4667 be such a carry. Furthermore, if the positive number is known to
4668 be 0 or 1, we know the result is either -1 or 0. */
4669
4670 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4671 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4672 {
4673 nonzero = nonzero_bits (XEXP (x, 0), mode);
4674 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4675 return (nonzero == 1 || nonzero == 0 ? bitwidth
4676 : bitwidth - floor_log2 (nonzero) - 1);
4677 }
4678
4679 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4680 known_x, known_mode, known_ret);
4681 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4682 known_x, known_mode, known_ret);
4683 result = MAX (1, MIN (num0, num1) - 1);
4684
4685 return result;
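/* Worked example: in an 8-bit mode, two addends that each have four
   sign-bit copies lie in [-16, 15], so their sum lies in [-32, 30] and
   is still guaranteed MIN (4, 4) - 1 == 3 sign-bit copies.  */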
4686
4687 case MULT:
4688 /* The number of bits of the product is the sum of the number of
4689 bits of both terms. However, unless one of the terms is known
4690 to be positive, we must allow for an additional bit since negating
4691 a negative number can remove one sign bit copy. */
4692
4693 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4694 known_x, known_mode, known_ret);
4695 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4696 known_x, known_mode, known_ret);
4697
4698 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4699 if (result > 0
4700 && (bitwidth > HOST_BITS_PER_WIDE_INT
4701 || (((nonzero_bits (XEXP (x, 0), mode)
4702 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4703 && ((nonzero_bits (XEXP (x, 1), mode)
4704 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4705 != 0))))
4706 result--;
4707
4708 return MAX (1, result);
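/* Worked example: in an 8-bit mode, two factors with five sign-bit copies
   each lie in [-8, 7].  RESULT starts as 8 - 3 - 3 == 2 and, because both
   factors may be negative, is decremented to 1; indeed (-8) * (-8) == 64
   == 01000000 has only one sign-bit copy.  */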
4709
4710 case UDIV:
4711 /* The result must be <= the first operand. If the first operand
4712 has the high bit set, we know nothing about the number of sign
4713 bit copies. */
4714 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4715 return 1;
4716 else if ((nonzero_bits (XEXP (x, 0), mode)
4717 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4718 return 1;
4719 else
4720 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4721 known_x, known_mode, known_ret);
4722
4723 case UMOD:
4724 /* The result must be <= the second operand. If the second operand
4725 has (or just might have) the high bit set, we know nothing about
4726 the number of sign bit copies. */
4727 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4728 return 1;
4729 else if ((nonzero_bits (XEXP (x, 1), mode)
4730 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4731 return 1;
4732 else
4733 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4734 known_x, known_mode, known_ret);
4735
4736 case DIV:
4737 /* Similar to unsigned division, except that we have to worry about
4738 the case where the divisor is negative, in which case we have
4739 to add 1. */
4740 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4741 known_x, known_mode, known_ret);
4742 if (result > 1
4743 && (bitwidth > HOST_BITS_PER_WIDE_INT
4744 || (nonzero_bits (XEXP (x, 1), mode)
4745 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4746 result--;
4747
4748 return result;
4749
4750 case MOD:
4751 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4752 known_x, known_mode, known_ret);
4753 if (result > 1
4754 && (bitwidth > HOST_BITS_PER_WIDE_INT
4755 || (nonzero_bits (XEXP (x, 1), mode)
4756 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4757 result--;
4758
4759 return result;
4760
4761 case ASHIFTRT:
4762 /* An arithmetic right shift by a constant adds the shift count to
4763 the number of bits that are equal to the sign bit. */
4764 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4765 known_x, known_mode, known_ret);
4766 if (CONST_INT_P (XEXP (x, 1))
4767 && INTVAL (XEXP (x, 1)) > 0
4768 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4769 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4770
4771 return num0;
4772
4773 case ASHIFT:
4774 /* Left shifts destroy copies. */
4775 if (!CONST_INT_P (XEXP (x, 1))
4776 || INTVAL (XEXP (x, 1)) < 0
4777 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4778 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4779 return 1;
4780
4781 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4782 known_x, known_mode, known_ret);
4783 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4784
4785 case IF_THEN_ELSE:
4786 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4787 known_x, known_mode, known_ret);
4788 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4789 known_x, known_mode, known_ret);
4790 return MIN (num0, num1);
4791
4792 case EQ: case NE: case GE: case GT: case LE: case LT:
4793 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4794 case GEU: case GTU: case LEU: case LTU:
4795 case UNORDERED: case ORDERED:
4796 /* STORE_FLAG_VALUE is a constant: if it is negative, take its 1's
4797 complement and remask. Then see how many zero bits we have. */
4798 nonzero = STORE_FLAG_VALUE;
4799 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4800 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4801 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4802
4803 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4804
4805 default:
4806 break;
4807 }
4808
4809 /* If we haven't been able to figure it out by one of the above rules,
4810 see if some of the high-order bits are known to be zero. If so,
4811 count those bits and return one less than that amount. If we can't
4812 safely compute the mask for this mode, always return BITWIDTH. */
4813
4814 bitwidth = GET_MODE_PRECISION (mode);
4815 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4816 return 1;
4817
4818 nonzero = nonzero_bits (x, mode);
4819 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4820 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4821 }
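/* Illustrative sketch only (not from the original source): num_sign_bit_copies
   can be used to test whether a wide value is really a sign-extended narrow
   one.  The helper name is hypothetical and assumes a 32-bit SImode.  */
#if 0
static bool
si_value_is_sign_extended_qi_p (const_rtx x)
{
  /* A sign-extended QImode value seen in SImode has at least
     32 - 8 + 1 == 25 copies of the sign bit.  */
  return num_sign_bit_copies (x, SImode) >= 25;
}
#endif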
4822
4823 /* Calculate the rtx_cost of a single instruction. A return value of
4824 zero indicates an instruction pattern without a known cost. */
4825
4826 int
4827 insn_rtx_cost (rtx pat, bool speed)
4828 {
4829 int i, cost;
4830 rtx set;
4831
4832 /* Extract the single set rtx from the instruction pattern.
4833 We can't use single_set since we only have the pattern. */
4834 if (GET_CODE (pat) == SET)
4835 set = pat;
4836 else if (GET_CODE (pat) == PARALLEL)
4837 {
4838 set = NULL_RTX;
4839 for (i = 0; i < XVECLEN (pat, 0); i++)
4840 {
4841 rtx x = XVECEXP (pat, 0, i);
4842 if (GET_CODE (x) == SET)
4843 {
4844 if (set)
4845 return 0;
4846 set = x;
4847 }
4848 }
4849 if (!set)
4850 return 0;
4851 }
4852 else
4853 return 0;
4854
4855 cost = set_src_cost (SET_SRC (set), speed);
4856 return cost > 0 ? cost : COSTS_N_INSNS (1);
4857 }
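/* Illustrative sketch only (not from the original source): a typical caller
   compares the cost of an existing insn against a candidate replacement.
   The function and variable names are hypothetical.  */
#if 0
static bool
replacement_profitable_p (rtx insn, rtx new_pat, bool speed)
{
  int old_cost = insn_rtx_cost (PATTERN (insn), speed);
  int new_cost = insn_rtx_cost (new_pat, speed);

  /* A return value of zero means the cost is unknown, so substitute only
     when both costs are known and the replacement is no more expensive.  */
  return old_cost != 0 && new_cost != 0 && new_cost <= old_cost;
}
#endif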
4858
4859 /* Given an insn INSN and condition COND, return the condition in a
4860 canonical form to simplify testing by callers. Specifically:
4861
4862 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4863 (2) Both operands will be machine operands; (cc0) will have been replaced.
4864 (3) If an operand is a constant, it will be the second operand.
4865 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4866 for GE, GEU, and LEU.
4867
4868 If the condition cannot be understood, or is an inequality floating-point
4869 comparison which needs to be reversed, 0 will be returned.
4870
4871 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4872
4873 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4874 insn used in locating the condition was found. If a replacement test
4875 of the condition is desired, it should be placed in front of that
4876 insn and we will be sure that the inputs are still valid.
4877
4878 If WANT_REG is nonzero, we wish the condition to be relative to that
4879 register, if possible. Therefore, do not canonicalize the condition
4880 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4881 to be a compare to a CC mode register.
4882
4883 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4884 and at INSN. */
4885
4886 rtx
4887 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4888 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4889 {
4890 enum rtx_code code;
4891 rtx prev = insn;
4892 const_rtx set;
4893 rtx tem;
4894 rtx op0, op1;
4895 int reverse_code = 0;
4896 enum machine_mode mode;
4897 basic_block bb = BLOCK_FOR_INSN (insn);
4898
4899 code = GET_CODE (cond);
4900 mode = GET_MODE (cond);
4901 op0 = XEXP (cond, 0);
4902 op1 = XEXP (cond, 1);
4903
4904 if (reverse)
4905 code = reversed_comparison_code (cond, insn);
4906 if (code == UNKNOWN)
4907 return 0;
4908
4909 if (earliest)
4910 *earliest = insn;
4911
4912 /* If we are comparing a register with zero, see if the register is set
4913 in the previous insn to a COMPARE or a comparison operation. Perform
4914 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4915 in cse.c */
4916
4917 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4918 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4919 && op1 == CONST0_RTX (GET_MODE (op0))
4920 && op0 != want_reg)
4921 {
4922 /* Set nonzero when we find something of interest. */
4923 rtx x = 0;
4924
4925 #ifdef HAVE_cc0
4926 /* If comparison with cc0, import actual comparison from compare
4927 insn. */
4928 if (op0 == cc0_rtx)
4929 {
4930 if ((prev = prev_nonnote_insn (prev)) == 0
4931 || !NONJUMP_INSN_P (prev)
4932 || (set = single_set (prev)) == 0
4933 || SET_DEST (set) != cc0_rtx)
4934 return 0;
4935
4936 op0 = SET_SRC (set);
4937 op1 = CONST0_RTX (GET_MODE (op0));
4938 if (earliest)
4939 *earliest = prev;
4940 }
4941 #endif
4942
4943 /* If this is a COMPARE, pick up the two things being compared. */
4944 if (GET_CODE (op0) == COMPARE)
4945 {
4946 op1 = XEXP (op0, 1);
4947 op0 = XEXP (op0, 0);
4948 continue;
4949 }
4950 else if (!REG_P (op0))
4951 break;
4952
4953 /* Go back to the previous insn. Stop if it is not an INSN. We also
4954 stop if it isn't a single set or if it has a REG_INC note because
4955 we don't want to bother dealing with it. */
4956
4957 prev = prev_nonnote_nondebug_insn (prev);
4958
4959 if (prev == 0
4960 || !NONJUMP_INSN_P (prev)
4961 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4962 /* In cfglayout mode, there do not have to be labels at the
4963 beginning of a block, or jumps at the end, so the previous
4964 conditions would not stop us when we reach bb boundary. */
4965 || BLOCK_FOR_INSN (prev) != bb)
4966 break;
4967
4968 set = set_of (op0, prev);
4969
4970 if (set
4971 && (GET_CODE (set) != SET
4972 || !rtx_equal_p (SET_DEST (set), op0)))
4973 break;
4974
4975 /* If this is setting OP0, get what it sets it to if it looks
4976 relevant. */
4977 if (set)
4978 {
4979 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4980 #ifdef FLOAT_STORE_FLAG_VALUE
4981 REAL_VALUE_TYPE fsfv;
4982 #endif
4983
4984 /* ??? We may not combine comparisons done in a CCmode with
4985 comparisons not done in a CCmode. This is to aid targets
4986 like Alpha that have an IEEE compliant EQ instruction, and
4987 a non-IEEE compliant BEQ instruction. The use of CCmode is
4988 actually artificial, simply to prevent the combination, but
4989 should not affect other platforms.
4990
4991 However, we must allow VOIDmode comparisons to match either
4992 CCmode or non-CCmode comparison, because some ports have
4993 modeless comparisons inside branch patterns.
4994
4995 ??? This mode check should perhaps look more like the mode check
4996 in simplify_comparison in combine. */
4997
4998 if ((GET_CODE (SET_SRC (set)) == COMPARE
4999 || (((code == NE
5000 || (code == LT
5001 && val_signbit_known_set_p (inner_mode,
5002 STORE_FLAG_VALUE))
5003 #ifdef FLOAT_STORE_FLAG_VALUE
5004 || (code == LT
5005 && SCALAR_FLOAT_MODE_P (inner_mode)
5006 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5007 REAL_VALUE_NEGATIVE (fsfv)))
5008 #endif
5009 ))
5010 && COMPARISON_P (SET_SRC (set))))
5011 && (((GET_MODE_CLASS (mode) == MODE_CC)
5012 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5013 || mode == VOIDmode || inner_mode == VOIDmode))
5014 x = SET_SRC (set);
5015 else if (((code == EQ
5016 || (code == GE
5017 && val_signbit_known_set_p (inner_mode,
5018 STORE_FLAG_VALUE))
5019 #ifdef FLOAT_STORE_FLAG_VALUE
5020 || (code == GE
5021 && SCALAR_FLOAT_MODE_P (inner_mode)
5022 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5023 REAL_VALUE_NEGATIVE (fsfv)))
5024 #endif
5025 ))
5026 && COMPARISON_P (SET_SRC (set))
5027 && (((GET_MODE_CLASS (mode) == MODE_CC)
5028 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5029 || mode == VOIDmode || inner_mode == VOIDmode))
5030
5031 {
5032 reverse_code = 1;
5033 x = SET_SRC (set);
5034 }
5035 else
5036 break;
5037 }
5038
5039 else if (reg_set_p (op0, prev))
5040 /* If this sets OP0, but not directly, we have to give up. */
5041 break;
5042
5043 if (x)
5044 {
5045 /* If the caller is expecting the condition to be valid at INSN,
5046 make sure X doesn't change before INSN. */
5047 if (valid_at_insn_p)
5048 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5049 break;
5050 if (COMPARISON_P (x))
5051 code = GET_CODE (x);
5052 if (reverse_code)
5053 {
5054 code = reversed_comparison_code (x, prev);
5055 if (code == UNKNOWN)
5056 return 0;
5057 reverse_code = 0;
5058 }
5059
5060 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5061 if (earliest)
5062 *earliest = prev;
5063 }
5064 }
5065
5066 /* If constant is first, put it last. */
5067 if (CONSTANT_P (op0))
5068 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5069
5070 /* If OP0 is the result of a comparison, we weren't able to find what
5071 was really being compared, so fail. */
5072 if (!allow_cc_mode
5073 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5074 return 0;
5075
5076 /* Canonicalize any ordered comparison with integers involving equality
5077 if we can do computations in the relevant mode and we do not
5078 overflow. */
5079
5080 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5081 && CONST_INT_P (op1)
5082 && GET_MODE (op0) != VOIDmode
5083 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5084 {
5085 HOST_WIDE_INT const_val = INTVAL (op1);
5086 unsigned HOST_WIDE_INT uconst_val = const_val;
5087 unsigned HOST_WIDE_INT max_val
5088 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5089
5090 switch (code)
5091 {
5092 case LE:
5093 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5094 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5095 break;
5096
5097 /* When cross-compiling, const_val might be sign-extended from
5098 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
5099 case GE:
5100 if ((const_val & max_val)
5101 != ((unsigned HOST_WIDE_INT) 1
5102 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5103 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5104 break;
5105
5106 case LEU:
5107 if (uconst_val < max_val)
5108 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5109 break;
5110
5111 case GEU:
5112 if (uconst_val != 0)
5113 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5114 break;
5115
5116 default:
5117 break;
5118 }
5119 }
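/* Worked example: (le (reg:SI R) (const_int 4)) becomes
   (lt (reg:SI R) (const_int 5)) and (geu (reg:SI R) (const_int 7)) becomes
   (gtu (reg:SI R) (const_int 6)).  The guards above leave the comparison
   alone when adding or subtracting one would wrap, e.g.
   (leu X (const_int -1)) in SImode is not touched.  */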
5120
5121 /* Never return CC0; return zero instead. */
5122 if (CC0_P (op0))
5123 return 0;
5124
5125 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5126 }
5127
5128 /* Given a jump insn JUMP, return the condition that will cause it to branch
5129 to its JUMP_LABEL. If the condition cannot be understood, or is an
5130 inequality floating-point comparison which needs to be reversed, 0 will
5131 be returned.
5132
5133 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5134 insn used in locating the condition was found. If a replacement test
5135 of the condition is desired, it should be placed in front of that
5136 insn and we will be sure that the inputs are still valid. If EARLIEST
5137 is null, the returned condition will be valid at INSN.
5138
5139 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5140 compare CC mode register.
5141
5142 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5143
5144 rtx
5145 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
5146 {
5147 rtx cond;
5148 int reverse;
5149 rtx set;
5150
5151 /* If this is not a standard conditional jump, we can't parse it. */
5152 if (!JUMP_P (jump)
5153 || ! any_condjump_p (jump))
5154 return 0;
5155 set = pc_set (jump);
5156
5157 cond = XEXP (SET_SRC (set), 0);
5158
5159 /* If this branches to JUMP_LABEL when the condition is false, reverse
5160 the condition. */
5161 reverse
5162 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5163 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
5164
5165 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5166 allow_cc_mode, valid_at_insn_p);
5167 }
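/* Illustrative sketch only (not from the original source): extracting the
   branch condition of a conditional jump.  The function name is
   hypothetical.  */
#if 0
static rtx
branch_condition (rtx jump_insn)
{
  rtx earliest;
  /* Ask for a condition that is valid at JUMP_INSN itself and do not
     accept a bare CC-mode compare.  */
  rtx cond = get_condition (jump_insn, &earliest, 0, 1);

  /* COND is now e.g. (lt (reg:SI 60) (const_int 10)), or NULL_RTX if the
     jump could not be parsed.  */
  return cond;
}
#endif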
5168
5169 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5170 TARGET_MODE_REP_EXTENDED.
5171
5172 Note that we assume that the property of
5173 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5174 narrower than mode B. I.e., if A is a mode narrower than B then in
5175 order to be able to operate on it in mode B, mode A needs to
5176 satisfy the requirements set by the representation of mode B. */
5177
5178 static void
5179 init_num_sign_bit_copies_in_rep (void)
5180 {
5181 enum machine_mode mode, in_mode;
5182
5183 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5184 in_mode = GET_MODE_WIDER_MODE (mode))
5185 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5186 mode = GET_MODE_WIDER_MODE (mode))
5187 {
5188 enum machine_mode i;
5189
5190 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5191 extends to the next widest mode. */
5192 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5193 || GET_MODE_WIDER_MODE (mode) == in_mode);
5194
5195 /* We are in in_mode. Count how many bits outside of mode
5196 have to be copies of the sign-bit. */
5197 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5198 {
5199 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
5200
5201 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5202 /* We can only check sign-bit copies starting from the
5203 top-bit. In order to be able to check the bits we
5204 have already seen we pretend that subsequent bits
5205 have to be sign-bit copies too. */
5206 || num_sign_bit_copies_in_rep [in_mode][mode])
5207 num_sign_bit_copies_in_rep [in_mode][mode]
5208 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5209 }
5210 }
5211 }
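/* For example, on a target whose TARGET_MODE_REP_EXTENDED hook reports that
   SImode values are kept sign-extended in DImode registers (as MIPS64-style
   targets do), the loops above record
   num_sign_bit_copies_in_rep[DImode][SImode] == 32.  */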
5212
5213 /* Suppose that truncation from the machine mode of X to MODE is not a
5214 no-op. See if there is anything special about X so that we can
5215 assume it already contains a truncated value of MODE. */
5216
5217 bool
5218 truncated_to_mode (enum machine_mode mode, const_rtx x)
5219 {
5220 /* This register has already been used in MODE without explicit
5221 truncation. */
5222 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5223 return true;
5224
5225 /* See if we already satisfy the requirements of MODE. If yes we
5226 can just switch to MODE. */
5227 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5228 && (num_sign_bit_copies (x, GET_MODE (x))
5229 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5230 return true;
5231
5232 return false;
5233 }
5234 \f
5235 /* Initialize non_rtx_starting_operands, which is used to speed up
5236 for_each_rtx. */
5237 void
5238 init_rtlanal (void)
5239 {
5240 int i;
5241 for (i = 0; i < NUM_RTX_CODE; i++)
5242 {
5243 const char *format = GET_RTX_FORMAT (i);
5244 const char *first = strpbrk (format, "eEV");
5245 non_rtx_starting_operands[i] = first ? first - format : -1;
5246 }
5247
5248 init_num_sign_bit_copies_in_rep ();
5249 }
5250 \f
5251 /* Check whether this is a constant pool constant. */
5252 bool
5253 constant_pool_constant_p (rtx x)
5254 {
5255 x = avoid_constant_pool_reference (x);
5256 return CONST_DOUBLE_P (x);
5257 }
5258 \f
5259 /* If M is a bitmask that selects a field of low-order bits within an item but
5260 not the entire word, return the length of the field. Return -1 otherwise.
5261 M is used in machine mode MODE. */
5262
5263 int
5264 low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
5265 {
5266 if (mode != VOIDmode)
5267 {
5268 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5269 return -1;
5270 m &= GET_MODE_MASK (mode);
5271 }
5272
5273 return exact_log2 (m + 1);
5274 }
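/* For example, low_bitmask_len (SImode, 0x3f) returns 6, because 0x3f
   selects exactly the six low-order bits, while low_bitmask_len (SImode,
   0x30) returns -1, because 0x30 does not start at bit 0.  */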
5275
5276 /* Return the mode of MEM's address. */
5277
5278 enum machine_mode
5279 get_address_mode (rtx mem)
5280 {
5281 enum machine_mode mode;
5282
5283 gcc_assert (MEM_P (mem));
5284 mode = GET_MODE (XEXP (mem, 0));
5285 if (mode != VOIDmode)
5286 return mode;
5287 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5288 }
5289 \f
5290 /* Split up a CONST_DOUBLE or integer constant rtx
5291 into two rtx's for single words,
5292 storing in *FIRST the word that comes first in memory in the target
5293 and in *SECOND the other. */
5294
5295 void
5296 split_double (rtx value, rtx *first, rtx *second)
5297 {
5298 if (CONST_INT_P (value))
5299 {
5300 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5301 {
5302 /* In this case the CONST_INT holds both target words.
5303 Extract the bits from it into two word-sized pieces.
5304 Sign extend each half to HOST_WIDE_INT. */
5305 unsigned HOST_WIDE_INT low, high;
5306 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5307 unsigned bits_per_word = BITS_PER_WORD;
5308
5309 /* Set sign_bit to the most significant bit of a word. */
5310 sign_bit = 1;
5311 sign_bit <<= bits_per_word - 1;
5312
5313 /* Set mask so that all bits of the word are set. We could
5314 have used 1 << BITS_PER_WORD instead of basing the
5315 calculation on sign_bit. However, on machines where
5316 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5317 compiler warning, even though the code would never be
5318 executed. */
5319 mask = sign_bit << 1;
5320 mask--;
5321
5322 /* Set sign_extend as any remaining bits. */
5323 sign_extend = ~mask;
5324
5325 /* Pick the lower word and sign-extend it. */
5326 low = INTVAL (value);
5327 low &= mask;
5328 if (low & sign_bit)
5329 low |= sign_extend;
5330
5331 /* Pick the higher word, shifted to the least significant
5332 bits, and sign-extend it. */
5333 high = INTVAL (value);
5334 high >>= bits_per_word - 1;
5335 high >>= 1;
5336 high &= mask;
5337 if (high & sign_bit)
5338 high |= sign_extend;
5339
5340 /* Store the words in the target machine order. */
5341 if (WORDS_BIG_ENDIAN)
5342 {
5343 *first = GEN_INT (high);
5344 *second = GEN_INT (low);
5345 }
5346 else
5347 {
5348 *first = GEN_INT (low);
5349 *second = GEN_INT (high);
5350 }
5351 }
5352 else
5353 {
5354 /* The rule for using CONST_INT for a wider mode
5355 is that we regard the value as signed.
5356 So sign-extend it. */
5357 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5358 if (WORDS_BIG_ENDIAN)
5359 {
5360 *first = high;
5361 *second = value;
5362 }
5363 else
5364 {
5365 *first = value;
5366 *second = high;
5367 }
5368 }
5369 }
5370 else if (!CONST_DOUBLE_P (value))
5371 {
5372 if (WORDS_BIG_ENDIAN)
5373 {
5374 *first = const0_rtx;
5375 *second = value;
5376 }
5377 else
5378 {
5379 *first = value;
5380 *second = const0_rtx;
5381 }
5382 }
5383 else if (GET_MODE (value) == VOIDmode
5384 /* This is the old way we did CONST_DOUBLE integers. */
5385 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5386 {
5387 /* In an integer, the words are defined as most and least significant.
5388 So order them by the target's convention. */
5389 if (WORDS_BIG_ENDIAN)
5390 {
5391 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5392 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5393 }
5394 else
5395 {
5396 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5397 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5398 }
5399 }
5400 else
5401 {
5402 REAL_VALUE_TYPE r;
5403 long l[2];
5404 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5405
5406 /* Note, this converts the REAL_VALUE_TYPE to the target's
5407 format, splits up the floating point double and outputs
5408 exactly 32 bits of it into each of l[0] and l[1] --
5409 not necessarily BITS_PER_WORD bits. */
5410 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5411
5412 /* If 32 bits is an entire word for the target, but not for the host,
5413 then sign-extend on the host so that the number will look the same
5414 way on the host that it would on the target. See for instance
5415 simplify_unary_operation. The #if is needed to avoid compiler
5416 warnings. */
5417
5418 #if HOST_BITS_PER_LONG > 32
5419 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5420 {
5421 if (l[0] & ((long) 1 << 31))
5422 l[0] |= ((long) (-1) << 32);
5423 if (l[1] & ((long) 1 << 31))
5424 l[1] |= ((long) (-1) << 32);
5425 }
5426 #endif
5427
5428 *first = GEN_INT (l[0]);
5429 *second = GEN_INT (l[1]);
5430 }
5431 }
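/* Worked example: on a little-endian target with 32-bit words and a 64-bit
   HOST_WIDE_INT, split_double on (const_int 0x123456789) stores
   (const_int 0x23456789) in *FIRST and (const_int 1) in *SECOND; on a
   big-endian target the two words are swapped.  */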
5432