/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "target.h"
#include "output.h"
#include "tm_p.h"
#include "flags.h"
#include "regs.h"
#include "function.h"
#include "df.h"
#include "tree.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
                                                enum machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
                                          enum machine_mode, unsigned int);

/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
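
/* Illustrative note (a sketch, not taken from a particular target): on a
   hypothetical machine whose TARGET_MODE_REP_EXTENDED (QImode, SImode) is
   SIGN_EXTEND, switching an SImode value to QImode is safe only when the
   high GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (QImode) + 1 = 25 bits
   are copies of the sign bit, so num_sign_bit_copies_in_rep[SImode][QImode]
   would hold 25.  */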
\f
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}

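/* For example, a load from read-only memory such as
   (mem/u:SI (symbol_ref:SI ("x"))) is stable, while (mem:SI (reg:SI 100))
   is unstable: both the register and the location it addresses may change
   between two points in the function.  (Illustrative RTL, not taken from a
   particular dump.)  */
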
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  if (STRICT_ALIGNMENT
      && unaligned_mems
      && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;
#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer,
           with the proper remainder modulo the mode size if we are
           considering unaligned memory references.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}

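/* For instance, (plus:SI (reg:SI sp) (const_int 8)) cannot trap (the stack
   pointer case above), while a weak symbol's address can, since the symbol
   may resolve to address zero.  A hypothetical caller, given a MEM rtx:

     if (rtx_addr_can_trap_p (XEXP (mem, 0)))
       ...the load may fault and must not be speculated...  */
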
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
\f
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
\f
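/* For example, for a call insn whose pattern is
   (set (reg:SI 0) (call (mem:QI (symbol_ref ("foo"))) (const_int 0)))
   this returns the inner CALL rtx; for a pattern that contains no CALL in
   that position it returns NULL_RTX.  (Illustrative RTL only.)  */
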
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
\f
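/* For example, with X = (const (plus (symbol_ref ("s")) (const_int 12))),
   get_integer_term returns 12 and get_related_value returns the
   (symbol_ref ("s")); a MINUS negates the term, so
   (const (minus (symbol_ref ("s")) (const_int 4))) yields -4.
   (Illustrative RTL only.)  */
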
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
\f
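/* A minimal usage sketch, assuming rtx X comes from a caller:

     rtx base, offset;
     split_const (x, &base, &offset);
     HOST_WIDE_INT off = INTVAL (offset);

   For X = (const (plus (symbol_ref ("s")) (const_int 16))) this leaves
   (symbol_ref ("s")) in BASE and 16 in OFF; for a plain REG it leaves the
   REG itself and zero.  */
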
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}

\f
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_UNSIGNED_P (op))
    return true;

  return false;
}

\f
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }
  return 0;
}
\f
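/* For example, with IN = (set (reg:SI 101) (plus:SI (reg:SI 100)
   (const_int 1))), reg_mentioned_p returns 1 both for (reg:SI 100) and for
   the destination (reg:SI 101); unlike reg_referenced_p below, a plain
   store counts as a mention.  (Illustrative RTL only.)  */
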
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
\f
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
\f
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;
      break;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;
      break;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
\f
/* Helper function for set_of.  */
struct set_of_data
{
  const_rtx found;
  const_rtx pat;
};

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    IOR_HARD_REG_SET (*pset, call_used_reg_set);
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* A for_each_rtx subroutine of record_hard_reg_uses.  */
static int
record_hard_reg_uses_1 (rtx *px, void *data)
{
  rtx x = *px;
  HARD_REG_SET *pused = (HARD_REG_SET *)data;

  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
      while (nregs-- > 0)
        SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
    }
  return 0;
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  for_each_rtx (px, record_hard_reg_uses_1, data);
}
\f
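/* A minimal usage sketch for the above (INSN is an assumed post-reload
   insn from a caller):

     HARD_REG_SET written;
     find_all_hard_reg_sets (insn, &written);
     if (TEST_HARD_REG_BIT (written, 0))
       ...hard register 0 is overwritten by INSN...

   For a CALL_P insn the computed set also includes every call-used
   register.  */
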
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose outputs
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common case
                 only a single set is present in the pattern, so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach a set the first time, we just expect it to be
                 the single set we are looking for; only when more
                 sets are found in the insn do we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}

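/* For example, (parallel [(set (reg:SI 0) (reg:SI 1))
                           (clobber (reg:CC 17))])
   counts as a single set: the CLOBBER is ignored and the SET is returned.
   Two SETs whose results are both used make single_set return NULL_RTX.
   (Illustrative RTL only.)  */
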
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
\f
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
\f
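/* For example, (set (reg:SI 100) (reg:SI 100)) is a no-op, as is copying a
   MEM onto an identical MEM whose address has no side effects;
   (set (reg:SI 100) (reg:SI 101)) is not.  (Illustrative RTL only.)  */
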
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
\f

/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}
\f
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note are always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  We check IN here; the CONSTANT_P (x) test is folded
     into the default case of the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
\f
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         the first operand of each of which is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
\f
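/* A minimal sketch of a note_stores callback (the callback name and the
   counter are hypothetical):

     static void
     count_reg_stores (rtx dest, const_rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
         (*(int *) data)++;
     }

     int n = 0;
     note_stores (PATTERN (insn), count_reg_stores, &n);
*/
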
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
\f
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}

/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}

/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}

/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}

/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}

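/* For example, an insn (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
   carrying (expr_list:REG_EQUAL (const_int 42) ...) yields that note here,
   and find_constant_src below would then return (const_int 42).
   (Illustrative RTL only.)  */
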
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}

/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}

/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}

\f
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}

/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}

/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}

/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((enum machine_mode) kind,
                                       datum, REG_NOTES (insn));
}

/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}

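/* A minimal usage sketch (INSN, LABEL and PROB are assumed values from a
   caller):

     add_reg_note (insn, REG_LABEL_OPERAND, label);   ...uses an INSN_LIST
     add_int_reg_note (insn, REG_BR_PROB, prob);      ...uses an INT_LIST
*/
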
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}

/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}

/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = XEXP (temp, 1);
          else
            *listp = XEXP (temp, 1);

          return;
        }

      prev = temp;
      temp = XEXP (temp, 1);
    }
}
\f
2136 /* Nonzero if X contains any volatile instructions. These are instructions
2137    which may cause unpredictable machine state, and thus no
2138 instructions or register uses should be moved or combined across them.
2139 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2140
2141 int
2142 volatile_insn_p (const_rtx x)
2143 {
2144 const RTX_CODE code = GET_CODE (x);
2145 switch (code)
2146 {
2147 case LABEL_REF:
2148 case SYMBOL_REF:
2149 case CONST:
2150 CASE_CONST_ANY:
2151 case CC0:
2152 case PC:
2153 case REG:
2154 case SCRATCH:
2155 case CLOBBER:
2156 case ADDR_VEC:
2157 case ADDR_DIFF_VEC:
2158 case CALL:
2159 case MEM:
2160 return 0;
2161
2162 case UNSPEC_VOLATILE:
2163 return 1;
2164
2165 case ASM_INPUT:
2166 case ASM_OPERANDS:
2167 if (MEM_VOLATILE_P (x))
2168 return 1;
2169
2170 default:
2171 break;
2172 }
2173
2174 /* Recursively scan the operands of this expression. */
2175
2176 {
2177 const char *const fmt = GET_RTX_FORMAT (code);
2178 int i;
2179
2180 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2181 {
2182 if (fmt[i] == 'e')
2183 {
2184 if (volatile_insn_p (XEXP (x, i)))
2185 return 1;
2186 }
2187 else if (fmt[i] == 'E')
2188 {
2189 int j;
2190 for (j = 0; j < XVECLEN (x, i); j++)
2191 if (volatile_insn_p (XVECEXP (x, i, j)))
2192 return 1;
2193 }
2194 }
2195 }
2196 return 0;
2197 }
2198
2199 /* Nonzero if X contains any volatile memory references,
2200    UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions.  */
2201
2202 int
2203 volatile_refs_p (const_rtx x)
2204 {
2205 const RTX_CODE code = GET_CODE (x);
2206 switch (code)
2207 {
2208 case LABEL_REF:
2209 case SYMBOL_REF:
2210 case CONST:
2211 CASE_CONST_ANY:
2212 case CC0:
2213 case PC:
2214 case REG:
2215 case SCRATCH:
2216 case CLOBBER:
2217 case ADDR_VEC:
2218 case ADDR_DIFF_VEC:
2219 return 0;
2220
2221 case UNSPEC_VOLATILE:
2222 return 1;
2223
2224 case MEM:
2225 case ASM_INPUT:
2226 case ASM_OPERANDS:
2227 if (MEM_VOLATILE_P (x))
2228 return 1;
2229
2230 default:
2231 break;
2232 }
2233
2234 /* Recursively scan the operands of this expression. */
2235
2236 {
2237 const char *const fmt = GET_RTX_FORMAT (code);
2238 int i;
2239
2240 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2241 {
2242 if (fmt[i] == 'e')
2243 {
2244 if (volatile_refs_p (XEXP (x, i)))
2245 return 1;
2246 }
2247 else if (fmt[i] == 'E')
2248 {
2249 int j;
2250 for (j = 0; j < XVECLEN (x, i); j++)
2251 if (volatile_refs_p (XVECEXP (x, i, j)))
2252 return 1;
2253 }
2254 }
2255 }
2256 return 0;
2257 }
2258
2259 /* Similar to above, except that it also rejects register pre- and post-
2260 incrementing. */
2261
2262 int
2263 side_effects_p (const_rtx x)
2264 {
2265 const RTX_CODE code = GET_CODE (x);
2266 switch (code)
2267 {
2268 case LABEL_REF:
2269 case SYMBOL_REF:
2270 case CONST:
2271 CASE_CONST_ANY:
2272 case CC0:
2273 case PC:
2274 case REG:
2275 case SCRATCH:
2276 case ADDR_VEC:
2277 case ADDR_DIFF_VEC:
2278 case VAR_LOCATION:
2279 return 0;
2280
2281 case CLOBBER:
2282 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2283 when some combination can't be done. If we see one, don't think
2284 that we can simplify the expression. */
2285 return (GET_MODE (x) != VOIDmode);
2286
2287 case PRE_INC:
2288 case PRE_DEC:
2289 case POST_INC:
2290 case POST_DEC:
2291 case PRE_MODIFY:
2292 case POST_MODIFY:
2293 case CALL:
2294 case UNSPEC_VOLATILE:
2295 return 1;
2296
2297 case MEM:
2298 case ASM_INPUT:
2299 case ASM_OPERANDS:
2300 if (MEM_VOLATILE_P (x))
2301 return 1;
2302
2303 default:
2304 break;
2305 }
2306
2307 /* Recursively scan the operands of this expression. */
2308
2309 {
2310 const char *fmt = GET_RTX_FORMAT (code);
2311 int i;
2312
2313 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2314 {
2315 if (fmt[i] == 'e')
2316 {
2317 if (side_effects_p (XEXP (x, i)))
2318 return 1;
2319 }
2320 else if (fmt[i] == 'E')
2321 {
2322 int j;
2323 for (j = 0; j < XVECLEN (x, i); j++)
2324 if (side_effects_p (XVECEXP (x, i, j)))
2325 return 1;
2326 }
2327 }
2328 }
2329 return 0;
2330 }
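/* For illustration, how the three predicates above differ on two
   hypothetical insn patterns:

     (set (reg:SI 0) (mem/v:SI (reg:SI 1)))
        volatile_insn_p  == 0  (a volatile MEM is not a volatile insn)
        volatile_refs_p  == 1
        side_effects_p   == 1

     (set (reg:SI 0) (mem:SI (post_inc:SI (reg:SI 1))))
        volatile_insn_p  == 0
        volatile_refs_p  == 0
        side_effects_p   == 1  (POST_INC is an auto-increment)  */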
2331 \f
2332 /* Return nonzero if evaluating rtx X might cause a trap.
2333    FLAGS controls how to consider MEMs.  A nonzero value means the context
2334 of the access may have changed from the original, such that the
2335 address may have become invalid. */
2336
2337 int
2338 may_trap_p_1 (const_rtx x, unsigned flags)
2339 {
2340 int i;
2341 enum rtx_code code;
2342 const char *fmt;
2343
2344 /* We make no distinction currently, but this function is part of
2345 the internal target-hooks ABI so we keep the parameter as
2346 "unsigned flags". */
2347 bool code_changed = flags != 0;
2348
2349 if (x == 0)
2350 return 0;
2351 code = GET_CODE (x);
2352 switch (code)
2353 {
2354 /* Handle these cases quickly. */
2355 CASE_CONST_ANY:
2356 case SYMBOL_REF:
2357 case LABEL_REF:
2358 case CONST:
2359 case PC:
2360 case CC0:
2361 case REG:
2362 case SCRATCH:
2363 return 0;
2364
2365 case UNSPEC:
2366 return targetm.unspec_may_trap_p (x, flags);
2367
2368 case UNSPEC_VOLATILE:
2369 case ASM_INPUT:
2370 case TRAP_IF:
2371 return 1;
2372
2373 case ASM_OPERANDS:
2374 return MEM_VOLATILE_P (x);
2375
2376 /* Memory ref can trap unless it's a static var or a stack slot. */
2377 case MEM:
2378 /* Recognize specific pattern of stack checking probes. */
2379 if (flag_stack_check
2380 && MEM_VOLATILE_P (x)
2381 && XEXP (x, 0) == stack_pointer_rtx)
2382 return 1;
2383 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2384 reference; moving it out of context such as when moving code
2385 when optimizing, might cause its address to become invalid. */
2386 code_changed
2387 || !MEM_NOTRAP_P (x))
2388 {
2389 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2390 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2391 GET_MODE (x), code_changed);
2392 }
2393
2394 return 0;
2395
2396 /* Division by a non-constant might trap. */
2397 case DIV:
2398 case MOD:
2399 case UDIV:
2400 case UMOD:
2401 if (HONOR_SNANS (GET_MODE (x)))
2402 return 1;
2403 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2404 return flag_trapping_math;
2405 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2406 return 1;
2407 break;
2408
2409 case EXPR_LIST:
2410 /* An EXPR_LIST is used to represent a function call. This
2411 certainly may trap. */
2412 return 1;
2413
2414 case GE:
2415 case GT:
2416 case LE:
2417 case LT:
2418 case LTGT:
2419 case COMPARE:
2420 /* Some floating point comparisons may trap. */
2421 if (!flag_trapping_math)
2422 break;
2423 /* ??? There is no machine independent way to check for tests that trap
2424 when COMPARE is used, though many targets do make this distinction.
2425 For instance, sparc uses CCFPE for compares which generate exceptions
2426 and CCFP for compares which do not generate exceptions. */
2427 if (HONOR_NANS (GET_MODE (x)))
2428 return 1;
2429 /* But often the compare has some CC mode, so check operand
2430 modes as well. */
2431 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2432 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2433 return 1;
2434 break;
2435
2436 case EQ:
2437 case NE:
2438 if (HONOR_SNANS (GET_MODE (x)))
2439 return 1;
2440 /* Often comparison is CC mode, so check operand modes. */
2441 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2442 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2443 return 1;
2444 break;
2445
2446 case FIX:
2447 /* Conversion of floating point might trap. */
2448 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2449 return 1;
2450 break;
2451
2452 case NEG:
2453 case ABS:
2454 case SUBREG:
2455 /* These operations don't trap even with floating point. */
2456 break;
2457
2458 default:
2459 /* Any floating arithmetic may trap. */
2460 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2461 return 1;
2462 }
2463
2464 fmt = GET_RTX_FORMAT (code);
2465 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2466 {
2467 if (fmt[i] == 'e')
2468 {
2469 if (may_trap_p_1 (XEXP (x, i), flags))
2470 return 1;
2471 }
2472 else if (fmt[i] == 'E')
2473 {
2474 int j;
2475 for (j = 0; j < XVECLEN (x, i); j++)
2476 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2477 return 1;
2478 }
2479 }
2480 return 0;
2481 }
2482
2483 /* Return nonzero if evaluating rtx X might cause a trap. */
2484
2485 int
2486 may_trap_p (const_rtx x)
2487 {
2488 return may_trap_p_1 (x, 0);
2489 }
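/* For example, may_trap_p returns 1 for (div:SI (reg:SI 0) (reg:SI 1)),
   since the divisor is not a constant, but 0 for
   (div:SI (reg:SI 0) (const_int 4)): a nonzero constant divisor rules
   the trap out (in an integer mode, with no signaling NaNs).  */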
2490
2491 /* Same as above, but additionally return nonzero if evaluating rtx X might
2492    cause a fault.  We define a fault for the purpose of this function as an
2493 erroneous execution condition that cannot be encountered during the normal
2494 execution of a valid program; the typical example is an unaligned memory
2495 access on a strict alignment machine. The compiler guarantees that it
2496 doesn't generate code that will fault from a valid program, but this
2497 guarantee doesn't mean anything for individual instructions. Consider
2498 the following example:
2499
2500 struct S { int d; union { char *cp; int *ip; }; };
2501
2502 int foo(struct S *s)
2503 {
2504 if (s->d == 1)
2505 return *s->ip;
2506 else
2507 return *s->cp;
2508 }
2509
2510 on a strict alignment machine. In a valid program, foo will never be
2511 invoked on a structure for which d is equal to 1 and the underlying
2512 unique field of the union not aligned on a 4-byte boundary, but the
2513 expression *s->ip might cause a fault if considered individually.
2514
2515 At the RTL level, potentially problematic expressions will almost always
2516    satisfy may_trap_p; for example, the above dereference can be emitted as
2517 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2518 However, suppose that foo is inlined in a caller that causes s->cp to
2519 point to a local character variable and guarantees that s->d is not set
2520 to 1; foo may have been effectively translated into pseudo-RTL as:
2521
2522 if ((reg:SI) == 1)
2523 (set (reg:SI) (mem:SI (%fp - 7)))
2524 else
2525 (set (reg:QI) (mem:QI (%fp - 7)))
2526
2527 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2528 memory reference to a stack slot, but it will certainly cause a fault
2529 on a strict alignment machine. */
2530
2531 int
2532 may_trap_or_fault_p (const_rtx x)
2533 {
2534 return may_trap_p_1 (x, 1);
2535 }
2536 \f
2537 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2538 i.e., an inequality. */
2539
2540 int
2541 inequality_comparisons_p (const_rtx x)
2542 {
2543 const char *fmt;
2544 int len, i;
2545 const enum rtx_code code = GET_CODE (x);
2546
2547 switch (code)
2548 {
2549 case REG:
2550 case SCRATCH:
2551 case PC:
2552 case CC0:
2553 CASE_CONST_ANY:
2554 case CONST:
2555 case LABEL_REF:
2556 case SYMBOL_REF:
2557 return 0;
2558
2559 case LT:
2560 case LTU:
2561 case GT:
2562 case GTU:
2563 case LE:
2564 case LEU:
2565 case GE:
2566 case GEU:
2567 return 1;
2568
2569 default:
2570 break;
2571 }
2572
2573 len = GET_RTX_LENGTH (code);
2574 fmt = GET_RTX_FORMAT (code);
2575
2576 for (i = 0; i < len; i++)
2577 {
2578 if (fmt[i] == 'e')
2579 {
2580 if (inequality_comparisons_p (XEXP (x, i)))
2581 return 1;
2582 }
2583 else if (fmt[i] == 'E')
2584 {
2585 int j;
2586 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2587 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2588 return 1;
2589 }
2590 }
2591
2592 return 0;
2593 }
2594 \f
2595 /* Replace any occurrence of FROM in X with TO.  The function does
2596    not recurse into CONST_DOUBLE when doing the replacement.
2597
2598    Note that copying is not done, so X must not be shared unless all
2599    copies are to be modified.  */
2600
2601 rtx
2602 replace_rtx (rtx x, rtx from, rtx to)
2603 {
2604 int i, j;
2605 const char *fmt;
2606
2607 if (x == from)
2608 return to;
2609
2610 /* Allow this function to make replacements in EXPR_LISTs. */
2611 if (x == 0)
2612 return 0;
2613
2614 if (GET_CODE (x) == SUBREG)
2615 {
2616 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2617
2618 if (CONST_INT_P (new_rtx))
2619 {
2620 x = simplify_subreg (GET_MODE (x), new_rtx,
2621 GET_MODE (SUBREG_REG (x)),
2622 SUBREG_BYTE (x));
2623 gcc_assert (x);
2624 }
2625 else
2626 SUBREG_REG (x) = new_rtx;
2627
2628 return x;
2629 }
2630 else if (GET_CODE (x) == ZERO_EXTEND)
2631 {
2632 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2633
2634 if (CONST_INT_P (new_rtx))
2635 {
2636 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2637 new_rtx, GET_MODE (XEXP (x, 0)));
2638 gcc_assert (x);
2639 }
2640 else
2641 XEXP (x, 0) = new_rtx;
2642
2643 return x;
2644 }
2645
2646 fmt = GET_RTX_FORMAT (GET_CODE (x));
2647 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2648 {
2649 if (fmt[i] == 'e')
2650 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2651 else if (fmt[i] == 'E')
2652 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2653 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2654 }
2655
2656 return x;
2657 }
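/* A sketch of typical use (FROM_REG and TO_REG are hypothetical):
   because replace_rtx modifies X in place, a caller that does not own
   X copies it first:

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, from_reg, to_reg);  */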
2658 \f
2659 /* Replace occurrences of the old label in *X with the new one.
2660 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2661
2662 int
2663 replace_label (rtx *x, void *data)
2664 {
2665 rtx l = *x;
2666 rtx old_label = ((replace_label_data *) data)->r1;
2667 rtx new_label = ((replace_label_data *) data)->r2;
2668 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2669
2670 if (l == NULL_RTX)
2671 return 0;
2672
2673 if (GET_CODE (l) == SYMBOL_REF
2674 && CONSTANT_POOL_ADDRESS_P (l))
2675 {
2676 rtx c = get_pool_constant (l);
2677 if (rtx_referenced_p (old_label, c))
2678 {
2679 rtx new_c, new_l;
2680 replace_label_data *d = (replace_label_data *) data;
2681
2682 /* Create a copy of constant C; replace the label inside
2683 but do not update LABEL_NUSES because uses in constant pool
2684 are not counted. */
2685 new_c = copy_rtx (c);
2686 d->update_label_nuses = false;
2687 for_each_rtx (&new_c, replace_label, data);
2688 d->update_label_nuses = update_label_nuses;
2689
2690 /* Add the new constant NEW_C to constant pool and replace
2691 the old reference to constant by new reference. */
2692 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2693 *x = replace_rtx (l, l, new_l);
2694 }
2695 return 0;
2696 }
2697
2698 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2699 field. This is not handled by for_each_rtx because it doesn't
2700 handle unprinted ('0') fields. */
2701 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2702 JUMP_LABEL (l) = new_label;
2703
2704 if ((GET_CODE (l) == LABEL_REF
2705 || GET_CODE (l) == INSN_LIST)
2706 && XEXP (l, 0) == old_label)
2707 {
2708 XEXP (l, 0) = new_label;
2709 if (update_label_nuses)
2710 {
2711 ++LABEL_NUSES (new_label);
2712 --LABEL_NUSES (old_label);
2713 }
2714 return 0;
2715 }
2716
2717 return 0;
2718 }
2719
2720 /* Return nonzero when *BODY is equal to X or when X is directly
2721    referenced by *BODY; FOR_EACH_RTX then stops traversing and returns
2722    nonzero as well.  Otherwise FOR_EACH_RTX continues traversing *BODY.  */
2723
2724 static int
2725 rtx_referenced_p_1 (rtx *body, void *x)
2726 {
2727 rtx y = (rtx) x;
2728
2729 if (*body == NULL_RTX)
2730 return y == NULL_RTX;
2731
2732 /* Return true if a label_ref *BODY refers to label Y. */
2733 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2734 return XEXP (*body, 0) == y;
2735
2736 /* If *BODY is a reference to pool constant traverse the constant. */
2737 if (GET_CODE (*body) == SYMBOL_REF
2738 && CONSTANT_POOL_ADDRESS_P (*body))
2739 return rtx_referenced_p (y, get_pool_constant (*body));
2740
2741 /* By default, compare the RTL expressions. */
2742 return rtx_equal_p (*body, y);
2743 }
2744
2745 /* Return true if X is referenced in BODY. */
2746
2747 int
2748 rtx_referenced_p (rtx x, rtx body)
2749 {
2750 return for_each_rtx (&body, rtx_referenced_p_1, x);
2751 }
2752
2753 /* If INSN is a tablejump, return true and store the label (the insn just
2754    before the jump table) in *LABELP and the jump table in *TABLEP.  LABELP and TABLEP may be NULL.  */
2755
2756 bool
2757 tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
2758 {
2759 rtx label, table;
2760
2761 if (!JUMP_P (insn))
2762 return false;
2763
2764 label = JUMP_LABEL (insn);
2765 if (label != NULL_RTX && !ANY_RETURN_P (label)
2766 && (table = NEXT_INSN (label)) != NULL_RTX
2767 && JUMP_TABLE_DATA_P (table))
2768 {
2769 if (labelp)
2770 *labelp = label;
2771 if (tablep)
2772 *tablep = table;
2773 return true;
2774 }
2775 return false;
2776 }
2777
2778 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2779 constant that is not in the constant pool and not in the condition
2780 of an IF_THEN_ELSE. */
2781
2782 static int
2783 computed_jump_p_1 (const_rtx x)
2784 {
2785 const enum rtx_code code = GET_CODE (x);
2786 int i, j;
2787 const char *fmt;
2788
2789 switch (code)
2790 {
2791 case LABEL_REF:
2792 case PC:
2793 return 0;
2794
2795 case CONST:
2796 CASE_CONST_ANY:
2797 case SYMBOL_REF:
2798 case REG:
2799 return 1;
2800
2801 case MEM:
2802 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2803 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2804
2805 case IF_THEN_ELSE:
2806 return (computed_jump_p_1 (XEXP (x, 1))
2807 || computed_jump_p_1 (XEXP (x, 2)));
2808
2809 default:
2810 break;
2811 }
2812
2813 fmt = GET_RTX_FORMAT (code);
2814 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2815 {
2816 if (fmt[i] == 'e'
2817 && computed_jump_p_1 (XEXP (x, i)))
2818 return 1;
2819
2820 else if (fmt[i] == 'E')
2821 for (j = 0; j < XVECLEN (x, i); j++)
2822 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2823 return 1;
2824 }
2825
2826 return 0;
2827 }
2828
2829 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2830
2831 Tablejumps and casesi insns are not considered indirect jumps;
2832 we can recognize them by a (use (label_ref)). */
2833
2834 int
2835 computed_jump_p (const_rtx insn)
2836 {
2837 int i;
2838 if (JUMP_P (insn))
2839 {
2840 rtx pat = PATTERN (insn);
2841
2842 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2843 if (JUMP_LABEL (insn) != NULL)
2844 return 0;
2845
2846 if (GET_CODE (pat) == PARALLEL)
2847 {
2848 int len = XVECLEN (pat, 0);
2849 int has_use_labelref = 0;
2850
2851 for (i = len - 1; i >= 0; i--)
2852 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2853 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2854 == LABEL_REF))
2855 {
2856 has_use_labelref = 1;
2857 break;
2858 }
2859
2860 if (! has_use_labelref)
2861 for (i = len - 1; i >= 0; i--)
2862 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2863 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2864 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2865 return 1;
2866 }
2867 else if (GET_CODE (pat) == SET
2868 && SET_DEST (pat) == pc_rtx
2869 && computed_jump_p_1 (SET_SRC (pat)))
2870 return 1;
2871 }
2872 return 0;
2873 }
2874
2875 /* Optimized worker loop for for_each_rtx, trying to avoid useless recursive
2876 calls. Processes the subexpressions of EXP and passes them to F. */
2877 static int
2878 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2879 {
2880 int result, i, j;
2881 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2882 rtx *x;
2883
2884 for (; format[n] != '\0'; n++)
2885 {
2886 switch (format[n])
2887 {
2888 case 'e':
2889 /* Call F on X. */
2890 x = &XEXP (exp, n);
2891 result = (*f) (x, data);
2892 if (result == -1)
2893 /* Do not traverse sub-expressions. */
2894 continue;
2895 else if (result != 0)
2896 /* Stop the traversal. */
2897 return result;
2898
2899 if (*x == NULL_RTX)
2900 /* There are no sub-expressions. */
2901 continue;
2902
2903 i = non_rtx_starting_operands[GET_CODE (*x)];
2904 if (i >= 0)
2905 {
2906 result = for_each_rtx_1 (*x, i, f, data);
2907 if (result != 0)
2908 return result;
2909 }
2910 break;
2911
2912 case 'V':
2913 case 'E':
2914 if (XVEC (exp, n) == 0)
2915 continue;
2916 for (j = 0; j < XVECLEN (exp, n); ++j)
2917 {
2918 /* Call F on X. */
2919 x = &XVECEXP (exp, n, j);
2920 result = (*f) (x, data);
2921 if (result == -1)
2922 /* Do not traverse sub-expressions. */
2923 continue;
2924 else if (result != 0)
2925 /* Stop the traversal. */
2926 return result;
2927
2928 if (*x == NULL_RTX)
2929 /* There are no sub-expressions. */
2930 continue;
2931
2932 i = non_rtx_starting_operands[GET_CODE (*x)];
2933 if (i >= 0)
2934 {
2935 result = for_each_rtx_1 (*x, i, f, data);
2936 if (result != 0)
2937 return result;
2938 }
2939 }
2940 break;
2941
2942 default:
2943 /* Nothing to do. */
2944 break;
2945 }
2946 }
2947
2948 return 0;
2949 }
2950
2951 /* Traverse X via depth-first search, calling F for each
2952 sub-expression (including X itself). F is also passed the DATA.
2953 If F returns -1, do not traverse sub-expressions, but continue
2954 traversing the rest of the tree. If F ever returns any other
2955 nonzero value, stop the traversal, and return the value returned
2956 by F. Otherwise, return 0. This function does not traverse inside
2957 tree structure that contains RTX_EXPRs, or into sub-expressions
2958 whose format code is `0' since it is not known whether or not those
2959 codes are actually RTL.
2960
2961 This routine is very general, and could (should?) be used to
2962 implement many of the other routines in this file. */
2963
2964 int
2965 for_each_rtx (rtx *x, rtx_function f, void *data)
2966 {
2967 int result;
2968 int i;
2969
2970 /* Call F on X. */
2971 result = (*f) (x, data);
2972 if (result == -1)
2973 /* Do not traverse sub-expressions. */
2974 return 0;
2975 else if (result != 0)
2976 /* Stop the traversal. */
2977 return result;
2978
2979 if (*x == NULL_RTX)
2980 /* There are no sub-expressions. */
2981 return 0;
2982
2983 i = non_rtx_starting_operands[GET_CODE (*x)];
2984 if (i < 0)
2985 return 0;
2986
2987 return for_each_rtx_1 (*x, i, f, data);
2988 }
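/* A sketch of the callback protocol documented above (COUNT_MEMS_1 is
   hypothetical).  Counting the MEMs in an expression X:

     static int
     count_mems_1 (rtx *x, void *data)
     {
       if (MEM_P (*x))
         ++*(int *) data;
       return 0;
     }

     int n = 0;
     for_each_rtx (&x, count_mems_1, &n);

   Returning 0 keeps the traversal going; -1 would skip the
   sub-expressions of *x; any other nonzero value stops the walk and is
   returned to the caller of for_each_rtx.  */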
2989
2990 \f
2991
2992 /* Data structure that holds the internal state communicated between
2993 for_each_inc_dec, for_each_inc_dec_find_mem and
2994 for_each_inc_dec_find_inc_dec. */
2995
2996 struct for_each_inc_dec_ops {
2997 /* The function to be called for each autoinc operation found. */
2998 for_each_inc_dec_fn fn;
2999 /* The opaque argument to be passed to it. */
3000 void *arg;
3001 /* The MEM we're visiting, if any. */
3002 rtx mem;
3003 };
3004
3005 static int for_each_inc_dec_find_mem (rtx *r, void *d);
3006
3007 /* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
3008 operands of the equivalent add insn and pass the result to the
3009 operator specified by *D. */
3010
3011 static int
3012 for_each_inc_dec_find_inc_dec (rtx *r, void *d)
3013 {
3014 rtx x = *r;
3015 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;
3016
3017 switch (GET_CODE (x))
3018 {
3019 case PRE_INC:
3020 case POST_INC:
3021 {
3022 int size = GET_MODE_SIZE (GET_MODE (data->mem));
3023 rtx r1 = XEXP (x, 0);
3024 rtx c = gen_int_mode (size, GET_MODE (r1));
3025 return data->fn (data->mem, x, r1, r1, c, data->arg);
3026 }
3027
3028 case PRE_DEC:
3029 case POST_DEC:
3030 {
3031 int size = GET_MODE_SIZE (GET_MODE (data->mem));
3032 rtx r1 = XEXP (x, 0);
3033 rtx c = gen_int_mode (-size, GET_MODE (r1));
3034 return data->fn (data->mem, x, r1, r1, c, data->arg);
3035 }
3036
3037 case PRE_MODIFY:
3038 case POST_MODIFY:
3039 {
3040 rtx r1 = XEXP (x, 0);
3041 rtx add = XEXP (x, 1);
3042 return data->fn (data->mem, x, r1, add, NULL, data->arg);
3043 }
3044
3045 case MEM:
3046 {
3047 rtx save = data->mem;
3048 int ret = for_each_inc_dec_find_mem (r, d);
3049 data->mem = save;
3050 return ret;
3051 }
3052
3053 default:
3054 return 0;
3055 }
3056 }
3057
3058 /* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
3059 address, extract the operands of the equivalent add insn and pass
3060 the result to the operator specified by *D. */
3061
3062 static int
3063 for_each_inc_dec_find_mem (rtx *r, void *d)
3064 {
3065 rtx x = *r;
3066 if (x != NULL_RTX && MEM_P (x))
3067 {
3068 struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
3069 int result;
3070
3071 data->mem = x;
3072
3073 result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
3074 data);
3075 if (result)
3076 return result;
3077
3078 return -1;
3079 }
3080 return 0;
3081 }
3082
3083 /* Traverse *X looking for MEMs, and for autoinc operations within
3084 them. For each such autoinc operation found, call FN, passing it
3085 the innermost enclosing MEM, the operation itself, the RTX modified
3086 by the operation, two RTXs (the second may be NULL) that, once
3087 added, represent the value to be held by the modified RTX
3088 afterwards, and ARG. FN is to return -1 to skip looking for other
3089 autoinc operations within the visited operation, 0 to continue the
3090 traversal, or any other value to have it returned to the caller of
3091 for_each_inc_dec. */
3092
3093 int
3094 for_each_inc_dec (rtx *x,
3095 for_each_inc_dec_fn fn,
3096 void *arg)
3097 {
3098 struct for_each_inc_dec_ops data;
3099
3100 data.fn = fn;
3101 data.arg = arg;
3102 data.mem = NULL;
3103
3104 return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
3105 }
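/* A sketch of an FN callback for the walk above (COUNT_AUTOINC is
   hypothetical):

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *arg)
     {
       ++*(int *) arg;
       return 0;
     }

     int n = 0;
     for_each_inc_dec (&PATTERN (insn), count_autoinc, &n);

   For a (post_inc:SI (reg:SI 1)) inside a 4-byte MEM, DEST and SRC are
   both (reg:SI 1) and SRCOFF is (const_int 4); for PRE/POST_MODIFY,
   SRCOFF is NULL and SRC is the whole replacement expression.  */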
3106
3107 \f
3108 /* Searches X for any reference to REGNO, returning the rtx of the
3109 reference found if any. Otherwise, returns NULL_RTX. */
3110
3111 rtx
3112 regno_use_in (unsigned int regno, rtx x)
3113 {
3114 const char *fmt;
3115 int i, j;
3116 rtx tem;
3117
3118 if (REG_P (x) && REGNO (x) == regno)
3119 return x;
3120
3121 fmt = GET_RTX_FORMAT (GET_CODE (x));
3122 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3123 {
3124 if (fmt[i] == 'e')
3125 {
3126 if ((tem = regno_use_in (regno, XEXP (x, i))))
3127 return tem;
3128 }
3129 else if (fmt[i] == 'E')
3130 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3131 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3132 return tem;
3133 }
3134
3135 return NULL_RTX;
3136 }
3137
3138 /* Return a value indicating whether OP, an operand of a commutative
3139 operation, is preferred as the first or second operand. The higher
3140 the value, the stronger the preference for being the first operand.
3141 We use negative values to indicate a preference for the first operand
3142 and positive values for the second operand. */
3143
3144 int
3145 commutative_operand_precedence (rtx op)
3146 {
3147 enum rtx_code code = GET_CODE (op);
3148
3149   /* Constants always become the second operand.  Prefer "nice" constants.  */
3150 if (code == CONST_INT)
3151 return -8;
3152 if (code == CONST_DOUBLE)
3153 return -7;
3154 if (code == CONST_FIXED)
3155 return -7;
3156 op = avoid_constant_pool_reference (op);
3157 code = GET_CODE (op);
3158
3159 switch (GET_RTX_CLASS (code))
3160 {
3161 case RTX_CONST_OBJ:
3162 if (code == CONST_INT)
3163 return -6;
3164 if (code == CONST_DOUBLE)
3165 return -5;
3166 if (code == CONST_FIXED)
3167 return -5;
3168 return -4;
3169
3170 case RTX_EXTRA:
3171 /* SUBREGs of objects should come second. */
3172 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3173 return -3;
3174 return 0;
3175
3176 case RTX_OBJ:
3177       /* Complex expressions should come first, so decrease the priority
3178          of objects.  Prefer pointer objects over non-pointer objects.  */
3179 if ((REG_P (op) && REG_POINTER (op))
3180 || (MEM_P (op) && MEM_POINTER (op)))
3181 return -1;
3182 return -2;
3183
3184 case RTX_COMM_ARITH:
3185 /* Prefer operands that are themselves commutative to be first.
3186 This helps to make things linear. In particular,
3187 (and (and (reg) (reg)) (not (reg))) is canonical. */
3188 return 4;
3189
3190 case RTX_BIN_ARITH:
3191 /* If only one operand is a binary expression, it will be the first
3192 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3193 is canonical, although it will usually be further simplified. */
3194 return 2;
3195
3196 case RTX_UNARY:
3197 /* Then prefer NEG and NOT. */
3198 if (code == NEG || code == NOT)
3199 return 1;
3200
3201 default:
3202 return 0;
3203 }
3204 }
3205
3206 /* Return 1 iff it is necessary to swap the operands of a commutative
3207    operation in order to canonicalize the expression.  */
3208
3209 bool
3210 swap_commutative_operands_p (rtx x, rtx y)
3211 {
3212 return (commutative_operand_precedence (x)
3213 < commutative_operand_precedence (y));
3214 }
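/* For example, with X = (const_int 4) and Y = (reg:SI 0) the
   precedences are -8 and -2 respectively, so swap_commutative_operands_p
   returns true and canonicalization rewrites
   (plus (const_int 4) (reg:SI 0)) as (plus (reg:SI 0) (const_int 4)),
   i.e. the constant ends up second.  */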
3215
3216 /* Return 1 if X is an autoincrement side effect and the register is
3217 not the stack pointer. */
3218 int
3219 auto_inc_p (const_rtx x)
3220 {
3221 switch (GET_CODE (x))
3222 {
3223 case PRE_INC:
3224 case POST_INC:
3225 case PRE_DEC:
3226 case POST_DEC:
3227 case PRE_MODIFY:
3228 case POST_MODIFY:
3229 /* There are no REG_INC notes for SP. */
3230 if (XEXP (x, 0) != stack_pointer_rtx)
3231 return 1;
3232 default:
3233 break;
3234 }
3235 return 0;
3236 }
3237
3238 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3239 int
3240 loc_mentioned_in_p (rtx *loc, const_rtx in)
3241 {
3242 enum rtx_code code;
3243 const char *fmt;
3244 int i, j;
3245
3246 if (!in)
3247 return 0;
3248
3249 code = GET_CODE (in);
3250 fmt = GET_RTX_FORMAT (code);
3251 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3252 {
3253 if (fmt[i] == 'e')
3254 {
3255 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3256 return 1;
3257 }
3258 else if (fmt[i] == 'E')
3259 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3260 if (loc == &XVECEXP (in, i, j)
3261 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3262 return 1;
3263 }
3264 return 0;
3265 }
3266
3267 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3268 and SUBREG_BYTE, return the bit offset where the subreg begins
3269 (counting from the least significant bit of the operand). */
3270
3271 unsigned int
3272 subreg_lsb_1 (enum machine_mode outer_mode,
3273 enum machine_mode inner_mode,
3274 unsigned int subreg_byte)
3275 {
3276 unsigned int bitpos;
3277 unsigned int byte;
3278 unsigned int word;
3279
3280 /* A paradoxical subreg begins at bit position 0. */
3281 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3282 return 0;
3283
3284 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3285 /* If the subreg crosses a word boundary ensure that
3286 it also begins and ends on a word boundary. */
3287 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3288 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3289 && (subreg_byte % UNITS_PER_WORD
3290 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3291
3292 if (WORDS_BIG_ENDIAN)
3293 word = (GET_MODE_SIZE (inner_mode)
3294 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3295 else
3296 word = subreg_byte / UNITS_PER_WORD;
3297 bitpos = word * BITS_PER_WORD;
3298
3299 if (BYTES_BIG_ENDIAN)
3300 byte = (GET_MODE_SIZE (inner_mode)
3301 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3302 else
3303 byte = subreg_byte % UNITS_PER_WORD;
3304 bitpos += byte * BITS_PER_UNIT;
3305
3306 return bitpos;
3307 }
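/* A worked example: on a little-endian target (both WORDS_BIG_ENDIAN
   and BYTES_BIG_ENDIAN zero),

     subreg_lsb_1 (SImode, DImode, 4) == 32

   i.e. (subreg:SI (reg:DI) 4) starts at bit 32 of the DImode value,
   whether UNITS_PER_WORD is 4 (word 1, byte 0) or 8 (word 0, byte 4).  */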
3308
3309 /* Given a subreg X, return the bit offset where the subreg begins
3310 (counting from the least significant bit of the reg). */
3311
3312 unsigned int
3313 subreg_lsb (const_rtx x)
3314 {
3315 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3316 SUBREG_BYTE (x));
3317 }
3318
3319 /* Fill in information about a subreg of a hard register.
3320 xregno - A regno of an inner hard subreg_reg (or what will become one).
3321 xmode - The mode of xregno.
3322 offset - The byte offset.
3323 ymode - The mode of a top level SUBREG (or what may become one).
3324 info - Pointer to structure to fill in. */
3325 void
3326 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3327 unsigned int offset, enum machine_mode ymode,
3328 struct subreg_info *info)
3329 {
3330 int nregs_xmode, nregs_ymode;
3331 int mode_multiple, nregs_multiple;
3332 int offset_adj, y_offset, y_offset_adj;
3333 int regsize_xmode, regsize_ymode;
3334 bool rknown;
3335
3336 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3337
3338 rknown = false;
3339
3340   /* If a non-scalar mode has holes when stored in registers, we expect
3341      it to be made up of its units concatenated together.  */
3342 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3343 {
3344 enum machine_mode xmode_unit;
3345
3346 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3347 if (GET_MODE_INNER (xmode) == VOIDmode)
3348 xmode_unit = xmode;
3349 else
3350 xmode_unit = GET_MODE_INNER (xmode);
3351 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3352 gcc_assert (nregs_xmode
3353 == (GET_MODE_NUNITS (xmode)
3354 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3355 gcc_assert (hard_regno_nregs[xregno][xmode]
3356 == (hard_regno_nregs[xregno][xmode_unit]
3357 * GET_MODE_NUNITS (xmode)));
3358
3359 /* You can only ask for a SUBREG of a value with holes in the middle
3360 if you don't cross the holes. (Such a SUBREG should be done by
3361 picking a different register class, or doing it in memory if
3362 necessary.) An example of a value with holes is XCmode on 32-bit
3363 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3364 3 for each part, but in memory it's two 128-bit parts.
3365 Padding is assumed to be at the end (not necessarily the 'high part')
3366 of each unit. */
3367 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3368 < GET_MODE_NUNITS (xmode))
3369 && (offset / GET_MODE_SIZE (xmode_unit)
3370 != ((offset + GET_MODE_SIZE (ymode) - 1)
3371 / GET_MODE_SIZE (xmode_unit))))
3372 {
3373 info->representable_p = false;
3374 rknown = true;
3375 }
3376 }
3377 else
3378 nregs_xmode = hard_regno_nregs[xregno][xmode];
3379
3380 nregs_ymode = hard_regno_nregs[xregno][ymode];
3381
3382 /* Paradoxical subregs are otherwise valid. */
3383 if (!rknown
3384 && offset == 0
3385 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3386 {
3387 info->representable_p = true;
3388 /* If this is a big endian paradoxical subreg, which uses more
3389 actual hard registers than the original register, we must
3390 return a negative offset so that we find the proper highpart
3391 of the register. */
3392 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3393 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3394 info->offset = nregs_xmode - nregs_ymode;
3395 else
3396 info->offset = 0;
3397 info->nregs = nregs_ymode;
3398 return;
3399 }
3400
3401 /* If registers store different numbers of bits in the different
3402 modes, we cannot generally form this subreg. */
3403 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3404 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3405 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3406 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3407 {
3408 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3409 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3410 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3411 {
3412 info->representable_p = false;
3413 info->nregs
3414 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3415 info->offset = offset / regsize_xmode;
3416 return;
3417 }
3418 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3419 {
3420 info->representable_p = false;
3421 info->nregs
3422 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3423 info->offset = offset / regsize_xmode;
3424 return;
3425 }
3426 }
3427
3428 /* Lowpart subregs are otherwise valid. */
3429 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3430 {
3431 info->representable_p = true;
3432 rknown = true;
3433
3434 if (offset == 0 || nregs_xmode == nregs_ymode)
3435 {
3436 info->offset = 0;
3437 info->nregs = nregs_ymode;
3438 return;
3439 }
3440 }
3441
3442 /* This should always pass, otherwise we don't know how to verify
3443 the constraint. These conditions may be relaxed but
3444 subreg_regno_offset would need to be redesigned. */
3445 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3446 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3447
3448 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3449 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3450 {
3451 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3452 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3453 HOST_WIDE_INT off_low = offset & (ysize - 1);
3454 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3455 offset = (xsize - ysize - off_high) | off_low;
3456 }
3457 /* The XMODE value can be seen as a vector of NREGS_XMODE
3458 values. The subreg must represent a lowpart of given field.
3459 Compute what field it is. */
3460 offset_adj = offset;
3461 offset_adj -= subreg_lowpart_offset (ymode,
3462 mode_for_size (GET_MODE_BITSIZE (xmode)
3463 / nregs_xmode,
3464 MODE_INT, 0));
3465
3466 /* Size of ymode must not be greater than the size of xmode. */
3467 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3468 gcc_assert (mode_multiple != 0);
3469
3470 y_offset = offset / GET_MODE_SIZE (ymode);
3471 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3472 nregs_multiple = nregs_xmode / nregs_ymode;
3473
3474 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3475 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3476
3477 if (!rknown)
3478 {
3479 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3480 rknown = true;
3481 }
3482 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3483 info->nregs = nregs_ymode;
3484 }
3485
3486 /* This function returns the regno offset of a subreg expression.
3487 xregno - A regno of an inner hard subreg_reg (or what will become one).
3488 xmode - The mode of xregno.
3489 offset - The byte offset.
3490 ymode - The mode of a top level SUBREG (or what may become one).
3491 RETURN - The regno offset which would be used. */
3492 unsigned int
3493 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3494 unsigned int offset, enum machine_mode ymode)
3495 {
3496 struct subreg_info info;
3497 subreg_get_info (xregno, xmode, offset, ymode, &info);
3498 return info.offset;
3499 }
3500
3501 /* This function returns true when the offset is representable via
3502 subreg_offset in the given regno.
3503 xregno - A regno of an inner hard subreg_reg (or what will become one).
3504 xmode - The mode of xregno.
3505 offset - The byte offset.
3506 ymode - The mode of a top level SUBREG (or what may become one).
3507 RETURN - Whether the offset is representable. */
3508 bool
3509 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3510 unsigned int offset, enum machine_mode ymode)
3511 {
3512 struct subreg_info info;
3513 subreg_get_info (xregno, xmode, offset, ymode, &info);
3514 return info.representable_p;
3515 }
3516
3517 /* Return the number of a YMODE register to which
3518
3519 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3520
3521 can be simplified. Return -1 if the subreg can't be simplified.
3522
3523 XREGNO is a hard register number. */
3524
3525 int
3526 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3527 unsigned int offset, enum machine_mode ymode)
3528 {
3529 struct subreg_info info;
3530 unsigned int yregno;
3531
3532 #ifdef CANNOT_CHANGE_MODE_CLASS
3533 /* Give the backend a chance to disallow the mode change. */
3534 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3535 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3536 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3537 /* We can use mode change in LRA for some transformations. */
3538 && ! lra_in_progress)
3539 return -1;
3540 #endif
3541
3542 /* We shouldn't simplify stack-related registers. */
3543 if ((!reload_completed || frame_pointer_needed)
3544 && xregno == FRAME_POINTER_REGNUM)
3545 return -1;
3546
3547 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3548 && xregno == ARG_POINTER_REGNUM)
3549 return -1;
3550
3551 if (xregno == STACK_POINTER_REGNUM
3552 /* We should convert hard stack register in LRA if it is
3553 possible. */
3554 && ! lra_in_progress)
3555 return -1;
3556
3557 /* Try to get the register offset. */
3558 subreg_get_info (xregno, xmode, offset, ymode, &info);
3559 if (!info.representable_p)
3560 return -1;
3561
3562 /* Make sure that the offsetted register value is in range. */
3563 yregno = xregno + info.offset;
3564 if (!HARD_REGISTER_NUM_P (yregno))
3565 return -1;
3566
3567 /* See whether (reg:YMODE YREGNO) is valid.
3568
3569 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3570 This is a kludge to work around how complex FP arguments are passed
3571 on IA-64 and should be fixed. See PR target/49226. */
3572 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3573 && HARD_REGNO_MODE_OK (xregno, xmode))
3574 return -1;
3575
3576 return (int) yregno;
3577 }
3578
3579 /* Return the final regno that a subreg expression refers to. */
3580 unsigned int
3581 subreg_regno (const_rtx x)
3582 {
3583 unsigned int ret;
3584 rtx subreg = SUBREG_REG (x);
3585 int regno = REGNO (subreg);
3586
3587 ret = regno + subreg_regno_offset (regno,
3588 GET_MODE (subreg),
3589 SUBREG_BYTE (x),
3590 GET_MODE (x));
3591 return ret;
3592
3593 }
3594
3595 /* Return the number of registers that a subreg expression refers
3596 to. */
3597 unsigned int
3598 subreg_nregs (const_rtx x)
3599 {
3600 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3601 }
3602
3603 /* Return the number of registers that subreg expression X, whose
3604    register has number REGNO, refers to.  This is subreg_nregs above,
3605    changed so that the regno can be passed in.  */
3606
3607 unsigned int
3608 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3609 {
3610 struct subreg_info info;
3611 rtx subreg = SUBREG_REG (x);
3612
3613 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3614 &info);
3615 return info.nregs;
3616 }
3617
3618
3619 struct parms_set_data
3620 {
3621 int nregs;
3622 HARD_REG_SET regs;
3623 };
3624
3625 /* Helper function for noticing stores to parameter registers. */
3626 static void
3627 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3628 {
3629 struct parms_set_data *const d = (struct parms_set_data *) data;
3630 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3631 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3632 {
3633 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3634 d->nregs--;
3635 }
3636 }
3637
3638 /* Look backward for the first parameter to be loaded.
3639 Note that loads of all parameters will not necessarily be
3640 found if CSE has eliminated some of them (e.g., an argument
3641 to the outer function is passed down as a parameter).
3642 Do not skip BOUNDARY. */
3643 rtx
3644 find_first_parameter_load (rtx call_insn, rtx boundary)
3645 {
3646 struct parms_set_data parm;
3647 rtx p, before, first_set;
3648
3649 /* Since different machines initialize their parameter registers
3650 in different orders, assume nothing. Collect the set of all
3651 parameter registers. */
3652 CLEAR_HARD_REG_SET (parm.regs);
3653 parm.nregs = 0;
3654 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3655 if (GET_CODE (XEXP (p, 0)) == USE
3656 && REG_P (XEXP (XEXP (p, 0), 0)))
3657 {
3658 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3659
3660 /* We only care about registers which can hold function
3661 arguments. */
3662 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3663 continue;
3664
3665 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3666 parm.nregs++;
3667 }
3668 before = call_insn;
3669 first_set = call_insn;
3670
3671 /* Search backward for the first set of a register in this set. */
3672 while (parm.nregs && before != boundary)
3673 {
3674 before = PREV_INSN (before);
3675
3676 /* It is possible that some loads got CSEed from one call to
3677 another. Stop in that case. */
3678 if (CALL_P (before))
3679 break;
3680
3681       /* Our caller must either ensure that we will find all sets
3682          (in case code has not been optimized yet), or guard against
3683          possible labels by setting BOUNDARY to the preceding
3684          CODE_LABEL.  */
3685 if (LABEL_P (before))
3686 {
3687 gcc_assert (before == boundary);
3688 break;
3689 }
3690
3691 if (INSN_P (before))
3692 {
3693 int nregs_old = parm.nregs;
3694 note_stores (PATTERN (before), parms_set, &parm);
3695 /* If we found something that did not set a parameter reg,
3696 we're done. Do not keep going, as that might result
3697 in hoisting an insn before the setting of a pseudo
3698 that is used by the hoisted insn. */
3699 if (nregs_old != parm.nregs)
3700 first_set = before;
3701 else
3702 break;
3703 }
3704 }
3705 return first_set;
3706 }
3707
3708 /* Return true if we should avoid inserting code between INSN and preceding
3709 call instruction. */
3710
3711 bool
3712 keep_with_call_p (const_rtx insn)
3713 {
3714 rtx set;
3715
3716 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3717 {
3718 if (REG_P (SET_DEST (set))
3719 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3720 && fixed_regs[REGNO (SET_DEST (set))]
3721 && general_operand (SET_SRC (set), VOIDmode))
3722 return true;
3723 if (REG_P (SET_SRC (set))
3724 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3725 && REG_P (SET_DEST (set))
3726 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3727 return true;
3728 /* There may be a stack pop just after the call and before the store
3729 of the return register. Search for the actual store when deciding
3730 if we can break or not. */
3731 if (SET_DEST (set) == stack_pointer_rtx)
3732 {
3733 /* This CONST_CAST is okay because next_nonnote_insn just
3734 returns its argument and we assign it to a const_rtx
3735 variable. */
3736 const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX (insn));
3737 if (i2 && keep_with_call_p (i2))
3738 return true;
3739 }
3740 }
3741 return false;
3742 }
3743
3744 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3745 to non-complex jumps. That is, direct unconditional, conditional,
3746 and tablejumps, but not computed jumps or returns. It also does
3747 not apply to the fallthru case of a conditional jump. */
3748
3749 bool
3750 label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
3751 {
3752 rtx tmp = JUMP_LABEL (jump_insn);
3753
3754 if (label == tmp)
3755 return true;
3756
3757 if (tablejump_p (jump_insn, NULL, &tmp))
3758 {
3759 rtvec vec = XVEC (PATTERN (tmp),
3760 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3761 int i, veclen = GET_NUM_ELEM (vec);
3762
3763 for (i = 0; i < veclen; ++i)
3764 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3765 return true;
3766 }
3767
3768 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3769 return true;
3770
3771 return false;
3772 }
3773
3774 \f
3775 /* Return an estimate of the cost of computing rtx X.
3776 One use is in cse, to decide which expression to keep in the hash table.
3777 Another is in rtl generation, to pick the cheapest way to multiply.
3778 Other uses like the latter are expected in the future.
3779
3780 X appears as operand OPNO in an expression with code OUTER_CODE.
3781 SPEED specifies whether costs optimized for speed or size should
3782 be returned. */
3783
3784 int
3785 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3786 {
3787 int i, j;
3788 enum rtx_code code;
3789 const char *fmt;
3790 int total;
3791 int factor;
3792
3793 if (x == 0)
3794 return 0;
3795
3796 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3797 many insns, taking N times as long. */
3798 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3799 if (factor == 0)
3800 factor = 1;
3801
3802 /* Compute the default costs of certain things.
3803 Note that targetm.rtx_costs can override the defaults. */
3804
3805 code = GET_CODE (x);
3806 switch (code)
3807 {
3808 case MULT:
3809 /* Multiplication has time-complexity O(N*N), where N is the
3810 number of units (translated from digits) when using
3811 schoolbook long multiplication. */
3812 total = factor * factor * COSTS_N_INSNS (5);
3813 break;
3814 case DIV:
3815 case UDIV:
3816 case MOD:
3817 case UMOD:
3818 /* Similarly, complexity for schoolbook long division. */
3819 total = factor * factor * COSTS_N_INSNS (7);
3820 break;
3821 case USE:
3822 /* Used in combine.c as a marker. */
3823 total = 0;
3824 break;
3825 case SET:
3826 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3827 the mode for the factor. */
3828 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3829 if (factor == 0)
3830 factor = 1;
3831 /* Pass through. */
3832 default:
3833 total = factor * COSTS_N_INSNS (1);
3834 }
3835
3836 switch (code)
3837 {
3838 case REG:
3839 return 0;
3840
3841 case SUBREG:
3842 total = 0;
3843 /* If we can't tie these modes, make this expensive. The larger
3844 the mode, the more expensive it is. */
3845 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3846 return COSTS_N_INSNS (2 + factor);
3847 break;
3848
3849 default:
3850 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3851 return total;
3852 break;
3853 }
3854
3855 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3856 which is already in total. */
3857
3858 fmt = GET_RTX_FORMAT (code);
3859 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3860 if (fmt[i] == 'e')
3861 total += rtx_cost (XEXP (x, i), code, i, speed);
3862 else if (fmt[i] == 'E')
3863 for (j = 0; j < XVECLEN (x, i); j++)
3864 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3865
3866 return total;
3867 }
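/* For example, on a 32-bit target (UNITS_PER_WORD == 4) a DImode MULT
   gets factor == 2, so its default cost before any target override is
   2 * 2 * COSTS_N_INSNS (5) -- the quadratic cost of schoolbook
   multiword multiplication -- plus the costs of its two operands.  */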
3868
3869 /* Fill in the structure C with information about both speed and size rtx
3870 costs for X, which is operand OPNO in an expression with code OUTER. */
3871
3872 void
3873 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3874 struct full_rtx_costs *c)
3875 {
3876 c->speed = rtx_cost (x, outer, opno, true);
3877 c->size = rtx_cost (x, outer, opno, false);
3878 }
3879
3880 \f
3881 /* Return the cost of address expression X.
3882    Expect that X is a properly formed address reference.
3883
3884    The SPEED parameter specifies whether costs optimized for speed or size
3885    should be returned.  */
3886
3887 int
3888 address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
3889 {
3890   /* We may be asked for the cost of various unusual addresses, such as
3891      operands of a push instruction.  It is not worthwhile to complicate
3892      the target hook with such cases.  */
3893
3894 if (!memory_address_addr_space_p (mode, x, as))
3895 return 1000;
3896
3897 return targetm.address_cost (x, mode, as, speed);
3898 }
3899
3900 /* If the target doesn't override, compute the cost as with arithmetic. */
3901
3902 int
3903 default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
3904 {
3905 return rtx_cost (x, MEM, 0, speed);
3906 }
3907 \f
3908
3909 unsigned HOST_WIDE_INT
3910 nonzero_bits (const_rtx x, enum machine_mode mode)
3911 {
3912 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3913 }
3914
3915 unsigned int
3916 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
3917 {
3918 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3919 }
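/* For illustration (a hypothetical rtx):

     nonzero_bits ((and:SI (reg:SI 0) (const_int 255)), SImode) == 255

   since the AND masks all other bits to zero, whereas for a bare
   (reg:SI 0) about which nothing is known the result is simply
   GET_MODE_MASK (SImode).  */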
3920
3921 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3922 It avoids exponential behavior in nonzero_bits1 when X has
3923 identical subexpressions on the first or the second level. */
3924
3925 static unsigned HOST_WIDE_INT
3926 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
3927 enum machine_mode known_mode,
3928 unsigned HOST_WIDE_INT known_ret)
3929 {
3930 if (x == known_x && mode == known_mode)
3931 return known_ret;
3932
3933 /* Try to find identical subexpressions. If found call
3934 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3935 precomputed value for the subexpression as KNOWN_RET. */
3936
3937 if (ARITHMETIC_P (x))
3938 {
3939 rtx x0 = XEXP (x, 0);
3940 rtx x1 = XEXP (x, 1);
3941
3942 /* Check the first level. */
3943 if (x0 == x1)
3944 return nonzero_bits1 (x, mode, x0, mode,
3945 cached_nonzero_bits (x0, mode, known_x,
3946 known_mode, known_ret));
3947
3948 /* Check the second level. */
3949 if (ARITHMETIC_P (x0)
3950 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3951 return nonzero_bits1 (x, mode, x1, mode,
3952 cached_nonzero_bits (x1, mode, known_x,
3953 known_mode, known_ret));
3954
3955 if (ARITHMETIC_P (x1)
3956 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3957 return nonzero_bits1 (x, mode, x0, mode,
3958 cached_nonzero_bits (x0, mode, known_x,
3959 known_mode, known_ret));
3960 }
3961
3962 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3963 }
3964
3965 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3966 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3967 is less useful. We can't allow both, because that results in exponential
3968 run time recursion. There is a nullstone testcase that triggered
3969 this. This macro avoids accidental uses of num_sign_bit_copies. */
3970 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3971
3972 /* Given an expression, X, compute which bits in X can be nonzero.
3973 We don't care about bits outside of those defined in MODE.
3974
3975    For most X this is simply GET_MODE_MASK (MODE), but if X is
3976 an arithmetic operation, we can do better. */
3977
3978 static unsigned HOST_WIDE_INT
3979 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
3980 enum machine_mode known_mode,
3981 unsigned HOST_WIDE_INT known_ret)
3982 {
3983 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3984 unsigned HOST_WIDE_INT inner_nz;
3985 enum rtx_code code;
3986 enum machine_mode inner_mode;
3987 unsigned int mode_width = GET_MODE_PRECISION (mode);
3988
3989 /* For floating-point and vector values, assume all bits are needed. */
3990 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
3991 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
3992 return nonzero;
3993
3994 /* If X is wider than MODE, use its mode instead. */
3995 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
3996 {
3997 mode = GET_MODE (x);
3998 nonzero = GET_MODE_MASK (mode);
3999 mode_width = GET_MODE_PRECISION (mode);
4000 }
4001
4002 if (mode_width > HOST_BITS_PER_WIDE_INT)
4003 /* Our only callers in this case look for single bit values. So
4004 just return the mode mask. Those tests will then be false. */
4005 return nonzero;
4006
4007 #ifndef WORD_REGISTER_OPERATIONS
4008 /* If MODE is wider than X, but both are a single word for both the host
4009 and target machines, we can compute this from which bits of the
4010 object might be nonzero in its own mode, taking into account the fact
4011 that on many CISC machines, accessing an object in a wider mode
4012 causes the high-order bits to become undefined. So they are
4013 not known to be zero. */
4014
4015 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
4016 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4017 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4018 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4019 {
4020 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4021 known_x, known_mode, known_ret);
4022 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4023 return nonzero;
4024 }
4025 #endif
4026
4027 code = GET_CODE (x);
4028 switch (code)
4029 {
4030 case REG:
4031 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4032 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4033 all the bits above ptr_mode are known to be zero. */
4034 /* As we do not know which address space the pointer is referring to,
4035 we can do this only if the target does not support different pointer
4036 or address modes depending on the address space. */
4037 if (target_default_pointer_address_modes_p ()
4038 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4039 && REG_POINTER (x))
4040 nonzero &= GET_MODE_MASK (ptr_mode);
4041 #endif
4042
4043 /* Include declared information about alignment of pointers. */
4044 /* ??? We don't properly preserve REG_POINTER changes across
4045 pointer-to-integer casts, so we can't trust it except for
4046 things that we know must be pointers. See execute/960116-1.c. */
4047 if ((x == stack_pointer_rtx
4048 || x == frame_pointer_rtx
4049 || x == arg_pointer_rtx)
4050 && REGNO_POINTER_ALIGN (REGNO (x)))
4051 {
4052 unsigned HOST_WIDE_INT alignment
4053 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4054
4055 #ifdef PUSH_ROUNDING
4056 /* If PUSH_ROUNDING is defined, it is possible for the
4057 stack to be momentarily aligned only to that amount,
4058 so we pick the least alignment. */
4059 if (x == stack_pointer_rtx && PUSH_ARGS)
4060 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4061 alignment);
4062 #endif
4063
4064 nonzero &= ~(alignment - 1);
4065 }
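/* For example, if REGNO_POINTER_ALIGN reports a 64-bit alignment
   for, say, the stack pointer, ALIGNMENT is 64 / BITS_PER_UNIT == 8
   and the three low-order bits are cleared from NONZERO.  */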
4066
4067 {
4068 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4069 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4070 known_mode, known_ret,
4071 &nonzero_for_hook);
4072
4073 if (new_rtx)
4074 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4075 known_mode, known_ret);
4076
4077 return nonzero_for_hook;
4078 }
4079
4080 case CONST_INT:
4081 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4082 /* If X is negative in MODE, sign-extend the value. */
4083 if (INTVAL (x) > 0
4084 && mode_width < BITS_PER_WORD
4085 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4086 != 0)
4087 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4088 #endif
4089
4090 return UINTVAL (x);
4091
4092 case MEM:
4093 #ifdef LOAD_EXTEND_OP
4094 /* In many, if not most, RISC machines, reading a byte from memory
4095 zeros the rest of the register. Noticing that fact saves a lot
4096 of extra zero-extends. */
4097 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4098 nonzero &= GET_MODE_MASK (GET_MODE (x));
4099 #endif
4100 break;
4101
4102 case EQ: case NE:
4103 case UNEQ: case LTGT:
4104 case GT: case GTU: case UNGT:
4105 case LT: case LTU: case UNLT:
4106 case GE: case GEU: case UNGE:
4107 case LE: case LEU: case UNLE:
4108 case UNORDERED: case ORDERED:
4109 /* If this produces an integer result, we know which bits are set.
4110 Code here used to clear bits outside the mode of X, but that is
4111 now done above. */
4112 /* Mind that MODE is the mode the caller wants to look at this
4113 operation in, and not the actual operation mode. We can wind
4114 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4115 that describes the results of a vector compare. */
4116 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4117 && mode_width <= HOST_BITS_PER_WIDE_INT)
4118 nonzero = STORE_FLAG_VALUE;
4119 break;
4120
4121 case NEG:
4122 #if 0
4123 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4124 and num_sign_bit_copies. */
4125 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4126 == GET_MODE_PRECISION (GET_MODE (x)))
4127 nonzero = 1;
4128 #endif
4129
4130 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4131 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4132 break;
4133
4134 case ABS:
4135 #if 0
4136 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4137 and num_sign_bit_copies. */
4138 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4139 == GET_MODE_PRECISION (GET_MODE (x)))
4140 nonzero = 1;
4141 #endif
4142 break;
4143
4144 case TRUNCATE:
4145 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4146 known_x, known_mode, known_ret)
4147 & GET_MODE_MASK (mode));
4148 break;
4149
4150 case ZERO_EXTEND:
4151 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4152 known_x, known_mode, known_ret);
4153 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4154 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4155 break;
4156
4157 case SIGN_EXTEND:
4158 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4159 Otherwise, show that all the bits in the outer mode but not in
4160 the inner mode may be nonzero. */
4161 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4162 known_x, known_mode, known_ret);
4163 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4164 {
4165 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4166 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4167 inner_nz |= (GET_MODE_MASK (mode)
4168 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4169 }
4170
4171 nonzero &= inner_nz;
4172 break;
4173
4174 case AND:
4175 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4176 known_x, known_mode, known_ret)
4177 & cached_nonzero_bits (XEXP (x, 1), mode,
4178 known_x, known_mode, known_ret);
4179 break;
4180
4181 case XOR: case IOR:
4182 case UMIN: case UMAX: case SMIN: case SMAX:
4183 {
4184 unsigned HOST_WIDE_INT nonzero0
4185 = cached_nonzero_bits (XEXP (x, 0), mode,
4186 known_x, known_mode, known_ret);
4187
4188 /* Don't call nonzero_bits for the second time if it cannot change
4189 anything. */
4190 if ((nonzero & nonzero0) != nonzero)
4191 nonzero &= nonzero0
4192 | cached_nonzero_bits (XEXP (x, 1), mode,
4193 known_x, known_mode, known_ret);
4194 }
4195 break;
4196
4197 case PLUS: case MINUS:
4198 case MULT:
4199 case DIV: case UDIV:
4200 case MOD: case UMOD:
4201 /* We can apply the rules of arithmetic to compute the number of
4202 high- and low-order zero bits of these operations. We start by
4203 computing the width (position of the highest-order nonzero bit)
4204 and the number of low-order zero bits for each value. */
4205 {
4206 unsigned HOST_WIDE_INT nz0
4207 = cached_nonzero_bits (XEXP (x, 0), mode,
4208 known_x, known_mode, known_ret);
4209 unsigned HOST_WIDE_INT nz1
4210 = cached_nonzero_bits (XEXP (x, 1), mode,
4211 known_x, known_mode, known_ret);
4212 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4213 int width0 = floor_log2 (nz0) + 1;
4214 int width1 = floor_log2 (nz1) + 1;
4215 int low0 = floor_log2 (nz0 & -nz0);
4216 int low1 = floor_log2 (nz1 & -nz1);
4217 unsigned HOST_WIDE_INT op0_maybe_minusp
4218 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4219 unsigned HOST_WIDE_INT op1_maybe_minusp
4220 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4221 unsigned int result_width = mode_width;
4222 int result_low = 0;
4223
4224 switch (code)
4225 {
4226 case PLUS:
4227 result_width = MAX (width0, width1) + 1;
4228 result_low = MIN (low0, low1);
4229 break;
4230 case MINUS:
4231 result_low = MIN (low0, low1);
4232 break;
4233 case MULT:
4234 result_width = width0 + width1;
4235 result_low = low0 + low1;
4236 break;
4237 case DIV:
4238 if (width1 == 0)
4239 break;
4240 if (!op0_maybe_minusp && !op1_maybe_minusp)
4241 result_width = width0;
4242 break;
4243 case UDIV:
4244 if (width1 == 0)
4245 break;
4246 result_width = width0;
4247 break;
4248 case MOD:
4249 if (width1 == 0)
4250 break;
4251 if (!op0_maybe_minusp && !op1_maybe_minusp)
4252 result_width = MIN (width0, width1);
4253 result_low = MIN (low0, low1);
4254 break;
4255 case UMOD:
4256 if (width1 == 0)
4257 break;
4258 result_width = MIN (width0, width1);
4259 result_low = MIN (low0, low1);
4260 break;
4261 default:
4262 gcc_unreachable ();
4263 }
4264
4265 if (result_width < mode_width)
4266 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4267
4268 if (result_low > 0)
4269 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4270 }
4271 break;
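/* Worked example: if NZ0 == 0x0f (width0 4, low0 0) and NZ1 == 0x06
   (width1 3, low1 1), then for PLUS result_width is MAX (4, 3) + 1 == 5
   and result_low is 0, so NONZERO is masked to 0x1f; for MULT
   result_width is 4 + 3 == 7 and result_low is 0 + 1 == 1, giving a
   mask of 0x7e.  */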
4272
4273 case ZERO_EXTRACT:
4274 if (CONST_INT_P (XEXP (x, 1))
4275 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4276 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4277 break;
4278
4279 case SUBREG:
4280 /* If this is a SUBREG formed for a promoted variable that has
4281 been zero-extended, we know that at least the high-order bits
4282 are zero, though others might be too. */
4283
4284 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
4285 nonzero = GET_MODE_MASK (GET_MODE (x))
4286 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4287 known_x, known_mode, known_ret);
4288
4289 inner_mode = GET_MODE (SUBREG_REG (x));
4290 /* If the inner mode is a single word for both the host and target
4291 machines, we can compute this from which bits of the inner
4292 object might be nonzero. */
4293 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4294 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4295 {
4296 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4297 known_x, known_mode, known_ret);
4298
4299 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4300 /* If this is a typical RISC machine, we only have to worry
4301 about the way loads are extended. */
4302 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4303 ? val_signbit_known_set_p (inner_mode, nonzero)
4304 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4305 || !MEM_P (SUBREG_REG (x)))
4306 #endif
4307 {
4308 /* On many CISC machines, accessing an object in a wider mode
4309 causes the high-order bits to become undefined. So they are
4310 not known to be zero. */
4311 if (GET_MODE_PRECISION (GET_MODE (x))
4312 > GET_MODE_PRECISION (inner_mode))
4313 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4314 & ~GET_MODE_MASK (inner_mode));
4315 }
4316 }
4317 break;
4318
4319 case ASHIFTRT:
4320 case LSHIFTRT:
4321 case ASHIFT:
4322 case ROTATE:
4323 /* The nonzero bits are in two classes: any bits within MODE
4324 that aren't in GET_MODE (x) are always significant. The rest of the
4325 nonzero bits are those that are significant in the operand of
4326 the shift when shifted the appropriate number of bits. This
4327 shows that high-order bits are cleared by the right shift and
4328 low-order bits by left shifts. */
4329 if (CONST_INT_P (XEXP (x, 1))
4330 && INTVAL (XEXP (x, 1)) >= 0
4331 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4332 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4333 {
4334 enum machine_mode inner_mode = GET_MODE (x);
4335 unsigned int width = GET_MODE_PRECISION (inner_mode);
4336 int count = INTVAL (XEXP (x, 1));
4337 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4338 unsigned HOST_WIDE_INT op_nonzero
4339 = cached_nonzero_bits (XEXP (x, 0), mode,
4340 known_x, known_mode, known_ret);
4341 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4342 unsigned HOST_WIDE_INT outer = 0;
4343
4344 if (mode_width > width)
4345 outer = (op_nonzero & nonzero & ~mode_mask);
4346
4347 if (code == LSHIFTRT)
4348 inner >>= count;
4349 else if (code == ASHIFTRT)
4350 {
4351 inner >>= count;
4352
4353 /* If the sign bit may have been nonzero before the shift, we
4354 need to mark all the places it could have been copied to
4355 by the shift as possibly nonzero. */
4356 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4357 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4358 << (width - count);
4359 }
4360 else if (code == ASHIFT)
4361 inner <<= count;
4362 else
4363 inner = ((inner << (count % width)
4364 | (inner >> (width - (count % width)))) & mode_mask);
4365
4366 nonzero &= (outer | inner);
4367 }
4368 break;
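/* For example, given (lshiftrt:SI X (const_int 8)) where X has
   op_nonzero == 0xff00, INNER becomes 0xff00 >> 8 == 0xff; for
   (ashift:SI X (const_int 4)) with op_nonzero == 0xff, INNER
   becomes 0xff0.  */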
4369
4370 case FFS:
4371 case POPCOUNT:
4372 /* This is at most the number of bits in the mode. */
4373 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4374 break;
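/* E.g. for a 32-bit mode, floor_log2 (32) == 5 and the mask is
   (2 << 5) - 1 == 63, enough to represent the maximal result 32.  */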
4375
4376 case CLZ:
4377 /* If CLZ has a known value at zero, then the nonzero bits are
4378 that value, plus the number of bits in the mode minus one. */
4379 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4380 nonzero
4381 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4382 else
4383 nonzero = -1;
4384 break;
4385
4386 case CTZ:
4387 /* If CTZ has a known value at zero, then the nonzero bits are
4388 that value, plus the number of bits in the mode minus one. */
4389 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4390 nonzero
4391 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4392 else
4393 nonzero = -1;
4394 break;
4395
4396 case CLRSB:
4397 /* This is at most the number of bits in the mode minus 1. */
4398 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4399 break;
4400
4401 case PARITY:
4402 nonzero = 1;
4403 break;
4404
4405 case IF_THEN_ELSE:
4406 {
4407 unsigned HOST_WIDE_INT nonzero_true
4408 = cached_nonzero_bits (XEXP (x, 1), mode,
4409 known_x, known_mode, known_ret);
4410
4411 /* Don't call nonzero_bits for the second time if it cannot change
4412 anything. */
4413 if ((nonzero & nonzero_true) != nonzero)
4414 nonzero &= nonzero_true
4415 | cached_nonzero_bits (XEXP (x, 2), mode,
4416 known_x, known_mode, known_ret);
4417 }
4418 break;
4419
4420 default:
4421 break;
4422 }
4423
4424 return nonzero;
4425 }
4426
4427 /* See the macro definition above. */
4428 #undef cached_num_sign_bit_copies
4429
4430 \f
4431 /* The function cached_num_sign_bit_copies is a wrapper around
4432 num_sign_bit_copies1. It avoids exponential behavior in
4433 num_sign_bit_copies1 when X has identical subexpressions on the
4434 first or the second level. */
4435
4436 static unsigned int
4437 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4438 enum machine_mode known_mode,
4439 unsigned int known_ret)
4440 {
4441 if (x == known_x && mode == known_mode)
4442 return known_ret;
4443
4444 /* Try to find identical subexpressions. If found call
4445 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4446 the precomputed value for the subexpression as KNOWN_RET. */
4447
4448 if (ARITHMETIC_P (x))
4449 {
4450 rtx x0 = XEXP (x, 0);
4451 rtx x1 = XEXP (x, 1);
4452
4453 /* Check the first level. */
4454 if (x0 == x1)
4455 return
4456 num_sign_bit_copies1 (x, mode, x0, mode,
4457 cached_num_sign_bit_copies (x0, mode, known_x,
4458 known_mode,
4459 known_ret));
4460
4461 /* Check the second level. */
4462 if (ARITHMETIC_P (x0)
4463 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4464 return
4465 num_sign_bit_copies1 (x, mode, x1, mode,
4466 cached_num_sign_bit_copies (x1, mode, known_x,
4467 known_mode,
4468 known_ret));
4469
4470 if (ARITHMETIC_P (x1)
4471 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4472 return
4473 num_sign_bit_copies1 (x, mode, x0, mode,
4474 cached_num_sign_bit_copies (x0, mode, known_x,
4475 known_mode,
4476 known_ret));
4477 }
4478
4479 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4480 }
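/* For example, for (plus:SI (reg:SI A) (reg:SI A)) the first-level
   check fires: the count for (reg:SI A) is computed once and passed
   to num_sign_bit_copies1 as KNOWN_X/KNOWN_RET, so visiting the
   second operand reuses it instead of recursing again.  */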
4481
4482 /* Return the number of bits at the high-order end of X that are known to
4483 be equal to the sign bit. X will be used in mode MODE; if MODE is
4484 VOIDmode, X will be used in its own mode. The returned value will always
4485 be between 1 and the number of bits in MODE. */
4486
4487 static unsigned int
4488 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4489 enum machine_mode known_mode,
4490 unsigned int known_ret)
4491 {
4492 enum rtx_code code = GET_CODE (x);
4493 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4494 int num0, num1, result;
4495 unsigned HOST_WIDE_INT nonzero;
4496
4497 /* If we weren't given a mode, use the mode of X. If the mode is still
4498 VOIDmode, we don't know anything. Likewise if one of the modes is
4499 floating-point. */
4500
4501 if (mode == VOIDmode)
4502 mode = GET_MODE (x);
4503
4504 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4505 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4506 return 1;
4507
4508 /* For a smaller object, just ignore the high bits. */
4509 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4510 {
4511 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4512 known_x, known_mode, known_ret);
4513 return MAX (1,
4514 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4515 }
4516
4517 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4518 {
4519 #ifndef WORD_REGISTER_OPERATIONS
4520 /* If this machine does not do all register operations on the entire
4521 register and MODE is wider than the mode of X, we can say nothing
4522 at all about the high-order bits. */
4523 return 1;
4524 #else
4525 /* Likewise on machines that do, if the mode of the object is smaller
4526 than a word and loads of that size don't sign extend, we can say
4527 nothing about the high order bits. */
4528 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4529 #ifdef LOAD_EXTEND_OP
4530 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4531 #endif
4532 )
4533 return 1;
4534 #endif
4535 }
4536
4537 switch (code)
4538 {
4539 case REG:
4540
4541 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4542 /* If pointers extend signed and this is a pointer in Pmode, say that
4543 all the bits above ptr_mode are known to be sign bit copies. */
4544 /* As we do not know which address space the pointer is referring to,
4545 we can do this only if the target does not support different pointer
4546 or address modes depending on the address space. */
4547 if (target_default_pointer_address_modes_p ()
4548 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4549 && mode == Pmode && REG_POINTER (x))
4550 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4551 #endif
4552
4553 {
4554 unsigned int copies_for_hook = 1, copies = 1;
4555 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4556 known_mode, known_ret,
4557 &copies_for_hook);
4558
4559 if (new_rtx)
4560 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4561 known_mode, known_ret);
4562
4563 if (copies > 1 || copies_for_hook > 1)
4564 return MAX (copies, copies_for_hook);
4565
4566 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4567 }
4568 break;
4569
4570 case MEM:
4571 #ifdef LOAD_EXTEND_OP
4572 /* Some RISC machines sign-extend all loads of smaller than a word. */
4573 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4574 return MAX (1, ((int) bitwidth
4575 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4576 #endif
4577 break;
4578
4579 case CONST_INT:
4580 /* If the constant is negative, take its 1's complement and remask.
4581 Then see how many zero bits we have. */
4582 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4583 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4584 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4585 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4586
4587 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
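/* For example, (const_int -4) looked at in a 32-bit mode gives
   nonzero == ~0xfffffffc & 0xffffffff == 3, so the result is
   32 - floor_log2 (3) - 1 == 30: the top 30 bits of ...11111100
   are copies of the sign bit.  */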
4588
4589 case SUBREG:
4590 /* If this is a SUBREG for a promoted object that is sign-extended
4591 and we are looking at it in a wider mode, we know that at least the
4592 high-order bits are known to be sign bit copies. */
4593
4594 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4595 {
4596 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4597 known_x, known_mode, known_ret);
4598 return MAX ((int) bitwidth
4599 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4600 num0);
4601 }
4602
4603 /* For a smaller object, just ignore the high bits. */
4604 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4605 {
4606 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4607 known_x, known_mode, known_ret);
4608 return MAX (1, (num0
4609 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4610 - bitwidth)));
4611 }
4612
4613 #ifdef WORD_REGISTER_OPERATIONS
4614 #ifdef LOAD_EXTEND_OP
4615 /* For paradoxical SUBREGs on machines where all register operations
4616 affect the entire register, just look inside. Note that we are
4617 passing MODE to the recursive call, so the number of sign bit copies
4618 will remain relative to that mode, not the inner mode. */
4619
4620 /* This works only if loads sign extend. Otherwise, if we get a
4621 reload for the inner part, it may be loaded from the stack, and
4622 then we lose all sign bit copies that existed before the store
4623 to the stack. */
4624
4625 if (paradoxical_subreg_p (x)
4626 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4627 && MEM_P (SUBREG_REG (x)))
4628 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4629 known_x, known_mode, known_ret);
4630 #endif
4631 #endif
4632 break;
4633
4634 case SIGN_EXTRACT:
4635 if (CONST_INT_P (XEXP (x, 1)))
4636 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4637 break;
4638
4639 case SIGN_EXTEND:
4640 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4641 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4642 known_x, known_mode, known_ret));
4643
4644 case TRUNCATE:
4645 /* For a smaller object, just ignore the high bits. */
4646 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4647 known_x, known_mode, known_ret);
4648 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4649 - bitwidth)));
4650
4651 case NOT:
4652 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4653 known_x, known_mode, known_ret);
4654
4655 case ROTATE: case ROTATERT:
4656 /* If we are rotating left by a number of bits less than the number
4657 of sign bit copies, we can just subtract that amount from the
4658 number. */
4659 if (CONST_INT_P (XEXP (x, 1))
4660 && INTVAL (XEXP (x, 1)) >= 0
4661 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4662 {
4663 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4664 known_x, known_mode, known_ret);
4665 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4666 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4667 }
4668 break;
4669
4670 case NEG:
4671 /* In general, this subtracts one sign bit copy. But if the value
4672 is known to be positive, the number of sign bit copies is the
4673 same as that of the input. Finally, if the input has just one bit
4674 that might be nonzero, all the bits are copies of the sign bit. */
4675 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4676 known_x, known_mode, known_ret);
4677 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4678 return num0 > 1 ? num0 - 1 : 1;
4679
4680 nonzero = nonzero_bits (XEXP (x, 0), mode);
4681 if (nonzero == 1)
4682 return bitwidth;
4683
4684 if (num0 > 1
4685 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4686 num0--;
4687
4688 return num0;
4689
4690 case IOR: case AND: case XOR:
4691 case SMIN: case SMAX: case UMIN: case UMAX:
4692 /* Logical operations will preserve the number of sign-bit copies.
4693 MIN and MAX operations always return one of the operands. */
4694 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4695 known_x, known_mode, known_ret);
4696 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4697 known_x, known_mode, known_ret);
4698
4699 /* If num1 is clearing some of the top bits then regardless of
4700 the other term, we are guaranteed to have at least that many
4701 high-order zero bits. */
4702 if (code == AND
4703 && num1 > 1
4704 && bitwidth <= HOST_BITS_PER_WIDE_INT
4705 && CONST_INT_P (XEXP (x, 1))
4706 && (UINTVAL (XEXP (x, 1))
4707 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4708 return num1;
4709
4710 /* Similarly for IOR when setting high-order bits. */
4711 if (code == IOR
4712 && num1 > 1
4713 && bitwidth <= HOST_BITS_PER_WIDE_INT
4714 && CONST_INT_P (XEXP (x, 1))
4715 && (UINTVAL (XEXP (x, 1))
4716 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4717 return num1;
4718
4719 return MIN (num0, num1);
4720
4721 case PLUS: case MINUS:
4722 /* For addition and subtraction, we can have a 1-bit carry. However,
4723 if we are subtracting 1 from a positive number, there will not
4724 be such a carry. Furthermore, if the positive number is known to
4725 be 0 or 1, we know the result is either -1 or 0. */
4726
4727 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4728 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4729 {
4730 nonzero = nonzero_bits (XEXP (x, 0), mode);
4731 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4732 return (nonzero == 1 || nonzero == 0 ? bitwidth
4733 : bitwidth - floor_log2 (nonzero) - 1);
4734 }
4735
4736 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4737 known_x, known_mode, known_ret);
4738 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4739 known_x, known_mode, known_ret);
4740 result = MAX (1, MIN (num0, num1) - 1);
4741
4742 return result;
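/* For example, if nonzero_bits shows XEXP (x, 0) is 0 or 1 and we
   are adding -1, the result is 0 or -1 and all BITWIDTH bits are
   sign bit copies.  In the general case, num0 == 20 and num1 == 28
   give MAX (1, MIN (20, 28) - 1) == 19.  */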
4743
4744 case MULT:
4745 /* The number of bits of the product is the sum of the number of
4746 bits of both terms. However, unless one of the terms is known
4747 to be positive, we must allow for an additional bit since negating
4748 a negative number can remove one sign bit copy. */
4749
4750 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4751 known_x, known_mode, known_ret);
4752 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4753 known_x, known_mode, known_ret);
4754
4755 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4756 if (result > 0
4757 && (bitwidth > HOST_BITS_PER_WIDE_INT
4758 || (((nonzero_bits (XEXP (x, 0), mode)
4759 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4760 && ((nonzero_bits (XEXP (x, 1), mode)
4761 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4762 != 0))))
4763 result--;
4764
4765 return MAX (1, result);
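/* For example, with bitwidth 32, num0 == 20 and num1 == 28, the
   value widths sum to 12 + 4 == 16 bits, so result == 16; if both
   operands might be negative the extra bit drops it to 15.  */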
4766
4767 case UDIV:
4768 /* The result must be <= the first operand. If the first operand
4769 has the high bit set, we know nothing about the number of sign
4770 bit copies. */
4771 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4772 return 1;
4773 else if ((nonzero_bits (XEXP (x, 0), mode)
4774 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4775 return 1;
4776 else
4777 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4778 known_x, known_mode, known_ret);
4779
4780 case UMOD:
4781 /* The result must be <= the second operand. If the second operand
4782 has (or just might have) the high bit set, we know nothing about
4783 the number of sign bit copies. */
4784 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4785 return 1;
4786 else if ((nonzero_bits (XEXP (x, 1), mode)
4787 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4788 return 1;
4789 else
4790 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4791 known_x, known_mode, known_ret);
4792
4793 case DIV:
4794 /* Similar to unsigned division, except that we have to worry about
4795 the case where the divisor is negative, in which case we have
4796 to add 1. */
4797 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4798 known_x, known_mode, known_ret);
4799 if (result > 1
4800 && (bitwidth > HOST_BITS_PER_WIDE_INT
4801 || (nonzero_bits (XEXP (x, 1), mode)
4802 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4803 result--;
4804
4805 return result;
4806
4807 case MOD:
4808 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4809 known_x, known_mode, known_ret);
4810 if (result > 1
4811 && (bitwidth > HOST_BITS_PER_WIDE_INT
4812 || (nonzero_bits (XEXP (x, 1), mode)
4813 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4814 result--;
4815
4816 return result;
4817
4818 case ASHIFTRT:
4819 /* Shifts by a constant add to the number of bits equal to the
4820 sign bit. */
4821 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4822 known_x, known_mode, known_ret);
4823 if (CONST_INT_P (XEXP (x, 1))
4824 && INTVAL (XEXP (x, 1)) > 0
4825 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4826 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4827
4828 return num0;
4829
4830 case ASHIFT:
4831 /* Left shifts destroy copies. */
4832 if (!CONST_INT_P (XEXP (x, 1))
4833 || INTVAL (XEXP (x, 1)) < 0
4834 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4835 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4836 return 1;
4837
4838 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4839 known_x, known_mode, known_ret);
4840 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4841
4842 case IF_THEN_ELSE:
4843 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4844 known_x, known_mode, known_ret);
4845 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4846 known_x, known_mode, known_ret);
4847 return MIN (num0, num1);
4848
4849 case EQ: case NE: case GE: case GT: case LE: case LT:
4850 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4851 case GEU: case GTU: case LEU: case LTU:
4852 case UNORDERED: case ORDERED:
4853 /* If the constant is negative, take its 1's complement and remask.
4854 Then see how many zero bits we have. */
4855 nonzero = STORE_FLAG_VALUE;
4856 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4857 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4858 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4859
4860 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4861
4862 default:
4863 break;
4864 }
4865
4866 /* If we haven't been able to figure it out by one of the above rules,
4867 see if some of the high-order bits are known to be zero. If so,
4868 count those bits and return one less than that amount. If we can't
4869 safely compute the mask for this mode, always return BITWIDTH. */
4870
4871 bitwidth = GET_MODE_PRECISION (mode);
4872 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4873 return 1;
4874
4875 nonzero = nonzero_bits (x, mode);
4876 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4877 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4878 }
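/* As an illustration of the fallback above: if nonzero_bits returns
   0x00ff for a 32-bit MODE, the sign bit is clear and the result is
   32 - floor_log2 (0xff) - 1 == 24 sign bit copies.  */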
4879
4880 /* Calculate the rtx_cost of a single instruction. A return value of
4881 zero indicates an instruction pattern without a known cost. */
4882
4883 int
4884 insn_rtx_cost (rtx pat, bool speed)
4885 {
4886 int i, cost;
4887 rtx set;
4888
4889 /* Extract the single set rtx from the instruction pattern.
4890 We can't use single_set since we only have the pattern. */
4891 if (GET_CODE (pat) == SET)
4892 set = pat;
4893 else if (GET_CODE (pat) == PARALLEL)
4894 {
4895 set = NULL_RTX;
4896 for (i = 0; i < XVECLEN (pat, 0); i++)
4897 {
4898 rtx x = XVECEXP (pat, 0, i);
4899 if (GET_CODE (x) == SET)
4900 {
4901 if (set)
4902 return 0;
4903 set = x;
4904 }
4905 }
4906 if (!set)
4907 return 0;
4908 }
4909 else
4910 return 0;
4911
4912 cost = set_src_cost (SET_SRC (set), speed);
4913 return cost > 0 ? cost : COSTS_N_INSNS (1);
4914 }
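#if 0
/* Illustrative use only, not compiled: cost a simple
   register-increment pattern when optimizing for speed.  Pseudo
   register number 100 is an arbitrary choice for the example.  */
rtx pat = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 100),
                       gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 100),
                                     const1_rtx));
int cost = insn_rtx_cost (pat, true);
#endif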
4915
4916 /* Given an insn INSN and condition COND, return the condition in a
4917 canonical form to simplify testing by callers. Specifically:
4918
4919 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4920 (2) Both operands will be machine operands; (cc0) will have been replaced.
4921 (3) If an operand is a constant, it will be the second operand.
4922 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4923 for GE, GEU, and LEU.
4924
4925 If the condition cannot be understood, or is an inequality floating-point
4926 comparison which needs to be reversed, 0 will be returned.
4927
4928 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4929
4930 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4931 insn used in locating the condition was found. If a replacement test
4932 of the condition is desired, it should be placed in front of that
4933 insn and we will be sure that the inputs are still valid.
4934
4935 If WANT_REG is nonzero, we wish the condition to be relative to that
4936 register, if possible. Therefore, do not canonicalize the condition
4937 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4938 to be a compare to a CC mode register.
4939
4940 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4941 and at INSN. */
4942
4943 rtx
4944 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4945 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4946 {
4947 enum rtx_code code;
4948 rtx prev = insn;
4949 const_rtx set;
4950 rtx tem;
4951 rtx op0, op1;
4952 int reverse_code = 0;
4953 enum machine_mode mode;
4954 basic_block bb = BLOCK_FOR_INSN (insn);
4955
4956 code = GET_CODE (cond);
4957 mode = GET_MODE (cond);
4958 op0 = XEXP (cond, 0);
4959 op1 = XEXP (cond, 1);
4960
4961 if (reverse)
4962 code = reversed_comparison_code (cond, insn);
4963 if (code == UNKNOWN)
4964 return 0;
4965
4966 if (earliest)
4967 *earliest = insn;
4968
4969 /* If we are comparing a register with zero, see if the register is set
4970 in the previous insn to a COMPARE or a comparison operation. Perform
4971 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4972 in cse.c */
4973
4974 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4975 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4976 && op1 == CONST0_RTX (GET_MODE (op0))
4977 && op0 != want_reg)
4978 {
4979 /* Set nonzero when we find something of interest. */
4980 rtx x = 0;
4981
4982 #ifdef HAVE_cc0
4983 /* If comparison with cc0, import actual comparison from compare
4984 insn. */
4985 if (op0 == cc0_rtx)
4986 {
4987 if ((prev = prev_nonnote_insn (prev)) == 0
4988 || !NONJUMP_INSN_P (prev)
4989 || (set = single_set (prev)) == 0
4990 || SET_DEST (set) != cc0_rtx)
4991 return 0;
4992
4993 op0 = SET_SRC (set);
4994 op1 = CONST0_RTX (GET_MODE (op0));
4995 if (earliest)
4996 *earliest = prev;
4997 }
4998 #endif
4999
5000 /* If this is a COMPARE, pick up the two things being compared. */
5001 if (GET_CODE (op0) == COMPARE)
5002 {
5003 op1 = XEXP (op0, 1);
5004 op0 = XEXP (op0, 0);
5005 continue;
5006 }
5007 else if (!REG_P (op0))
5008 break;
5009
5010 /* Go back to the previous insn. Stop if it is not an INSN. We also
5011 stop if it isn't a single set or if it has a REG_INC note because
5012 we don't want to bother dealing with it. */
5013
5014 prev = prev_nonnote_nondebug_insn (prev);
5015
5016 if (prev == 0
5017 || !NONJUMP_INSN_P (prev)
5018 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5019 /* In cfglayout mode, there do not have to be labels at the
5020 beginning of a block, or jumps at the end, so the previous
5021 conditions would not stop us when we reach bb boundary. */
5022 || BLOCK_FOR_INSN (prev) != bb)
5023 break;
5024
5025 set = set_of (op0, prev);
5026
5027 if (set
5028 && (GET_CODE (set) != SET
5029 || !rtx_equal_p (SET_DEST (set), op0)))
5030 break;
5031
5032 /* If this is setting OP0, get what it sets it to if it looks
5033 relevant. */
5034 if (set)
5035 {
5036 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
5037 #ifdef FLOAT_STORE_FLAG_VALUE
5038 REAL_VALUE_TYPE fsfv;
5039 #endif
5040
5041 /* ??? We may not combine comparisons done in a CCmode with
5042 comparisons not done in a CCmode. This is to aid targets
5043 like Alpha that have an IEEE compliant EQ instruction, and
5044 a non-IEEE compliant BEQ instruction. The use of CCmode is
5045 actually artificial, simply to prevent the combination, but
5046 should not affect other platforms.
5047
5048 However, we must allow VOIDmode comparisons to match either
5049 CCmode or non-CCmode comparison, because some ports have
5050 modeless comparisons inside branch patterns.
5051
5052 ??? This mode check should perhaps look more like the mode check
5053 in simplify_comparison in combine. */
5054
5055 if ((GET_CODE (SET_SRC (set)) == COMPARE
5056 || (((code == NE
5057 || (code == LT
5058 && val_signbit_known_set_p (inner_mode,
5059 STORE_FLAG_VALUE))
5060 #ifdef FLOAT_STORE_FLAG_VALUE
5061 || (code == LT
5062 && SCALAR_FLOAT_MODE_P (inner_mode)
5063 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5064 REAL_VALUE_NEGATIVE (fsfv)))
5065 #endif
5066 ))
5067 && COMPARISON_P (SET_SRC (set))))
5068 && (((GET_MODE_CLASS (mode) == MODE_CC)
5069 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5070 || mode == VOIDmode || inner_mode == VOIDmode))
5071 x = SET_SRC (set);
5072 else if (((code == EQ
5073 || (code == GE
5074 && val_signbit_known_set_p (inner_mode,
5075 STORE_FLAG_VALUE))
5076 #ifdef FLOAT_STORE_FLAG_VALUE
5077 || (code == GE
5078 && SCALAR_FLOAT_MODE_P (inner_mode)
5079 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5080 REAL_VALUE_NEGATIVE (fsfv)))
5081 #endif
5082 ))
5083 && COMPARISON_P (SET_SRC (set))
5084 && (((GET_MODE_CLASS (mode) == MODE_CC)
5085 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
5086 || mode == VOIDmode || inner_mode == VOIDmode))
5087
5088 {
5089 reverse_code = 1;
5090 x = SET_SRC (set);
5091 }
5092 else
5093 break;
5094 }
5095
5096 else if (reg_set_p (op0, prev))
5097 /* If this sets OP0, but not directly, we have to give up. */
5098 break;
5099
5100 if (x)
5101 {
5102 /* If the caller is expecting the condition to be valid at INSN,
5103 make sure X doesn't change before INSN. */
5104 if (valid_at_insn_p)
5105 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5106 break;
5107 if (COMPARISON_P (x))
5108 code = GET_CODE (x);
5109 if (reverse_code)
5110 {
5111 code = reversed_comparison_code (x, prev);
5112 if (code == UNKNOWN)
5113 return 0;
5114 reverse_code = 0;
5115 }
5116
5117 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5118 if (earliest)
5119 *earliest = prev;
5120 }
5121 }
5122
5123 /* If constant is first, put it last. */
5124 if (CONSTANT_P (op0))
5125 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5126
5127 /* If OP0 is the result of a comparison, we weren't able to find what
5128 was really being compared, so fail. */
5129 if (!allow_cc_mode
5130 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5131 return 0;
5132
5133 /* Canonicalize any ordered comparison with integers involving equality
5134 if we can do computations in the relevant mode and we do not
5135 overflow. */
5136
5137 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5138 && CONST_INT_P (op1)
5139 && GET_MODE (op0) != VOIDmode
5140 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5141 {
5142 HOST_WIDE_INT const_val = INTVAL (op1);
5143 unsigned HOST_WIDE_INT uconst_val = const_val;
5144 unsigned HOST_WIDE_INT max_val
5145 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5146
5147 switch (code)
5148 {
5149 case LE:
5150 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5151 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5152 break;
5153
5154 /* When cross-compiling, const_val might be sign-extended from
5155 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
5156 case GE:
5157 if ((const_val & max_val)
5158 != ((unsigned HOST_WIDE_INT) 1
5159 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5160 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5161 break;
5162
5163 case LEU:
5164 if (uconst_val < max_val)
5165 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5166 break;
5167
5168 case GEU:
5169 if (uconst_val != 0)
5170 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5171 break;
5172
5173 default:
5174 break;
5175 }
5176 }
5177
5178 /* Never return CC0; return zero instead. */
5179 if (CC0_P (op0))
5180 return 0;
5181
5182 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5183 }
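/* For example, rule (4) above turns (le (reg:SI A) (const_int 4))
   into (lt (reg:SI A) (const_int 5)), and (geu (reg:SI A)
   (const_int 1)) into (gtu (reg:SI A) (const_int 0)).  */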
5184
5185 /* Given a jump insn JUMP, return the condition that will cause it to branch
5186 to its JUMP_LABEL. If the condition cannot be understood, or is an
5187 inequality floating-point comparison which needs to be reversed, 0 will
5188 be returned.
5189
5190 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5191 insn used in locating the condition was found. If a replacement test
5192 of the condition is desired, it should be placed in front of that
5193 insn and we will be sure that the inputs are still valid. If EARLIEST
5194 is null, the returned condition will be valid at INSN.
5195
5196 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5197 compare to a CC mode register.
5198
5199 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5200
5201 rtx
5202 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
5203 {
5204 rtx cond;
5205 int reverse;
5206 rtx set;
5207
5208 /* If this is not a standard conditional jump, we can't parse it. */
5209 if (!JUMP_P (jump)
5210 || ! any_condjump_p (jump))
5211 return 0;
5212 set = pc_set (jump);
5213
5214 cond = XEXP (SET_SRC (set), 0);
5215
5216 /* If this branches to JUMP_LABEL when the condition is false, reverse
5217 the condition. */
5218 reverse
5219 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5220 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
5221
5222 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5223 allow_cc_mode, valid_at_insn_p);
5224 }
5225
5226 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5227 TARGET_MODE_REP_EXTENDED.
5228
5229 Note that we assume that the property of
5230 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5231 narrower than mode B. I.e., if A is a mode narrower than B then in
5232 order to be able to operate on it in mode B, mode A needs to
5233 satisfy the requirements set by the representation of mode B. */
5234
5235 static void
5236 init_num_sign_bit_copies_in_rep (void)
5237 {
5238 enum machine_mode mode, in_mode;
5239
5240 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5241 in_mode = GET_MODE_WIDER_MODE (in_mode))
5242 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5243 mode = GET_MODE_WIDER_MODE (mode))
5244 {
5245 enum machine_mode i;
5246
5247 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5248 extends to the next widest mode. */
5249 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5250 || GET_MODE_WIDER_MODE (mode) == in_mode);
5251
5252 /* We are in in_mode. Count how many bits outside of mode
5253 have to be copies of the sign-bit. */
5254 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5255 {
5256 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
5257
5258 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5259 /* We can only check sign-bit copies starting from the
5260 top-bit. In order to be able to check the bits we
5261 have already seen we pretend that subsequent bits
5262 have to be sign-bit copies too. */
5263 || num_sign_bit_copies_in_rep [in_mode][mode])
5264 num_sign_bit_copies_in_rep [in_mode][mode]
5265 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5266 }
5267 }
5268 }
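/* For instance, on a hypothetical target where
   targetm.mode_rep_extended (SImode, DImode) is SIGN_EXTEND, the
   loops above record num_sign_bit_copies_in_rep[DImode][SImode]
   == 64 - 32 == 32.  */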
5269
5270 /* Suppose that truncation from the machine mode of X to MODE is not a
5271 no-op. See if there is anything special about X so that we can
5272 assume it already contains a truncated value of MODE. */
5273
5274 bool
5275 truncated_to_mode (enum machine_mode mode, const_rtx x)
5276 {
5277 /* This register has already been used in MODE without explicit
5278 truncation. */
5279 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5280 return true;
5281
5282 /* See if we already satisfy the requirements of MODE. If yes we
5283 can just switch to MODE. */
5284 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5285 && (num_sign_bit_copies (x, GET_MODE (x))
5286 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5287 return true;
5288
5289 return false;
5290 }
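/* Continuing the example above: a DImode value with at least
   32 + 1 == 33 sign bit copies already satisfies the SImode
   representation, so truncated_to_mode returns true for it.  */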
5291 \f
5292 /* Initialize non_rtx_starting_operands, which is used to speed up
5293 for_each_rtx. */
5294 void
5295 init_rtlanal (void)
5296 {
5297 int i;
5298 for (i = 0; i < NUM_RTX_CODE; i++)
5299 {
5300 const char *format = GET_RTX_FORMAT (i);
5301 const char *first = strpbrk (format, "eEV");
5302 non_rtx_starting_operands[i] = first ? first - format : -1;
5303 }
5304
5305 init_num_sign_bit_copies_in_rep ();
5306 }
5307 \f
5308 /* Check whether this is a constant pool constant. */
5309 bool
5310 constant_pool_constant_p (rtx x)
5311 {
5312 x = avoid_constant_pool_reference (x);
5313 return CONST_DOUBLE_P (x);
5314 }
5315 \f
5316 /* If M is a bitmask that selects a field of low-order bits within an item but
5317 not the entire word, return the length of the field. Return -1 otherwise.
5318 M is used in machine mode MODE. */
5319
5320 int
5321 low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
5322 {
5323 if (mode != VOIDmode)
5324 {
5325 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5326 return -1;
5327 m &= GET_MODE_MASK (mode);
5328 }
5329
5330 return exact_log2 (m + 1);
5331 }
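/* For example, m == 0x7f yields exact_log2 (0x80) == 7, while a mask
   with a hole such as 0x70 yields exact_log2 (0x71) == -1.  */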
5332
5333 /* Return the mode of MEM's address. */
5334
5335 enum machine_mode
5336 get_address_mode (rtx mem)
5337 {
5338 enum machine_mode mode;
5339
5340 gcc_assert (MEM_P (mem));
5341 mode = GET_MODE (XEXP (mem, 0));
5342 if (mode != VOIDmode)
5343 return mode;
5344 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5345 }
5346 \f
5347 /* Split up a CONST_DOUBLE or integer constant rtx
5348 into two rtx's for single words,
5349 storing in *FIRST the word that comes first in memory in the target
5350 and in *SECOND the other. */
5351
5352 void
5353 split_double (rtx value, rtx *first, rtx *second)
5354 {
5355 if (CONST_INT_P (value))
5356 {
5357 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5358 {
5359 /* In this case the CONST_INT holds both target words.
5360 Extract the bits from it into two word-sized pieces.
5361 Sign extend each half to HOST_WIDE_INT. */
5362 unsigned HOST_WIDE_INT low, high;
5363 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5364 unsigned bits_per_word = BITS_PER_WORD;
5365
5366 /* Set sign_bit to the most significant bit of a word. */
5367 sign_bit = 1;
5368 sign_bit <<= bits_per_word - 1;
5369
5370 /* Set mask so that all bits of the word are set. We could
5371 have used 1 << BITS_PER_WORD instead of basing the
5372 calculation on sign_bit. However, on machines where
5373 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5374 compiler warning, even though the code would never be
5375 executed. */
5376 mask = sign_bit << 1;
5377 mask--;
5378
5379 /* Set sign_extend as any remaining bits. */
5380 sign_extend = ~mask;
5381
5382 /* Pick the lower word and sign-extend it. */
5383 low = INTVAL (value);
5384 low &= mask;
5385 if (low & sign_bit)
5386 low |= sign_extend;
5387
5388 /* Pick the higher word, shifted to the least significant
5389 bits, and sign-extend it. */
5390 high = INTVAL (value);
5391 high >>= bits_per_word - 1;
5392 high >>= 1;
5393 high &= mask;
5394 if (high & sign_bit)
5395 high |= sign_extend;
5396
5397 /* Store the words in the target machine order. */
5398 if (WORDS_BIG_ENDIAN)
5399 {
5400 *first = GEN_INT (high);
5401 *second = GEN_INT (low);
5402 }
5403 else
5404 {
5405 *first = GEN_INT (low);
5406 *second = GEN_INT (high);
5407 }
5408 }
5409 else
5410 {
5411 /* The rule for using CONST_INT for a wider mode
5412 is that we regard the value as signed.
5413 So sign-extend it. */
5414 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5415 if (WORDS_BIG_ENDIAN)
5416 {
5417 *first = high;
5418 *second = value;
5419 }
5420 else
5421 {
5422 *first = value;
5423 *second = high;
5424 }
5425 }
5426 }
5427 else if (!CONST_DOUBLE_P (value))
5428 {
5429 if (WORDS_BIG_ENDIAN)
5430 {
5431 *first = const0_rtx;
5432 *second = value;
5433 }
5434 else
5435 {
5436 *first = value;
5437 *second = const0_rtx;
5438 }
5439 }
5440 else if (GET_MODE (value) == VOIDmode
5441 /* This is the old way we did CONST_DOUBLE integers. */
5442 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5443 {
5444 /* In an integer, the words are defined as most and least significant.
5445 So order them by the target's convention. */
5446 if (WORDS_BIG_ENDIAN)
5447 {
5448 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5449 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5450 }
5451 else
5452 {
5453 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5454 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5455 }
5456 }
5457 else
5458 {
5459 REAL_VALUE_TYPE r;
5460 long l[2];
5461 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5462
5463 /* Note, this converts the REAL_VALUE_TYPE to the target's
5464 format, splits up the floating point double and outputs
5465 exactly 32 bits of it into each of l[0] and l[1] --
5466 not necessarily BITS_PER_WORD bits. */
5467 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5468
5469 /* If 32 bits is an entire word for the target, but not for the host,
5470 then sign-extend on the host so that the number will look the same
5471 way on the host that it would on the target. See for instance
5472 simplify_unary_operation. The #if is needed to avoid compiler
5473 warnings. */
5474
5475 #if HOST_BITS_PER_LONG > 32
5476 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5477 {
5478 if (l[0] & ((long) 1 << 31))
5479 l[0] |= ((long) (-1) << 32);
5480 if (l[1] & ((long) 1 << 31))
5481 l[1] |= ((long) (-1) << 32);
5482 }
5483 #endif
5484
5485 *first = GEN_INT (l[0]);
5486 *second = GEN_INT (l[1]);
5487 }
5488 }
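/* For example, with a 64-bit HOST_WIDE_INT and 32-bit words,
   (const_int 0x100000002) splits into low == 2 and high == 1;
   on a little-endian target *FIRST is 2 and *SECOND is 1.  */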
5489
5490 /* Return true if X is a sign_extract or zero_extract from the least
5491 significant bit. */
5492
5493 static bool
5494 lsb_bitfield_op_p (rtx x)
5495 {
5496 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5497 {
5498 enum machine_mode mode = GET_MODE (XEXP (x, 0));
5499 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5500 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5501
5502 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5503 }
5504 return false;
5505 }
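/* For example, with little-endian bit numbering,
   (zero_extract:SI (reg:SI A) (const_int 8) (const_int 0)) starts at
   the least significant bit, so this returns true; with
   BITS_BIG_ENDIAN the position would have to be 32 - 8 == 24.  */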
5506
5507 /* Strip outer address "mutations" from LOC and return a pointer to the
5508 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5509 stripped expression there.
5510
5511 "Mutations" either convert between modes or apply some kind of
5512 extension, truncation or alignment. */
5513
5514 rtx *
5515 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5516 {
5517 for (;;)
5518 {
5519 enum rtx_code code = GET_CODE (*loc);
5520 if (GET_RTX_CLASS (code) == RTX_UNARY)
5521 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5522 used to convert between pointer sizes. */
5523 loc = &XEXP (*loc, 0);
5524 else if (lsb_bitfield_op_p (*loc))
5525 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5526 acts as a combined truncation and extension. */
5527 loc = &XEXP (*loc, 0);
5528 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5529 /* (and ... (const_int -X)) is used to align to X bytes. */
5530 loc = &XEXP (*loc, 0);
5531 else if (code == SUBREG
5532 && !OBJECT_P (SUBREG_REG (*loc))
5533 && subreg_lowpart_p (*loc))
5534 /* (subreg (operator ...) ...) is used for mode
5535 conversion too. */
5536 loc = &SUBREG_REG (*loc);
5537 else
5538 return loc;
5539 if (outer_code)
5540 *outer_code = code;
5541 }
5542 }
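/* For example, applied to (and:SI (plus:SI (reg:SI A) (reg:SI B))
   (const_int -4)) this strips the alignment AND and returns a
   pointer to the PLUS, with AND stored in *OUTER_CODE.  */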
5543
5544 /* Return true if CODE applies some kind of scale. The scaled value is
5545 the first operand and the scale is the second. */
5546
5547 static bool
5548 binary_scale_code_p (enum rtx_code code)
5549 {
5550 return (code == MULT
5551 || code == ASHIFT
5552 /* Needed by ARM targets. */
5553 || code == ASHIFTRT
5554 || code == LSHIFTRT
5555 || code == ROTATE
5556 || code == ROTATERT);
5557 }
5558
5559 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5560 (see address_info). Return null otherwise. */
5561
5562 static rtx *
5563 get_base_term (rtx *inner)
5564 {
5565 if (GET_CODE (*inner) == LO_SUM)
5566 inner = strip_address_mutations (&XEXP (*inner, 0));
5567 if (REG_P (*inner)
5568 || MEM_P (*inner)
5569 || GET_CODE (*inner) == SUBREG)
5570 return inner;
5571 return 0;
5572 }
5573
5574 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5575 (see address_info). Return null otherwise. */
5576
5577 static rtx *
5578 get_index_term (rtx *inner)
5579 {
5580 /* At present, only constant scales are allowed. */
5581 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5582 inner = strip_address_mutations (&XEXP (*inner, 0));
5583 if (REG_P (*inner)
5584 || MEM_P (*inner)
5585 || GET_CODE (*inner) == SUBREG)
5586 return inner;
5587 return 0;
5588 }
5589
5590 /* Set the segment part of address INFO to LOC, given that INNER is the
5591 unmutated value. */
5592
5593 static void
5594 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5595 {
5596 gcc_assert (!info->segment);
5597 info->segment = loc;
5598 info->segment_term = inner;
5599 }
5600
5601 /* Set the base part of address INFO to LOC, given that INNER is the
5602 unmutated value. */
5603
5604 static void
5605 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5606 {
5607 gcc_assert (!info->base);
5608 info->base = loc;
5609 info->base_term = inner;
5610 }
5611
5612 /* Set the index part of address INFO to LOC, given that INNER is the
5613 unmutated value. */
5614
5615 static void
5616 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5617 {
5618 gcc_assert (!info->index);
5619 info->index = loc;
5620 info->index_term = inner;
5621 }
5622
5623 /* Set the displacement part of address INFO to LOC, given that INNER
5624 is the constant term. */
5625
5626 static void
5627 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5628 {
5629 gcc_assert (!info->disp);
5630 info->disp = loc;
5631 info->disp_term = inner;
5632 }
5633
5634 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5635 rest of INFO accordingly. */
5636
5637 static void
5638 decompose_incdec_address (struct address_info *info)
5639 {
5640 info->autoinc_p = true;
5641
5642 rtx *base = &XEXP (*info->inner, 0);
5643 set_address_base (info, base, base);
5644 gcc_checking_assert (info->base == info->base_term);
5645
5646 /* These addresses are only valid when the size of the addressed
5647 value is known. */
5648 gcc_checking_assert (info->mode != VOIDmode);
5649 }
5650
5651 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5652 of INFO accordingly. */
5653
5654 static void
5655 decompose_automod_address (struct address_info *info)
5656 {
5657 info->autoinc_p = true;
5658
5659 rtx *base = &XEXP (*info->inner, 0);
5660 set_address_base (info, base, base);
5661 gcc_checking_assert (info->base == info->base_term);
5662
5663 rtx plus = XEXP (*info->inner, 1);
5664 gcc_assert (GET_CODE (plus) == PLUS);
5665
5666 info->base_term2 = &XEXP (plus, 0);
5667 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5668
5669 rtx *step = &XEXP (plus, 1);
5670 rtx *inner_step = strip_address_mutations (step);
5671 if (CONSTANT_P (*inner_step))
5672 set_address_disp (info, step, inner_step);
5673 else
5674 set_address_index (info, step, inner_step);
5675 }
5676
5677 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5678 values in [PTR, END). Return a pointer to the end of the used array. */
5679
5680 static rtx **
5681 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5682 {
5683 rtx x = *loc;
5684 if (GET_CODE (x) == PLUS)
5685 {
5686 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5687 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5688 }
5689 else
5690 {
5691 gcc_assert (ptr != end);
5692 *ptr++ = loc;
5693 }
5694 return ptr;
5695 }
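/* For example, (plus (plus (reg:SI A) (reg:SI B)) (const_int 8))
   yields pointers to the two registers and the constant, in that
   order.  */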
5696
5697 /* Evaluate the likelihood of X being a base or index value, returning
5698 positive if it is likely to be a base, negative if it is likely to be
5699 an index, and 0 if we can't tell. Make the magnitude of the return
5700 value reflect the amount of confidence we have in the answer.
5701
5702 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
5703
5704 static int
5705 baseness (rtx x, enum machine_mode mode, addr_space_t as,
5706 enum rtx_code outer_code, enum rtx_code index_code)
5707 {
5708 /* Believe *_POINTER unless the address shape requires otherwise. */
5709 if (REG_P (x) && REG_POINTER (x))
5710 return 2;
5711 if (MEM_P (x) && MEM_POINTER (x))
5712 return 2;
5713
5714 if (REG_P (x) && HARD_REGISTER_P (x))
5715 {
5716 /* X is a hard register. If it only fits one of the base
5717 or index classes, choose that interpretation. */
5718 int regno = REGNO (x);
5719 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
5720 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
5721 if (base_p != index_p)
5722 return base_p ? 1 : -1;
5723 }
5724 return 0;
5725 }
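/* For example, a REG with REG_POINTER set scores 2 (strongly a
   base), while a hard register that is valid only as an index
   scores -1.  */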

/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
                                        ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
        set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
        set_address_segment (info, loc, inner);
      else
        {
          /* The only other possibilities are a base or an index.  */
          rtx *base_term = get_base_term (inner);
          rtx *index_term = get_index_term (inner);
          gcc_assert (base_term || index_term);
          if (!base_term)
            set_address_index (info, loc, index_term);
          else if (!index_term)
            set_address_base (info, loc, base_term);
          else
            {
              /* Both interpretations are possible; defer the choice
                 until the other operands have been classified.  */
              gcc_assert (base_term == index_term);
              ops[out] = loc;
              inner_ops[out] = base_term;
              ++out;
            }
        }
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
         the base.  If we were confident that another term was the base
         or index, treat the remaining operand as the other kind.  */
      if (!info->base)
        set_address_base (info, ops[0], inner_ops[0]);
      else
        set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
                    GET_CODE (*ops[1]))
          >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
                       GET_CODE (*ops[0])))
        {
          set_address_base (info, ops[0], inner_ops[0]);
          set_address_index (info, ops[1], inner_ops[1]);
        }
      else
        {
          set_address_base (info, ops[1], inner_ops[1]);
          set_address_index (info, ops[0], inner_ops[0]);
        }
    }
  else
    gcc_assert (out == 0);
}
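
/* Illustrative sketch, not from the original source: when both
   operands could serve as base or index, the tie-breaking above picks
   the first operand as the base.  The hypothetical check below builds
   (plus (reg A) (reg B)) from fresh pseudos (bypassing gen_reg_rtx for
   brevity) and uses decompose_lea_address, defined later in this
   file.  */
#if 0
static void
example_two_reg_tie_break (void)
{
  rtx a = gen_rtx_REG (Pmode, FIRST_PSEUDO_REGISTER);
  rtx b = gen_rtx_REG (Pmode, FIRST_PSEUDO_REGISTER + 1);
  rtx addr = gen_rtx_PLUS (Pmode, a, b);
  struct address_info info;
  decompose_lea_address (&info, &addr);
  /* Neither register is a more likely base, so the first one wins.  */
  gcc_assert (*info.base_term == a && *info.index_term == b);
}
#endif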

/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
                   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

/* Describe address operand LOC in INFO.  LOC is a bare address (an
   "lea"-style operand) rather than the address of a MEM, so the mode
   is unknown (VOIDmode) and the generic address space is assumed.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
                     MEM_ADDR_SPACE (x), MEM);
}
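
/* Illustrative sketch, not from the original source: a hypothetical
   predicate showing the typical decompose_mem_address pattern of
   filling a stack-allocated address_info and querying its fields.  */
#if 0
static bool
example_mem_has_disp (rtx x)
{
  struct address_info info;
  decompose_mem_address (&info, x);
  /* DISP is null unless a constant displacement was found.  */
  return info.disp != NULL;
}
#endif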

/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
                     info->addr_outer_code);
}
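
/* Illustrative sketch, not from the original source: after editing the
   address through one of INFO's pointers, update_address refreshes the
   cached decomposition.  The hypothetical helper below bumps a
   CONST_INT displacement by DELTA; a real caller would also have to
   revalidate the resulting address for the target.  */
#if 0
static void
example_bump_disp (struct address_info *info, HOST_WIDE_INT delta)
{
  gcc_assert (info->disp && CONST_INT_P (*info->disp));
  /* Write the new displacement through the stored pointer, then
     re-decompose so the other fields stay consistent.  */
  *info->disp = GEN_INT (INTVAL (*info->disp) + delta);
  update_address (info);
}
#endif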

/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  The caller must ensure that INFO->INDEX
   is set.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
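
/* Illustrative sketch, not from the original source: for a shifted
   index such as (ashift (reg I) (const_int 3)) the scale is
   1 << 3 == 8, while a MULT index returns its constant factor and a
   bare register index returns 1.  The hypothetical check below builds
   the ASHIFT form from fresh pseudos.  */
#if 0
static void
example_index_scale (void)
{
  rtx addr = gen_rtx_PLUS (Pmode,
                           gen_rtx_REG (Pmode, FIRST_PSEUDO_REGISTER),
                           gen_rtx_ASHIFT (Pmode,
                                           gen_rtx_REG (Pmode,
                                                        FIRST_PSEUDO_REGISTER
                                                        + 1),
                                           GEN_INT (3)));
  struct address_info info;
  decompose_lea_address (&info, &addr);
  gcc_assert (get_index_scale (&info) == 8);
}
#endif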

/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
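
/* Illustrative sketch, not from the original source: a hypothetical
   end-to-end use of the decomposition routines above on a
   base + scaled-index + displacement address built from fresh
   pseudos.  */
#if 0
static void
example_full_decomposition (void)
{
  rtx base = gen_rtx_REG (Pmode, FIRST_PSEUDO_REGISTER);
  rtx index = gen_rtx_REG (Pmode, FIRST_PSEUDO_REGISTER + 1);
  rtx addr = gen_rtx_PLUS (Pmode,
                           gen_rtx_PLUS (Pmode, base,
                                         gen_rtx_MULT (Pmode, index,
                                                       GEN_INT (4))),
                           GEN_INT (8));
  struct address_info info;
  decompose_lea_address (&info, &addr);
  /* The REG in the inner PLUS is taken as the base, the MULT as the
     index and the outer constant as the displacement.  */
  gcc_assert (*info.base_term == base);
  gcc_assert (*info.index_term == index);
  gcc_assert (INTVAL (*info.disp) == 8);
  gcc_assert (get_index_scale (&info) == 4);
  gcc_assert (get_index_code (&info) == MULT);
}
#endif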