/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "recog.h"
#include "addresses.h"
#include "rtl-iter.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
                                                machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
\f
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}

/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}

template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;

/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
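
/* A minimal usage sketch; the caller and helper below are assumptions,
   not taken from this file.  A pass that wants to reuse a loaded value
   across statements could guard on the stability of the address:

       rtx addr = XEXP (mem, 0);
       if (!rtx_unstable_p (addr))
         reuse_cached_value (mem);     (hypothetical helper)

   Only the rtx_unstable_p call itself is defined above.  */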

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
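
/* Sketch of the intended distinction (operands hypothetical): for the
   frame pointer rtx, rtx_varies_p returns 0 because it is constant
   within one function, while for an ordinary pseudo register it
   returns 1.  A conservative alias query might therefore do

       if (!rtx_varies_p (x, true))
         ...compare X reliably against constant addresses...

   The surrounding context is assumed for illustration only.  */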

/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static HOST_WIDE_INT
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  HOST_WIDE_INT offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the reload
     pass.  We still need to provide at least an estimate of the
     resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
        if (table[i].to == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 - offset2;
              }
          }
      }
    else if (table[i].to == from)
      {
        if (table[i].from == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return - offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 - offset2;
              }
          }
      }

  /* If the requested register combination was not found,
     try a simpler combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}

/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
#else
          HOST_WIDE_INT red_zone_size = 0;
#endif
          HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
                                         / BITS_PER_UNIT;
          HOST_WIDE_INT low_bound, high_bound;

          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return 1;

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = STARTING_FRAME_OFFSET;
                  low_bound = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound = STARTING_FRAME_OFFSET;
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              HOST_WIDE_INT sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound = ap_offset
                          + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                          - crtl->args.size
#endif
                          - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound = ap_offset
                          + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                          - crtl->args.size
#endif
                          - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound = FIRST_PARM_OFFSET (current_function_decl)
                          - crtl->args.size - stack_boundary;
#else
              low_bound = FIRST_PARM_OFFSET (current_function_decl)
                          - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (offset >= low_bound && offset <= high_bound - size)
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
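
/* Usage sketch; the speculation context is assumed, only the predicate
   is defined above.  Before making a conditional load unconditional, a
   caller can require a non-trapping address:

       if (MEM_P (mem) && !rtx_addr_can_trap_p (XEXP (mem, 0)))
         ...the load may be executed speculatively...  */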

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
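
/* Sketch: this predicate underlies null-check removal.  Given a
   hypothetical comparison (eq (reg X) (const_int 0)), a simplifier may
   fold it to false whenever nonzero_address_p holds for X, e.g. when X
   is the stack pointer.  The folding step itself is assumed here; only
   the predicate is defined above.  */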

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
\f
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
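
/* Usage sketch; the insn-walking context is assumed.  To get the callee
   of a CALL_INSN one can unwrap the (call (mem ...) ...) shape checked
   above:

       rtx call = get_call_rtx_from (insn);
       if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
         ...XSTR (XEXP (XEXP (call, 0), 0), 0) names the callee...  */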
\f
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
\f
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
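
/* Worked example (values assumed): for
   x = (const (plus (symbol_ref "s") (const_int 8))), split_const stores
   (symbol_ref "s") in *BASE_OUT and (const_int 8) in *OFFSET_OUT; for a
   plain (reg 42) it stores the reg itself and const0_rtx.  */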
\f
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
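
/* Worked example (pattern hypothetical): in
   (set (reg 1) (plus (reg 2) (reg 2))), counting the rtx for (reg 2)
   yields 2, and with COUNT_DEST zero the (reg 1) destination of the SET
   is skipped, so counting that rtx yields 0.  */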

\f
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}

\f
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }
  return 0;
}
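
/* Usage sketch; the scheduling framing is assumed.  reg_mentioned_p
   answers "does REG appear at all", whether read or written:

       if (!reg_mentioned_p (reg, PATTERN (insn)))
         ...INSN is syntactically independent of REG...  */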
\f
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
\f
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
\f
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  return set_of (reg, insn) != NULL_RTX;
}
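
/* Sketch contrasting the two predicates above (insns A, B and C assumed
   to be consecutive in one instruction stream): reg_set_p (reg, b) asks
   whether B itself writes REG, while reg_set_between_p (reg, a, c) asks
   whether any insn strictly between A and C does.  The interval excludes
   both endpoints, so reg_set_between_p (reg, a, NEXT_INSN (a)) is
   always 0.  */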

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
\f
/* Helper function for set_of.  */
struct set_of_data
{
  const_rtx found;
  const_rtx pat;
};

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
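
/* Usage sketch: set_of returns the SET or CLOBBER rtx rather than a
   yes/no answer, so a caller can inspect how the register is written.
   The follow-up test below is illustrative, not taken from this file:

       const_rtx s = set_of (reg, insn);
       if (s && GET_CODE (s) == SET
           && GET_CODE (SET_DEST (s)) == STRICT_LOW_PART)
         ...only the low part of REG is overwritten...  */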

/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
\f
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common
                 case only a single set is present in the pattern, so we
                 want to avoid checking for REG_UNUSED notes unless
                 necessary.

                 When we reach a set for the first time, we just expect
                 it to be the single set we are looking for; only when
                 more sets are found in the insn do we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
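
/* Sketch: the usual entry point is the single_set wrapper from rtl.h,
   which fast-paths a bare SET pattern and falls back to single_set_2
   for PARALLELs.  A typical (hypothetical) caller:

       rtx set = single_set (insn);
       if (set && REG_P (SET_DEST (set)))
         ...INSN behaves as "SET_DEST := SET_SRC"...  */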

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
\f
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a no-op if the destination coincides exactly with the selected
     (contiguous) elements of the source vector.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
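
/* Examples of accepted shapes (operands hypothetical):
   (set (pc) (pc)) and (set (reg 3) (reg 3)) are no-ops, as is a MEM
   copied onto an rtx_equal_p MEM without side effects.  A SUBREG pair
   qualifies only when both SUBREG_BYTEs match, e.g.
   (set (subreg:SI (reg:DI 5) 0) (subreg:SI (reg:DI 5) 0)).  */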
\f
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
\f

/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note are always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X cannot affect IN.
     We check IN here; the CONSTANT_P (x) test is profitably combined
     with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
\f
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
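
/* Usage sketch, mirroring the call in find_all_hard_reg_sets above;
   only the variable names are invented:

       HARD_REG_SET written;
       CLEAR_HARD_REG_SET (written);
       note_stores (PATTERN (insn), record_hard_reg_sets, &written);

   After this, WRITTEN holds every hard register the pattern sets or
   clobbers.  */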
\f
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For a SET we visit everything in the source, plus the registers
           used in the address of a memory destination, and the operands of
           a ZERO_EXTRACT destination.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
\f
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register, or a subreg of a register that
   doesn't change the number of words of the inner register, and
   TEST_REGNO falls within the registers it occupies.  */
1971
1972 static bool
1973 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1974 {
1975 unsigned int regno, endregno;
1976
1977 if (GET_CODE (dest) == SUBREG
1978 && (((GET_MODE_SIZE (GET_MODE (dest))
1979 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1980 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1981 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1982 dest = SUBREG_REG (dest);
1983
1984 if (!REG_P (dest))
1985 return false;
1986
1987 regno = REGNO (dest);
1988 endregno = END_REGNO (dest);
1989 return (test_regno >= regno && test_regno < endregno);
1990 }
1991
1992 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1993 any member matches the covers_regno_no_parallel_p criteria. */
1994
1995 static bool
1996 covers_regno_p (const_rtx dest, unsigned int test_regno)
1997 {
1998 if (GET_CODE (dest) == PARALLEL)
1999 {
2000 /* Some targets place small structures in registers for return
2001 values of functions, and those registers are wrapped in
2002 PARALLELs that we may see as the destination of a SET. */
2003 int i;
2004
2005 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2006 {
2007 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2008 if (inner != NULL_RTX
2009 && covers_regno_no_parallel_p (inner, test_regno))
2010 return true;
2011 }
2012
2013 return false;
2014 }
2015 else
2016 return covers_regno_no_parallel_p (dest, test_regno);
2017 }
2018
2019 /* Utility function for dead_or_set_p to check an individual register. */
2020
2021 int
2022 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2023 {
2024 const_rtx pattern;
2025
2026 /* See if there is a death note for something that includes TEST_REGNO. */
2027 if (find_regno_note (insn, REG_DEAD, test_regno))
2028 return 1;
2029
2030 if (CALL_P (insn)
2031 && find_regno_fusage (insn, CLOBBER, test_regno))
2032 return 1;
2033
2034 pattern = PATTERN (insn);
2035
2036 /* If a COND_EXEC is not executed, the value survives. */
2037 if (GET_CODE (pattern) == COND_EXEC)
2038 return 0;
2039
2040 if (GET_CODE (pattern) == SET)
2041 return covers_regno_p (SET_DEST (pattern), test_regno);
2042 else if (GET_CODE (pattern) == PARALLEL)
2043 {
2044 int i;
2045
2046 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2047 {
2048 rtx body = XVECEXP (pattern, 0, i);
2049
2050 if (GET_CODE (body) == COND_EXEC)
2051 body = COND_EXEC_CODE (body);
2052
2053 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2054 && covers_regno_p (SET_DEST (body), test_regno))
2055 return 1;
2056 }
2057 }
2058
2059 return 0;
2060 }
2061
2062 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2063 If DATUM is nonzero, look for one whose datum is DATUM. */
2064
2065 rtx
2066 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2067 {
2068 rtx link;
2069
2070 gcc_checking_assert (insn);
2071
2072 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2073 if (! INSN_P (insn))
2074 return 0;
2075 if (datum == 0)
2076 {
2077 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2078 if (REG_NOTE_KIND (link) == kind)
2079 return link;
2080 return 0;
2081 }
2082
2083 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2084 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2085 return link;
2086 return 0;
2087 }
2088
2089 /* Return the reg-note of kind KIND in insn INSN which applies to register
2090 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2091 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2092 it might be the case that the note overlaps REGNO. */
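/* For example, on a target where (reg:DI 0) spans hard registers 0
   and 1, a (REG_DEAD (reg:DI 0)) note is returned for a query with
   REGNO == 1, since END_REGNO of the note's register is 2.
   (Hypothetical register layout, for illustration.) */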
2093
2094 rtx
2095 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2096 {
2097 rtx link;
2098
2099 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2100 if (! INSN_P (insn))
2101 return 0;
2102
2103 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2104 if (REG_NOTE_KIND (link) == kind
2105 /* Verify that it is a register, so that scratch and MEM won't cause a
2106 problem here. */
2107 && REG_P (XEXP (link, 0))
2108 && REGNO (XEXP (link, 0)) <= regno
2109 && END_REGNO (XEXP (link, 0)) > regno)
2110 return link;
2111 return 0;
2112 }
2113
2114 /* Return a REG_EQUIV or REG_EQUAL note if INSN has only a single set and
2115 has such a note. */
2116
2117 rtx
2118 find_reg_equal_equiv_note (const_rtx insn)
2119 {
2120 rtx link;
2121
2122 if (!INSN_P (insn))
2123 return 0;
2124
2125 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2126 if (REG_NOTE_KIND (link) == REG_EQUAL
2127 || REG_NOTE_KIND (link) == REG_EQUIV)
2128 {
2129 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2130 insns that have multiple sets. Checking single_set to
2131 make sure of this is not the proper check, as explained
2132 in the comment in set_unique_reg_note.
2133
2134 This should be changed into an assert. */
2135 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2136 return 0;
2137 return link;
2138 }
2139 return NULL;
2140 }
2141
2142 /* Check whether INSN is a single_set whose source is known to be
2143 equivalent to a constant. Return that constant if so, otherwise
2144 return null. */
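/* A typical use (a sketch only, not taken from any particular caller):

     rtx c = find_constant_src (insn);
     if (c != NULL_RTX && CONST_INT_P (c))
       ... propagate INTVAL (c) into the uses of the destination ...  */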
2145
2146 rtx
2147 find_constant_src (const rtx_insn *insn)
2148 {
2149 rtx note, set, x;
2150
2151 set = single_set (insn);
2152 if (set)
2153 {
2154 x = avoid_constant_pool_reference (SET_SRC (set));
2155 if (CONSTANT_P (x))
2156 return x;
2157 }
2158
2159 note = find_reg_equal_equiv_note (insn);
2160 if (note && CONSTANT_P (XEXP (note, 0)))
2161 return XEXP (note, 0);
2162
2163 return NULL_RTX;
2164 }
2165
2166 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2167 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2168
2169 int
2170 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2171 {
2172 /* If it's not a CALL_INSN, it can't possibly have a
2173 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2174 if (!CALL_P (insn))
2175 return 0;
2176
2177 gcc_assert (datum);
2178
2179 if (!REG_P (datum))
2180 {
2181 rtx link;
2182
2183 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2184 link;
2185 link = XEXP (link, 1))
2186 if (GET_CODE (XEXP (link, 0)) == code
2187 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2188 return 1;
2189 }
2190 else
2191 {
2192 unsigned int regno = REGNO (datum);
2193
2194 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2195 to pseudo registers, so don't bother checking. */
2196
2197 if (regno < FIRST_PSEUDO_REGISTER)
2198 {
2199 unsigned int end_regno = END_REGNO (datum);
2200 unsigned int i;
2201
2202 for (i = regno; i < end_regno; i++)
2203 if (find_regno_fusage (insn, code, i))
2204 return 1;
2205 }
2206 }
2207
2208 return 0;
2209 }
2210
2211 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2212 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2213
2214 int
2215 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2216 {
2217 rtx link;
2218
2219 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2220 to pseudo registers, so don't bother checking. */
2221
2222 if (regno >= FIRST_PSEUDO_REGISTER
2223 || !CALL_P (insn))
2224 return 0;
2225
2226 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2227 {
2228 rtx op, reg;
2229
2230 if (GET_CODE (op = XEXP (link, 0)) == code
2231 && REG_P (reg = XEXP (op, 0))
2232 && REGNO (reg) <= regno
2233 && END_REGNO (reg) > regno)
2234 return 1;
2235 }
2236
2237 return 0;
2238 }
2239
2240 \f
2241 /* Return true if KIND is an integer REG_NOTE. */
2242
2243 static bool
2244 int_reg_note_p (enum reg_note kind)
2245 {
2246 return kind == REG_BR_PROB;
2247 }
2248
2249 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2250 stored as the pointer to the next register note. */
2251
2252 rtx
2253 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2254 {
2255 rtx note;
2256
2257 gcc_checking_assert (!int_reg_note_p (kind));
2258 switch (kind)
2259 {
2260 case REG_CC_SETTER:
2261 case REG_CC_USER:
2262 case REG_LABEL_TARGET:
2263 case REG_LABEL_OPERAND:
2264 case REG_TM:
2265 /* These types of register notes use an INSN_LIST rather than an
2266 EXPR_LIST, so that copying is done right and dumps look
2267 better. */
2268 note = alloc_INSN_LIST (datum, list);
2269 PUT_REG_NOTE_KIND (note, kind);
2270 break;
2271
2272 default:
2273 note = alloc_EXPR_LIST (kind, datum, list);
2274 break;
2275 }
2276
2277 return note;
2278 }
2279
2280 /* Add register note with kind KIND and datum DATUM to INSN. */
2281
2282 void
2283 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2284 {
2285 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2286 }
2287
2288 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2289
2290 void
2291 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2292 {
2293 gcc_checking_assert (int_reg_note_p (kind));
2294 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2295 datum, REG_NOTES (insn));
2296 }
2297
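/* For example, add_int_reg_note (jump, REG_BR_PROB, 9000) attaches a
   branch probability note whose datum is stored directly as an
   integer in an INT_LIST, not wrapped in a CONST_INT rtx. (The
   probability value 9000 is illustrative.) */
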
2298 /* Add a register note like NOTE to INSN. */
2299
2300 void
2301 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2302 {
2303 if (GET_CODE (note) == INT_LIST)
2304 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2305 else
2306 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2307 }
2308
2309 /* Duplicate NOTE and return the copy. */
2310 rtx
2311 duplicate_reg_note (rtx note)
2312 {
2313 reg_note kind = REG_NOTE_KIND (note);
2314
2315 if (GET_CODE (note) == INT_LIST)
2316 return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2317 else if (GET_CODE (note) == EXPR_LIST)
2318 return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2319 else
2320 return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2321 }
2322
2323 /* Remove register note NOTE from the REG_NOTES of INSN. */
2324
2325 void
2326 remove_note (rtx_insn *insn, const_rtx note)
2327 {
2328 rtx link;
2329
2330 if (note == NULL_RTX)
2331 return;
2332
2333 if (REG_NOTES (insn) == note)
2334 REG_NOTES (insn) = XEXP (note, 1);
2335 else
2336 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2337 if (XEXP (link, 1) == note)
2338 {
2339 XEXP (link, 1) = XEXP (note, 1);
2340 break;
2341 }
2342
2343 switch (REG_NOTE_KIND (note))
2344 {
2345 case REG_EQUAL:
2346 case REG_EQUIV:
2347 df_notes_rescan (insn);
2348 break;
2349 default:
2350 break;
2351 }
2352 }
2353
2354 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2355 Return true if any note has been removed. */
2356
2357 bool
2358 remove_reg_equal_equiv_notes (rtx_insn *insn)
2359 {
2360 rtx *loc;
2361 bool ret = false;
2362
2363 loc = &REG_NOTES (insn);
2364 while (*loc)
2365 {
2366 enum reg_note kind = REG_NOTE_KIND (*loc);
2367 if (kind == REG_EQUAL || kind == REG_EQUIV)
2368 {
2369 *loc = XEXP (*loc, 1);
2370 ret = true;
2371 }
2372 else
2373 loc = &XEXP (*loc, 1);
2374 }
2375 return ret;
2376 }
2377
2378 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2379
2380 void
2381 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2382 {
2383 df_ref eq_use;
2384
2385 if (!df)
2386 return;
2387
2388 /* This loop is a little tricky. We cannot just go down the chain because
2389 it is being modified by some actions in the loop. So we just iterate
2390 over the head. We plan to drain the list anyway. */
2391 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2392 {
2393 rtx_insn *insn = DF_REF_INSN (eq_use);
2394 rtx note = find_reg_equal_equiv_note (insn);
2395
2396 /* This assert is generally triggered when someone deletes a REG_EQUAL
2397 or REG_EQUIV note by hacking the list manually rather than calling
2398 remove_note. */
2399 gcc_assert (note);
2400
2401 remove_note (insn, note);
2402 }
2403 }
2404
2405 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2406 return true if it is found. A simple equality test is used to determine if
2407 NODE matches. */
2408
2409 bool
2410 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2411 {
2412 const_rtx x;
2413
2414 for (x = listp; x; x = XEXP (x, 1))
2415 if (node == XEXP (x, 0))
2416 return true;
2417
2418 return false;
2419 }
2420
2421 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2422 remove that entry from the list if it is found.
2423
2424 A simple equality test is used to determine if NODE matches. */
2425
2426 void
2427 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2428 {
2429 rtx_expr_list *temp = *listp;
2430 rtx_expr_list *prev = NULL;
2431
2432 while (temp)
2433 {
2434 if (node == temp->element ())
2435 {
2436 /* Splice the node out of the list. */
2437 if (prev)
2438 XEXP (prev, 1) = temp->next ();
2439 else
2440 *listp = temp->next ();
2441
2442 return;
2443 }
2444
2445 prev = temp;
2446 temp = temp->next ();
2447 }
2448 }
2449
2450 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2451 remove that entry from the list if it is found.
2452
2453 A simple equality test is used to determine if NODE matches. */
2454
2455 void
2456 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2457 {
2458 rtx_insn_list *temp = *listp;
2459 rtx_insn_list *prev = NULL;
2460
2461 while (temp)
2462 {
2463 if (node == temp->insn ())
2464 {
2465 /* Splice the node out of the list. */
2466 if (prev)
2467 XEXP (prev, 1) = temp->next ();
2468 else
2469 *listp = temp->next ();
2470
2471 return;
2472 }
2473
2474 prev = temp;
2475 temp = temp->next ();
2476 }
2477 }
2478 \f
2479 /* Nonzero if X contains any volatile instructions. These are instructions
2480 which may cause unpredictable machine state, and thus no
2481 instructions or register uses should be moved or combined across them.
2482 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2483
2484 int
2485 volatile_insn_p (const_rtx x)
2486 {
2487 const RTX_CODE code = GET_CODE (x);
2488 switch (code)
2489 {
2490 case LABEL_REF:
2491 case SYMBOL_REF:
2492 case CONST:
2493 CASE_CONST_ANY:
2494 case CC0:
2495 case PC:
2496 case REG:
2497 case SCRATCH:
2498 case CLOBBER:
2499 case ADDR_VEC:
2500 case ADDR_DIFF_VEC:
2501 case CALL:
2502 case MEM:
2503 return 0;
2504
2505 case UNSPEC_VOLATILE:
2506 return 1;
2507
2508 case ASM_INPUT:
2509 case ASM_OPERANDS:
2510 if (MEM_VOLATILE_P (x))
2511 return 1;
2512
2513 default:
2514 break;
2515 }
2516
2517 /* Recursively scan the operands of this expression. */
2518
2519 {
2520 const char *const fmt = GET_RTX_FORMAT (code);
2521 int i;
2522
2523 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2524 {
2525 if (fmt[i] == 'e')
2526 {
2527 if (volatile_insn_p (XEXP (x, i)))
2528 return 1;
2529 }
2530 else if (fmt[i] == 'E')
2531 {
2532 int j;
2533 for (j = 0; j < XVECLEN (x, i); j++)
2534 if (volatile_insn_p (XVECEXP (x, i, j)))
2535 return 1;
2536 }
2537 }
2538 }
2539 return 0;
2540 }
2541
2542 /* Nonzero if X contains any volatile memory references,
2543 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */
2544
2545 int
2546 volatile_refs_p (const_rtx x)
2547 {
2548 const RTX_CODE code = GET_CODE (x);
2549 switch (code)
2550 {
2551 case LABEL_REF:
2552 case SYMBOL_REF:
2553 case CONST:
2554 CASE_CONST_ANY:
2555 case CC0:
2556 case PC:
2557 case REG:
2558 case SCRATCH:
2559 case CLOBBER:
2560 case ADDR_VEC:
2561 case ADDR_DIFF_VEC:
2562 return 0;
2563
2564 case UNSPEC_VOLATILE:
2565 return 1;
2566
2567 case MEM:
2568 case ASM_INPUT:
2569 case ASM_OPERANDS:
2570 if (MEM_VOLATILE_P (x))
2571 return 1;
2572
2573 default:
2574 break;
2575 }
2576
2577 /* Recursively scan the operands of this expression. */
2578
2579 {
2580 const char *const fmt = GET_RTX_FORMAT (code);
2581 int i;
2582
2583 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2584 {
2585 if (fmt[i] == 'e')
2586 {
2587 if (volatile_refs_p (XEXP (x, i)))
2588 return 1;
2589 }
2590 else if (fmt[i] == 'E')
2591 {
2592 int j;
2593 for (j = 0; j < XVECLEN (x, i); j++)
2594 if (volatile_refs_p (XVECEXP (x, i, j)))
2595 return 1;
2596 }
2597 }
2598 }
2599 return 0;
2600 }
2601
2602 /* Similar to above, except that it also rejects register pre- and post-
2603 incrementing. */
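
/* To summarize the three predicates above: volatile_insn_p accepts
   only volatile asms and UNSPEC_VOLATILEs; volatile_refs_p
   additionally accepts volatile MEMs; side_effects_p additionally
   accepts autoincrements, CALLs, and mode-carrying CLOBBERs, so each
   predicate accepts a superset of what the previous one does. */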
2604
2605 int
2606 side_effects_p (const_rtx x)
2607 {
2608 const RTX_CODE code = GET_CODE (x);
2609 switch (code)
2610 {
2611 case LABEL_REF:
2612 case SYMBOL_REF:
2613 case CONST:
2614 CASE_CONST_ANY:
2615 case CC0:
2616 case PC:
2617 case REG:
2618 case SCRATCH:
2619 case ADDR_VEC:
2620 case ADDR_DIFF_VEC:
2621 case VAR_LOCATION:
2622 return 0;
2623
2624 case CLOBBER:
2625 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2626 when some combination can't be done. If we see one, don't think
2627 that we can simplify the expression. */
2628 return (GET_MODE (x) != VOIDmode);
2629
2630 case PRE_INC:
2631 case PRE_DEC:
2632 case POST_INC:
2633 case POST_DEC:
2634 case PRE_MODIFY:
2635 case POST_MODIFY:
2636 case CALL:
2637 case UNSPEC_VOLATILE:
2638 return 1;
2639
2640 case MEM:
2641 case ASM_INPUT:
2642 case ASM_OPERANDS:
2643 if (MEM_VOLATILE_P (x))
2644 return 1;
2645
2646 default:
2647 break;
2648 }
2649
2650 /* Recursively scan the operands of this expression. */
2651
2652 {
2653 const char *fmt = GET_RTX_FORMAT (code);
2654 int i;
2655
2656 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2657 {
2658 if (fmt[i] == 'e')
2659 {
2660 if (side_effects_p (XEXP (x, i)))
2661 return 1;
2662 }
2663 else if (fmt[i] == 'E')
2664 {
2665 int j;
2666 for (j = 0; j < XVECLEN (x, i); j++)
2667 if (side_effects_p (XVECEXP (x, i, j)))
2668 return 1;
2669 }
2670 }
2671 }
2672 return 0;
2673 }
2674 \f
2675 /* Return nonzero if evaluating rtx X might cause a trap.
2676 FLAGS controls how to consider MEMs. A nonzero value means the context
2677 of the access may have changed from the original, such that the
2678 address may have become invalid. */
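/* For example, (mem:SI (reg:SI 100)) may trap through a wild pointer,
   while (udiv:SI (reg:SI 100) (const_int 4)) cannot trap in an
   integer mode because the divisor is a nonzero constant. (The
   pseudo register number is chosen for illustration.) */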
2679
2680 int
2681 may_trap_p_1 (const_rtx x, unsigned flags)
2682 {
2683 int i;
2684 enum rtx_code code;
2685 const char *fmt;
2686
2687 /* We make no distinction currently, but this function is part of
2688 the internal target-hooks ABI so we keep the parameter as
2689 "unsigned flags". */
2690 bool code_changed = flags != 0;
2691
2692 if (x == 0)
2693 return 0;
2694 code = GET_CODE (x);
2695 switch (code)
2696 {
2697 /* Handle these cases quickly. */
2698 CASE_CONST_ANY:
2699 case SYMBOL_REF:
2700 case LABEL_REF:
2701 case CONST:
2702 case PC:
2703 case CC0:
2704 case REG:
2705 case SCRATCH:
2706 return 0;
2707
2708 case UNSPEC:
2709 return targetm.unspec_may_trap_p (x, flags);
2710
2711 case UNSPEC_VOLATILE:
2712 case ASM_INPUT:
2713 case TRAP_IF:
2714 return 1;
2715
2716 case ASM_OPERANDS:
2717 return MEM_VOLATILE_P (x);
2718
2719 /* Memory ref can trap unless it's a static var or a stack slot. */
2720 case MEM:
2721 /* Recognize specific pattern of stack checking probes. */
2722 if (flag_stack_check
2723 && MEM_VOLATILE_P (x)
2724 && XEXP (x, 0) == stack_pointer_rtx)
2725 return 1;
2726 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2727 reference; moving it out of context, such as when moving code
2728 during optimization, might cause its address to become invalid. */
2729 code_changed
2730 || !MEM_NOTRAP_P (x))
2731 {
2732 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2733 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2734 GET_MODE (x), code_changed);
2735 }
2736
2737 return 0;
2738
2739 /* Division by a non-constant might trap. */
2740 case DIV:
2741 case MOD:
2742 case UDIV:
2743 case UMOD:
2744 if (HONOR_SNANS (x))
2745 return 1;
2746 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2747 return flag_trapping_math;
2748 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2749 return 1;
2750 break;
2751
2752 case EXPR_LIST:
2753 /* An EXPR_LIST is used to represent a function call. This
2754 certainly may trap. */
2755 return 1;
2756
2757 case GE:
2758 case GT:
2759 case LE:
2760 case LT:
2761 case LTGT:
2762 case COMPARE:
2763 /* Some floating point comparisons may trap. */
2764 if (!flag_trapping_math)
2765 break;
2766 /* ??? There is no machine-independent way to check for tests that trap
2767 when COMPARE is used, though many targets do make this distinction.
2768 For instance, sparc uses CCFPE for compares which generate exceptions
2769 and CCFP for compares which do not generate exceptions. */
2770 if (HONOR_NANS (x))
2771 return 1;
2772 /* But often the compare has some CC mode, so check operand
2773 modes as well. */
2774 if (HONOR_NANS (XEXP (x, 0))
2775 || HONOR_NANS (XEXP (x, 1)))
2776 return 1;
2777 break;
2778
2779 case EQ:
2780 case NE:
2781 if (HONOR_SNANS (x))
2782 return 1;
2783 /* Often comparison is CC mode, so check operand modes. */
2784 if (HONOR_SNANS (XEXP (x, 0))
2785 || HONOR_SNANS (XEXP (x, 1)))
2786 return 1;
2787 break;
2788
2789 case FIX:
2790 /* Conversion of floating point might trap. */
2791 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2792 return 1;
2793 break;
2794
2795 case NEG:
2796 case ABS:
2797 case SUBREG:
2798 /* These operations don't trap even with floating point. */
2799 break;
2800
2801 default:
2802 /* Any floating arithmetic may trap. */
2803 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2804 return 1;
2805 }
2806
2807 fmt = GET_RTX_FORMAT (code);
2808 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2809 {
2810 if (fmt[i] == 'e')
2811 {
2812 if (may_trap_p_1 (XEXP (x, i), flags))
2813 return 1;
2814 }
2815 else if (fmt[i] == 'E')
2816 {
2817 int j;
2818 for (j = 0; j < XVECLEN (x, i); j++)
2819 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2820 return 1;
2821 }
2822 }
2823 return 0;
2824 }
2825
2826 /* Return nonzero if evaluating rtx X might cause a trap. */
2827
2828 int
2829 may_trap_p (const_rtx x)
2830 {
2831 return may_trap_p_1 (x, 0);
2832 }
2833
2834 /* Same as above, but additionally return nonzero if evaluating rtx X might
2835 cause a fault. We define a fault for the purpose of this function as an
2836 erroneous execution condition that cannot be encountered during the normal
2837 execution of a valid program; the typical example is an unaligned memory
2838 access on a strict alignment machine. The compiler guarantees that it
2839 doesn't generate code that will fault from a valid program, but this
2840 guarantee doesn't mean anything for individual instructions. Consider
2841 the following example:
2842
2843 struct S { int d; union { char *cp; int *ip; }; };
2844
2845 int foo(struct S *s)
2846 {
2847 if (s->d == 1)
2848 return *s->ip;
2849 else
2850 return *s->cp;
2851 }
2852
2853 on a strict alignment machine. In a valid program, foo will never be
2854 invoked on a structure for which d is equal to 1 and the underlying
2855 unique field of the union is not aligned on a 4-byte boundary, but the
2856 expression *s->ip might cause a fault if considered individually.
2857
2858 At the RTL level, potentially problematic expressions will almost always
2859 verify may_trap_p; for example, the above dereference can be emitted as
2860 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2861 However, suppose that foo is inlined in a caller that causes s->cp to
2862 point to a local character variable and guarantees that s->d is not set
2863 to 1; foo may have been effectively translated into pseudo-RTL as:
2864
2865 if ((reg:SI) == 1)
2866 (set (reg:SI) (mem:SI (%fp - 7)))
2867 else
2868 (set (reg:QI) (mem:QI (%fp - 7)))
2869
2870 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2871 memory reference to a stack slot, but it will certainly cause a fault
2872 on a strict alignment machine. */
2873
2874 int
2875 may_trap_or_fault_p (const_rtx x)
2876 {
2877 return may_trap_p_1 (x, 1);
2878 }
2879 \f
2880 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2881 i.e., an inequality. */
2882
2883 int
2884 inequality_comparisons_p (const_rtx x)
2885 {
2886 const char *fmt;
2887 int len, i;
2888 const enum rtx_code code = GET_CODE (x);
2889
2890 switch (code)
2891 {
2892 case REG:
2893 case SCRATCH:
2894 case PC:
2895 case CC0:
2896 CASE_CONST_ANY:
2897 case CONST:
2898 case LABEL_REF:
2899 case SYMBOL_REF:
2900 return 0;
2901
2902 case LT:
2903 case LTU:
2904 case GT:
2905 case GTU:
2906 case LE:
2907 case LEU:
2908 case GE:
2909 case GEU:
2910 return 1;
2911
2912 default:
2913 break;
2914 }
2915
2916 len = GET_RTX_LENGTH (code);
2917 fmt = GET_RTX_FORMAT (code);
2918
2919 for (i = 0; i < len; i++)
2920 {
2921 if (fmt[i] == 'e')
2922 {
2923 if (inequality_comparisons_p (XEXP (x, i)))
2924 return 1;
2925 }
2926 else if (fmt[i] == 'E')
2927 {
2928 int j;
2929 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2930 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2931 return 1;
2932 }
2933 }
2934
2935 return 0;
2936 }
2937 \f
2938 /* Replace any occurrence of FROM in X with TO. The function does
2939 not descend into CONST_DOUBLE expressions when replacing.
2940
2941 Note that copying is not done, so X must not be shared unless all copies
2942 are to be modified.
2943
2944 ALL_REGS is true if we want to replace all REGs equal to FROM, not just
2945 those pointer-equal ones. */
2946
2947 rtx
2948 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
2949 {
2950 int i, j;
2951 const char *fmt;
2952
2953 if (x == from)
2954 return to;
2955
2956 /* Allow this function to make replacements in EXPR_LISTs. */
2957 if (x == 0)
2958 return 0;
2959
2960 if (all_regs
2961 && REG_P (x)
2962 && REG_P (from)
2963 && REGNO (x) == REGNO (from))
2964 {
2965 gcc_assert (GET_MODE (x) == GET_MODE (from));
2966 return to;
2967 }
2968 else if (GET_CODE (x) == SUBREG)
2969 {
2970 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
2971
2972 if (CONST_INT_P (new_rtx))
2973 {
2974 x = simplify_subreg (GET_MODE (x), new_rtx,
2975 GET_MODE (SUBREG_REG (x)),
2976 SUBREG_BYTE (x));
2977 gcc_assert (x);
2978 }
2979 else
2980 SUBREG_REG (x) = new_rtx;
2981
2982 return x;
2983 }
2984 else if (GET_CODE (x) == ZERO_EXTEND)
2985 {
2986 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
2987
2988 if (CONST_INT_P (new_rtx))
2989 {
2990 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2991 new_rtx, GET_MODE (XEXP (x, 0)));
2992 gcc_assert (x);
2993 }
2994 else
2995 XEXP (x, 0) = new_rtx;
2996
2997 return x;
2998 }
2999
3000 fmt = GET_RTX_FORMAT (GET_CODE (x));
3001 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3002 {
3003 if (fmt[i] == 'e')
3004 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3005 else if (fmt[i] == 'E')
3006 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3007 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3008 from, to, all_regs);
3009 }
3010
3011 return x;
3012 }
3013 \f
3014 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3015 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3016
3017 void
3018 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3019 {
3020 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3021 rtx x = *loc;
3022 if (JUMP_TABLE_DATA_P (x))
3023 {
3024 x = PATTERN (x);
3025 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3026 int len = GET_NUM_ELEM (vec);
3027 for (int i = 0; i < len; ++i)
3028 {
3029 rtx ref = RTVEC_ELT (vec, i);
3030 if (XEXP (ref, 0) == old_label)
3031 {
3032 XEXP (ref, 0) = new_label;
3033 if (update_label_nuses)
3034 {
3035 ++LABEL_NUSES (new_label);
3036 --LABEL_NUSES (old_label);
3037 }
3038 }
3039 }
3040 return;
3041 }
3042
3043 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3044 field. This is not handled by the iterator because it doesn't
3045 handle unprinted ('0') fields. */
3046 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3047 JUMP_LABEL (x) = new_label;
3048
3049 subrtx_ptr_iterator::array_type array;
3050 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3051 {
3052 rtx *loc = *iter;
3053 if (rtx x = *loc)
3054 {
3055 if (GET_CODE (x) == SYMBOL_REF
3056 && CONSTANT_POOL_ADDRESS_P (x))
3057 {
3058 rtx c = get_pool_constant (x);
3059 if (rtx_referenced_p (old_label, c))
3060 {
3061 /* Create a copy of constant C; replace the label inside
3062 but do not update LABEL_NUSES because uses in constant pool
3063 are not counted. */
3064 rtx new_c = copy_rtx (c);
3065 replace_label (&new_c, old_label, new_label, false);
3066
3067 /* Add the new constant NEW_C to constant pool and replace
3068 the old reference to constant by new reference. */
3069 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3070 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3071 }
3072 }
3073
3074 if ((GET_CODE (x) == LABEL_REF
3075 || GET_CODE (x) == INSN_LIST)
3076 && XEXP (x, 0) == old_label)
3077 {
3078 XEXP (x, 0) = new_label;
3079 if (update_label_nuses)
3080 {
3081 ++LABEL_NUSES (new_label);
3082 --LABEL_NUSES (old_label);
3083 }
3084 }
3085 }
3086 }
3087 }
3088
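/* Like replace_label, but apply the replacement to the whole of INSN,
   checking that the replacement does not change the insn itself. */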
3089 void
3090 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3091 rtx_insn *new_label, bool update_label_nuses)
3092 {
3093 rtx insn_as_rtx = insn;
3094 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3095 gcc_checking_assert (insn_as_rtx == insn);
3096 }
3097
3098 /* Return true if X is referenced in BODY. */
3099
3100 bool
3101 rtx_referenced_p (const_rtx x, const_rtx body)
3102 {
3103 subrtx_iterator::array_type array;
3104 FOR_EACH_SUBRTX (iter, array, body, ALL)
3105 if (const_rtx y = *iter)
3106 {
3107 /* Check if a label_ref Y refers to label X. */
3108 if (GET_CODE (y) == LABEL_REF
3109 && LABEL_P (x)
3110 && label_ref_label (y) == x)
3111 return true;
3112
3113 if (rtx_equal_p (x, y))
3114 return true;
3115
3116 /* If Y is a reference to a pool constant, traverse the constant. */
3117 if (GET_CODE (y) == SYMBOL_REF
3118 && CONSTANT_POOL_ADDRESS_P (y))
3119 iter.substitute (get_pool_constant (y));
3120 }
3121 return false;
3122 }
3123
3124 /* If INSN is a tablejump, return true and store the label (which precedes
3125 the jump table) in *LABELP and the jump table in *TABLEP; both may be NULL. */
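/* Schematically (details omitted), the expected shape is:

     (jump_insn ...)           with JUMP_LABEL pointing at
     (code_label ...)          which is immediately followed by
     (jump_table_data ... (addr_vec [...] ...))  */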
3126
3127 bool
3128 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3129 rtx_jump_table_data **tablep)
3130 {
3131 if (!JUMP_P (insn))
3132 return false;
3133
3134 rtx target = JUMP_LABEL (insn);
3135 if (target == NULL_RTX || ANY_RETURN_P (target))
3136 return false;
3137
3138 rtx_insn *label = as_a<rtx_insn *> (target);
3139 rtx_insn *table = next_insn (label);
3140 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3141 return false;
3142
3143 if (labelp)
3144 *labelp = label;
3145 if (tablep)
3146 *tablep = as_a <rtx_jump_table_data *> (table);
3147 return true;
3148 }
3149
3150 /* A subroutine of computed_jump_p. Return 1 if X contains a REG or MEM or
3151 constant that is not in the constant pool and not in the condition
3152 of an IF_THEN_ELSE. */
3153
3154 static int
3155 computed_jump_p_1 (const_rtx x)
3156 {
3157 const enum rtx_code code = GET_CODE (x);
3158 int i, j;
3159 const char *fmt;
3160
3161 switch (code)
3162 {
3163 case LABEL_REF:
3164 case PC:
3165 return 0;
3166
3167 case CONST:
3168 CASE_CONST_ANY:
3169 case SYMBOL_REF:
3170 case REG:
3171 return 1;
3172
3173 case MEM:
3174 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3175 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3176
3177 case IF_THEN_ELSE:
3178 return (computed_jump_p_1 (XEXP (x, 1))
3179 || computed_jump_p_1 (XEXP (x, 2)));
3180
3181 default:
3182 break;
3183 }
3184
3185 fmt = GET_RTX_FORMAT (code);
3186 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3187 {
3188 if (fmt[i] == 'e'
3189 && computed_jump_p_1 (XEXP (x, i)))
3190 return 1;
3191
3192 else if (fmt[i] == 'E')
3193 for (j = 0; j < XVECLEN (x, i); j++)
3194 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3195 return 1;
3196 }
3197
3198 return 0;
3199 }
3200
3201 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3202
3203 Tablejumps and casesi insns are not considered indirect jumps;
3204 we can recognize them by a (use (label_ref)). */
3205
3206 int
3207 computed_jump_p (const rtx_insn *insn)
3208 {
3209 int i;
3210 if (JUMP_P (insn))
3211 {
3212 rtx pat = PATTERN (insn);
3213
3214 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3215 if (JUMP_LABEL (insn) != NULL)
3216 return 0;
3217
3218 if (GET_CODE (pat) == PARALLEL)
3219 {
3220 int len = XVECLEN (pat, 0);
3221 int has_use_labelref = 0;
3222
3223 for (i = len - 1; i >= 0; i--)
3224 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3225 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3226 == LABEL_REF))
3227 {
3228 has_use_labelref = 1;
3229 break;
3230 }
3231
3232 if (! has_use_labelref)
3233 for (i = len - 1; i >= 0; i--)
3234 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3235 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3236 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3237 return 1;
3238 }
3239 else if (GET_CODE (pat) == SET
3240 && SET_DEST (pat) == pc_rtx
3241 && computed_jump_p_1 (SET_SRC (pat)))
3242 return 1;
3243 }
3244 return 0;
3245 }
3246
3247 \f
3248
3249 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3250 the equivalent add insn and pass the result to FN, using DATA as the
3251 final argument. */
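/* For instance, (mem:SI (post_inc:SI (reg:SI R))) is reported to FN
   with both register operands equal to (reg:SI R) and the constant
   (const_int 4), as if it were the add
   (set (reg:SI R) (plus (reg:SI R) (const_int 4))),
   where 4 is GET_MODE_SIZE (SImode) on a typical 32-bit target. */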
3252
3253 static int
3254 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3255 {
3256 rtx x = XEXP (mem, 0);
3257 switch (GET_CODE (x))
3258 {
3259 case PRE_INC:
3260 case POST_INC:
3261 {
3262 int size = GET_MODE_SIZE (GET_MODE (mem));
3263 rtx r1 = XEXP (x, 0);
3264 rtx c = gen_int_mode (size, GET_MODE (r1));
3265 return fn (mem, x, r1, r1, c, data);
3266 }
3267
3268 case PRE_DEC:
3269 case POST_DEC:
3270 {
3271 int size = GET_MODE_SIZE (GET_MODE (mem));
3272 rtx r1 = XEXP (x, 0);
3273 rtx c = gen_int_mode (-size, GET_MODE (r1));
3274 return fn (mem, x, r1, r1, c, data);
3275 }
3276
3277 case PRE_MODIFY:
3278 case POST_MODIFY:
3279 {
3280 rtx r1 = XEXP (x, 0);
3281 rtx add = XEXP (x, 1);
3282 return fn (mem, x, r1, add, NULL, data);
3283 }
3284
3285 default:
3286 gcc_unreachable ();
3287 }
3288 }
3289
3290 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3291 For each such autoinc operation found, call FN, passing it
3292 the innermost enclosing MEM, the operation itself, the RTX modified
3293 by the operation, two RTXs (the second may be NULL) that, once
3294 added, represent the value to be held by the modified RTX
3295 afterwards, and DATA. FN is to return 0 to continue the
3296 traversal or any other value to have it returned to the caller of
3297 for_each_inc_dec. */
3298
3299 int
3300 for_each_inc_dec (rtx x,
3301 for_each_inc_dec_fn fn,
3302 void *data)
3303 {
3304 subrtx_var_iterator::array_type array;
3305 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3306 {
3307 rtx mem = *iter;
3308 if (mem
3309 && MEM_P (mem)
3310 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3311 {
3312 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3313 if (res != 0)
3314 return res;
3315 iter.skip_subrtxes ();
3316 }
3317 }
3318 return 0;
3319 }
3320
3321 \f
3322 /* Searches X for any reference to REGNO, returning the rtx of the
3323 reference found if any. Otherwise, returns NULL_RTX. */
3324
3325 rtx
3326 regno_use_in (unsigned int regno, rtx x)
3327 {
3328 const char *fmt;
3329 int i, j;
3330 rtx tem;
3331
3332 if (REG_P (x) && REGNO (x) == regno)
3333 return x;
3334
3335 fmt = GET_RTX_FORMAT (GET_CODE (x));
3336 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3337 {
3338 if (fmt[i] == 'e')
3339 {
3340 if ((tem = regno_use_in (regno, XEXP (x, i))))
3341 return tem;
3342 }
3343 else if (fmt[i] == 'E')
3344 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3345 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3346 return tem;
3347 }
3348
3349 return NULL_RTX;
3350 }
3351
3352 /* Return a value indicating whether OP, an operand of a commutative
3353 operation, is preferred as the first or second operand. The more
3354 positive the value, the stronger the preference for being the first
3355 operand. */
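/* For example, in (plus (reg:SI 1) (const_int 8)) the CONST_INT has
   precedence -8 and the REG -2 (-1 if REG_POINTER), so the register
   keeps the first position; for (plus (const_int 8) (reg:SI 1)),
   swap_commutative_operands_p below requests a swap. (The register
   number is chosen for illustration.) */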
3356
3357 int
3358 commutative_operand_precedence (rtx op)
3359 {
3360 enum rtx_code code = GET_CODE (op);
3361
3362 /* Constants always become the second operand. Prefer "nice" constants. */
3363 if (code == CONST_INT)
3364 return -8;
3365 if (code == CONST_WIDE_INT)
3366 return -7;
3367 if (code == CONST_DOUBLE)
3368 return -7;
3369 if (code == CONST_FIXED)
3370 return -7;
3371 op = avoid_constant_pool_reference (op);
3372 code = GET_CODE (op);
3373
3374 switch (GET_RTX_CLASS (code))
3375 {
3376 case RTX_CONST_OBJ:
3377 if (code == CONST_INT)
3378 return -6;
3379 if (code == CONST_WIDE_INT)
3380 return -6;
3381 if (code == CONST_DOUBLE)
3382 return -5;
3383 if (code == CONST_FIXED)
3384 return -5;
3385 return -4;
3386
3387 case RTX_EXTRA:
3388 /* SUBREGs of objects should come second. */
3389 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3390 return -3;
3391 return 0;
3392
3393 case RTX_OBJ:
3394 /* Complex expressions should be the first, so decrease priority
3395 of objects. Prefer pointer objects over non pointer objects. */
3396 if ((REG_P (op) && REG_POINTER (op))
3397 || (MEM_P (op) && MEM_POINTER (op)))
3398 return -1;
3399 return -2;
3400
3401 case RTX_COMM_ARITH:
3402 /* Prefer operands that are themselves commutative to be first.
3403 This helps to make things linear. In particular,
3404 (and (and (reg) (reg)) (not (reg))) is canonical. */
3405 return 4;
3406
3407 case RTX_BIN_ARITH:
3408 /* If only one operand is a binary expression, it will be the first
3409 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3410 is canonical, although it will usually be further simplified. */
3411 return 2;
3412
3413 case RTX_UNARY:
3414 /* Then prefer NEG and NOT. */
3415 if (code == NEG || code == NOT)
3416 return 1;
3417 /* FALLTHRU */
3418
3419 default:
3420 return 0;
3421 }
3422 }
3423
3424 /* Return 1 iff it is necessary to swap operands of commutative operation
3425 in order to canonicalize expression. */
3426
3427 bool
3428 swap_commutative_operands_p (rtx x, rtx y)
3429 {
3430 return (commutative_operand_precedence (x)
3431 < commutative_operand_precedence (y));
3432 }
3433
3434 /* Return 1 if X is an autoincrement side effect and the register is
3435 not the stack pointer. */
3436 int
3437 auto_inc_p (const_rtx x)
3438 {
3439 switch (GET_CODE (x))
3440 {
3441 case PRE_INC:
3442 case POST_INC:
3443 case PRE_DEC:
3444 case POST_DEC:
3445 case PRE_MODIFY:
3446 case POST_MODIFY:
3447 /* There are no REG_INC notes for SP. */
3448 if (XEXP (x, 0) != stack_pointer_rtx)
3449 return 1;
3450 default:
3451 break;
3452 }
3453 return 0;
3454 }
3455
3456 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3457 int
3458 loc_mentioned_in_p (rtx *loc, const_rtx in)
3459 {
3460 enum rtx_code code;
3461 const char *fmt;
3462 int i, j;
3463
3464 if (!in)
3465 return 0;
3466
3467 code = GET_CODE (in);
3468 fmt = GET_RTX_FORMAT (code);
3469 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3470 {
3471 if (fmt[i] == 'e')
3472 {
3473 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3474 return 1;
3475 }
3476 else if (fmt[i] == 'E')
3477 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3478 if (loc == &XVECEXP (in, i, j)
3479 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3480 return 1;
3481 }
3482 return 0;
3483 }
3484
3485 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3486 and SUBREG_BYTE, return the bit offset where the subreg begins
3487 (counting from the least significant bit of the operand). */
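/* For example, given (subreg:SI (reg:DI R) 4) on a target with 32-bit
   words, the result is bit 32 when both bytes and words are
   little-endian, and bit 0 when both are big-endian. (The word size
   is illustrative.) */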
3488
3489 unsigned int
3490 subreg_lsb_1 (machine_mode outer_mode,
3491 machine_mode inner_mode,
3492 unsigned int subreg_byte)
3493 {
3494 unsigned int bitpos;
3495 unsigned int byte;
3496 unsigned int word;
3497
3498 /* A paradoxical subreg begins at bit position 0. */
3499 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3500 return 0;
3501
3502 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3503 /* If the subreg crosses a word boundary ensure that
3504 it also begins and ends on a word boundary. */
3505 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3506 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3507 && (subreg_byte % UNITS_PER_WORD
3508 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3509
3510 if (WORDS_BIG_ENDIAN)
3511 word = (GET_MODE_SIZE (inner_mode)
3512 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3513 else
3514 word = subreg_byte / UNITS_PER_WORD;
3515 bitpos = word * BITS_PER_WORD;
3516
3517 if (BYTES_BIG_ENDIAN)
3518 byte = (GET_MODE_SIZE (inner_mode)
3519 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3520 else
3521 byte = subreg_byte % UNITS_PER_WORD;
3522 bitpos += byte * BITS_PER_UNIT;
3523
3524 return bitpos;
3525 }
3526
3527 /* Given a subreg X, return the bit offset where the subreg begins
3528 (counting from the least significant bit of the reg). */
3529
3530 unsigned int
3531 subreg_lsb (const_rtx x)
3532 {
3533 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3534 SUBREG_BYTE (x));
3535 }
3536
3537 /* Return the subreg byte offset for a subreg whose outer value has
3538 OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3539 there are LSB_SHIFT *bits* between the lsb of the outer value and the
3540 lsb of the inner value. This is the inverse of the calculation
3541 performed by subreg_lsb_1 (which converts byte offsets to bit shifts). */
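/* Continuing the example given for subreg_lsb_1: OUTER_BYTES = 4,
   INNER_BYTES = 8 and LSB_SHIFT = 32 yield byte offset 4 on a fully
   little-endian target and 0 on a fully big-endian one. */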
3542
3543 unsigned int
3544 subreg_size_offset_from_lsb (unsigned int outer_bytes,
3545 unsigned int inner_bytes,
3546 unsigned int lsb_shift)
3547 {
3548 /* A paradoxical subreg begins at bit position 0. */
3549 if (outer_bytes > inner_bytes)
3550 {
3551 gcc_checking_assert (lsb_shift == 0);
3552 return 0;
3553 }
3554
3555 gcc_assert (lsb_shift % BITS_PER_UNIT == 0);
3556 unsigned int lower_bytes = lsb_shift / BITS_PER_UNIT;
3557 unsigned int upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3558 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3559 return upper_bytes;
3560 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3561 return lower_bytes;
3562 else
3563 {
3564 unsigned int lower_word_part = lower_bytes & -UNITS_PER_WORD;
3565 unsigned int upper_word_part = upper_bytes & -UNITS_PER_WORD;
3566 if (WORDS_BIG_ENDIAN)
3567 return upper_word_part + (lower_bytes - lower_word_part);
3568 else
3569 return lower_word_part + (upper_bytes - upper_word_part);
3570 }
3571 }
3572
3573 /* Fill in information about a subreg of a hard register.
3574 xregno - A regno of an inner hard subreg_reg (or what will become one).
3575 xmode - The mode of xregno.
3576 offset - The byte offset.
3577 ymode - The mode of a top level SUBREG (or what may become one).
3578 info - Pointer to structure to fill in.
3579
3580 Rather than considering one particular inner register (and thus one
3581 particular "outer" register) in isolation, this function really uses
3582 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3583 function does not check whether adding INFO->offset to XREGNO gives
3584 a valid hard register; even if INFO->offset + XREGNO is out of range,
3585 there might be another register of the same type that is in range.
3586 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3587 register, since that can depend on things like whether the final
3588 register number is even or odd. Callers that want to check whether
3589 this particular subreg can be replaced by a simple (reg ...) should
3590 use simplify_subreg_regno. */
3591
3592 void
3593 subreg_get_info (unsigned int xregno, machine_mode xmode,
3594 unsigned int offset, machine_mode ymode,
3595 struct subreg_info *info)
3596 {
3597 unsigned int nregs_xmode, nregs_ymode;
3598
3599 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3600
3601 unsigned int xsize = GET_MODE_SIZE (xmode);
3602 unsigned int ysize = GET_MODE_SIZE (ymode);
3603 bool rknown = false;
3604
3605 /* If the register representation of a non-scalar mode has holes in it,
3606 we expect the scalar units to be concatenated together, with the holes
3607 distributed evenly among the scalar units. Each scalar unit must occupy
3608 at least one register. */
3609 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3610 {
3611 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3612 unsigned int nunits = GET_MODE_NUNITS (xmode);
3613 machine_mode xmode_unit = GET_MODE_INNER (xmode);
3614 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3615 gcc_assert (nregs_xmode
3616 == (nunits
3617 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3618 gcc_assert (hard_regno_nregs[xregno][xmode]
3619 == hard_regno_nregs[xregno][xmode_unit] * nunits);
3620
3621 /* You can only ask for a SUBREG of a value with holes in the middle
3622 if you don't cross the holes. (Such a SUBREG should be done by
3623 picking a different register class, or doing it in memory if
3624 necessary.) An example of a value with holes is XCmode on 32-bit
3625 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3626 3 for each part, but in memory it's two 128-bit parts.
3627 Padding is assumed to be at the end (not necessarily the 'high part')
3628 of each unit. */
3629 if ((offset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3630 && (offset / GET_MODE_SIZE (xmode_unit)
3631 != ((offset + ysize - 1) / GET_MODE_SIZE (xmode_unit))))
3632 {
3633 info->representable_p = false;
3634 rknown = true;
3635 }
3636 }
3637 else
3638 nregs_xmode = hard_regno_nregs[xregno][xmode];
3639
3640 nregs_ymode = hard_regno_nregs[xregno][ymode];
3641
3642 /* Paradoxical subregs are otherwise valid. */
3643 if (!rknown && offset == 0 && ysize > xsize)
3644 {
3645 info->representable_p = true;
3646 /* If this is a big endian paradoxical subreg, which uses more
3647 actual hard registers than the original register, we must
3648 return a negative offset so that we find the proper highpart
3649 of the register.
3650
3651 We assume that the ordering of registers within a multi-register
3652 value has a consistent endianness: if bytes and register words
3653 have different endianness, the hard registers that make up a
3654 multi-register value must be at least word-sized. */
3655 if (REG_WORDS_BIG_ENDIAN)
3656 info->offset = (int) nregs_xmode - (int) nregs_ymode;
3657 else
3658 info->offset = 0;
3659 info->nregs = nregs_ymode;
3660 return;
3661 }
3662
3663 /* If registers store different numbers of bits in the different
3664 modes, we cannot generally form this subreg. */
3665 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3666 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3667 && (xsize % nregs_xmode) == 0
3668 && (ysize % nregs_ymode) == 0)
3669 {
3670 int regsize_xmode = xsize / nregs_xmode;
3671 int regsize_ymode = ysize / nregs_ymode;
3672 if (!rknown
3673 && ((nregs_ymode > 1 && regsize_xmode > regsize_ymode)
3674 || (nregs_xmode > 1 && regsize_ymode > regsize_xmode)))
3675 {
3676 info->representable_p = false;
3677 info->nregs = CEIL (ysize, regsize_xmode);
3678 info->offset = offset / regsize_xmode;
3679 return;
3680 }
3681 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3682 would go outside of XMODE. */
3683 if (!rknown && ysize + offset > xsize)
3684 {
3685 info->representable_p = false;
3686 info->nregs = nregs_ymode;
3687 info->offset = offset / regsize_xmode;
3688 return;
3689 }
3690 /* Quick exit for the simple and common case of extracting whole
3691 subregisters from a multiregister value. */
3692 /* ??? It would be better to integrate this into the code below,
3693 if we can generalize the concept enough and figure out how
3694 odd-sized modes can coexist with the other weird cases we support. */
3695 if (!rknown
3696 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3697 && regsize_xmode == regsize_ymode
3698 && (offset % regsize_ymode) == 0)
3699 {
3700 info->representable_p = true;
3701 info->nregs = nregs_ymode;
3702 info->offset = offset / regsize_ymode;
3703 gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3704 return;
3705 }
3706 }
3707
3708 /* Lowpart subregs are otherwise valid. */
3709 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3710 {
3711 info->representable_p = true;
3712 rknown = true;
3713
3714 if (offset == 0 || nregs_xmode == nregs_ymode)
3715 {
3716 info->offset = 0;
3717 info->nregs = nregs_ymode;
3718 return;
3719 }
3720 }
3721
3722 /* Set NUM_BLOCKS to the number of independently-representable YMODE
3723 values there are in (reg:XMODE XREGNO). We can view the register
3724 as consisting of this number of independent "blocks", where each
3725 block occupies NREGS_YMODE registers and contains exactly one
3726 representable YMODE value. */
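/* For example, if (reg:XMODE XREGNO) occupies 4 registers and each
   YMODE value occupies 2, there are 2 such blocks. (Illustrative
   register counts; the real values are target-dependent.) */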
3727 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3728 unsigned int num_blocks = nregs_xmode / nregs_ymode;
3729
3730 /* Calculate the number of bytes in each block. This must always
3731 be exact, otherwise we don't know how to verify the constraint.
3732 These conditions may be relaxed but subreg_regno_offset would
3733 need to be redesigned. */
3734 gcc_assert ((xsize % num_blocks) == 0);
3735 unsigned int bytes_per_block = xsize / num_blocks;
3736
3737 /* Get the number of the first block that contains the subreg and the byte
3738 offset of the subreg from the start of that block. */
3739 unsigned int block_number = offset / bytes_per_block;
3740 unsigned int subblock_offset = offset % bytes_per_block;
3741
3742 if (!rknown)
3743 {
3744 /* Only the lowpart of each block is representable. */
3745 info->representable_p
3746 = (subblock_offset
3747 == subreg_size_lowpart_offset (ysize, bytes_per_block));
3748 rknown = true;
3749 }
3750
3751 /* We assume that the ordering of registers within a multi-register
3752 value has a consistent endianness: if bytes and register words
3753 have different endianness, the hard registers that make up a
3754 multi-register value must be at least word-sized. */
3755 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3756 /* The block number we calculated above followed memory endianness.
3757 Convert it to register endianness by counting back from the end.
3758 (Note that, because of the assumption above, each block must be
3759 at least word-sized.) */
3760 info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3761 else
3762 info->offset = block_number * nregs_ymode;
3763 info->nregs = nregs_ymode;
3764 }
3765
3766 /* This function returns the regno offset of a subreg expression.
3767 xregno - A regno of an inner hard subreg_reg (or what will become one).
3768 xmode - The mode of xregno.
3769 offset - The byte offset.
3770 ymode - The mode of a top level SUBREG (or what may become one).
3771 RETURN - The regno offset which would be used. */
3772 unsigned int
3773 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3774 unsigned int offset, machine_mode ymode)
3775 {
3776 struct subreg_info info;
3777 subreg_get_info (xregno, xmode, offset, ymode, &info);
3778 return info.offset;
3779 }
3780
3781 /* This function returns true when the offset is representable via
3782 subreg_offset in the given regno.
3783 xregno - A regno of an inner hard subreg_reg (or what will become one).
3784 xmode - The mode of xregno.
3785 offset - The byte offset.
3786 ymode - The mode of a top level SUBREG (or what may become one).
3787 RETURN - Whether the offset is representable. */
3788 bool
3789 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3790 unsigned int offset, machine_mode ymode)
3791 {
3792 struct subreg_info info;
3793 subreg_get_info (xregno, xmode, offset, ymode, &info);
3794 return info.representable_p;
3795 }
3796
3797 /* Return the number of a YMODE register to which
3798
3799 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3800
3801 can be simplified. Return -1 if the subreg can't be simplified.
3802
3803 XREGNO is a hard register number. */
3804
3805 int
3806 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3807 unsigned int offset, machine_mode ymode)
3808 {
3809 struct subreg_info info;
3810 unsigned int yregno;
3811
3812 #ifdef CANNOT_CHANGE_MODE_CLASS
3813 /* Give the backend a chance to disallow the mode change. */
3814 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3815 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3816 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3817 /* We can use mode change in LRA for some transformations. */
3818 && ! lra_in_progress)
3819 return -1;
3820 #endif
3821
3822 /* We shouldn't simplify stack-related registers. */
3823 if ((!reload_completed || frame_pointer_needed)
3824 && xregno == FRAME_POINTER_REGNUM)
3825 return -1;
3826
3827 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3828 && xregno == ARG_POINTER_REGNUM)
3829 return -1;
3830
3831 if (xregno == STACK_POINTER_REGNUM
3832 /* We should convert the hard stack register in LRA
3833 when possible. */
3834 && ! lra_in_progress)
3835 return -1;
3836
3837 /* Try to get the register offset. */
3838 subreg_get_info (xregno, xmode, offset, ymode, &info);
3839 if (!info.representable_p)
3840 return -1;
3841
3842 /* Make sure that the offsetted register value is in range. */
3843 yregno = xregno + info.offset;
3844 if (!HARD_REGISTER_NUM_P (yregno))
3845 return -1;
3846
3847 /* See whether (reg:YMODE YREGNO) is valid.
3848
3849 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3850 This is a kludge to work around how complex FP arguments are passed
3851 on IA-64 and should be fixed. See PR target/49226. */
3852 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3853 && HARD_REGNO_MODE_OK (xregno, xmode))
3854 return -1;
3855
3856 return (int) yregno;
3857 }
3858
3859 /* Return the final regno that a subreg expression refers to. */
3860 unsigned int
3861 subreg_regno (const_rtx x)
3862 {
3863 unsigned int ret;
3864 rtx subreg = SUBREG_REG (x);
3865 int regno = REGNO (subreg);
3866
3867 ret = regno + subreg_regno_offset (regno,
3868 GET_MODE (subreg),
3869 SUBREG_BYTE (x),
3870 GET_MODE (x));
3871 return ret;
3872
3873 }
3874
3875 /* Return the number of registers that a subreg expression refers
3876 to. */
3877 unsigned int
3878 subreg_nregs (const_rtx x)
3879 {
3880 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3881 }
3882
3883 /* Return the number of registers that a subreg expression X, whose
3884 inner register has register number REGNO, refers to. This is a copy
3885 of subreg_nregs changed so that the regno can be passed in. */
3886
3887 unsigned int
3888 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3889 {
3890 struct subreg_info info;
3891 rtx subreg = SUBREG_REG (x);
3892
3893 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3894 &info);
3895 return info.nregs;
3896 }
3897
3898 struct parms_set_data
3899 {
3900 int nregs;
3901 HARD_REG_SET regs;
3902 };
3903
3904 /* Helper function for noticing stores to parameter registers. */
3905 static void
3906 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3907 {
3908 struct parms_set_data *const d = (struct parms_set_data *) data;
3909 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3910 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3911 {
3912 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3913 d->nregs--;
3914 }
3915 }
3916
3917 /* Look backward for the first parameter to be loaded.
3918 Note that loads of all parameters will not necessarily be
3919 found if CSE has eliminated some of them (e.g., an argument
3920 to the outer function is passed down as a parameter).
3921 Do not skip BOUNDARY. */
3922 rtx_insn *
3923 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3924 {
3925 struct parms_set_data parm;
3926 rtx p;
3927 rtx_insn *before, *first_set;
3928
3929 /* Since different machines initialize their parameter registers
3930 in different orders, assume nothing. Collect the set of all
3931 parameter registers. */
3932 CLEAR_HARD_REG_SET (parm.regs);
3933 parm.nregs = 0;
3934 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3935 if (GET_CODE (XEXP (p, 0)) == USE
3936 && REG_P (XEXP (XEXP (p, 0), 0))
3937 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
3938 {
3939 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3940
3941 /* We only care about registers which can hold function
3942 arguments. */
3943 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3944 continue;
3945
3946 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3947 parm.nregs++;
3948 }
3949 before = call_insn;
3950 first_set = call_insn;
3951
3952 /* Search backward for the first set of a register in this set. */
3953 while (parm.nregs && before != boundary)
3954 {
3955 before = PREV_INSN (before);
3956
3957 /* It is possible that some loads got CSEed from one call to
3958 another. Stop in that case. */
3959 if (CALL_P (before))
3960 break;
3961
3962 /* Our caller must either ensure that we will find all sets
3963 (in case code has not been optimized yet), or guard against
3964 possible labels by setting BOUNDARY to the preceding
3965 CODE_LABEL. */
3966 if (LABEL_P (before))
3967 {
3968 gcc_assert (before == boundary);
3969 break;
3970 }
3971
3972 if (INSN_P (before))
3973 {
3974 int nregs_old = parm.nregs;
3975 note_stores (PATTERN (before), parms_set, &parm);
3976 /* If we found something that did not set a parameter reg,
3977 we're done. Do not keep going, as that might result
3978 in hoisting an insn before the setting of a pseudo
3979 that is used by the hoisted insn. */
3980 if (nregs_old != parm.nregs)
3981 first_set = before;
3982 else
3983 break;
3984 }
3985 }
3986 return first_set;
3987 }
3988
3989 /* Return true if we should avoid inserting code between INSN and preceding
3990 call instruction. */
3991
3992 bool
3993 keep_with_call_p (const rtx_insn *insn)
3994 {
3995 rtx set;
3996
3997 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3998 {
3999 if (REG_P (SET_DEST (set))
4000 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4001 && fixed_regs[REGNO (SET_DEST (set))]
4002 && general_operand (SET_SRC (set), VOIDmode))
4003 return true;
4004 if (REG_P (SET_SRC (set))
4005 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4006 && REG_P (SET_DEST (set))
4007 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4008 return true;
4009 /* There may be a stack pop just after the call and before the store
4010 of the return register. Search for the actual store when deciding
4011 if we can break or not. */
4012 if (SET_DEST (set) == stack_pointer_rtx)
4013 {
4014 /* This CONST_CAST is okay because next_nonnote_insn just
4015 returns its argument and we assign it to a const_rtx
4016 variable. */
4017 const rtx_insn *i2
4018 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4019 if (i2 && keep_with_call_p (i2))
4020 return true;
4021 }
4022 }
4023 return false;
4024 }
4025
4026 /* Return true if LABEL is a target of JUMP_INSN. This applies only
4027 to non-complex jumps. That is, direct unconditional, conditional,
4028 and tablejumps, but not computed jumps or returns. It also does
4029 not apply to the fallthru case of a conditional jump. */
4030
4031 bool
4032 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4033 {
4034 rtx tmp = JUMP_LABEL (jump_insn);
4035 rtx_jump_table_data *table;
4036
4037 if (label == tmp)
4038 return true;
4039
4040 if (tablejump_p (jump_insn, NULL, &table))
4041 {
4042 rtvec vec = table->get_labels ();
4043 int i, veclen = GET_NUM_ELEM (vec);
4044
4045 for (i = 0; i < veclen; ++i)
4046 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4047 return true;
4048 }
4049
4050 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4051 return true;
4052
4053 return false;
4054 }
4055
4056 \f
4057 /* Return an estimate of the cost of computing rtx X.
4058 One use is in cse, to decide which expression to keep in the hash table.
4059 Another is in rtl generation, to pick the cheapest way to multiply.
4060 Other uses like the latter are expected in the future.
4061
4062 X appears as operand OPNO in an expression with code OUTER_CODE.
4063 SPEED specifies whether costs optimized for speed or size should
4064 be returned. */
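/* As a rough illustration of the defaults computed below: on a target
   where UNITS_PER_WORD is 4, a DImode MULT has factor 2 and a default
   cost of 2 * 2 * COSTS_N_INSNS (5), while a DImode SET defaults to
   2 * COSTS_N_INSNS (1).  The target's rtx_costs hook may override
   either value.  */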
4065
4066 int
4067 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4068 int opno, bool speed)
4069 {
4070 int i, j;
4071 enum rtx_code code;
4072 const char *fmt;
4073 int total;
4074 int factor;
4075
4076 if (x == 0)
4077 return 0;
4078
4079 if (GET_MODE (x) != VOIDmode)
4080 mode = GET_MODE (x);
4081
4082 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4083 many insns, taking N times as long. */
4084 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4085 if (factor == 0)
4086 factor = 1;
4087
4088 /* Compute the default costs of certain things.
4089 Note that targetm.rtx_costs can override the defaults. */
4090
4091 code = GET_CODE (x);
4092 switch (code)
4093 {
4094 case MULT:
4095 /* Multiplication has time-complexity O(N*N), where N is the
4096 number of units (translated from digits) when using
4097 schoolbook long multiplication. */
4098 total = factor * factor * COSTS_N_INSNS (5);
4099 break;
4100 case DIV:
4101 case UDIV:
4102 case MOD:
4103 case UMOD:
4104 /* Similarly, complexity for schoolbook long division. */
4105 total = factor * factor * COSTS_N_INSNS (7);
4106 break;
4107 case USE:
4108 /* Used in combine.c as a marker. */
4109 total = 0;
4110 break;
4111 case SET:
4112 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4113 the mode for the factor. */
4114 mode = GET_MODE (SET_DEST (x));
4115 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4116 if (factor == 0)
4117 factor = 1;
4118 /* FALLTHRU */
4119 default:
4120 total = factor * COSTS_N_INSNS (1);
4121 }
4122
4123 switch (code)
4124 {
4125 case REG:
4126 return 0;
4127
4128 case SUBREG:
4129 total = 0;
4130 /* If we can't tie these modes, make this expensive. The larger
4131 the mode, the more expensive it is. */
4132 if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
4133 return COSTS_N_INSNS (2 + factor);
4134 break;
4135
4136 default:
4137 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4138 return total;
4139 break;
4140 }
4141
4142 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4143 which is already in total. */
4144
4145 fmt = GET_RTX_FORMAT (code);
4146 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4147 if (fmt[i] == 'e')
4148 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4149 else if (fmt[i] == 'E')
4150 for (j = 0; j < XVECLEN (x, i); j++)
4151 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4152
4153 return total;
4154 }
4155
4156 /* Fill in the structure C with information about both speed and size rtx
4157 costs for X, which is operand OPNO in an expression with code OUTER. */
4158
4159 void
4160 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4161 struct full_rtx_costs *c)
4162 {
4163 c->speed = rtx_cost (x, mode, outer, opno, true);
4164 c->size = rtx_cost (x, mode, outer, opno, false);
4165 }
4166
4167 \f
4168 /* Return the cost of address expression X.
4169 X is expected to be a properly formed address reference.
4170
4171 The SPEED parameter specifies whether costs optimized for speed
4172 or size should be returned. */
4173
4174 int
4175 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4176 {
4177 /* We may be asked for the cost of various unusual addresses, such as
4178 operands of a push instruction. It is not worthwhile to complicate
4179 the target hook for such cases. */
4180
4181 if (!memory_address_addr_space_p (mode, x, as))
4182 return 1000;
4183
4184 return targetm.address_cost (x, mode, as, speed);
4185 }
4186
4187 /* If the target doesn't override, compute the cost as with arithmetic. */
4188
4189 int
4190 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4191 {
4192 return rtx_cost (x, Pmode, MEM, 0, speed);
4193 }
4194 \f
4195
4196 unsigned HOST_WIDE_INT
4197 nonzero_bits (const_rtx x, machine_mode mode)
4198 {
4199 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4200 }
4201
4202 unsigned int
4203 num_sign_bit_copies (const_rtx x, machine_mode mode)
4204 {
4205 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
4206 }
4207
4208 /* Return true if nonzero_bits1 might recurse into both operands
4209 of X. */
4210
4211 static inline bool
4212 nonzero_bits_binary_arith_p (const_rtx x)
4213 {
4214 if (!ARITHMETIC_P (x))
4215 return false;
4216 switch (GET_CODE (x))
4217 {
4218 case AND:
4219 case XOR:
4220 case IOR:
4221 case UMIN:
4222 case UMAX:
4223 case SMIN:
4224 case SMAX:
4225 case PLUS:
4226 case MINUS:
4227 case MULT:
4228 case DIV:
4229 case UDIV:
4230 case MOD:
4231 case UMOD:
4232 return true;
4233 default:
4234 return false;
4235 }
4236 }
4237
4238 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4239 It avoids exponential behavior in nonzero_bits1 when X has
4240 identical subexpressions on the first or the second level. */
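/* For example, in (plus (reg A) (reg A)) the two operands are the
   same rtx, so the wrapper computes nonzero_bits for the shared
   operand once and passes the result to nonzero_bits1 as KNOWN_RET
   instead of recursing into each operand separately.  */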
4241
4242 static unsigned HOST_WIDE_INT
4243 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
4244 machine_mode known_mode,
4245 unsigned HOST_WIDE_INT known_ret)
4246 {
4247 if (x == known_x && mode == known_mode)
4248 return known_ret;
4249
4250 /* Try to find identical subexpressions. If found call
4251 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4252 precomputed value for the subexpression as KNOWN_RET. */
4253
4254 if (nonzero_bits_binary_arith_p (x))
4255 {
4256 rtx x0 = XEXP (x, 0);
4257 rtx x1 = XEXP (x, 1);
4258
4259 /* Check the first level. */
4260 if (x0 == x1)
4261 return nonzero_bits1 (x, mode, x0, mode,
4262 cached_nonzero_bits (x0, mode, known_x,
4263 known_mode, known_ret));
4264
4265 /* Check the second level. */
4266 if (nonzero_bits_binary_arith_p (x0)
4267 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4268 return nonzero_bits1 (x, mode, x1, mode,
4269 cached_nonzero_bits (x1, mode, known_x,
4270 known_mode, known_ret));
4271
4272 if (nonzero_bits_binary_arith_p (x1)
4273 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4274 return nonzero_bits1 (x, mode, x0, mode,
4275 cached_nonzero_bits (x0, mode, known_x,
4276 known_mode, known_ret));
4277 }
4278
4279 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4280 }
4281
4282 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4283 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4284 is less useful. We can't allow both, because that results in exponential
4285 run time recursion. There is a nullstone testcase that triggered
4286 this. This macro avoids accidental uses of num_sign_bit_copies. */
4287 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4288
4289 /* Given an expression, X, compute which bits in X can be nonzero.
4290 We don't care about bits outside of those defined in MODE.
4291
4292 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4293 an arithmetic operation, we can do better. */
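/* For instance, for (and:SI (reg:SI R) (const_int 255)) the AND case
   below intersects the operands' masks, so at most the low eight
   bits can be nonzero regardless of what is known about R.  */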
4294
4295 static unsigned HOST_WIDE_INT
4296 nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
4297 machine_mode known_mode,
4298 unsigned HOST_WIDE_INT known_ret)
4299 {
4300 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4301 unsigned HOST_WIDE_INT inner_nz;
4302 enum rtx_code code;
4303 machine_mode inner_mode;
4304 unsigned int mode_width = GET_MODE_PRECISION (mode);
4305
4306 /* For floating-point and vector values, assume all bits are needed. */
4307 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4308 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4309 return nonzero;
4310
4311 /* If X is wider than MODE, use its mode instead. */
4312 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4313 {
4314 mode = GET_MODE (x);
4315 nonzero = GET_MODE_MASK (mode);
4316 mode_width = GET_MODE_PRECISION (mode);
4317 }
4318
4319 if (mode_width > HOST_BITS_PER_WIDE_INT)
4320 /* Our only callers in this case look for single bit values. So
4321 just return the mode mask. Those tests will then be false. */
4322 return nonzero;
4323
4324 /* If MODE is wider than X, but each fits in a single word on both the
4325 host and target machines, we can compute this from which bits of the
4326 object might be nonzero in its own mode, taking into account the fact
4327 that, on many CISC machines, accessing an object in a wider mode
4328 causes the high-order bits to become undefined, so they are
4329 not known to be zero. */
4330
4331 if (!WORD_REGISTER_OPERATIONS
4332 && GET_MODE (x) != VOIDmode
4333 && GET_MODE (x) != mode
4334 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4335 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4336 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4337 {
4338 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4339 known_x, known_mode, known_ret);
4340 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4341 return nonzero;
4342 }
4343
4344 /* Please keep nonzero_bits_binary_arith_p above in sync with
4345 the code in the switch below. */
4346 code = GET_CODE (x);
4347 switch (code)
4348 {
4349 case REG:
4350 #if defined(POINTERS_EXTEND_UNSIGNED)
4351 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4352 all the bits above ptr_mode are known to be zero. */
4353 /* As we do not know which address space the pointer is referring to,
4354 we can do this only if the target does not support different pointer
4355 or address modes depending on the address space. */
4356 if (target_default_pointer_address_modes_p ()
4357 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4358 && REG_POINTER (x)
4359 && !targetm.have_ptr_extend ())
4360 nonzero &= GET_MODE_MASK (ptr_mode);
4361 #endif
4362
4363 /* Include declared information about alignment of pointers. */
4364 /* ??? We don't properly preserve REG_POINTER changes across
4365 pointer-to-integer casts, so we can't trust it except for
4366 things that we know must be pointers. See execute/960116-1.c. */
4367 if ((x == stack_pointer_rtx
4368 || x == frame_pointer_rtx
4369 || x == arg_pointer_rtx)
4370 && REGNO_POINTER_ALIGN (REGNO (x)))
4371 {
4372 unsigned HOST_WIDE_INT alignment
4373 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4374
4375 #ifdef PUSH_ROUNDING
4376 /* If PUSH_ROUNDING is defined, it is possible for the
4377 stack to be momentarily aligned only to that amount,
4378 so we pick the least alignment. */
4379 if (x == stack_pointer_rtx && PUSH_ARGS)
4380 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4381 alignment);
4382 #endif
4383
4384 nonzero &= ~(alignment - 1);
4385 }
4386
4387 {
4388 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4389 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4390 known_mode, known_ret,
4391 &nonzero_for_hook);
4392
4393 if (new_rtx)
4394 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4395 known_mode, known_ret);
4396
4397 return nonzero_for_hook;
4398 }
4399
4400 case CONST_INT:
4401 /* If X is negative in MODE, sign-extend the value. */
4402 if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
4403 && mode_width < BITS_PER_WORD
4404 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1)))
4405 != 0)
4406 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4407
4408 return UINTVAL (x);
4409
4410 case MEM:
4411 /* In many, if not most, RISC machines, reading a byte from memory
4412 zeros the rest of the register. Noticing that fact saves a lot
4413 of extra zero-extends. */
4414 if (load_extend_op (GET_MODE (x)) == ZERO_EXTEND)
4415 nonzero &= GET_MODE_MASK (GET_MODE (x));
4416 break;
4417
4418 case EQ: case NE:
4419 case UNEQ: case LTGT:
4420 case GT: case GTU: case UNGT:
4421 case LT: case LTU: case UNLT:
4422 case GE: case GEU: case UNGE:
4423 case LE: case LEU: case UNLE:
4424 case UNORDERED: case ORDERED:
4425 /* If this produces an integer result, we know which bits are set.
4426 Code here used to clear bits outside the mode of X, but that is
4427 now done above. */
4428 /* Mind that MODE is the mode the caller wants to look at this
4429 operation in, and not the actual operation mode. We can wind
4430 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4431 that describes the results of a vector compare. */
4432 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4433 && mode_width <= HOST_BITS_PER_WIDE_INT)
4434 nonzero = STORE_FLAG_VALUE;
4435 break;
4436
4437 case NEG:
4438 #if 0
4439 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4440 and num_sign_bit_copies. */
4441 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4442 == GET_MODE_PRECISION (GET_MODE (x)))
4443 nonzero = 1;
4444 #endif
4445
4446 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4447 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4448 break;
4449
4450 case ABS:
4451 #if 0
4452 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4453 and num_sign_bit_copies. */
4454 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4455 == GET_MODE_PRECISION (GET_MODE (x)))
4456 nonzero = 1;
4457 #endif
4458 break;
4459
4460 case TRUNCATE:
4461 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4462 known_x, known_mode, known_ret)
4463 & GET_MODE_MASK (mode));
4464 break;
4465
4466 case ZERO_EXTEND:
4467 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4468 known_x, known_mode, known_ret);
4469 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4470 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4471 break;
4472
4473 case SIGN_EXTEND:
4474 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4475 Otherwise, show all the bits in the outer mode but not the inner
4476 may be nonzero. */
4477 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4478 known_x, known_mode, known_ret);
4479 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4480 {
4481 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4482 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4483 inner_nz |= (GET_MODE_MASK (mode)
4484 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4485 }
4486
4487 nonzero &= inner_nz;
4488 break;
4489
4490 case AND:
4491 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4492 known_x, known_mode, known_ret)
4493 & cached_nonzero_bits (XEXP (x, 1), mode,
4494 known_x, known_mode, known_ret);
4495 break;
4496
4497 case XOR: case IOR:
4498 case UMIN: case UMAX: case SMIN: case SMAX:
4499 {
4500 unsigned HOST_WIDE_INT nonzero0
4501 = cached_nonzero_bits (XEXP (x, 0), mode,
4502 known_x, known_mode, known_ret);
4503
4504 /* Don't call nonzero_bits for the second time if it cannot change
4505 anything. */
4506 if ((nonzero & nonzero0) != nonzero)
4507 nonzero &= nonzero0
4508 | cached_nonzero_bits (XEXP (x, 1), mode,
4509 known_x, known_mode, known_ret);
4510 }
4511 break;
4512
4513 case PLUS: case MINUS:
4514 case MULT:
4515 case DIV: case UDIV:
4516 case MOD: case UMOD:
4517 /* We can apply the rules of arithmetic to compute the number of
4518 high- and low-order zero bits of these operations. We start by
4519 computing the width (position of the highest-order nonzero bit)
4520 and the number of low-order zero bits for each value. */
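/* For example, if nz0 == 0x0c (width 4, two low zeros) and
   nz1 == 0x30 (width 6, four low zeros), a MULT can set at most
   bits 6..9: result_width = 4 + 6 and result_low = 2 + 4, so the
   masks below reduce NONZERO to 0x3c0.  */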
4521 {
4522 unsigned HOST_WIDE_INT nz0
4523 = cached_nonzero_bits (XEXP (x, 0), mode,
4524 known_x, known_mode, known_ret);
4525 unsigned HOST_WIDE_INT nz1
4526 = cached_nonzero_bits (XEXP (x, 1), mode,
4527 known_x, known_mode, known_ret);
4528 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4529 int width0 = floor_log2 (nz0) + 1;
4530 int width1 = floor_log2 (nz1) + 1;
4531 int low0 = ctz_or_zero (nz0);
4532 int low1 = ctz_or_zero (nz1);
4533 unsigned HOST_WIDE_INT op0_maybe_minusp
4534 = nz0 & (HOST_WIDE_INT_1U << sign_index);
4535 unsigned HOST_WIDE_INT op1_maybe_minusp
4536 = nz1 & (HOST_WIDE_INT_1U << sign_index);
4537 unsigned int result_width = mode_width;
4538 int result_low = 0;
4539
4540 switch (code)
4541 {
4542 case PLUS:
4543 result_width = MAX (width0, width1) + 1;
4544 result_low = MIN (low0, low1);
4545 break;
4546 case MINUS:
4547 result_low = MIN (low0, low1);
4548 break;
4549 case MULT:
4550 result_width = width0 + width1;
4551 result_low = low0 + low1;
4552 break;
4553 case DIV:
4554 if (width1 == 0)
4555 break;
4556 if (!op0_maybe_minusp && !op1_maybe_minusp)
4557 result_width = width0;
4558 break;
4559 case UDIV:
4560 if (width1 == 0)
4561 break;
4562 result_width = width0;
4563 break;
4564 case MOD:
4565 if (width1 == 0)
4566 break;
4567 if (!op0_maybe_minusp && !op1_maybe_minusp)
4568 result_width = MIN (width0, width1);
4569 result_low = MIN (low0, low1);
4570 break;
4571 case UMOD:
4572 if (width1 == 0)
4573 break;
4574 result_width = MIN (width0, width1);
4575 result_low = MIN (low0, low1);
4576 break;
4577 default:
4578 gcc_unreachable ();
4579 }
4580
4581 if (result_width < mode_width)
4582 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4583
4584 if (result_low > 0)
4585 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4586 }
4587 break;
4588
4589 case ZERO_EXTRACT:
4590 if (CONST_INT_P (XEXP (x, 1))
4591 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4592 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4593 break;
4594
4595 case SUBREG:
4596 /* If this is a SUBREG formed for a promoted variable that has
4597 been zero-extended, we know that at least the high-order bits
4598 are zero, though others might be too. */
4599 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4600 nonzero = GET_MODE_MASK (GET_MODE (x))
4601 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4602 known_x, known_mode, known_ret);
4603
4604 /* If the inner mode is a single word for both the host and target
4605 machines, we can compute this from which bits of the inner
4606 object might be nonzero. */
4607 inner_mode = GET_MODE (SUBREG_REG (x));
4608 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4609 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT)
4610 {
4611 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4612 known_x, known_mode, known_ret);
4613
4614 /* On many CISC machines, accessing an object in a wider mode
4615 causes the high-order bits to become undefined. So they are
4616 not known to be zero. */
4617 rtx_code extend_op;
4618 if ((!WORD_REGISTER_OPERATIONS
4619 /* If this is a typical RISC machine, we only have to worry
4620 about the way loads are extended. */
4621 || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4622 ? val_signbit_known_set_p (inner_mode, nonzero)
4623 : extend_op != ZERO_EXTEND)
4624 || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
4625 && GET_MODE_PRECISION (GET_MODE (x))
4626 > GET_MODE_PRECISION (inner_mode))
4627 nonzero
4628 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4629 }
4630 break;
4631
4632 case ASHIFTRT:
4633 case LSHIFTRT:
4634 case ASHIFT:
4635 case ROTATE:
4636 /* The nonzero bits are in two classes: any bits within MODE
4637 that aren't in GET_MODE (x) are always significant. The rest of the
4638 nonzero bits are those that are significant in the operand of
4639 the shift when shifted the appropriate number of bits. This
4640 shows that high-order bits are cleared by the right shift and
4641 low-order bits by left shifts. */
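/* For example, (lshiftrt:SI R (const_int 4)) with R known to fit in
   eight bits leaves at most 0x0f nonzero, while
   (ashift:SI R (const_int 4)) leaves at most 0xff0.  */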
4642 if (CONST_INT_P (XEXP (x, 1))
4643 && INTVAL (XEXP (x, 1)) >= 0
4644 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4645 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4646 {
4647 machine_mode inner_mode = GET_MODE (x);
4648 unsigned int width = GET_MODE_PRECISION (inner_mode);
4649 int count = INTVAL (XEXP (x, 1));
4650 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4651 unsigned HOST_WIDE_INT op_nonzero
4652 = cached_nonzero_bits (XEXP (x, 0), mode,
4653 known_x, known_mode, known_ret);
4654 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4655 unsigned HOST_WIDE_INT outer = 0;
4656
4657 if (mode_width > width)
4658 outer = (op_nonzero & nonzero & ~mode_mask);
4659
4660 if (code == LSHIFTRT)
4661 inner >>= count;
4662 else if (code == ASHIFTRT)
4663 {
4664 inner >>= count;
4665
4666 /* If the sign bit may have been nonzero before the shift, we
4667 need to mark all the places it could have been copied to
4668 by the shift as possibly nonzero. */
4669 if (inner & (HOST_WIDE_INT_1U << (width - 1 - count)))
4670 inner |= ((HOST_WIDE_INT_1U << count) - 1)
4671 << (width - count);
4672 }
4673 else if (code == ASHIFT)
4674 inner <<= count;
4675 else
4676 inner = ((inner << (count % width)
4677 | (inner >> (width - (count % width)))) & mode_mask);
4678
4679 nonzero &= (outer | inner);
4680 }
4681 break;
4682
4683 case FFS:
4684 case POPCOUNT:
4685 /* This is at most the number of bits in the mode. */
4686 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4687 break;
4688
4689 case CLZ:
4690 /* If CLZ has a known value at zero, then the nonzero bits are
4691 that value, plus the number of bits in the mode minus one. */
4692 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4693 nonzero
4694 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4695 else
4696 nonzero = -1;
4697 break;
4698
4699 case CTZ:
4700 /* If CTZ has a known value at zero, then the nonzero bits are
4701 that value, plus the number of bits in the mode minus one. */
4702 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4703 nonzero
4704 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4705 else
4706 nonzero = -1;
4707 break;
4708
4709 case CLRSB:
4710 /* This is at most the number of bits in the mode minus 1. */
4711 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4712 break;
4713
4714 case PARITY:
4715 nonzero = 1;
4716 break;
4717
4718 case IF_THEN_ELSE:
4719 {
4720 unsigned HOST_WIDE_INT nonzero_true
4721 = cached_nonzero_bits (XEXP (x, 1), mode,
4722 known_x, known_mode, known_ret);
4723
4724 /* Don't call nonzero_bits for the second time if it cannot change
4725 anything. */
4726 if ((nonzero & nonzero_true) != nonzero)
4727 nonzero &= nonzero_true
4728 | cached_nonzero_bits (XEXP (x, 2), mode,
4729 known_x, known_mode, known_ret);
4730 }
4731 break;
4732
4733 default:
4734 break;
4735 }
4736
4737 return nonzero;
4738 }
4739
4740 /* See the macro definition above. */
4741 #undef cached_num_sign_bit_copies
4742
4743 \f
4744 /* Return true if num_sign_bit_copies1 might recurse into both operands
4745 of X. */
4746
4747 static inline bool
4748 num_sign_bit_copies_binary_arith_p (const_rtx x)
4749 {
4750 if (!ARITHMETIC_P (x))
4751 return false;
4752 switch (GET_CODE (x))
4753 {
4754 case IOR:
4755 case AND:
4756 case XOR:
4757 case SMIN:
4758 case SMAX:
4759 case UMIN:
4760 case UMAX:
4761 case PLUS:
4762 case MINUS:
4763 case MULT:
4764 return true;
4765 default:
4766 return false;
4767 }
4768 }
4769
4770 /* The function cached_num_sign_bit_copies is a wrapper around
4771 num_sign_bit_copies1. It avoids exponential behavior in
4772 num_sign_bit_copies1 when X has identical subexpressions on the
4773 first or the second level. */
4774
4775 static unsigned int
4776 cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
4777 machine_mode known_mode,
4778 unsigned int known_ret)
4779 {
4780 if (x == known_x && mode == known_mode)
4781 return known_ret;
4782
4783 /* Try to find identical subexpressions. If found call
4784 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4785 the precomputed value for the subexpression as KNOWN_RET. */
4786
4787 if (num_sign_bit_copies_binary_arith_p (x))
4788 {
4789 rtx x0 = XEXP (x, 0);
4790 rtx x1 = XEXP (x, 1);
4791
4792 /* Check the first level. */
4793 if (x0 == x1)
4794 return
4795 num_sign_bit_copies1 (x, mode, x0, mode,
4796 cached_num_sign_bit_copies (x0, mode, known_x,
4797 known_mode,
4798 known_ret));
4799
4800 /* Check the second level. */
4801 if (num_sign_bit_copies_binary_arith_p (x0)
4802 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4803 return
4804 num_sign_bit_copies1 (x, mode, x1, mode,
4805 cached_num_sign_bit_copies (x1, mode, known_x,
4806 known_mode,
4807 known_ret));
4808
4809 if (num_sign_bit_copies_binary_arith_p (x1)
4810 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4811 return
4812 num_sign_bit_copies1 (x, mode, x0, mode,
4813 cached_num_sign_bit_copies (x0, mode, known_x,
4814 known_mode,
4815 known_ret));
4816 }
4817
4818 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4819 }
4820
4821 /* Return the number of bits at the high-order end of X that are known to
4822 be equal to the sign bit. X will be used in mode MODE; if MODE is
4823 VOIDmode, X will be used in its own mode. The returned value will always
4824 be between 1 and the number of bits in MODE. */
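/* For example, (sign_extend:DI (reg:SI R)) used in DImode has at
   least 64 - 32 + 1 = 33 sign bit copies, and more if R itself is
   known to have redundant sign bits.  */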
4825
4826 static unsigned int
4827 num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
4828 machine_mode known_mode,
4829 unsigned int known_ret)
4830 {
4831 enum rtx_code code = GET_CODE (x);
4832 machine_mode inner_mode;
4833 int num0, num1, result;
4834 unsigned HOST_WIDE_INT nonzero;
4835
4836 /* If we weren't given a mode, use the mode of X. If the mode is still
4837 VOIDmode, we don't know anything. Likewise if one of the modes is
4838 floating-point. */
4839
4840 if (mode == VOIDmode)
4841 mode = GET_MODE (x);
4842
4843 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4844 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4845 return 1;
4846
4847 /* For a smaller mode, just ignore the high bits. */
4848 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4849 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4850 {
4851 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4852 known_x, known_mode, known_ret);
4853 return MAX (1,
4854 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4855 }
4856
4857 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4858 {
4859 /* If this machine does not do all register operations on the entire
4860 register and MODE is wider than the mode of X, we can say nothing
4861 at all about the high-order bits. */
4862 if (!WORD_REGISTER_OPERATIONS)
4863 return 1;
4864
4865 /* Likewise on machines that do, if the mode of the object is smaller
4866 than a word and loads of that size don't sign extend, we can say
4867 nothing about the high order bits. */
4868 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4869 && load_extend_op (GET_MODE (x)) != SIGN_EXTEND)
4870 return 1;
4871 }
4872
4873 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
4874 the code in the switch below. */
4875 switch (code)
4876 {
4877 case REG:
4878
4879 #if defined(POINTERS_EXTEND_UNSIGNED)
4880 /* If pointers extend signed and this is a pointer in Pmode, say that
4881 all the bits above ptr_mode are known to be sign bit copies. */
4882 /* As we do not know which address space the pointer is referring to,
4883 we can do this only if the target does not support different pointer
4884 or address modes depending on the address space. */
4885 if (target_default_pointer_address_modes_p ()
4886 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4887 && mode == Pmode && REG_POINTER (x)
4888 && !targetm.have_ptr_extend ())
4889 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4890 #endif
4891
4892 {
4893 unsigned int copies_for_hook = 1, copies = 1;
4894 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4895 known_mode, known_ret,
4896 &copies_for_hook);
4897
4898 if (new_rtx)
4899 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4900 known_mode, known_ret);
4901
4902 if (copies > 1 || copies_for_hook > 1)
4903 return MAX (copies, copies_for_hook);
4904
4905 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4906 }
4907 break;
4908
4909 case MEM:
4910 /* Some RISC machines sign-extend all loads of smaller than a word. */
4911 if (load_extend_op (GET_MODE (x)) == SIGN_EXTEND)
4912 return MAX (1, ((int) bitwidth
4913 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4914 break;
4915
4916 case CONST_INT:
4917 /* If the constant is negative, take its 1's complement and remask.
4918 Then see how many zero bits we have. */
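/* E.g. in SImode, (const_int -256) is 0xffffff00: complementing
   gives 0xff, so bitwidth - floor_log2 (0xff) - 1 = 32 - 7 - 1 = 24
   leading sign bit copies.  */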
4919 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4920 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4921 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
4922 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4923
4924 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4925
4926 case SUBREG:
4927 /* If this is a SUBREG for a promoted object that is sign-extended
4928 and we are looking at it in a wider mode, we know that at least the
4929 high-order bits are known to be sign bit copies. */
4930
4931 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4932 {
4933 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4934 known_x, known_mode, known_ret);
4935 return MAX ((int) bitwidth
4936 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4937 num0);
4938 }
4939
4940 /* For a smaller object, just ignore the high bits. */
4941 inner_mode = GET_MODE (SUBREG_REG (x));
4942 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
4943 {
4944 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4945 known_x, known_mode, known_ret);
4946 return
4947 MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode) - bitwidth));
4948 }
4949
4950 /* For paradoxical SUBREGs on machines where all register operations
4951 affect the entire register, just look inside. Note that we are
4952 passing MODE to the recursive call, so the number of sign bit copies
4953 will remain relative to that mode, not the inner mode. */
4954
4955 /* This works only if loads sign extend. Otherwise, if we get a
4956 reload for the inner part, it may be loaded from the stack, and
4957 then we lose all sign bit copies that existed before the store
4958 to the stack. */
4959
4960 if (WORD_REGISTER_OPERATIONS
4961 && load_extend_op (inner_mode) == SIGN_EXTEND
4962 && paradoxical_subreg_p (x)
4963 && (MEM_P (SUBREG_REG (x)) || REG_P (SUBREG_REG (x))))
4964 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4965 known_x, known_mode, known_ret);
4966 break;
4967
4968 case SIGN_EXTRACT:
4969 if (CONST_INT_P (XEXP (x, 1)))
4970 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4971 break;
4972
4973 case SIGN_EXTEND:
4974 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4975 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4976 known_x, known_mode, known_ret));
4977
4978 case TRUNCATE:
4979 /* For a smaller object, just ignore the high bits. */
4980 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4981 known_x, known_mode, known_ret);
4982 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4983 - bitwidth)));
4984
4985 case NOT:
4986 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4987 known_x, known_mode, known_ret);
4988
4989 case ROTATE: case ROTATERT:
4990 /* If we are rotating left by a number of bits less than the number
4991 of sign bit copies, we can just subtract that amount from the
4992 number. */
4993 if (CONST_INT_P (XEXP (x, 1))
4994 && INTVAL (XEXP (x, 1)) >= 0
4995 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4996 {
4997 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4998 known_x, known_mode, known_ret);
4999 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5000 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5001 }
5002 break;
5003
5004 case NEG:
5005 /* In general, this subtracts one sign bit copy. But if the value
5006 is known to be positive, the number of sign bit copies is the
5007 same as that of the input. Finally, if the input has just one bit
5008 that might be nonzero, all the bits are copies of the sign bit. */
5009 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5010 known_x, known_mode, known_ret);
5011 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5012 return num0 > 1 ? num0 - 1 : 1;
5013
5014 nonzero = nonzero_bits (XEXP (x, 0), mode);
5015 if (nonzero == 1)
5016 return bitwidth;
5017
5018 if (num0 > 1
5019 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5020 num0--;
5021
5022 return num0;
5023
5024 case IOR: case AND: case XOR:
5025 case SMIN: case SMAX: case UMIN: case UMAX:
5026 /* Logical operations will preserve the number of sign-bit copies.
5027 MIN and MAX operations always return one of the operands. */
5028 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5029 known_x, known_mode, known_ret);
5030 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5031 known_x, known_mode, known_ret);
5032
5033 /* If num1 is clearing some of the top bits then regardless of
5034 the other term, we are guaranteed to have at least that many
5035 high-order zero bits. */
5036 if (code == AND
5037 && num1 > 1
5038 && bitwidth <= HOST_BITS_PER_WIDE_INT
5039 && CONST_INT_P (XEXP (x, 1))
5040 && (UINTVAL (XEXP (x, 1))
5041 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5042 return num1;
5043
5044 /* Similarly for IOR when setting high-order bits. */
5045 if (code == IOR
5046 && num1 > 1
5047 && bitwidth <= HOST_BITS_PER_WIDE_INT
5048 && CONST_INT_P (XEXP (x, 1))
5049 && (UINTVAL (XEXP (x, 1))
5050 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5051 return num1;
5052
5053 return MIN (num0, num1);
5054
5055 case PLUS: case MINUS:
5056 /* For addition and subtraction, we can have a 1-bit carry. However,
5057 if we are subtracting 1 from a positive number, there will not
5058 be such a carry. Furthermore, if the positive number is known to
5059 be 0 or 1, we know the result is either -1 or 0. */
5060
5061 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5062 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5063 {
5064 nonzero = nonzero_bits (XEXP (x, 0), mode);
5065 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5066 return (nonzero == 1 || nonzero == 0 ? bitwidth
5067 : bitwidth - floor_log2 (nonzero) - 1);
5068 }
5069
5070 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5071 known_x, known_mode, known_ret);
5072 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5073 known_x, known_mode, known_ret);
5074 result = MAX (1, MIN (num0, num1) - 1);
5075
5076 return result;
5077
5078 case MULT:
5079 /* The number of bits of the product is the sum of the number of
5080 bits of both terms. However, unless one of the terms is known
5081 to be positive, we must allow for an additional bit since negating
5082 a negative number can remove one sign bit copy. */
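/* Concretely, with bitwidth 32 and num0 == num1 == 20, each operand
   has 32 - 20 = 12 significant bits, so the product needs at most
   24 bits, leaving result = 8 sign bit copies, less one more if both
   operands might be negative.  */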
5083
5084 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5085 known_x, known_mode, known_ret);
5086 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5087 known_x, known_mode, known_ret);
5088
5089 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5090 if (result > 0
5091 && (bitwidth > HOST_BITS_PER_WIDE_INT
5092 || (((nonzero_bits (XEXP (x, 0), mode)
5093 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5094 && ((nonzero_bits (XEXP (x, 1), mode)
5095 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5096 != 0))))
5097 result--;
5098
5099 return MAX (1, result);
5100
5101 case UDIV:
5102 /* The result must be <= the first operand. If the first operand
5103 has the high bit set, we know nothing about the number of sign
5104 bit copies. */
5105 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5106 return 1;
5107 else if ((nonzero_bits (XEXP (x, 0), mode)
5108 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5109 return 1;
5110 else
5111 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5112 known_x, known_mode, known_ret);
5113
5114 case UMOD:
5115 /* The result must be <= the second operand. If the second operand
5116 has (or just might have) the high bit set, we know nothing about
5117 the number of sign bit copies. */
5118 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5119 return 1;
5120 else if ((nonzero_bits (XEXP (x, 1), mode)
5121 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5122 return 1;
5123 else
5124 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5125 known_x, known_mode, known_ret);
5126
5127 case DIV:
5128 /* Similar to unsigned division, except that we have to worry about
5129 the case where the divisor is negative, in which case we have
5130 to add 1. */
5131 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5132 known_x, known_mode, known_ret);
5133 if (result > 1
5134 && (bitwidth > HOST_BITS_PER_WIDE_INT
5135 || (nonzero_bits (XEXP (x, 1), mode)
5136 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5137 result--;
5138
5139 return result;
5140
5141 case MOD:
5142 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5143 known_x, known_mode, known_ret);
5144 if (result > 1
5145 && (bitwidth > HOST_BITS_PER_WIDE_INT
5146 || (nonzero_bits (XEXP (x, 1), mode)
5147 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5148 result--;
5149
5150 return result;
5151
5152 case ASHIFTRT:
5153 /* Shifting right by a constant adds that many more bits that are
5154 equal to the sign bit. */
5155 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5156 known_x, known_mode, known_ret);
5157 if (CONST_INT_P (XEXP (x, 1))
5158 && INTVAL (XEXP (x, 1)) > 0
5159 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
5160 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5161
5162 return num0;
5163
5164 case ASHIFT:
5165 /* Left shifts destroy copies. */
5166 if (!CONST_INT_P (XEXP (x, 1))
5167 || INTVAL (XEXP (x, 1)) < 0
5168 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5169 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
5170 return 1;
5171
5172 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5173 known_x, known_mode, known_ret);
5174 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5175
5176 case IF_THEN_ELSE:
5177 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5178 known_x, known_mode, known_ret);
5179 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5180 known_x, known_mode, known_ret);
5181 return MIN (num0, num1);
5182
5183 case EQ: case NE: case GE: case GT: case LE: case LT:
5184 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5185 case GEU: case GTU: case LEU: case LTU:
5186 case UNORDERED: case ORDERED:
5187 /* If the constant is negative, take its 1's complement and remask.
5188 Then see how many zero bits we have. */
5189 nonzero = STORE_FLAG_VALUE;
5190 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5191 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5192 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5193
5194 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5195
5196 default:
5197 break;
5198 }
5199
5200 /* If we haven't been able to figure it out by one of the above rules,
5201 see if some of the high-order bits are known to be zero. If so,
5202 count those bits and return one less than that amount. If we can't
5203 safely compute the mask for this mode, always return BITWIDTH. */
5204
5205 bitwidth = GET_MODE_PRECISION (mode);
5206 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5207 return 1;
5208
5209 nonzero = nonzero_bits (x, mode);
5210 return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5211 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5212 }
5213
5214 /* Calculate the rtx_cost of a single instruction. A return value of
5215 zero indicates an instruction pattern without a known cost. */
5216
5217 int
5218 insn_rtx_cost (rtx pat, bool speed)
5219 {
5220 int i, cost;
5221 rtx set;
5222
5223 /* Extract the single set rtx from the instruction pattern.
5224 We can't use single_set since we only have the pattern. */
5225 if (GET_CODE (pat) == SET)
5226 set = pat;
5227 else if (GET_CODE (pat) == PARALLEL)
5228 {
5229 set = NULL_RTX;
5230 for (i = 0; i < XVECLEN (pat, 0); i++)
5231 {
5232 rtx x = XVECEXP (pat, 0, i);
5233 if (GET_CODE (x) == SET)
5234 {
5235 if (set)
5236 return 0;
5237 set = x;
5238 }
5239 }
5240 if (!set)
5241 return 0;
5242 }
5243 else
5244 return 0;
5245
5246 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5247 return cost > 0 ? cost : COSTS_N_INSNS (1);
5248 }
5249
5250 /* Return an estimate of the cost of computing SEQ. */
5251
5252 unsigned
5253 seq_cost (const rtx_insn *seq, bool speed)
5254 {
5255 unsigned cost = 0;
5256 rtx set;
5257
5258 for (; seq; seq = NEXT_INSN (seq))
5259 {
5260 set = single_set (seq);
5261 if (set)
5262 cost += set_rtx_cost (set, speed);
5263 else
5264 cost++;
5265 }
5266
5267 return cost;
5268 }
5269
5270 /* Given an insn INSN and condition COND, return the condition in a
5271 canonical form to simplify testing by callers. Specifically:
5272
5273 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5274 (2) Both operands will be machine operands; (cc0) will have been replaced.
5275 (3) If an operand is a constant, it will be the second operand.
5276 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5277 for GE, GEU, and LEU.
5278
5279 If the condition cannot be understood, or is an inequality floating-point
5280 comparison which needs to be reversed, 0 will be returned.
5281
5282 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5283
5284 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5285 insn used in locating the condition was found. If a replacement test
5286 of the condition is desired, it should be placed in front of that
5287 insn and we will be sure that the inputs are still valid.
5288
5289 If WANT_REG is nonzero, we wish the condition to be relative to that
5290 register, if possible. Therefore, do not canonicalize the condition
5291 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5292 to be a compare to a CC mode register.
5293
5294 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5295 and at INSN. */
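/* As an example of rule (4), on a 32-bit target the condition
   (le (reg:SI R) (const_int 4)) is returned as
   (lt (reg:SI R) (const_int 5)), which callers can test more
   uniformly.  */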
5296
5297 rtx
5298 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5299 rtx_insn **earliest,
5300 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5301 {
5302 enum rtx_code code;
5303 rtx_insn *prev = insn;
5304 const_rtx set;
5305 rtx tem;
5306 rtx op0, op1;
5307 int reverse_code = 0;
5308 machine_mode mode;
5309 basic_block bb = BLOCK_FOR_INSN (insn);
5310
5311 code = GET_CODE (cond);
5312 mode = GET_MODE (cond);
5313 op0 = XEXP (cond, 0);
5314 op1 = XEXP (cond, 1);
5315
5316 if (reverse)
5317 code = reversed_comparison_code (cond, insn);
5318 if (code == UNKNOWN)
5319 return 0;
5320
5321 if (earliest)
5322 *earliest = insn;
5323
5324 /* If we are comparing a register with zero, see if the register is set
5325 in the previous insn to a COMPARE or a comparison operation. Perform
5326 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5327 in cse.c */
5328
5329 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5330 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5331 && op1 == CONST0_RTX (GET_MODE (op0))
5332 && op0 != want_reg)
5333 {
5334 /* Set nonzero when we find something of interest. */
5335 rtx x = 0;
5336
5337 /* If comparison with cc0, import actual comparison from compare
5338 insn. */
5339 if (op0 == cc0_rtx)
5340 {
5341 if ((prev = prev_nonnote_insn (prev)) == 0
5342 || !NONJUMP_INSN_P (prev)
5343 || (set = single_set (prev)) == 0
5344 || SET_DEST (set) != cc0_rtx)
5345 return 0;
5346
5347 op0 = SET_SRC (set);
5348 op1 = CONST0_RTX (GET_MODE (op0));
5349 if (earliest)
5350 *earliest = prev;
5351 }
5352
5353 /* If this is a COMPARE, pick up the two things being compared. */
5354 if (GET_CODE (op0) == COMPARE)
5355 {
5356 op1 = XEXP (op0, 1);
5357 op0 = XEXP (op0, 0);
5358 continue;
5359 }
5360 else if (!REG_P (op0))
5361 break;
5362
5363 /* Go back to the previous insn. Stop if it is not an INSN. We also
5364 stop if it isn't a single set or if it has a REG_INC note because
5365 we don't want to bother dealing with it. */
5366
5367 prev = prev_nonnote_nondebug_insn (prev);
5368
5369 if (prev == 0
5370 || !NONJUMP_INSN_P (prev)
5371 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5372 /* In cfglayout mode, there do not have to be labels at the
5373 beginning of a block, or jumps at the end, so the previous
5374 conditions would not stop us when we reach the bb boundary. */
5375 || BLOCK_FOR_INSN (prev) != bb)
5376 break;
5377
5378 set = set_of (op0, prev);
5379
5380 if (set
5381 && (GET_CODE (set) != SET
5382 || !rtx_equal_p (SET_DEST (set), op0)))
5383 break;
5384
5385 /* If this is setting OP0, get what it sets it to if it looks
5386 relevant. */
5387 if (set)
5388 {
5389 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5390 #ifdef FLOAT_STORE_FLAG_VALUE
5391 REAL_VALUE_TYPE fsfv;
5392 #endif
5393
5394 /* ??? We may not combine comparisons done in a CCmode with
5395 comparisons not done in a CCmode. This is to aid targets
5396 like Alpha that have an IEEE compliant EQ instruction, and
5397 a non-IEEE compliant BEQ instruction. The use of CCmode is
5398 actually artificial, simply to prevent the combination, but
5399 should not affect other platforms.
5400
5401 However, we must allow VOIDmode comparisons to match either
5402 CCmode or non-CCmode comparison, because some ports have
5403 modeless comparisons inside branch patterns.
5404
5405 ??? This mode check should perhaps look more like the mode check
5406 in simplify_comparison in combine. */
5407 if (((GET_MODE_CLASS (mode) == MODE_CC)
5408 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5409 && mode != VOIDmode
5410 && inner_mode != VOIDmode)
5411 break;
5412 if (GET_CODE (SET_SRC (set)) == COMPARE
5413 || (((code == NE
5414 || (code == LT
5415 && val_signbit_known_set_p (inner_mode,
5416 STORE_FLAG_VALUE))
5417 #ifdef FLOAT_STORE_FLAG_VALUE
5418 || (code == LT
5419 && SCALAR_FLOAT_MODE_P (inner_mode)
5420 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5421 REAL_VALUE_NEGATIVE (fsfv)))
5422 #endif
5423 ))
5424 && COMPARISON_P (SET_SRC (set))))
5425 x = SET_SRC (set);
5426 else if (((code == EQ
5427 || (code == GE
5428 && val_signbit_known_set_p (inner_mode,
5429 STORE_FLAG_VALUE))
5430 #ifdef FLOAT_STORE_FLAG_VALUE
5431 || (code == GE
5432 && SCALAR_FLOAT_MODE_P (inner_mode)
5433 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5434 REAL_VALUE_NEGATIVE (fsfv)))
5435 #endif
5436 ))
5437 && COMPARISON_P (SET_SRC (set)))
5438 {
5439 reverse_code = 1;
5440 x = SET_SRC (set);
5441 }
5442 else if ((code == EQ || code == NE)
5443 && GET_CODE (SET_SRC (set)) == XOR)
5444 /* Handle sequences like:
5445
5446 (set op0 (xor X Y))
5447 ...(eq|ne op0 (const_int 0))...
5448
5449 in which case:
5450
5451 (eq op0 (const_int 0)) reduces to (eq X Y)
5452 (ne op0 (const_int 0)) reduces to (ne X Y)
5453
5454 This is the form used by MIPS16, for example. */
5455 x = SET_SRC (set);
5456 else
5457 break;
5458 }
5459
5460 else if (reg_set_p (op0, prev))
5461 /* If this sets OP0, but not directly, we have to give up. */
5462 break;
5463
5464 if (x)
5465 {
5466 /* If the caller is expecting the condition to be valid at INSN,
5467 make sure X doesn't change before INSN. */
5468 if (valid_at_insn_p)
5469 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5470 break;
5471 if (COMPARISON_P (x))
5472 code = GET_CODE (x);
5473 if (reverse_code)
5474 {
5475 code = reversed_comparison_code (x, prev);
5476 if (code == UNKNOWN)
5477 return 0;
5478 reverse_code = 0;
5479 }
5480
5481 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5482 if (earliest)
5483 *earliest = prev;
5484 }
5485 }
5486
5487 /* If constant is first, put it last. */
5488 if (CONSTANT_P (op0))
5489 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5490
5491 /* If OP0 is the result of a comparison, we weren't able to find what
5492 was really being compared, so fail. */
5493 if (!allow_cc_mode
5494 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5495 return 0;
5496
5497 /* Canonicalize any ordered comparison with integers involving equality
5498 if we can do computations in the relevant mode and we do not
5499 overflow. */
5500
5501 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5502 && CONST_INT_P (op1)
5503 && GET_MODE (op0) != VOIDmode
5504 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5505 {
5506 HOST_WIDE_INT const_val = INTVAL (op1);
5507 unsigned HOST_WIDE_INT uconst_val = const_val;
5508 unsigned HOST_WIDE_INT max_val
5509 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5510
5511 switch (code)
5512 {
5513 case LE:
5514 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5515 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5516 break;
5517
5518 /* When cross-compiling, const_val might be sign-extended from
5519 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
5520 case GE:
5521 if ((const_val & max_val)
5522 != (HOST_WIDE_INT_1U
5523 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5524 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5525 break;
5526
5527 case LEU:
5528 if (uconst_val < max_val)
5529 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5530 break;
5531
5532 case GEU:
5533 if (uconst_val != 0)
5534 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5535 break;
5536
5537 default:
5538 break;
5539 }
5540 }
5541
5542 /* Never return CC0; return zero instead. */
5543 if (CC0_P (op0))
5544 return 0;
5545
5546 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5547 }
5548
5549 /* Given a jump insn JUMP, return the condition that will cause it to branch
5550 to its JUMP_LABEL. If the condition cannot be understood, or is an
5551 inequality floating-point comparison which needs to be reversed, 0 will
5552 be returned.
5553
5554 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5555 insn used in locating the condition was found. If a replacement test
5556 of the condition is desired, it should be placed in front of that
5557 insn and we will be sure that the inputs are still valid. If EARLIEST
5558 is null, the returned condition will be valid at INSN.
5559
5560 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5561 compare CC mode register.
5562
5563 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5564
5565 rtx
5566 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5567 int valid_at_insn_p)
5568 {
5569 rtx cond;
5570 int reverse;
5571 rtx set;
5572
5573 /* If this is not a standard conditional jump, we can't parse it. */
5574 if (!JUMP_P (jump)
5575 || ! any_condjump_p (jump))
5576 return 0;
5577 set = pc_set (jump);
5578
5579 cond = XEXP (SET_SRC (set), 0);
5580
5581 /* If this branches to JUMP_LABEL when the condition is false, reverse
5582 the condition. */
5583 reverse
5584 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5585 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5586
5587 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5588 allow_cc_mode, valid_at_insn_p);
5589 }
5590
5591 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5592 TARGET_MODE_REP_EXTENDED.
5593
5594 Note that we assume that the property of
5595 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5596 narrower than mode B. I.e., if A is a mode narrower than B then in
5597 order to be able to operate on it in mode B, mode A needs to
5598 satisfy the requirements set by the representation of mode B. */
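/* For instance, if the target declares SIGN_EXTEND for both
   HImode->SImode and SImode->DImode, the loop below records
   (32 - 16) + (64 - 32) = 48 in
   num_sign_bit_copies_in_rep[DImode][HImode].  */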
5599
5600 static void
5601 init_num_sign_bit_copies_in_rep (void)
5602 {
5603 machine_mode mode, in_mode;
5604
5605 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5606 in_mode = GET_MODE_WIDER_MODE (mode))
5607 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5608 mode = GET_MODE_WIDER_MODE (mode))
5609 {
5610 machine_mode i;
5611
5612 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5613 extends to the next widest mode. */
5614 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5615 || GET_MODE_WIDER_MODE (mode) == in_mode);
5616
5617 /* We are in in_mode. Count how many bits outside of mode
5618 have to be copies of the sign-bit. */
5619 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5620 {
5621 machine_mode wider = GET_MODE_WIDER_MODE (i);
5622
5623 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5624 /* We can only check sign-bit copies starting from the
5625 top-bit. In order to be able to check the bits we
5626 have already seen we pretend that subsequent bits
5627 have to be sign-bit copies too. */
5628 || num_sign_bit_copies_in_rep [in_mode][mode])
5629 num_sign_bit_copies_in_rep [in_mode][mode]
5630 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5631 }
5632 }
5633 }
5634
5635 /* Suppose that truncation from the machine mode of X to MODE is not a
5636 no-op. See if there is anything special about X so that we can
5637 assume it already contains a truncated value of MODE. */
5638
5639 bool
5640 truncated_to_mode (machine_mode mode, const_rtx x)
5641 {
5642 /* This register has already been used in MODE without explicit
5643 truncation. */
5644 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5645 return true;
5646
5647 /* See if we already satisfy the requirements of MODE. If yes we
5648 can just switch to MODE. */
5649 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5650 && (num_sign_bit_copies (x, GET_MODE (x))
5651 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5652 return true;
5653
5654 return false;
5655 }
5656 \f
5657 /* Return true if RTX code CODE has a single sequence of zero or more
5658 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5659 entry in that case. */
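/* For example, IF_THEN_ELSE has format "eee", so it gets start 0 and
   count 3, while any code whose format contains 'E' or 'V' (an rtvec)
   makes this function return false and is given a count of UCHAR_MAX
   by init_rtlanal below.  */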
5660
5661 static bool
5662 setup_reg_subrtx_bounds (unsigned int code)
5663 {
5664 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5665 unsigned int i = 0;
5666 for (; format[i] != 'e'; ++i)
5667 {
5668 if (!format[i])
5669 /* No subrtxes. Leave start and count as 0. */
5670 return true;
5671 if (format[i] == 'E' || format[i] == 'V')
5672 return false;
5673 }
5674
5675 /* Record the sequence of 'e's. */
5676 rtx_all_subrtx_bounds[code].start = i;
5677 do
5678 ++i;
5679 while (format[i] == 'e');
5680 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5681 /* rtl-iter.h relies on this. */
5682 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5683
5684 for (; format[i]; ++i)
5685 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5686 return false;
5687
5688 return true;
5689 }
5690
5691 /* Initialize rtx_all_subrtx_bounds. */
5692 void
5693 init_rtlanal (void)
5694 {
5695 int i;
5696 for (i = 0; i < NUM_RTX_CODE; i++)
5697 {
5698 if (!setup_reg_subrtx_bounds (i))
5699 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5700 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5701 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5702 }
5703
5704 init_num_sign_bit_copies_in_rep ();
5705 }
5706 \f
5707 /* Check whether this is a constant pool constant. */
5708 bool
5709 constant_pool_constant_p (rtx x)
5710 {
5711 x = avoid_constant_pool_reference (x);
5712 return CONST_DOUBLE_P (x);
5713 }
5714 \f
5715 /* If M is a bitmask that selects a field of low-order bits within an item but
5716 not the entire word, return the length of the field. Return -1 otherwise.
5717 M is used in machine mode MODE. */
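/* For example, in SImode m == 0x000000ff yields 8, since
   exact_log2 (0x100) == 8, while m == 0x00000ff0 yields -1 because
   the field does not start at bit 0.  */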
5718
5719 int
5720 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5721 {
5722 if (mode != VOIDmode)
5723 {
5724 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5725 return -1;
5726 m &= GET_MODE_MASK (mode);
5727 }
5728
5729 return exact_log2 (m + 1);
5730 }
5731
5732 /* Return the mode of MEM's address. */
5733
5734 machine_mode
5735 get_address_mode (rtx mem)
5736 {
5737 machine_mode mode;
5738
5739 gcc_assert (MEM_P (mem));
5740 mode = GET_MODE (XEXP (mem, 0));
5741 if (mode != VOIDmode)
5742 return mode;
5743 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5744 }
5745 \f
5746 /* Split up a CONST_DOUBLE or integer constant rtx
5747 into two rtx's for single words,
5748 storing in *FIRST the word that comes first in memory in the target
5749 and in *SECOND the other.
5750
5751 TODO: This function needs to be rewritten to work on any size
5752 integer. */
5753
5754 void
5755 split_double (rtx value, rtx *first, rtx *second)
5756 {
5757 if (CONST_INT_P (value))
5758 {
5759 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5760 {
5761 /* In this case the CONST_INT holds both target words.
5762 Extract the bits from it into two word-sized pieces.
5763 Sign extend each half to HOST_WIDE_INT. */
5764 unsigned HOST_WIDE_INT low, high;
5765 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5766 unsigned bits_per_word = BITS_PER_WORD;
5767
5768 /* Set sign_bit to the most significant bit of a word. */
5769 sign_bit = 1;
5770 sign_bit <<= bits_per_word - 1;
5771
5772 /* Set mask so that all bits of the word are set. We could
5773 have used 1 << BITS_PER_WORD instead of basing the
5774 calculation on sign_bit. However, on machines where
5775 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5776 compiler warning, even though the code would never be
5777 executed. */
5778 mask = sign_bit << 1;
5779 mask--;
5780
5781 /* Set sign_extend to the remaining high-order bits. */
5782 sign_extend = ~mask;
5783
5784 /* Pick the lower word and sign-extend it. */
5785 low = INTVAL (value);
5786 low &= mask;
5787 if (low & sign_bit)
5788 low |= sign_extend;
5789
5790 /* Pick the higher word, shifted to the least significant
5791 bits, and sign-extend it. */
5792 high = INTVAL (value);
5793 high >>= bits_per_word - 1;
5794 high >>= 1;
5795 high &= mask;
5796 if (high & sign_bit)
5797 high |= sign_extend;
5798
5799 /* Store the words in the target machine order. */
5800 if (WORDS_BIG_ENDIAN)
5801 {
5802 *first = GEN_INT (high);
5803 *second = GEN_INT (low);
5804 }
5805 else
5806 {
5807 *first = GEN_INT (low);
5808 *second = GEN_INT (high);
5809 }
5810 }
5811 else
5812 {
5813 /* The rule for using CONST_INT for a wider mode
5814 is that we regard the value as signed.
5815 So sign-extend it. */
5816 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5817 if (WORDS_BIG_ENDIAN)
5818 {
5819 *first = high;
5820 *second = value;
5821 }
5822 else
5823 {
5824 *first = value;
5825 *second = high;
5826 }
5827 }
5828 }
5829 else if (GET_CODE (value) == CONST_WIDE_INT)
5830 {
5831 /* All of this is scary code and needs to be converted to
5832 properly work with any size integer. */
5833 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5834 if (WORDS_BIG_ENDIAN)
5835 {
5836 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5837 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5838 }
5839 else
5840 {
5841 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5842 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5843 }
5844 }
5845 else if (!CONST_DOUBLE_P (value))
5846 {
5847 if (WORDS_BIG_ENDIAN)
5848 {
5849 *first = const0_rtx;
5850 *second = value;
5851 }
5852 else
5853 {
5854 *first = value;
5855 *second = const0_rtx;
5856 }
5857 }
5858 else if (GET_MODE (value) == VOIDmode
5859 /* This is the old way we did CONST_DOUBLE integers. */
5860 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5861 {
5862 /* In an integer, the words are defined as most and least significant.
5863 So order them by the target's convention. */
5864 if (WORDS_BIG_ENDIAN)
5865 {
5866 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5867 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5868 }
5869 else
5870 {
5871 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5872 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5873 }
5874 }
5875 else
5876 {
5877 long l[2];
5878
5879 /* Note, this converts the REAL_VALUE_TYPE to the target's
5880 format, splits up the floating point double and outputs
5881 exactly 32 bits of it into each of l[0] and l[1] --
5882 not necessarily BITS_PER_WORD bits. */
5883 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
5884
5885 /* If 32 bits is an entire word for the target, but not for the host,
5886 then sign-extend on the host so that the number will look the same
5887 way on the host that it would on the target. See for instance
5888 simplify_unary_operation. The #if is needed to avoid compiler
5889 warnings. */
5890
5891 #if HOST_BITS_PER_LONG > 32
5892 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5893 {
5894 if (l[0] & ((long) 1 << 31))
5895 l[0] |= ((unsigned long) (-1) << 32);
5896 if (l[1] & ((long) 1 << 31))
5897 l[1] |= ((unsigned long) (-1) << 32);
5898 }
5899 #endif
5900
5901 *first = GEN_INT (l[0]);
5902 *second = GEN_INT (l[1]);
5903 }
5904 }
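
/* Worked example of the masking arithmetic above (illustrative, not
   part of GCC, assuming a 64-bit HOST_WIDE_INT and 32-bit target
   words): the CONST_INT value 0xfffffffe00000003 splits into a low
   word of 3 and a high word of -2 once each half is sign-extended on
   the host.  The function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
split_double_example (void)
{
  unsigned HOST_WIDE_INT v
    = ((unsigned HOST_WIDE_INT) 0xfffffffe << 32) | 3;
  unsigned HOST_WIDE_INT sign_bit = (unsigned HOST_WIDE_INT) 1 << 31;
  unsigned HOST_WIDE_INT mask = (sign_bit << 1) - 1;    /* 0xffffffff */

  unsigned HOST_WIDE_INT low = v & mask;                /* 3 */
  unsigned HOST_WIDE_INT high = (v >> 32) & mask;       /* 0xfffffffe */
  if (high & sign_bit)
    high |= ~mask;              /* Sign-extend the upper half.  */

  gcc_checking_assert (low == 3 && (HOST_WIDE_INT) high == -2);
}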
5905
5906 /* Return true if X is a sign_extract or zero_extract from the least
5907 significant bit. */
5908
5909 static bool
5910 lsb_bitfield_op_p (rtx x)
5911 {
5912 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5913 {
5914 machine_mode mode = GET_MODE (XEXP (x, 0));
5915 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5916 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5917
5918 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5919 }
5920 return false;
5921 }
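
/* Illustrative sketch (not part of GCC): on a !BITS_BIG_ENDIAN target,
   (zero_extract:M (reg:M R) (const_int 8) (const_int 0)) extracts the
   eight least significant bits, so lsb_bitfield_op_p accepts it.  REG
   is assumed to have a scalar integer mode; the function name is
   hypothetical.  */

static bool ATTRIBUTE_UNUSED
lsb_bitfield_example (rtx reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx pos = GEN_INT (BITS_BIG_ENDIAN
                     ? GET_MODE_PRECISION (mode) - 8 : 0);
  rtx ext = gen_rtx_ZERO_EXTRACT (mode, reg, GEN_INT (8), pos);
  return lsb_bitfield_op_p (ext);
}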
5922
5923 /* Strip outer address "mutations" from LOC and return a pointer to the
5924 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5925 stripped expression there.
5926
5927 "Mutations" either convert between modes or apply some kind of
5928 extension, truncation or alignment. */
5929
5930 rtx *
5931 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5932 {
5933 for (;;)
5934 {
5935 enum rtx_code code = GET_CODE (*loc);
5936 if (GET_RTX_CLASS (code) == RTX_UNARY)
5937 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5938 used to convert between pointer sizes. */
5939 loc = &XEXP (*loc, 0);
5940 else if (lsb_bitfield_op_p (*loc))
5941 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5942 acts as a combined truncation and extension. */
5943 loc = &XEXP (*loc, 0);
5944 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5945 /* (and ... (const_int -X)) is used to align to X bytes. */
5946 loc = &XEXP (*loc, 0);
5947 else if (code == SUBREG
5948 && !OBJECT_P (SUBREG_REG (*loc))
5949 && subreg_lowpart_p (*loc))
5950 /* (subreg (operator ...) ...) inside an AND is used for mode
5951 conversion too. */
5952 loc = &SUBREG_REG (*loc);
5953 else
5954 return loc;
5955 if (outer_code)
5956 *outer_code = code;
5957 }
5958 }
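
/* Illustrative sketch (not part of GCC): stripping the alignment
   mutation from (and (reg R) (const_int -4)) yields the register
   itself.  The function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
strip_mutations_example (rtx reg)
{
  rtx addr = gen_rtx_AND (GET_MODE (reg), reg, GEN_INT (-4));
  rtx *inner = strip_address_mutations (&addr, NULL);
  gcc_checking_assert (*inner == reg);
}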
5959
5960 /* Return true if CODE applies some kind of scale. The scaled value
5961 is the first operand and the scale is the second. */
5962
5963 static bool
5964 binary_scale_code_p (enum rtx_code code)
5965 {
5966 return (code == MULT
5967 || code == ASHIFT
5968 /* Needed by ARM targets. */
5969 || code == ASHIFTRT
5970 || code == LSHIFTRT
5971 || code == ROTATE
5972 || code == ROTATERT);
5973 }
5974
5975 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5976 (see address_info). Return null otherwise. */
5977
5978 static rtx *
5979 get_base_term (rtx *inner)
5980 {
5981 if (GET_CODE (*inner) == LO_SUM)
5982 inner = strip_address_mutations (&XEXP (*inner, 0));
5983 if (REG_P (*inner)
5984 || MEM_P (*inner)
5985 || GET_CODE (*inner) == SUBREG
5986 || GET_CODE (*inner) == SCRATCH)
5987 return inner;
5988 return 0;
5989 }
5990
5991 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5992 (see address_info). Return null otherwise. */
5993
5994 static rtx *
5995 get_index_term (rtx *inner)
5996 {
5997 /* At present, only constant scales are allowed. */
5998 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5999 inner = strip_address_mutations (&XEXP (*inner, 0));
6000 if (REG_P (*inner)
6001 || MEM_P (*inner)
6002 || GET_CODE (*inner) == SUBREG
6003 || GET_CODE (*inner) == SCRATCH)
6004 return inner;
6005 return 0;
6006 }
6007
6008 /* Set the segment part of address INFO to LOC, given that INNER is the
6009 unmutated value. */
6010
6011 static void
6012 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6013 {
6014 gcc_assert (!info->segment);
6015 info->segment = loc;
6016 info->segment_term = inner;
6017 }
6018
6019 /* Set the base part of address INFO to LOC, given that INNER is the
6020 unmutated value. */
6021
6022 static void
6023 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6024 {
6025 gcc_assert (!info->base);
6026 info->base = loc;
6027 info->base_term = inner;
6028 }
6029
6030 /* Set the index part of address INFO to LOC, given that INNER is the
6031 unmutated value. */
6032
6033 static void
6034 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6035 {
6036 gcc_assert (!info->index);
6037 info->index = loc;
6038 info->index_term = inner;
6039 }
6040
6041 /* Set the displacement part of address INFO to LOC, given that INNER
6042 is the constant term. */
6043
6044 static void
6045 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6046 {
6047 gcc_assert (!info->disp);
6048 info->disp = loc;
6049 info->disp_term = inner;
6050 }
6051
6052 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
6053 rest of INFO accordingly. */
6054
6055 static void
6056 decompose_incdec_address (struct address_info *info)
6057 {
6058 info->autoinc_p = true;
6059
6060 rtx *base = &XEXP (*info->inner, 0);
6061 set_address_base (info, base, base);
6062 gcc_checking_assert (info->base == info->base_term);
6063
6064 /* These addresses are only valid when the size of the addressed
6065 value is known. */
6066 gcc_checking_assert (info->mode != VOIDmode);
6067 }
6068
6069 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
6070 of INFO accordingly. */
6071
6072 static void
6073 decompose_automod_address (struct address_info *info)
6074 {
6075 info->autoinc_p = true;
6076
6077 rtx *base = &XEXP (*info->inner, 0);
6078 set_address_base (info, base, base);
6079 gcc_checking_assert (info->base == info->base_term);
6080
6081 rtx plus = XEXP (*info->inner, 1);
6082 gcc_assert (GET_CODE (plus) == PLUS);
6083
6084 info->base_term2 = &XEXP (plus, 0);
6085 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6086
6087 rtx *step = &XEXP (plus, 1);
6088 rtx *inner_step = strip_address_mutations (step);
6089 if (CONSTANT_P (*inner_step))
6090 set_address_disp (info, step, inner_step);
6091 else
6092 set_address_index (info, step, inner_step);
6093 }
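
/* Illustrative sketch (not part of GCC): the PRE_MODIFY address
   (pre_modify (reg B) (plus (reg B) (const_int 16))) decomposes into
   base B with a constant displacement of 16.  BASE_REG is assumed to
   be a Pmode register; the function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
automod_example (rtx base_reg)
{
  rtx addr = gen_rtx_PRE_MODIFY (Pmode, base_reg,
                                 gen_rtx_PLUS (Pmode, base_reg,
                                               GEN_INT (16)));
  struct address_info info;
  decompose_address (&info, &addr, SImode, ADDR_SPACE_GENERIC, MEM);
  gcc_checking_assert (info.autoinc_p && INTVAL (*info.disp_term) == 16);
}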
6094
6095 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6096 values in [PTR, END). Return a pointer to the end of the used array. */
6097
6098 static rtx **
6099 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6100 {
6101 rtx x = *loc;
6102 if (GET_CODE (x) == PLUS)
6103 {
6104 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6105 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6106 }
6107 else
6108 {
6109 gcc_assert (ptr != end);
6110 *ptr++ = loc;
6111 }
6112 return ptr;
6113 }
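
/* Illustrative sketch (not part of GCC): the nested sum
   (plus (plus A B) (const_int 16)) flattens into three operand
   pointers.  A and B are assumed to share a mode; the function name
   is hypothetical.  */

static void ATTRIBUTE_UNUSED
extract_plus_example (rtx a, rtx b)
{
  rtx sum = gen_rtx_PLUS (GET_MODE (a),
                          gen_rtx_PLUS (GET_MODE (a), a, b),
                          GEN_INT (16));
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (&sum, ops, ops + 4) - ops;
  gcc_checking_assert (n_ops == 3);
}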
6114
6115 /* Evaluate the likelihood of X being a base or index value, returning
6116 positive if it is likely to be a base, negative if it is likely to be
6117 an index, and 0 if we can't tell. Make the magnitude of the return
6118 value reflect the amount of confidence we have in the answer.
6119
6120 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6121
6122 static int
6123 baseness (rtx x, machine_mode mode, addr_space_t as,
6124 enum rtx_code outer_code, enum rtx_code index_code)
6125 {
6126 /* Believe *_POINTER unless the address shape requires otherwise. */
6127 if (REG_P (x) && REG_POINTER (x))
6128 return 2;
6129 if (MEM_P (x) && MEM_POINTER (x))
6130 return 2;
6131
6132 if (REG_P (x) && HARD_REGISTER_P (x))
6133 {
6134 /* X is a hard register. If it only fits one of the base
6135 or index classes, choose that interpretation. */
6136 int regno = REGNO (x);
6137 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6138 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6139 if (base_p != index_p)
6140 return base_p ? 1 : -1;
6141 }
6142 return 0;
6143 }
6144
6145 /* INFO->INNER describes a normal, non-automodified address.
6146 Fill in the rest of INFO accordingly. */
6147
6148 static void
6149 decompose_normal_address (struct address_info *info)
6150 {
6151 /* Treat the address as the sum of up to four values. */
6152 rtx *ops[4];
6153 size_t n_ops = extract_plus_operands (info->inner, ops,
6154 ops + ARRAY_SIZE (ops)) - ops;
6155
6156 /* If there is more than one component, any base component is in a PLUS. */
6157 if (n_ops > 1)
6158 info->base_outer_code = PLUS;
6159
6160 /* Try to classify each sum operand now. Leave those that could be
6161 either a base or an index in OPS. */
6162 rtx *inner_ops[4];
6163 size_t out = 0;
6164 for (size_t in = 0; in < n_ops; ++in)
6165 {
6166 rtx *loc = ops[in];
6167 rtx *inner = strip_address_mutations (loc);
6168 if (CONSTANT_P (*inner))
6169 set_address_disp (info, loc, inner);
6170 else if (GET_CODE (*inner) == UNSPEC)
6171 set_address_segment (info, loc, inner);
6172 else
6173 {
6174 /* The only other possibilities are a base or an index. */
6175 rtx *base_term = get_base_term (inner);
6176 rtx *index_term = get_index_term (inner);
6177 gcc_assert (base_term || index_term);
6178 if (!base_term)
6179 set_address_index (info, loc, index_term);
6180 else if (!index_term)
6181 set_address_base (info, loc, base_term);
6182 else
6183 {
6184 gcc_assert (base_term == index_term);
6185 ops[out] = loc;
6186 inner_ops[out] = base_term;
6187 ++out;
6188 }
6189 }
6190 }
6191
6192 /* Classify the remaining OPS members as bases and indexes. */
6193 if (out == 1)
6194 {
6195 /* If we haven't seen a base or an index yet, assume that this is
6196 the base. If we were confident that another term was the base
6197 or index, treat the remaining operand as the other kind. */
6198 if (!info->base)
6199 set_address_base (info, ops[0], inner_ops[0]);
6200 else
6201 set_address_index (info, ops[0], inner_ops[0]);
6202 }
6203 else if (out == 2)
6204 {
6205 /* In the event of a tie, assume the base comes first. */
6206 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6207 GET_CODE (*ops[1]))
6208 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6209 GET_CODE (*ops[0])))
6210 {
6211 set_address_base (info, ops[0], inner_ops[0]);
6212 set_address_index (info, ops[1], inner_ops[1]);
6213 }
6214 else
6215 {
6216 set_address_base (info, ops[1], inner_ops[1]);
6217 set_address_index (info, ops[0], inner_ops[0]);
6218 }
6219 }
6220 else
6221 gcc_assert (out == 0);
6222 }
6223
6224 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6225 or VOIDmode if not known. AS is the address space associated with LOC.
6226 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6227
6228 void
6229 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6230 addr_space_t as, enum rtx_code outer_code)
6231 {
6232 memset (info, 0, sizeof (*info));
6233 info->mode = mode;
6234 info->as = as;
6235 info->addr_outer_code = outer_code;
6236 info->outer = loc;
6237 info->inner = strip_address_mutations (loc, &outer_code);
6238 info->base_outer_code = outer_code;
6239 switch (GET_CODE (*info->inner))
6240 {
6241 case PRE_DEC:
6242 case PRE_INC:
6243 case POST_DEC:
6244 case POST_INC:
6245 decompose_incdec_address (info);
6246 break;
6247
6248 case PRE_MODIFY:
6249 case POST_MODIFY:
6250 decompose_automod_address (info);
6251 break;
6252
6253 default:
6254 decompose_normal_address (info);
6255 break;
6256 }
6257 }
6258
6259 /* Describe address operand LOC in INFO. */
6260
6261 void
6262 decompose_lea_address (struct address_info *info, rtx *loc)
6263 {
6264 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6265 }
6266
6267 /* Describe the address of MEM X in INFO. */
6268
6269 void
6270 decompose_mem_address (struct address_info *info, rtx x)
6271 {
6272 gcc_assert (MEM_P (x));
6273 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6274 MEM_ADDR_SPACE (x), MEM);
6275 }
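
/* Illustrative sketch (not part of GCC): decomposing the address of
   (mem:SI (plus (reg B) (const_int 8))) classifies B as the base and
   8 as the displacement.  BASE_REG is assumed to be a Pmode register;
   the function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
decompose_mem_example (rtx base_reg)
{
  rtx mem = gen_rtx_MEM (SImode,
                         gen_rtx_PLUS (Pmode, base_reg, GEN_INT (8)));
  struct address_info info;
  decompose_mem_address (&info, mem);
  gcc_checking_assert (*info.base_term == base_reg
                       && INTVAL (*info.disp_term) == 8);
}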
6276
6277 /* Update INFO after a change to the address it describes. */
6278
6279 void
6280 update_address (struct address_info *info)
6281 {
6282 decompose_address (info, info->outer, info->mode, info->as,
6283 info->addr_outer_code);
6284 }
6285
6286 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6287 more complicated than that. */
6288
6289 HOST_WIDE_INT
6290 get_index_scale (const struct address_info *info)
6291 {
6292 rtx index = *info->index;
6293 if (GET_CODE (index) == MULT
6294 && CONST_INT_P (XEXP (index, 1))
6295 && info->index_term == &XEXP (index, 0))
6296 return INTVAL (XEXP (index, 1));
6297
6298 if (GET_CODE (index) == ASHIFT
6299 && CONST_INT_P (XEXP (index, 1))
6300 && info->index_term == &XEXP (index, 0))
6301 return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6302
6303 if (info->index == info->index_term)
6304 return 1;
6305
6306 return 0;
6307 }
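
/* Illustrative sketch (not part of GCC): in the address
   (plus (reg B) (mult (reg I) (const_int 4))), index I is scaled by 4;
   an equivalent (ashift (reg I) (const_int 2)) would report the same
   scale.  BASE_REG and INDEX_REG are assumed to be Pmode registers;
   the function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
index_scale_example (rtx base_reg, rtx index_reg)
{
  rtx addr = gen_rtx_PLUS (Pmode, base_reg,
                           gen_rtx_MULT (Pmode, index_reg, GEN_INT (4)));
  struct address_info info;
  decompose_lea_address (&info, &addr);
  gcc_checking_assert (get_index_scale (&info) == 4);
}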
6308
6309 /* Return the "index code" of INFO, in the form required by
6310 ok_for_base_p_1. */
6311
6312 enum rtx_code
6313 get_index_code (const struct address_info *info)
6314 {
6315 if (info->index)
6316 return GET_CODE (*info->index);
6317
6318 if (info->disp)
6319 return GET_CODE (*info->disp);
6320
6321 return SCRATCH;
6322 }
6323
6324 /* Return true if RTL X contains a SYMBOL_REF. */
6325
6326 bool
6327 contains_symbol_ref_p (const_rtx x)
6328 {
6329 subrtx_iterator::array_type array;
6330 FOR_EACH_SUBRTX (iter, array, x, ALL)
6331 if (SYMBOL_REF_P (*iter))
6332 return true;
6333
6334 return false;
6335 }
6336
6337 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF. */
6338
6339 bool
6340 contains_symbolic_reference_p (const_rtx x)
6341 {
6342 subrtx_iterator::array_type array;
6343 FOR_EACH_SUBRTX (iter, array, x, ALL)
6344 if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6345 return true;
6346
6347 return false;
6348 }
6349
6350 /* Return true if X contains a thread-local symbol. */
6351
6352 bool
6353 tls_referenced_p (const_rtx x)
6354 {
6355 if (!targetm.have_tls)
6356 return false;
6357
6358 subrtx_iterator::array_type array;
6359 FOR_EACH_SUBRTX (iter, array, x, ALL)
6360 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6361 return true;
6362 return false;
6363 }