1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "insn-config.h"
33 #include "regs.h"
34 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
35 #include "recog.h"
36 #include "addresses.h"
37 #include "rtl-iter.h"
38 #include "hard-reg-set.h"
39 #include "function-abi.h"
40
41 /* Forward declarations */
42 static void set_of_1 (rtx, const_rtx, void *);
43 static bool covers_regno_p (const_rtx, unsigned int);
44 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
45 static int computed_jump_p_1 (const_rtx);
46 static void parms_set (rtx, const_rtx, void *);
47
48 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
49 const_rtx, machine_mode,
50 unsigned HOST_WIDE_INT);
51 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
52 const_rtx, machine_mode,
53 unsigned HOST_WIDE_INT);
54 static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
55 const_rtx, machine_mode,
56 unsigned int);
57 static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
58 const_rtx, machine_mode,
59 unsigned int);
60
61 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
62 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
63
64 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
65 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
66 SIGN_EXTEND then while narrowing we also have to enforce the
67 representation and sign-extend the value to mode DESTINATION_REP.
68
69 If the value is already sign-extended to DESTINATION_REP mode we
70 can just switch to DESTINATION mode on it. For each pair of
71 integral modes SOURCE and DESTINATION, when truncating from SOURCE
72 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
73 contains the number of high-order bits in SOURCE that have to be
74 copies of the sign-bit so that we can do this mode-switch to
75 DESTINATION. */
76
77 static unsigned int
78 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
79 \f
80 /* Store X into index I of ARRAY. ARRAY is known to have at least I
81 elements. Return the new base of ARRAY. */
82
83 template <typename T>
84 typename T::value_type *
85 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
86 value_type *base,
87 size_t i, value_type x)
88 {
89 if (base == array.stack)
90 {
91 if (i < LOCAL_ELEMS)
92 {
93 base[i] = x;
94 return base;
95 }
96 gcc_checking_assert (i == LOCAL_ELEMS);
97 /* A previous iteration might also have moved from the stack to the
98 heap, in which case the heap array will already be big enough. */
99 if (vec_safe_length (array.heap) <= i)
100 vec_safe_grow (array.heap, i + 1);
101 base = array.heap->address ();
102 memcpy (base, array.stack, sizeof (array.stack));
103 base[LOCAL_ELEMS] = x;
104 return base;
105 }
106 unsigned int length = array.heap->length ();
107 if (length > i)
108 {
109 gcc_checking_assert (base == array.heap->address ());
110 base[i] = x;
111 return base;
112 }
113 else
114 {
115 gcc_checking_assert (i == length);
116 vec_safe_push (array.heap, x);
117 return array.heap->address ();
118 }
119 }
120
121 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
122 number of elements added to the worklist. */
123
124 template <typename T>
125 size_t
126 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
127 value_type *base,
128 size_t end, rtx_type x)
129 {
130 enum rtx_code code = GET_CODE (x);
131 const char *format = GET_RTX_FORMAT (code);
132 size_t orig_end = end;
133 if (__builtin_expect (INSN_P (x), false))
134 {
135 /* Put the pattern at the top of the queue, since that's what
136 we're likely to want most. It also allows for the SEQUENCE
137 code below. */
138 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
139 if (format[i] == 'e')
140 {
141 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
142 if (__builtin_expect (end < LOCAL_ELEMS, true))
143 base[end++] = subx;
144 else
145 base = add_single_to_queue (array, base, end++, subx);
146 }
147 }
148 else
149 for (int i = 0; format[i]; ++i)
150 if (format[i] == 'e')
151 {
152 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
153 if (__builtin_expect (end < LOCAL_ELEMS, true))
154 base[end++] = subx;
155 else
156 base = add_single_to_queue (array, base, end++, subx);
157 }
158 else if (format[i] == 'E')
159 {
160 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
161 rtx *vec = x->u.fld[i].rt_rtvec->elem;
162 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
163 for (unsigned int j = 0; j < length; j++)
164 base[end++] = T::get_value (vec[j]);
165 else
166 for (unsigned int j = 0; j < length; j++)
167 base = add_single_to_queue (array, base, end++,
168 T::get_value (vec[j]));
169 if (code == SEQUENCE && end == length)
170 /* If the subrtxes of the sequence fill the entire array then
171 we know that no other parts of a containing insn are queued.
172 The caller is therefore iterating over the sequence as a
173 PATTERN (...), so we also want the patterns of the
174 subinstructions. */
175 for (unsigned int j = 0; j < length; j++)
176 {
177 typename T::rtx_type x = T::get_rtx (base[j]);
178 if (INSN_P (x))
179 base[j] = T::get_value (PATTERN (x));
180 }
181 }
182 return end - orig_end;
183 }
184
185 template <typename T>
186 void
187 generic_subrtx_iterator <T>::free_array (array_type &array)
188 {
189 vec_free (array.heap);
190 }
191
192 template <typename T>
193 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
194
195 template class generic_subrtx_iterator <const_rtx_accessor>;
196 template class generic_subrtx_iterator <rtx_var_accessor>;
197 template class generic_subrtx_iterator <rtx_ptr_accessor>;
198
199 /* Return 1 if the value of X is unstable
200 (would be different at a different point in the program).
201 The frame pointer, arg pointer, etc. are considered stable
202 (within one function) and so is anything marked `unchanging'. */
203
204 int
205 rtx_unstable_p (const_rtx x)
206 {
207 const RTX_CODE code = GET_CODE (x);
208 int i;
209 const char *fmt;
210
211 switch (code)
212 {
213 case MEM:
214 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
215
216 case CONST:
217 CASE_CONST_ANY:
218 case SYMBOL_REF:
219 case LABEL_REF:
220 return 0;
221
222 case REG:
223 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
224 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
225 /* The arg pointer varies if it is not a fixed register. */
226 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
227 return 0;
228 /* ??? When call-clobbered, the value is stable modulo the restore
229 that must happen after a call. This currently screws up local-alloc
230 into believing that the restore is not needed. */
231 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
232 return 0;
233 return 1;
234
235 case ASM_OPERANDS:
236 if (MEM_VOLATILE_P (x))
237 return 1;
238
239 /* Fall through. */
240
241 default:
242 break;
243 }
244
245 fmt = GET_RTX_FORMAT (code);
246 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
247 if (fmt[i] == 'e')
248 {
249 if (rtx_unstable_p (XEXP (x, i)))
250 return 1;
251 }
252 else if (fmt[i] == 'E')
253 {
254 int j;
255 for (j = 0; j < XVECLEN (x, i); j++)
256 if (rtx_unstable_p (XVECEXP (x, i, j)))
257 return 1;
258 }
259
260 return 0;
261 }
262
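/* Illustrative usage sketch (not part of the original file): a caller that
   wants to reuse a loaded value at a different program point can test the
   MEM with rtx_unstable_p.  The helper below is hypothetical and guarded
   out; MEM_P is the standard rtl.h predicate.  */
#if 0
static bool
stable_memory_value_p (const_rtx x)
{
  /* A stable MEM is read-only and has an address that cannot change
     within the function, so the value read is the same everywhere.  */
  return MEM_P (x) && !rtx_unstable_p (x);
}
#endif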
263 /* Return 1 if X has a value that can vary even between two
264 executions of the program. 0 means X can be compared reliably
265 against certain constants or near-constants.
266 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
267 zero, we are slightly more conservative.
268 The frame pointer and the arg pointer are considered constant. */
269
270 bool
271 rtx_varies_p (const_rtx x, bool for_alias)
272 {
273 RTX_CODE code;
274 int i;
275 const char *fmt;
276
277 if (!x)
278 return 0;
279
280 code = GET_CODE (x);
281 switch (code)
282 {
283 case MEM:
284 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
285
286 case CONST:
287 CASE_CONST_ANY:
288 case SYMBOL_REF:
289 case LABEL_REF:
290 return 0;
291
292 case REG:
293 /* Note that we have to test for the actual rtx used for the frame
294 and arg pointers and not just the register number in case we have
295 eliminated the frame and/or arg pointer and are using it
296 for pseudos. */
297 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
298 /* The arg pointer varies if it is not a fixed register. */
299 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
300 return 0;
301 if (x == pic_offset_table_rtx
302 /* ??? When call-clobbered, the value is stable modulo the restore
303 that must happen after a call. This currently screws up
304 local-alloc into believing that the restore is not needed, so we
305 must return 0 only if we are called from alias analysis. */
306 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
307 return 0;
308 return 1;
309
310 case LO_SUM:
311 /* The operand 0 of a LO_SUM is considered constant
312 (in fact it is related specifically to operand 1)
313 during alias analysis. */
314 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
315 || rtx_varies_p (XEXP (x, 1), for_alias);
316
317 case ASM_OPERANDS:
318 if (MEM_VOLATILE_P (x))
319 return 1;
320
321 /* Fall through. */
322
323 default:
324 break;
325 }
326
327 fmt = GET_RTX_FORMAT (code);
328 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
329 if (fmt[i] == 'e')
330 {
331 if (rtx_varies_p (XEXP (x, i), for_alias))
332 return 1;
333 }
334 else if (fmt[i] == 'E')
335 {
336 int j;
337 for (j = 0; j < XVECLEN (x, i); j++)
338 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
339 return 1;
340 }
341
342 return 0;
343 }
344
345 /* Compute an approximation for the offset between the register
346 FROM and TO for the current function, as it was at the start
347 of the routine. */
348
349 static poly_int64
350 get_initial_register_offset (int from, int to)
351 {
352 static const struct elim_table_t
353 {
354 const int from;
355 const int to;
356 } table[] = ELIMINABLE_REGS;
357 poly_int64 offset1, offset2;
358 unsigned int i, j;
359
360 if (to == from)
361 return 0;
362
363 /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
364 is completed, but we need to give at least an estimate for the stack
365 pointer based on the frame size. */
366 if (!epilogue_completed)
367 {
368 offset1 = crtl->outgoing_args_size + get_frame_size ();
369 #if !STACK_GROWS_DOWNWARD
370 offset1 = - offset1;
371 #endif
372 if (to == STACK_POINTER_REGNUM)
373 return offset1;
374 else if (from == STACK_POINTER_REGNUM)
375 return - offset1;
376 else
377 return 0;
378 }
379
380 for (i = 0; i < ARRAY_SIZE (table); i++)
381 if (table[i].from == from)
382 {
383 if (table[i].to == to)
384 {
385 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
386 offset1);
387 return offset1;
388 }
389 for (j = 0; j < ARRAY_SIZE (table); j++)
390 {
391 if (table[j].to == to
392 && table[j].from == table[i].to)
393 {
394 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
395 offset1);
396 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
397 offset2);
398 return offset1 + offset2;
399 }
400 if (table[j].from == to
401 && table[j].to == table[i].to)
402 {
403 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
404 offset1);
405 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
406 offset2);
407 return offset1 - offset2;
408 }
409 }
410 }
411 else if (table[i].to == from)
412 {
413 if (table[i].from == to)
414 {
415 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
416 offset1);
417 return - offset1;
418 }
419 for (j = 0; j < ARRAY_SIZE (table); j++)
420 {
421 if (table[j].to == to
422 && table[j].from == table[i].from)
423 {
424 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
425 offset1);
426 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
427 offset2);
428 return - offset1 + offset2;
429 }
430 if (table[j].from == to
431 && table[j].to == table[i].from)
432 {
433 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
434 offset1);
435 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
436 offset2);
437 return - offset1 - offset2;
438 }
439 }
440 }
441
442 /* If the requested register combination was not found,
443 try a simpler combination. */
444 if (from == ARG_POINTER_REGNUM)
445 return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
446 else if (to == ARG_POINTER_REGNUM)
447 return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
448 else if (from == HARD_FRAME_POINTER_REGNUM)
449 return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
450 else if (to == HARD_FRAME_POINTER_REGNUM)
451 return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
452 else
453 return 0;
454 }
455
456 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
457 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
458 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
459 references on strict alignment machines. */
460
461 static int
462 rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
463 machine_mode mode, bool unaligned_mems)
464 {
465 enum rtx_code code = GET_CODE (x);
466 gcc_checking_assert (mode == BLKmode || known_size_p (size));
467 poly_int64 const_x1;
468
469 /* The offset must be a multiple of the mode size if we are considering
470 unaligned memory references on strict alignment machines. */
471 if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
472 {
473 poly_int64 actual_offset = offset;
474
475 #ifdef SPARC_STACK_BOUNDARY_HACK
476 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
477 the real alignment of %sp. However, when it does this, the
478 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
479 if (SPARC_STACK_BOUNDARY_HACK
480 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
481 actual_offset -= STACK_POINTER_OFFSET;
482 #endif
483
484 if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
485 return 1;
486 }
487
488 switch (code)
489 {
490 case SYMBOL_REF:
491 if (SYMBOL_REF_WEAK (x))
492 return 1;
493 if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
494 {
495 tree decl;
496 poly_int64 decl_size;
497
498 if (maybe_lt (offset, 0))
499 return 1;
500 if (!known_size_p (size))
501 return maybe_ne (offset, 0);
502
503 /* If the size of the access or of the symbol is unknown,
504 assume the worst. */
505 decl = SYMBOL_REF_DECL (x);
506
507 /* Else check that the access is in bounds. TODO: restructure
508 expr_size/tree_expr_size/int_expr_size and just use the latter. */
509 if (!decl)
510 decl_size = -1;
511 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
512 {
513 if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
514 decl_size = -1;
515 }
516 else if (TREE_CODE (decl) == STRING_CST)
517 decl_size = TREE_STRING_LENGTH (decl);
518 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
519 decl_size = int_size_in_bytes (TREE_TYPE (decl));
520 else
521 decl_size = -1;
522
523 return (!known_size_p (decl_size) || known_eq (decl_size, 0)
524 ? maybe_ne (offset, 0)
525 : !known_subrange_p (offset, size, 0, decl_size));
526 }
527
528 return 0;
529
530 case LABEL_REF:
531 return 0;
532
533 case REG:
534 /* Stack references are assumed not to trap, but we need to deal with
535 nonsensical offsets. */
536 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
537 || x == stack_pointer_rtx
538 /* The arg pointer varies if it is not a fixed register. */
539 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
540 {
541 #ifdef RED_ZONE_SIZE
542 poly_int64 red_zone_size = RED_ZONE_SIZE;
543 #else
544 poly_int64 red_zone_size = 0;
545 #endif
546 poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
547 poly_int64 low_bound, high_bound;
548
549 if (!known_size_p (size))
550 return 1;
551
552 if (x == frame_pointer_rtx)
553 {
554 if (FRAME_GROWS_DOWNWARD)
555 {
556 high_bound = targetm.starting_frame_offset ();
557 low_bound = high_bound - get_frame_size ();
558 }
559 else
560 {
561 low_bound = targetm.starting_frame_offset ();
562 high_bound = low_bound + get_frame_size ();
563 }
564 }
565 else if (x == hard_frame_pointer_rtx)
566 {
567 poly_int64 sp_offset
568 = get_initial_register_offset (STACK_POINTER_REGNUM,
569 HARD_FRAME_POINTER_REGNUM);
570 poly_int64 ap_offset
571 = get_initial_register_offset (ARG_POINTER_REGNUM,
572 HARD_FRAME_POINTER_REGNUM);
573
574 #if STACK_GROWS_DOWNWARD
575 low_bound = sp_offset - red_zone_size - stack_boundary;
576 high_bound = ap_offset
577 + FIRST_PARM_OFFSET (current_function_decl)
578 #if !ARGS_GROW_DOWNWARD
579 + crtl->args.size
580 #endif
581 + stack_boundary;
582 #else
583 high_bound = sp_offset + red_zone_size + stack_boundary;
584 low_bound = ap_offset
585 + FIRST_PARM_OFFSET (current_function_decl)
586 #if ARGS_GROW_DOWNWARD
587 - crtl->args.size
588 #endif
589 - stack_boundary;
590 #endif
591 }
592 else if (x == stack_pointer_rtx)
593 {
594 poly_int64 ap_offset
595 = get_initial_register_offset (ARG_POINTER_REGNUM,
596 STACK_POINTER_REGNUM);
597
598 #if STACK_GROWS_DOWNWARD
599 low_bound = - red_zone_size - stack_boundary;
600 high_bound = ap_offset
601 + FIRST_PARM_OFFSET (current_function_decl)
602 #if !ARGS_GROW_DOWNWARD
603 + crtl->args.size
604 #endif
605 + stack_boundary;
606 #else
607 high_bound = red_zone_size + stack_boundary;
608 low_bound = ap_offset
609 + FIRST_PARM_OFFSET (current_function_decl)
610 #if ARGS_GROW_DOWNWARD
611 - crtl->args.size
612 #endif
613 - stack_boundary;
614 #endif
615 }
616 else
617 {
618 /* We assume that accesses are safe to at least the
619 next stack boundary.
620 Examples are varargs and __builtin_return_address. */
621 #if ARGS_GROW_DOWNWARD
622 high_bound = FIRST_PARM_OFFSET (current_function_decl)
623 + stack_boundary;
624 low_bound = FIRST_PARM_OFFSET (current_function_decl)
625 - crtl->args.size - stack_boundary;
626 #else
627 low_bound = FIRST_PARM_OFFSET (current_function_decl)
628 - stack_boundary;
629 high_bound = FIRST_PARM_OFFSET (current_function_decl)
630 + crtl->args.size + stack_boundary;
631 #endif
632 }
633
634 if (known_ge (offset, low_bound)
635 && known_le (offset, high_bound - size))
636 return 0;
637 return 1;
638 }
639 /* All of the virtual frame registers are stack references. */
640 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
641 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
642 return 0;
643 return 1;
644
645 case CONST:
646 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
647 mode, unaligned_mems);
648
649 case PLUS:
650 /* An address is assumed not to trap if:
651 - it is the pic register plus a const unspec without offset. */
652 if (XEXP (x, 0) == pic_offset_table_rtx
653 && GET_CODE (XEXP (x, 1)) == CONST
654 && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
655 && known_eq (offset, 0))
656 return 0;
657
658 /* - or it is an address that can't trap plus a constant integer. */
659 if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
660 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
661 size, mode, unaligned_mems))
662 return 0;
663
664 return 1;
665
666 case LO_SUM:
667 case PRE_MODIFY:
668 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
669 mode, unaligned_mems);
670
671 case PRE_DEC:
672 case PRE_INC:
673 case POST_DEC:
674 case POST_INC:
675 case POST_MODIFY:
676 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
677 mode, unaligned_mems);
678
679 default:
680 break;
681 }
682
683 /* If it isn't one of the cases above, it can cause a trap. */
684 return 1;
685 }
686
687 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
688
689 int
690 rtx_addr_can_trap_p (const_rtx x)
691 {
692 return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
693 }
694
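/* Illustrative usage sketch (not part of the original file): a simplified
   version of the test a pass might apply before executing a load
   speculatively.  Real callers also consider traps from the operation
   itself; this only looks at the address.  The helper is hypothetical
   and guarded out.  */
#if 0
static bool
speculative_load_ok_p (const_rtx mem)
{
  /* Volatile accesses and addresses that may fault must stay guarded.  */
  return MEM_P (mem)
	 && !MEM_VOLATILE_P (mem)
	 && !rtx_addr_can_trap_p (XEXP (mem, 0));
}
#endif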
695 /* Return true if X contains a MEM subrtx. */
696
697 bool
698 contains_mem_rtx_p (rtx x)
699 {
700 subrtx_iterator::array_type array;
701 FOR_EACH_SUBRTX (iter, array, x, ALL)
702 if (MEM_P (*iter))
703 return true;
704
705 return false;
706 }
707
708 /* Return true if X is an address that is known to not be zero. */
709
710 bool
711 nonzero_address_p (const_rtx x)
712 {
713 const enum rtx_code code = GET_CODE (x);
714
715 switch (code)
716 {
717 case SYMBOL_REF:
718 return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
719
720 case LABEL_REF:
721 return true;
722
723 case REG:
724 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
725 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
726 || x == stack_pointer_rtx
727 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
728 return true;
729 /* All of the virtual frame registers are stack references. */
730 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
731 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
732 return true;
733 return false;
734
735 case CONST:
736 return nonzero_address_p (XEXP (x, 0));
737
738 case PLUS:
739 /* Handle PIC references. */
740 if (XEXP (x, 0) == pic_offset_table_rtx
741 && CONSTANT_P (XEXP (x, 1)))
742 return true;
743 return false;
744
745 case PRE_MODIFY:
746 /* Similar to the above; allow positive offsets. Further, since
747 auto-inc is only allowed in memories, the register must be a
748 pointer. */
749 if (CONST_INT_P (XEXP (x, 1))
750 && INTVAL (XEXP (x, 1)) > 0)
751 return true;
752 return nonzero_address_p (XEXP (x, 0));
753
754 case PRE_INC:
755 /* Similarly. Further, the offset is always positive. */
756 return true;
757
758 case PRE_DEC:
759 case POST_DEC:
760 case POST_INC:
761 case POST_MODIFY:
762 return nonzero_address_p (XEXP (x, 0));
763
764 case LO_SUM:
765 return nonzero_address_p (XEXP (x, 1));
766
767 default:
768 break;
769 }
770
771 /* If it isn't one of the cases above, it might be zero. */
772 return false;
773 }
774
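/* Illustrative usage sketch (not part of the original file): when an
   address is provably nonzero, a comparison of it against zero folds to
   false.  The helper is hypothetical; const0_rtx is the usual shared
   constant.  */
#if 0
static rtx
fold_addr_eq_zero (rtx addr)
{
  /* (eq ADDR 0) is known false when the address cannot be zero;
     return NULL_RTX when nothing is known.  */
  return nonzero_address_p (addr) ? const0_rtx : NULL_RTX;
}
#endif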
775 /* Return 1 if X refers to a memory location whose address
776 cannot be compared reliably with constant addresses,
777 or if X refers to a BLKmode memory object.
778 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
779 zero, we are slightly more conservative. */
780
781 bool
782 rtx_addr_varies_p (const_rtx x, bool for_alias)
783 {
784 enum rtx_code code;
785 int i;
786 const char *fmt;
787
788 if (x == 0)
789 return 0;
790
791 code = GET_CODE (x);
792 if (code == MEM)
793 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
794
795 fmt = GET_RTX_FORMAT (code);
796 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
797 if (fmt[i] == 'e')
798 {
799 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
800 return 1;
801 }
802 else if (fmt[i] == 'E')
803 {
804 int j;
805 for (j = 0; j < XVECLEN (x, i); j++)
806 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
807 return 1;
808 }
809 return 0;
810 }
811 \f
812 /* Return the CALL in INSN's pattern, if there is one. */
813
814 rtx
815 get_call_rtx_from (const rtx_insn *insn)
816 {
817 rtx x = PATTERN (insn);
818 if (GET_CODE (x) == PARALLEL)
819 x = XVECEXP (x, 0, 0);
820 if (GET_CODE (x) == SET)
821 x = SET_SRC (x);
822 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
823 return x;
824 return NULL_RTX;
825 }
826
827 /* Get the declaration of the function called by INSN. */
828
829 tree
830 get_call_fndecl (const rtx_insn *insn)
831 {
832 rtx note, datum;
833
834 note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX);
835 if (note == NULL_RTX)
836 return NULL_TREE;
837
838 datum = XEXP (note, 0);
839 if (datum != NULL_RTX)
840 return SYMBOL_REF_DECL (datum);
841
842 return NULL_TREE;
843 }
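/* Illustrative usage sketch (not part of the original file): looking up a
   property of the callee.  get_call_fndecl returns NULL_TREE when the
   call carries no REG_CALL_DECL note, so callers must tolerate that.
   The helper is hypothetical; lookup_attribute/DECL_ATTRIBUTES come from
   the tree headers rather than anything this file provides.  */
#if 0
static bool
callee_has_attribute_p (const rtx_insn *insn, const char *attr_name)
{
  tree fndecl = get_call_fndecl (insn);
  return (fndecl != NULL_TREE
	  && lookup_attribute (attr_name,
			       DECL_ATTRIBUTES (fndecl)) != NULL_TREE);
}
#endif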
844 \f
845 /* Return the value of the integer term in X, if one is apparent;
846 otherwise return 0.
847 Only obvious integer terms are detected.
848 This is used in cse.c with the `related_value' field. */
849
850 HOST_WIDE_INT
851 get_integer_term (const_rtx x)
852 {
853 if (GET_CODE (x) == CONST)
854 x = XEXP (x, 0);
855
856 if (GET_CODE (x) == MINUS
857 && CONST_INT_P (XEXP (x, 1)))
858 return - INTVAL (XEXP (x, 1));
859 if (GET_CODE (x) == PLUS
860 && CONST_INT_P (XEXP (x, 1)))
861 return INTVAL (XEXP (x, 1));
862 return 0;
863 }
864
865 /* If X is a constant, return the value sans apparent integer term;
866 otherwise return 0.
867 Only obvious integer terms are detected. */
868
869 rtx
870 get_related_value (const_rtx x)
871 {
872 if (GET_CODE (x) != CONST)
873 return 0;
874 x = XEXP (x, 0);
875 if (GET_CODE (x) == PLUS
876 && CONST_INT_P (XEXP (x, 1)))
877 return XEXP (x, 0);
878 else if (GET_CODE (x) == MINUS
879 && CONST_INT_P (XEXP (x, 1)))
880 return XEXP (x, 0);
881 return 0;
882 }
883 \f
884 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
885 to somewhere in the same object or object_block as SYMBOL. */
886
887 bool
888 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
889 {
890 tree decl;
891
892 if (GET_CODE (symbol) != SYMBOL_REF)
893 return false;
894
895 if (offset == 0)
896 return true;
897
898 if (offset > 0)
899 {
900 if (CONSTANT_POOL_ADDRESS_P (symbol)
901 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
902 return true;
903
904 decl = SYMBOL_REF_DECL (symbol);
905 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
906 return true;
907 }
908
909 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
910 && SYMBOL_REF_BLOCK (symbol)
911 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
912 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
913 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
914 return true;
915
916 return false;
917 }
918
919 /* Split X into a base and a constant offset, storing them in *BASE_OUT
920 and *OFFSET_OUT respectively. */
921
922 void
923 split_const (rtx x, rtx *base_out, rtx *offset_out)
924 {
925 if (GET_CODE (x) == CONST)
926 {
927 x = XEXP (x, 0);
928 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
929 {
930 *base_out = XEXP (x, 0);
931 *offset_out = XEXP (x, 1);
932 return;
933 }
934 }
935 *base_out = x;
936 *offset_out = const0_rtx;
937 }
938
939 /* Express integer value X as some value Y plus a polynomial offset,
940 where Y is either const0_rtx, X or something within X (as opposed
941 to a new rtx). Return the Y and store the offset in *OFFSET_OUT. */
942
943 rtx
944 strip_offset (rtx x, poly_int64_pod *offset_out)
945 {
946 rtx base = const0_rtx;
947 rtx test = x;
948 if (GET_CODE (test) == CONST)
949 test = XEXP (test, 0);
950 if (GET_CODE (test) == PLUS)
951 {
952 base = XEXP (test, 0);
953 test = XEXP (test, 1);
954 }
955 if (poly_int_rtx_p (test, offset_out))
956 return base;
957 *offset_out = 0;
958 return x;
959 }
960
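/* Illustrative usage sketch (not part of the original file): decomposing a
   (const (plus (symbol_ref "x") (const_int 12))) address with the two
   helpers above.  The function below is hypothetical and guarded out.  */
#if 0
static void
split_example (rtx addr)
{
  rtx base, off;
  split_const (addr, &base, &off);
  /* For the address above, BASE is the SYMBOL_REF and OFF is
     (const_int 12); for anything else BASE is ADDR itself and OFF is
     const0_rtx.  */

  poly_int64 offset;
  rtx base2 = strip_offset (addr, &offset);
  /* strip_offset makes the same split but returns the offset as a
     poly_int64, and reduces a bare constant to a const0_rtx base.  */
}
#endif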
961 /* Return the argument size in REG_ARGS_SIZE note X. */
962
963 poly_int64
964 get_args_size (const_rtx x)
965 {
966 gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
967 return rtx_to_poly_int64 (XEXP (x, 0));
968 }
969 \f
970 /* Return the number of places FIND appears within X. If COUNT_DEST is
971 zero, we do not count occurrences inside the destination of a SET. */
972
973 int
974 count_occurrences (const_rtx x, const_rtx find, int count_dest)
975 {
976 int i, j;
977 enum rtx_code code;
978 const char *format_ptr;
979 int count;
980
981 if (x == find)
982 return 1;
983
984 code = GET_CODE (x);
985
986 switch (code)
987 {
988 case REG:
989 CASE_CONST_ANY:
990 case SYMBOL_REF:
991 case CODE_LABEL:
992 case PC:
993 case CC0:
994 return 0;
995
996 case EXPR_LIST:
997 count = count_occurrences (XEXP (x, 0), find, count_dest);
998 if (XEXP (x, 1))
999 count += count_occurrences (XEXP (x, 1), find, count_dest);
1000 return count;
1001
1002 case MEM:
1003 if (MEM_P (find) && rtx_equal_p (x, find))
1004 return 1;
1005 break;
1006
1007 case SET:
1008 if (SET_DEST (x) == find && ! count_dest)
1009 return count_occurrences (SET_SRC (x), find, count_dest);
1010 break;
1011
1012 default:
1013 break;
1014 }
1015
1016 format_ptr = GET_RTX_FORMAT (code);
1017 count = 0;
1018
1019 for (i = 0; i < GET_RTX_LENGTH (code); i++)
1020 {
1021 switch (*format_ptr++)
1022 {
1023 case 'e':
1024 count += count_occurrences (XEXP (x, i), find, count_dest);
1025 break;
1026
1027 case 'E':
1028 for (j = 0; j < XVECLEN (x, i); j++)
1029 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
1030 break;
1031 }
1032 }
1033 return count;
1034 }
1035
1036 \f
1037 /* Return TRUE if OP is a register or subreg of a register that
1038 holds an unsigned quantity. Otherwise, return FALSE. */
1039
1040 bool
1041 unsigned_reg_p (rtx op)
1042 {
1043 if (REG_P (op)
1044 && REG_EXPR (op)
1045 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1046 return true;
1047
1048 if (GET_CODE (op) == SUBREG
1049 && SUBREG_PROMOTED_SIGN (op))
1050 return true;
1051
1052 return false;
1053 }
1054
1055 \f
1056 /* Nonzero if register REG appears somewhere within IN.
1057 Also works if REG is not a register; in this case it checks
1058 for a subexpression of IN that is Lisp "equal" to REG. */
1059
1060 int
1061 reg_mentioned_p (const_rtx reg, const_rtx in)
1062 {
1063 const char *fmt;
1064 int i;
1065 enum rtx_code code;
1066
1067 if (in == 0)
1068 return 0;
1069
1070 if (reg == in)
1071 return 1;
1072
1073 if (GET_CODE (in) == LABEL_REF)
1074 return reg == label_ref_label (in);
1075
1076 code = GET_CODE (in);
1077
1078 switch (code)
1079 {
1080 /* Compare registers by number. */
1081 case REG:
1082 return REG_P (reg) && REGNO (in) == REGNO (reg);
1083
1084 /* These codes have no constituent expressions
1085 and are unique. */
1086 case SCRATCH:
1087 case CC0:
1088 case PC:
1089 return 0;
1090
1091 CASE_CONST_ANY:
1092 /* These are kept unique for a given value. */
1093 return 0;
1094
1095 default:
1096 break;
1097 }
1098
1099 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1100 return 1;
1101
1102 fmt = GET_RTX_FORMAT (code);
1103
1104 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1105 {
1106 if (fmt[i] == 'E')
1107 {
1108 int j;
1109 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1110 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1111 return 1;
1112 }
1113 else if (fmt[i] == 'e'
1114 && reg_mentioned_p (reg, XEXP (in, i)))
1115 return 1;
1116 }
1117 return 0;
1118 }
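/* Illustrative usage sketch (not part of the original file): a register is
   free for reuse within INSN if it appears nowhere in the pattern and is
   not set or clobbered by the insn (including call clobbers, which
   reg_set_p below knows about).  The helper is hypothetical.  */
#if 0
static bool
reg_free_in_insn_p (const_rtx reg, const rtx_insn *insn)
{
  return !reg_mentioned_p (reg, PATTERN (insn))
	 && !reg_set_p (reg, insn);
}
#endif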
1119 \f
1120 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1121 no CODE_LABEL insn. */
1122
1123 int
1124 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1125 {
1126 rtx_insn *p;
1127 if (beg == end)
1128 return 0;
1129 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1130 if (LABEL_P (p))
1131 return 0;
1132 return 1;
1133 }
1134
1135 /* Nonzero if register REG is used in an insn between
1136 FROM_INSN and TO_INSN (exclusive of those two). */
1137
1138 int
1139 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1140 const rtx_insn *to_insn)
1141 {
1142 rtx_insn *insn;
1143
1144 if (from_insn == to_insn)
1145 return 0;
1146
1147 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1148 if (NONDEBUG_INSN_P (insn)
1149 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
1150 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1151 return 1;
1152 return 0;
1153 }
1154 \f
1155 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
1156 is entirely replaced by a new value and the only use is as a SET_DEST,
1157 we do not consider it a reference. */
1158
1159 int
1160 reg_referenced_p (const_rtx x, const_rtx body)
1161 {
1162 int i;
1163
1164 switch (GET_CODE (body))
1165 {
1166 case SET:
1167 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1168 return 1;
1169
1170 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1171 of a REG that occupies all of the REG, the insn references X if
1172 it is mentioned in the destination. */
1173 if (GET_CODE (SET_DEST (body)) != CC0
1174 && GET_CODE (SET_DEST (body)) != PC
1175 && !REG_P (SET_DEST (body))
1176 && ! (GET_CODE (SET_DEST (body)) == SUBREG
1177 && REG_P (SUBREG_REG (SET_DEST (body)))
1178 && !read_modify_subreg_p (SET_DEST (body)))
1179 && reg_overlap_mentioned_p (x, SET_DEST (body)))
1180 return 1;
1181 return 0;
1182
1183 case ASM_OPERANDS:
1184 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1185 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1186 return 1;
1187 return 0;
1188
1189 case CALL:
1190 case USE:
1191 case IF_THEN_ELSE:
1192 return reg_overlap_mentioned_p (x, body);
1193
1194 case TRAP_IF:
1195 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1196
1197 case PREFETCH:
1198 return reg_overlap_mentioned_p (x, XEXP (body, 0));
1199
1200 case UNSPEC:
1201 case UNSPEC_VOLATILE:
1202 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1203 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1204 return 1;
1205 return 0;
1206
1207 case PARALLEL:
1208 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1209 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1210 return 1;
1211 return 0;
1212
1213 case CLOBBER:
1214 if (MEM_P (XEXP (body, 0)))
1215 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1216 return 1;
1217 return 0;
1218
1219 case COND_EXEC:
1220 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1221 return 1;
1222 return reg_referenced_p (x, COND_EXEC_CODE (body));
1223
1224 default:
1225 return 0;
1226 }
1227 }
1228 \f
1229 /* Nonzero if register REG is set or clobbered in an insn between
1230 FROM_INSN and TO_INSN (exclusive of those two). */
1231
1232 int
1233 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1234 const rtx_insn *to_insn)
1235 {
1236 const rtx_insn *insn;
1237
1238 if (from_insn == to_insn)
1239 return 0;
1240
1241 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1242 if (INSN_P (insn) && reg_set_p (reg, insn))
1243 return 1;
1244 return 0;
1245 }
1246
1247 /* Return true if REG is set or clobbered inside INSN. */
1248
1249 int
1250 reg_set_p (const_rtx reg, const_rtx insn)
1251 {
1252 /* After delay slot handling, call and branch insns might be in a
1253 sequence. Check all the elements there. */
1254 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1255 {
1256 for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1257 if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1258 return true;
1259
1260 return false;
1261 }
1262
1263 /* We can be passed an insn or part of one. If we are passed an insn,
1264 check if a side-effect of the insn clobbers REG. */
1265 if (INSN_P (insn)
1266 && (FIND_REG_INC_NOTE (insn, reg)
1267 || (CALL_P (insn)
1268 && ((REG_P (reg)
1269 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1270 && (insn_callee_abi (as_a<const rtx_insn *> (insn))
1271 .clobbers_reg_p (GET_MODE (reg), REGNO (reg))))
1272 || MEM_P (reg)
1273 || find_reg_fusage (insn, CLOBBER, reg)))))
1274 return true;
1275
1276 /* There are no REG_INC notes for SP autoinc. */
1277 if (reg == stack_pointer_rtx && INSN_P (insn))
1278 {
1279 subrtx_var_iterator::array_type array;
1280 FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1281 {
1282 rtx mem = *iter;
1283 if (mem
1284 && MEM_P (mem)
1285 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
1286 {
1287 if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
1288 return true;
1289 iter.skip_subrtxes ();
1290 }
1291 }
1292 }
1293
1294 return set_of (reg, insn) != NULL_RTX;
1295 }
1296
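/* Illustrative usage sketch (not part of the original file): the value of
   REG computed by FROM is still valid at TO if no insn strictly between
   them sets or clobbers it and TO itself does not either
   (reg_set_between_p excludes both endpoints).  Hypothetical helper.  */
#if 0
static bool
reg_value_reaches_p (const_rtx reg, const rtx_insn *from, const rtx_insn *to)
{
  return !reg_set_between_p (reg, from, to) && !reg_set_p (reg, to);
}
#endif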
1297 /* Similar to reg_set_between_p, but check all registers in X. Return 0
1298 only if none of them are modified between START and END. Return 1 if
1299 X contains a MEM; this routine does use memory aliasing. */
1300
1301 int
1302 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1303 {
1304 const enum rtx_code code = GET_CODE (x);
1305 const char *fmt;
1306 int i, j;
1307 rtx_insn *insn;
1308
1309 if (start == end)
1310 return 0;
1311
1312 switch (code)
1313 {
1314 CASE_CONST_ANY:
1315 case CONST:
1316 case SYMBOL_REF:
1317 case LABEL_REF:
1318 return 0;
1319
1320 case PC:
1321 case CC0:
1322 return 1;
1323
1324 case MEM:
1325 if (modified_between_p (XEXP (x, 0), start, end))
1326 return 1;
1327 if (MEM_READONLY_P (x))
1328 return 0;
1329 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1330 if (memory_modified_in_insn_p (x, insn))
1331 return 1;
1332 return 0;
1333
1334 case REG:
1335 return reg_set_between_p (x, start, end);
1336
1337 default:
1338 break;
1339 }
1340
1341 fmt = GET_RTX_FORMAT (code);
1342 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1343 {
1344 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1345 return 1;
1346
1347 else if (fmt[i] == 'E')
1348 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1349 if (modified_between_p (XVECEXP (x, i, j), start, end))
1350 return 1;
1351 }
1352
1353 return 0;
1354 }
1355
1356 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1357 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1358 does use memory aliasing. */
1359
1360 int
1361 modified_in_p (const_rtx x, const_rtx insn)
1362 {
1363 const enum rtx_code code = GET_CODE (x);
1364 const char *fmt;
1365 int i, j;
1366
1367 switch (code)
1368 {
1369 CASE_CONST_ANY:
1370 case CONST:
1371 case SYMBOL_REF:
1372 case LABEL_REF:
1373 return 0;
1374
1375 case PC:
1376 case CC0:
1377 return 1;
1378
1379 case MEM:
1380 if (modified_in_p (XEXP (x, 0), insn))
1381 return 1;
1382 if (MEM_READONLY_P (x))
1383 return 0;
1384 if (memory_modified_in_insn_p (x, insn))
1385 return 1;
1386 return 0;
1387
1388 case REG:
1389 return reg_set_p (x, insn);
1390
1391 default:
1392 break;
1393 }
1394
1395 fmt = GET_RTX_FORMAT (code);
1396 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1397 {
1398 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1399 return 1;
1400
1401 else if (fmt[i] == 'E')
1402 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1403 if (modified_in_p (XVECEXP (x, i, j), insn))
1404 return 1;
1405 }
1406
1407 return 0;
1408 }
1409
1410 /* Return true if X is a SUBREG and if storing a value to X would
1411 preserve some of its SUBREG_REG. For example, on a normal 32-bit
1412 target, using a SUBREG to store to one half of a DImode REG would
1413 preserve the other half. */
1414
1415 bool
1416 read_modify_subreg_p (const_rtx x)
1417 {
1418 if (GET_CODE (x) != SUBREG)
1419 return false;
1420 poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
1421 poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
1422 poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
1423 /* The inner and outer modes of a subreg must be ordered, so that we
1424 can tell whether they're paradoxical or partial. */
1425 gcc_checking_assert (ordered_p (isize, osize));
1426 return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
1427 }
1428 \f
1429 /* Helper function for set_of. */
1430 struct set_of_data
1431 {
1432 const_rtx found;
1433 const_rtx pat;
1434 };
1435
1436 static void
1437 set_of_1 (rtx x, const_rtx pat, void *data1)
1438 {
1439 struct set_of_data *const data = (struct set_of_data *) (data1);
1440 if (rtx_equal_p (x, data->pat)
1441 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1442 data->found = pat;
1443 }
1444
1445 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1446 (either directly or via STRICT_LOW_PART and similar modifiers). */
1447 const_rtx
1448 set_of (const_rtx pat, const_rtx insn)
1449 {
1450 struct set_of_data data;
1451 data.found = NULL_RTX;
1452 data.pat = pat;
1453 note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1454 return data.found;
1455 }
1456
1457 /* Add all hard register in X to *PSET. */
1458 void
1459 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1460 {
1461 subrtx_iterator::array_type array;
1462 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1463 {
1464 const_rtx x = *iter;
1465 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1466 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1467 }
1468 }
1469
1470 /* This function, called through note_stores, collects sets and
1471 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1472 by DATA. */
1473 void
1474 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1475 {
1476 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1477 if (REG_P (x) && HARD_REGISTER_P (x))
1478 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1479 }
1480
1481 /* Examine INSN, and compute the set of hard registers written by it.
1482 Store it in *PSET. Should only be called after reload.
1483
1484 IMPLICIT is true if we should include registers that are fully-clobbered
1485 by calls. This should be used with caution, since it doesn't include
1486 partially-clobbered registers. */
1487 void
1488 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1489 {
1490 rtx link;
1491
1492 CLEAR_HARD_REG_SET (*pset);
1493 note_stores (insn, record_hard_reg_sets, pset);
1494 if (CALL_P (insn) && implicit)
1495 *pset |= insn_callee_abi (insn).full_reg_clobbers ();
1496 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1497 if (REG_NOTE_KIND (link) == REG_INC)
1498 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1499 }
1500
1501 /* Like record_hard_reg_sets, but called through note_uses. */
1502 void
1503 record_hard_reg_uses (rtx *px, void *data)
1504 {
1505 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1506 }
1507 \f
1508 /* Given an INSN, return a SET expression if this insn has only a single SET.
1509 It may also have CLOBBERs, USEs, or SETs whose outputs
1510 will not be used, which we ignore. */
1511
1512 rtx
1513 single_set_2 (const rtx_insn *insn, const_rtx pat)
1514 {
1515 rtx set = NULL;
1516 int set_verified = 1;
1517 int i;
1518
1519 if (GET_CODE (pat) == PARALLEL)
1520 {
1521 for (i = 0; i < XVECLEN (pat, 0); i++)
1522 {
1523 rtx sub = XVECEXP (pat, 0, i);
1524 switch (GET_CODE (sub))
1525 {
1526 case USE:
1527 case CLOBBER:
1528 break;
1529
1530 case SET:
1531 /* We can consider insns having multiple sets, where all
1532 but one are dead, as single set insns. In the common case
1533 only a single set is present in the pattern, so we want
1534 to avoid checking for REG_UNUSED notes unless necessary.
1535
1536 When we reach a set for the first time, we just assume it is
1537 the single set we are looking for; only when more
1538 sets are found in the insn do we check them. */
1539 if (!set_verified)
1540 {
1541 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1542 && !side_effects_p (set))
1543 set = NULL;
1544 else
1545 set_verified = 1;
1546 }
1547 if (!set)
1548 set = sub, set_verified = 0;
1549 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1550 || side_effects_p (sub))
1551 return NULL_RTX;
1552 break;
1553
1554 default:
1555 return NULL_RTX;
1556 }
1557 }
1558 }
1559 return set;
1560 }
1561
1562 /* Given an INSN, return nonzero if it has more than one SET, else return
1563 zero. */
1564
1565 int
1566 multiple_sets (const_rtx insn)
1567 {
1568 int found;
1569 int i;
1570
1571 /* INSN must be an insn. */
1572 if (! INSN_P (insn))
1573 return 0;
1574
1575 /* Only a PARALLEL can have multiple SETs. */
1576 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1577 {
1578 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1579 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1580 {
1581 /* If we have already found a SET, then return now. */
1582 if (found)
1583 return 1;
1584 else
1585 found = 1;
1586 }
1587 }
1588
1589 /* Either zero or one SET. */
1590 return 0;
1591 }
1592 \f
1593 /* Return nonzero if the destination of SET equals the source
1594 and there are no side effects. */
1595
1596 int
1597 set_noop_p (const_rtx set)
1598 {
1599 rtx src = SET_SRC (set);
1600 rtx dst = SET_DEST (set);
1601
1602 if (dst == pc_rtx && src == pc_rtx)
1603 return 1;
1604
1605 if (MEM_P (dst) && MEM_P (src))
1606 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1607
1608 if (GET_CODE (dst) == ZERO_EXTRACT)
1609 return rtx_equal_p (XEXP (dst, 0), src)
1610 && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1611 && !side_effects_p (src);
1612
1613 if (GET_CODE (dst) == STRICT_LOW_PART)
1614 dst = XEXP (dst, 0);
1615
1616 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1617 {
1618 if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
1619 return 0;
1620 src = SUBREG_REG (src);
1621 dst = SUBREG_REG (dst);
1622 }
1623
1624 /* It is a no-op if the destination matches the selected source vector
1625 elements. */
1626 if (GET_CODE (src) == VEC_SELECT
1627 && REG_P (XEXP (src, 0)) && REG_P (dst)
1628 && HARD_REGISTER_P (XEXP (src, 0))
1629 && HARD_REGISTER_P (dst))
1630 {
1631 int i;
1632 rtx par = XEXP (src, 1);
1633 rtx src0 = XEXP (src, 0);
1634 poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
1635 poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1636
1637 for (i = 1; i < XVECLEN (par, 0); i++)
1638 if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
1639 return 0;
1640 return
1641 REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
1642 && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1643 offset, GET_MODE (dst)) == (int) REGNO (dst);
1644 }
1645
1646 return (REG_P (src) && REG_P (dst)
1647 && REGNO (src) == REGNO (dst));
1648 }
1649 \f
1650 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1651 value to itself. */
1652
1653 int
1654 noop_move_p (const rtx_insn *insn)
1655 {
1656 rtx pat = PATTERN (insn);
1657
1658 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1659 return 1;
1660
1661 /* Insns carrying these notes are useful later on. */
1662 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1663 return 0;
1664
1665 /* Check the code to be executed for COND_EXEC. */
1666 if (GET_CODE (pat) == COND_EXEC)
1667 pat = COND_EXEC_CODE (pat);
1668
1669 if (GET_CODE (pat) == SET && set_noop_p (pat))
1670 return 1;
1671
1672 if (GET_CODE (pat) == PARALLEL)
1673 {
1674 int i;
1675 /* If nothing but SETs of registers to themselves,
1676 this insn can also be deleted. */
1677 for (i = 0; i < XVECLEN (pat, 0); i++)
1678 {
1679 rtx tem = XVECEXP (pat, 0, i);
1680
1681 if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
1682 continue;
1683
1684 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1685 return 0;
1686 }
1687
1688 return 1;
1689 }
1690 return 0;
1691 }
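/* Illustrative usage sketch (not part of the original file): the scan a
   cleanup pass performs to find deletable no-op moves.  get_insns comes
   from emit-rtl.h; the helper itself is hypothetical and guarded out.  */
#if 0
static int
count_noop_moves (void)
{
  int count = 0;
  for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && noop_move_p (insn))
      count++;
  return count;
}
#endif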
1692 \f
1693
1694 /* Return nonzero if register in range [REGNO, ENDREGNO)
1695 appears either explicitly or implicitly in X
1696 other than being stored into.
1697
1698 References contained within the substructure at LOC do not count.
1699 LOC may be zero, meaning don't ignore anything. */
1700
1701 bool
1702 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1703 rtx *loc)
1704 {
1705 int i;
1706 unsigned int x_regno;
1707 RTX_CODE code;
1708 const char *fmt;
1709
1710 repeat:
1711 /* The contents of a REG_NONNEG note are always zero, so we must come here
1712 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1713 if (x == 0)
1714 return false;
1715
1716 code = GET_CODE (x);
1717
1718 switch (code)
1719 {
1720 case REG:
1721 x_regno = REGNO (x);
1722
1723 /* If we are modifying the stack, frame, or argument pointer, it will
1724 clobber a virtual register. In fact, we could be more precise,
1725 but it isn't worth it. */
1726 if ((x_regno == STACK_POINTER_REGNUM
1727 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1728 && x_regno == ARG_POINTER_REGNUM)
1729 || x_regno == FRAME_POINTER_REGNUM)
1730 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1731 return true;
1732
1733 return endregno > x_regno && regno < END_REGNO (x);
1734
1735 case SUBREG:
1736 /* If this is a SUBREG of a hard reg, we can see exactly which
1737 registers are being modified. Otherwise, handle normally. */
1738 if (REG_P (SUBREG_REG (x))
1739 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1740 {
1741 unsigned int inner_regno = subreg_regno (x);
1742 unsigned int inner_endregno
1743 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1744 ? subreg_nregs (x) : 1);
1745
1746 return endregno > inner_regno && regno < inner_endregno;
1747 }
1748 break;
1749
1750 case CLOBBER:
1751 case SET:
1752 if (&SET_DEST (x) != loc
1753 /* Note setting a SUBREG counts as referring to the REG it is in for
1754 a pseudo but not for hard registers since we can
1755 treat each word individually. */
1756 && ((GET_CODE (SET_DEST (x)) == SUBREG
1757 && loc != &SUBREG_REG (SET_DEST (x))
1758 && REG_P (SUBREG_REG (SET_DEST (x)))
1759 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1760 && refers_to_regno_p (regno, endregno,
1761 SUBREG_REG (SET_DEST (x)), loc))
1762 || (!REG_P (SET_DEST (x))
1763 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1764 return true;
1765
1766 if (code == CLOBBER || loc == &SET_SRC (x))
1767 return false;
1768 x = SET_SRC (x);
1769 goto repeat;
1770
1771 default:
1772 break;
1773 }
1774
1775 /* X does not match, so try its subexpressions. */
1776
1777 fmt = GET_RTX_FORMAT (code);
1778 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1779 {
1780 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1781 {
1782 if (i == 0)
1783 {
1784 x = XEXP (x, 0);
1785 goto repeat;
1786 }
1787 else
1788 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1789 return true;
1790 }
1791 else if (fmt[i] == 'E')
1792 {
1793 int j;
1794 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1795 if (loc != &XVECEXP (x, i, j)
1796 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1797 return true;
1798 }
1799 }
1800 return false;
1801 }
1802
1803 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1804 we check if any register number in X conflicts with the relevant register
1805 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1806 contains a MEM (we don't bother checking for memory addresses that can't
1807 conflict because we expect this to be a rare case). */
1808
1809 int
1810 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1811 {
1812 unsigned int regno, endregno;
1813
1814 /* If either argument is a constant, then modifying X cannot
1815 affect IN. Here we look only at IN; a CONSTANT_P (x) check can
1816 profitably be combined with the switch statement below. */
1817 if (CONSTANT_P (in))
1818 return 0;
1819
1820 recurse:
1821 switch (GET_CODE (x))
1822 {
1823 case CLOBBER:
1824 case STRICT_LOW_PART:
1825 case ZERO_EXTRACT:
1826 case SIGN_EXTRACT:
1827 /* Overly conservative. */
1828 x = XEXP (x, 0);
1829 goto recurse;
1830
1831 case SUBREG:
1832 regno = REGNO (SUBREG_REG (x));
1833 if (regno < FIRST_PSEUDO_REGISTER)
1834 regno = subreg_regno (x);
1835 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1836 ? subreg_nregs (x) : 1);
1837 goto do_reg;
1838
1839 case REG:
1840 regno = REGNO (x);
1841 endregno = END_REGNO (x);
1842 do_reg:
1843 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1844
1845 case MEM:
1846 {
1847 const char *fmt;
1848 int i;
1849
1850 if (MEM_P (in))
1851 return 1;
1852
1853 fmt = GET_RTX_FORMAT (GET_CODE (in));
1854 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1855 if (fmt[i] == 'e')
1856 {
1857 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1858 return 1;
1859 }
1860 else if (fmt[i] == 'E')
1861 {
1862 int j;
1863 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1864 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1865 return 1;
1866 }
1867
1868 return 0;
1869 }
1870
1871 case SCRATCH:
1872 case PC:
1873 case CC0:
1874 return reg_mentioned_p (x, in);
1875
1876 case PARALLEL:
1877 {
1878 int i;
1879
1880 /* If any register in here refers to it we return true. */
1881 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1882 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1883 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1884 return 1;
1885 return 0;
1886 }
1887
1888 default:
1889 gcc_assert (CONSTANT_P (x));
1890 return 0;
1891 }
1892 }
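/* Illustrative usage sketch (not part of the original file): before
   rewriting an insn to store into NEW_DEST, a pass can check whether any
   of NEW_DEST's registers appear anywhere in another insn it must not
   disturb.  Hypothetical helper.  */
#if 0
static bool
dest_disturbs_insn_p (rtx new_dest, const rtx_insn *other)
{
  return reg_overlap_mentioned_p (new_dest, PATTERN (other));
}
#endif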
1893 \f
1894 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1895 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1896 ignored by note_stores, but passed to FUN.
1897
1898 FUN receives three arguments:
1899 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1900 2. the SET or CLOBBER rtx that does the store,
1901 3. the pointer DATA provided to note_stores.
1902
1903 If the item being stored in or clobbered is a SUBREG of a hard register,
1904 the SUBREG will be passed. */
1905
1906 void
1907 note_pattern_stores (const_rtx x,
1908 void (*fun) (rtx, const_rtx, void *), void *data)
1909 {
1910 int i;
1911
1912 if (GET_CODE (x) == COND_EXEC)
1913 x = COND_EXEC_CODE (x);
1914
1915 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1916 {
1917 rtx dest = SET_DEST (x);
1918
1919 while ((GET_CODE (dest) == SUBREG
1920 && (!REG_P (SUBREG_REG (dest))
1921 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1922 || GET_CODE (dest) == ZERO_EXTRACT
1923 || GET_CODE (dest) == STRICT_LOW_PART)
1924 dest = XEXP (dest, 0);
1925
1926 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1927 each of whose first operand is a register. */
1928 if (GET_CODE (dest) == PARALLEL)
1929 {
1930 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1931 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1932 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1933 }
1934 else
1935 (*fun) (dest, x, data);
1936 }
1937
1938 else if (GET_CODE (x) == PARALLEL)
1939 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1940 note_pattern_stores (XVECEXP (x, 0, i), fun, data);
1941 }
1942
1943 /* Same, but for an instruction. If the instruction is a call, include
1944 any CLOBBERs in its CALL_INSN_FUNCTION_USAGE. */
1945
1946 void
1947 note_stores (const rtx_insn *insn,
1948 void (*fun) (rtx, const_rtx, void *), void *data)
1949 {
1950 if (CALL_P (insn))
1951 for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
1952 link; link = XEXP (link, 1))
1953 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1954 note_pattern_stores (XEXP (link, 0), fun, data);
1955 note_pattern_stores (PATTERN (insn), fun, data);
1956 }
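/* Illustrative usage sketch (not part of the original file): collecting
   the hard registers written by INSN with the note_stores /
   record_hard_reg_sets pair defined above; this is essentially
   find_all_hard_reg_sets without the call-clobber and REG_INC handling.
   Hypothetical helper.  */
#if 0
static void
hard_regs_stored_by (const rtx_insn *insn, HARD_REG_SET *pset)
{
  CLEAR_HARD_REG_SET (*pset);
  note_stores (insn, record_hard_reg_sets, pset);
}
#endif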
1957 \f
1958 /* Like note_stores, but call FUN for each expression that is being
1959 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1960 FUN for each expression, not any interior subexpressions. FUN receives a
1961 pointer to the expression and the DATA passed to this function.
1962
1963 Note that this is not quite the same test as that done in reg_referenced_p
1964 since that considers something as being referenced if it is being
1965 partially set, while we do not. */
1966
1967 void
1968 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1969 {
1970 rtx body = *pbody;
1971 int i;
1972
1973 switch (GET_CODE (body))
1974 {
1975 case COND_EXEC:
1976 (*fun) (&COND_EXEC_TEST (body), data);
1977 note_uses (&COND_EXEC_CODE (body), fun, data);
1978 return;
1979
1980 case PARALLEL:
1981 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1982 note_uses (&XVECEXP (body, 0, i), fun, data);
1983 return;
1984
1985 case SEQUENCE:
1986 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1987 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1988 return;
1989
1990 case USE:
1991 (*fun) (&XEXP (body, 0), data);
1992 return;
1993
1994 case ASM_OPERANDS:
1995 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1996 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1997 return;
1998
1999 case TRAP_IF:
2000 (*fun) (&TRAP_CONDITION (body), data);
2001 return;
2002
2003 case PREFETCH:
2004 (*fun) (&XEXP (body, 0), data);
2005 return;
2006
2007 case UNSPEC:
2008 case UNSPEC_VOLATILE:
2009 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2010 (*fun) (&XVECEXP (body, 0, i), data);
2011 return;
2012
2013 case CLOBBER:
2014 if (MEM_P (XEXP (body, 0)))
2015 (*fun) (&XEXP (XEXP (body, 0), 0), data);
2016 return;
2017
2018 case SET:
2019 {
2020 rtx dest = SET_DEST (body);
2021
2022 /* For SETs we call FUN on everything in the source, plus the registers
2023 in a memory destination and the operands of a ZERO_EXTRACT. */
2024 (*fun) (&SET_SRC (body), data);
2025
2026 if (GET_CODE (dest) == ZERO_EXTRACT)
2027 {
2028 (*fun) (&XEXP (dest, 1), data);
2029 (*fun) (&XEXP (dest, 2), data);
2030 }
2031
2032 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2033 dest = XEXP (dest, 0);
2034
2035 if (MEM_P (dest))
2036 (*fun) (&XEXP (dest, 0), data);
2037 }
2038 return;
2039
2040 default:
2041 /* All the other possibilities never store. */
2042 (*fun) (pbody, data);
2043 return;
2044 }
2045 }
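/* Illustrative sketch (hypothetical caller code): use note_uses to test
   whether any expression used by INSN mentions a given register.  The
   callback and the small struct below are names invented for this example.

     struct find_use { rtx reg; bool found; };

     static void
     look_for_reg (rtx *loc, void *data)
     {
       struct find_use *d = (struct find_use *) data;
       // *LOC is a complete used expression, never an interior subexpression.
       if (reg_mentioned_p (d->reg, *loc))
         d->found = true;
     }

     struct find_use d = { reg, false };
     note_uses (&PATTERN (insn), look_for_reg, &d);  */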
2046 \f
2047 /* Return nonzero if X's old contents don't survive after INSN.
2048    This will be true if X is (cc0), or if X is a register and either
2049    X dies in INSN or INSN entirely sets X.
2050
2051    "Entirely set" means set directly and not through a SUBREG or
2052    ZERO_EXTRACT, so no trace of the old contents remains.
2053 Likewise, REG_INC does not count.
2054
2055 REG may be a hard or pseudo reg. Renumbering is not taken into account,
2056 but for this use that makes no difference, since regs don't overlap
2057 during their lifetimes. Therefore, this function may be used
2058 at any time after deaths have been computed.
2059
2060 If REG is a hard reg that occupies multiple machine registers, this
2061 function will only return 1 if each of those registers will be replaced
2062 by INSN. */
2063
2064 int
2065 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2066 {
2067 unsigned int regno, end_regno;
2068 unsigned int i;
2069
2070 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
2071 if (GET_CODE (x) == CC0)
2072 return 1;
2073
2074 gcc_assert (REG_P (x));
2075
2076 regno = REGNO (x);
2077 end_regno = END_REGNO (x);
2078 for (i = regno; i < end_regno; i++)
2079 if (! dead_or_set_regno_p (insn, i))
2080 return 0;
2081
2082 return 1;
2083 }
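/* Illustrative sketch (all names hypothetical): a pass that wants to give
   hard register REG a new value after INSN can guard the transformation as
   below.  The test relies on REG_DEAD notes, so it is only meaningful after
   deaths have been computed.

     if (dead_or_set_p (insn, reg))
       // The old contents of REG do not survive INSN.
       reuse_register_after (insn, reg);  */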
2084
2085 /* Return TRUE iff DEST is a register or subreg of a register, is a
2086 complete rather than read-modify-write destination, and contains
2087 register TEST_REGNO. */
2088
2089 static bool
2090 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2091 {
2092 unsigned int regno, endregno;
2093
2094 if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2095 dest = SUBREG_REG (dest);
2096
2097 if (!REG_P (dest))
2098 return false;
2099
2100 regno = REGNO (dest);
2101 endregno = END_REGNO (dest);
2102 return (test_regno >= regno && test_regno < endregno);
2103 }
2104
2105 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2106 any member matches the covers_regno_no_parallel_p criteria. */
2107
2108 static bool
2109 covers_regno_p (const_rtx dest, unsigned int test_regno)
2110 {
2111 if (GET_CODE (dest) == PARALLEL)
2112 {
2113 /* Some targets place small structures in registers for return
2114 values of functions, and those registers are wrapped in
2115 PARALLELs that we may see as the destination of a SET. */
2116 int i;
2117
2118 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2119 {
2120 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2121 if (inner != NULL_RTX
2122 && covers_regno_no_parallel_p (inner, test_regno))
2123 return true;
2124 }
2125
2126 return false;
2127 }
2128 else
2129 return covers_regno_no_parallel_p (dest, test_regno);
2130 }
2131
2132 /* Utility function for dead_or_set_p to check an individual register. */
2133
2134 int
2135 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2136 {
2137 const_rtx pattern;
2138
2139 /* See if there is a death note for something that includes TEST_REGNO. */
2140 if (find_regno_note (insn, REG_DEAD, test_regno))
2141 return 1;
2142
2143 if (CALL_P (insn)
2144 && find_regno_fusage (insn, CLOBBER, test_regno))
2145 return 1;
2146
2147 pattern = PATTERN (insn);
2148
2149 /* If a COND_EXEC is not executed, the value survives. */
2150 if (GET_CODE (pattern) == COND_EXEC)
2151 return 0;
2152
2153 if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2154 return covers_regno_p (SET_DEST (pattern), test_regno);
2155 else if (GET_CODE (pattern) == PARALLEL)
2156 {
2157 int i;
2158
2159 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2160 {
2161 rtx body = XVECEXP (pattern, 0, i);
2162
2163 if (GET_CODE (body) == COND_EXEC)
2164 body = COND_EXEC_CODE (body);
2165
2166 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2167 && covers_regno_p (SET_DEST (body), test_regno))
2168 return 1;
2169 }
2170 }
2171
2172 return 0;
2173 }
2174
2175 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2176 If DATUM is nonzero, look for one whose datum is DATUM. */
2177
2178 rtx
2179 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2180 {
2181 rtx link;
2182
2183 gcc_checking_assert (insn);
2184
2185 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2186 if (! INSN_P (insn))
2187 return 0;
2188 if (datum == 0)
2189 {
2190 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2191 if (REG_NOTE_KIND (link) == kind)
2192 return link;
2193 return 0;
2194 }
2195
2196 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2197 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2198 return link;
2199 return 0;
2200 }
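/* Illustrative sketch: fetch the REG_EQUAL note of INSN, if any, and use
   its datum.  Passing NULL_RTX as DATUM means "any datum of this kind";
   EQUAL_VALUE is a hypothetical variable of the caller.

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note && CONSTANT_P (XEXP (note, 0)))
       // XEXP (note, 0) is the value the destination of INSN is known to equal.
       equal_value = XEXP (note, 0);  */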
2201
2202 /* Return the reg-note of kind KIND in insn INSN which applies to register
2203 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2204    the register mentioned in the note need not have register number REGNO
2205    exactly when REGNO is a hard register; the note may simply overlap REGNO.  */
2206
2207 rtx
2208 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2209 {
2210 rtx link;
2211
2212 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2213 if (! INSN_P (insn))
2214 return 0;
2215
2216 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2217 if (REG_NOTE_KIND (link) == kind
2218 /* Verify that it is a register, so that scratch and MEM won't cause a
2219 problem here. */
2220 && REG_P (XEXP (link, 0))
2221 && REGNO (XEXP (link, 0)) <= regno
2222 && END_REGNO (XEXP (link, 0)) > regno)
2223 return link;
2224 return 0;
2225 }
2226
2227 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2228 has such a note. */
2229
2230 rtx
2231 find_reg_equal_equiv_note (const_rtx insn)
2232 {
2233 rtx link;
2234
2235 if (!INSN_P (insn))
2236 return 0;
2237
2238 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2239 if (REG_NOTE_KIND (link) == REG_EQUAL
2240 || REG_NOTE_KIND (link) == REG_EQUIV)
2241 {
2242 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2243 insns that have multiple sets. Checking single_set to
2244 make sure of this is not the proper check, as explained
2245 in the comment in set_unique_reg_note.
2246
2247 This should be changed into an assert. */
2248 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2249 return 0;
2250 return link;
2251 }
2252 return NULL;
2253 }
2254
2255 /* Check whether INSN is a single_set whose source is known to be
2256 equivalent to a constant. Return that constant if so, otherwise
2257 return null. */
2258
2259 rtx
2260 find_constant_src (const rtx_insn *insn)
2261 {
2262 rtx note, set, x;
2263
2264 set = single_set (insn);
2265 if (set)
2266 {
2267 x = avoid_constant_pool_reference (SET_SRC (set));
2268 if (CONSTANT_P (x))
2269 return x;
2270 }
2271
2272 note = find_reg_equal_equiv_note (insn);
2273 if (note && CONSTANT_P (XEXP (note, 0)))
2274 return XEXP (note, 0);
2275
2276 return NULL_RTX;
2277 }
2278
2279 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2280 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2281
2282 int
2283 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2284 {
2285 /* If it's not a CALL_INSN, it can't possibly have a
2286 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2287 if (!CALL_P (insn))
2288 return 0;
2289
2290 gcc_assert (datum);
2291
2292 if (!REG_P (datum))
2293 {
2294 rtx link;
2295
2296 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2297 link;
2298 link = XEXP (link, 1))
2299 if (GET_CODE (XEXP (link, 0)) == code
2300 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2301 return 1;
2302 }
2303 else
2304 {
2305 unsigned int regno = REGNO (datum);
2306
2307 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2308 to pseudo registers, so don't bother checking. */
2309
2310 if (regno < FIRST_PSEUDO_REGISTER)
2311 {
2312 unsigned int end_regno = END_REGNO (datum);
2313 unsigned int i;
2314
2315 for (i = regno; i < end_regno; i++)
2316 if (find_regno_fusage (insn, code, i))
2317 return 1;
2318 }
2319 }
2320
2321 return 0;
2322 }
2323
2324 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2325 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2326
2327 int
2328 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2329 {
2330 rtx link;
2331
2332 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2333 to pseudo registers, so don't bother checking. */
2334
2335 if (regno >= FIRST_PSEUDO_REGISTER
2336 || !CALL_P (insn) )
2337 return 0;
2338
2339 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2340 {
2341 rtx op, reg;
2342
2343 if (GET_CODE (op = XEXP (link, 0)) == code
2344 && REG_P (reg = XEXP (op, 0))
2345 && REGNO (reg) <= regno
2346 && END_REGNO (reg) > regno)
2347 return 1;
2348 }
2349
2350 return 0;
2351 }
2352
2353 \f
2354 /* Return true if KIND is an integer REG_NOTE. */
2355
2356 static bool
2357 int_reg_note_p (enum reg_note kind)
2358 {
2359 return kind == REG_BR_PROB;
2360 }
2361
2362 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2363 stored as the pointer to the next register note. */
2364
2365 rtx
2366 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2367 {
2368 rtx note;
2369
2370 gcc_checking_assert (!int_reg_note_p (kind));
2371 switch (kind)
2372 {
2373 case REG_CC_SETTER:
2374 case REG_CC_USER:
2375 case REG_LABEL_TARGET:
2376 case REG_LABEL_OPERAND:
2377 case REG_TM:
2378 /* These types of register notes use an INSN_LIST rather than an
2379 EXPR_LIST, so that copying is done right and dumps look
2380 better. */
2381 note = alloc_INSN_LIST (datum, list);
2382 PUT_REG_NOTE_KIND (note, kind);
2383 break;
2384
2385 default:
2386 note = alloc_EXPR_LIST (kind, datum, list);
2387 break;
2388 }
2389
2390 return note;
2391 }
2392
2393 /* Add register note with kind KIND and datum DATUM to INSN. */
2394
2395 void
2396 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2397 {
2398 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2399 }
2400
2401 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2402
2403 void
2404 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2405 {
2406 gcc_checking_assert (int_reg_note_p (kind));
2407 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2408 datum, REG_NOTES (insn));
2409 }
2410
2411 /* Add a REG_ARGS_SIZE note to INSN with value VALUE. */
2412
2413 void
2414 add_args_size_note (rtx_insn *insn, poly_int64 value)
2415 {
2416 gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2417 add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2418 }
2419
2420 /* Add a register note like NOTE to INSN. */
2421
2422 void
2423 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2424 {
2425 if (GET_CODE (note) == INT_LIST)
2426 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2427 else
2428 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2429 }
2430
2431 /* Duplicate NOTE and return the copy. */
2432 rtx
2433 duplicate_reg_note (rtx note)
2434 {
2435 reg_note kind = REG_NOTE_KIND (note);
2436
2437 if (GET_CODE (note) == INT_LIST)
2438 return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2439 else if (GET_CODE (note) == EXPR_LIST)
2440 return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2441 else
2442 return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2443 }
2444
2445 /* Remove register note NOTE from the REG_NOTES of INSN. */
2446
2447 void
2448 remove_note (rtx_insn *insn, const_rtx note)
2449 {
2450 rtx link;
2451
2452 if (note == NULL_RTX)
2453 return;
2454
2455 if (REG_NOTES (insn) == note)
2456 REG_NOTES (insn) = XEXP (note, 1);
2457 else
2458 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2459 if (XEXP (link, 1) == note)
2460 {
2461 XEXP (link, 1) = XEXP (note, 1);
2462 break;
2463 }
2464
2465 switch (REG_NOTE_KIND (note))
2466 {
2467 case REG_EQUAL:
2468 case REG_EQUIV:
2469 df_notes_rescan (insn);
2470 break;
2471 default:
2472 break;
2473 }
2474 }
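/* Illustrative sketch of the usual note lifecycle (hypothetical caller
   code): attach a REG_EQUAL note recording that the single set of INSN
   computes the constant 42, then find it again and remove it.

     if (single_set (insn))
       add_reg_note (insn, REG_EQUAL, gen_int_mode (42, SImode));

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       remove_note (insn, note);  */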
2475
2476 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2477 Return true if any note has been removed. */
2478
2479 bool
2480 remove_reg_equal_equiv_notes (rtx_insn *insn)
2481 {
2482 rtx *loc;
2483 bool ret = false;
2484
2485 loc = &REG_NOTES (insn);
2486 while (*loc)
2487 {
2488 enum reg_note kind = REG_NOTE_KIND (*loc);
2489 if (kind == REG_EQUAL || kind == REG_EQUIV)
2490 {
2491 *loc = XEXP (*loc, 1);
2492 ret = true;
2493 }
2494 else
2495 loc = &XEXP (*loc, 1);
2496 }
2497 return ret;
2498 }
2499
2500 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2501
2502 void
2503 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2504 {
2505 df_ref eq_use;
2506
2507 if (!df)
2508 return;
2509
2510   /* This loop is a little tricky.  We cannot just walk down the chain,
2511      because the chain is modified by the actions inside the loop.  So we
2512      simply keep re-reading the head; we plan to drain the list anyway.  */
2513 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2514 {
2515 rtx_insn *insn = DF_REF_INSN (eq_use);
2516 rtx note = find_reg_equal_equiv_note (insn);
2517
2518 /* This assert is generally triggered when someone deletes a REG_EQUAL
2519 or REG_EQUIV note by hacking the list manually rather than calling
2520 remove_note. */
2521 gcc_assert (note);
2522
2523 remove_note (insn, note);
2524 }
2525 }
2526
2527 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2528    return true if it is found.  A simple equality test is used to determine if
2529 NODE matches. */
2530
2531 bool
2532 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2533 {
2534 const_rtx x;
2535
2536 for (x = listp; x; x = XEXP (x, 1))
2537 if (node == XEXP (x, 0))
2538 return true;
2539
2540 return false;
2541 }
2542
2543 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2544 remove that entry from the list if it is found.
2545
2546 A simple equality test is used to determine if NODE matches. */
2547
2548 void
2549 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2550 {
2551 rtx_expr_list *temp = *listp;
2552 rtx_expr_list *prev = NULL;
2553
2554 while (temp)
2555 {
2556 if (node == temp->element ())
2557 {
2558 /* Splice the node out of the list. */
2559 if (prev)
2560 XEXP (prev, 1) = temp->next ();
2561 else
2562 *listp = temp->next ();
2563
2564 return;
2565 }
2566
2567 prev = temp;
2568 temp = temp->next ();
2569 }
2570 }
2571
2572 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2573 remove that entry from the list if it is found.
2574
2575 A simple equality test is used to determine if NODE matches. */
2576
2577 void
2578 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2579 {
2580 rtx_insn_list *temp = *listp;
2581 rtx_insn_list *prev = NULL;
2582
2583 while (temp)
2584 {
2585 if (node == temp->insn ())
2586 {
2587 /* Splice the node out of the list. */
2588 if (prev)
2589 XEXP (prev, 1) = temp->next ();
2590 else
2591 *listp = temp->next ();
2592
2593 return;
2594 }
2595
2596 prev = temp;
2597 temp = temp->next ();
2598 }
2599 }
2600 \f
2601 /* Nonzero if X contains any volatile instructions. These are instructions
2602    which may cause unpredictable machine state, and thus no
2603 instructions or register uses should be moved or combined across them.
2604 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2605
2606 int
2607 volatile_insn_p (const_rtx x)
2608 {
2609 const RTX_CODE code = GET_CODE (x);
2610 switch (code)
2611 {
2612 case LABEL_REF:
2613 case SYMBOL_REF:
2614 case CONST:
2615 CASE_CONST_ANY:
2616 case CC0:
2617 case PC:
2618 case REG:
2619 case SCRATCH:
2620 case CLOBBER:
2621 case ADDR_VEC:
2622 case ADDR_DIFF_VEC:
2623 case CALL:
2624 case MEM:
2625 return 0;
2626
2627 case UNSPEC_VOLATILE:
2628 return 1;
2629
2630 case ASM_INPUT:
2631 case ASM_OPERANDS:
2632 if (MEM_VOLATILE_P (x))
2633 return 1;
2634
2635 default:
2636 break;
2637 }
2638
2639 /* Recursively scan the operands of this expression. */
2640
2641 {
2642 const char *const fmt = GET_RTX_FORMAT (code);
2643 int i;
2644
2645 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2646 {
2647 if (fmt[i] == 'e')
2648 {
2649 if (volatile_insn_p (XEXP (x, i)))
2650 return 1;
2651 }
2652 else if (fmt[i] == 'E')
2653 {
2654 int j;
2655 for (j = 0; j < XVECLEN (x, i); j++)
2656 if (volatile_insn_p (XVECEXP (x, i, j)))
2657 return 1;
2658 }
2659 }
2660 }
2661 return 0;
2662 }
2663
2664 /* Nonzero if X contains any volatile memory references,
2665    UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */
2666
2667 int
2668 volatile_refs_p (const_rtx x)
2669 {
2670 const RTX_CODE code = GET_CODE (x);
2671 switch (code)
2672 {
2673 case LABEL_REF:
2674 case SYMBOL_REF:
2675 case CONST:
2676 CASE_CONST_ANY:
2677 case CC0:
2678 case PC:
2679 case REG:
2680 case SCRATCH:
2681 case CLOBBER:
2682 case ADDR_VEC:
2683 case ADDR_DIFF_VEC:
2684 return 0;
2685
2686 case UNSPEC_VOLATILE:
2687 return 1;
2688
2689 case MEM:
2690 case ASM_INPUT:
2691 case ASM_OPERANDS:
2692 if (MEM_VOLATILE_P (x))
2693 return 1;
2694
2695 default:
2696 break;
2697 }
2698
2699 /* Recursively scan the operands of this expression. */
2700
2701 {
2702 const char *const fmt = GET_RTX_FORMAT (code);
2703 int i;
2704
2705 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2706 {
2707 if (fmt[i] == 'e')
2708 {
2709 if (volatile_refs_p (XEXP (x, i)))
2710 return 1;
2711 }
2712 else if (fmt[i] == 'E')
2713 {
2714 int j;
2715 for (j = 0; j < XVECLEN (x, i); j++)
2716 if (volatile_refs_p (XVECEXP (x, i, j)))
2717 return 1;
2718 }
2719 }
2720 }
2721 return 0;
2722 }
2723
2724 /* Similar to above, except that it also rejects register pre/post
2725    increment, decrement and modify, as well as calls.  */
2726
2727 int
2728 side_effects_p (const_rtx x)
2729 {
2730 const RTX_CODE code = GET_CODE (x);
2731 switch (code)
2732 {
2733 case LABEL_REF:
2734 case SYMBOL_REF:
2735 case CONST:
2736 CASE_CONST_ANY:
2737 case CC0:
2738 case PC:
2739 case REG:
2740 case SCRATCH:
2741 case ADDR_VEC:
2742 case ADDR_DIFF_VEC:
2743 case VAR_LOCATION:
2744 return 0;
2745
2746 case CLOBBER:
2747 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2748 when some combination can't be done. If we see one, don't think
2749 that we can simplify the expression. */
2750 return (GET_MODE (x) != VOIDmode);
2751
2752 case PRE_INC:
2753 case PRE_DEC:
2754 case POST_INC:
2755 case POST_DEC:
2756 case PRE_MODIFY:
2757 case POST_MODIFY:
2758 case CALL:
2759 case UNSPEC_VOLATILE:
2760 return 1;
2761
2762 case MEM:
2763 case ASM_INPUT:
2764 case ASM_OPERANDS:
2765 if (MEM_VOLATILE_P (x))
2766 return 1;
2767
2768 default:
2769 break;
2770 }
2771
2772 /* Recursively scan the operands of this expression. */
2773
2774 {
2775 const char *fmt = GET_RTX_FORMAT (code);
2776 int i;
2777
2778 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2779 {
2780 if (fmt[i] == 'e')
2781 {
2782 if (side_effects_p (XEXP (x, i)))
2783 return 1;
2784 }
2785 else if (fmt[i] == 'E')
2786 {
2787 int j;
2788 for (j = 0; j < XVECLEN (x, i); j++)
2789 if (side_effects_p (XVECEXP (x, i, j)))
2790 return 1;
2791 }
2792 }
2793 }
2794 return 0;
2795 }
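/* Illustrative sketch of how the three predicates above differ in
   strictness (hypothetical caller code deciding what may be done with an
   expression X):

     if (volatile_insn_p (x))
       ;  // contains a volatile asm or UNSPEC_VOLATILE: move nothing across it
     else if (volatile_refs_p (x))
       ;  // additionally covers volatile MEMs: do not delete or reorder it
     else if (side_effects_p (x))
       ;  // additionally covers autoincrement, calls, etc.: do not delete it
     else
       ;  // X is a pure value computation (though it may still trap)  */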
2796 \f
2797 /* Return nonzero if evaluating rtx X might cause a trap.
2798    FLAGS controls how to consider MEMs.  A nonzero value means the context
2799 of the access may have changed from the original, such that the
2800 address may have become invalid. */
2801
2802 int
2803 may_trap_p_1 (const_rtx x, unsigned flags)
2804 {
2805 int i;
2806 enum rtx_code code;
2807 const char *fmt;
2808
2809 /* We make no distinction currently, but this function is part of
2810 the internal target-hooks ABI so we keep the parameter as
2811 "unsigned flags". */
2812 bool code_changed = flags != 0;
2813
2814 if (x == 0)
2815 return 0;
2816 code = GET_CODE (x);
2817 switch (code)
2818 {
2819 /* Handle these cases quickly. */
2820 CASE_CONST_ANY:
2821 case SYMBOL_REF:
2822 case LABEL_REF:
2823 case CONST:
2824 case PC:
2825 case CC0:
2826 case REG:
2827 case SCRATCH:
2828 return 0;
2829
2830 case UNSPEC:
2831 return targetm.unspec_may_trap_p (x, flags);
2832
2833 case UNSPEC_VOLATILE:
2834 case ASM_INPUT:
2835 case TRAP_IF:
2836 return 1;
2837
2838 case ASM_OPERANDS:
2839 return MEM_VOLATILE_P (x);
2840
2841 /* Memory ref can trap unless it's a static var or a stack slot. */
2842 case MEM:
2843 /* Recognize specific pattern of stack checking probes. */
2844 if (flag_stack_check
2845 && MEM_VOLATILE_P (x)
2846 && XEXP (x, 0) == stack_pointer_rtx)
2847 return 1;
2848 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2849 reference; moving it out of context such as when moving code
2850 when optimizing, might cause its address to become invalid. */
2851 code_changed
2852 || !MEM_NOTRAP_P (x))
2853 {
2854 poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
2855 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2856 GET_MODE (x), code_changed);
2857 }
2858
2859 return 0;
2860
2861 /* Division by a non-constant might trap. */
2862 case DIV:
2863 case MOD:
2864 case UDIV:
2865 case UMOD:
2866 if (HONOR_SNANS (x))
2867 return 1;
2868 if (FLOAT_MODE_P (GET_MODE (x)))
2869 return flag_trapping_math;
2870 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2871 return 1;
2872 if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
2873 {
2874 /* For CONST_VECTOR, return 1 if any element is or might be zero. */
2875 unsigned int n_elts;
2876 rtx op = XEXP (x, 1);
2877 if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
2878 {
2879 if (!CONST_VECTOR_DUPLICATE_P (op))
2880 return 1;
2881 for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
2882 if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
2883 return 1;
2884 }
2885 else
2886 for (unsigned i = 0; i < n_elts; i++)
2887 if (CONST_VECTOR_ELT (op, i) == const0_rtx)
2888 return 1;
2889 }
2890 break;
2891
2892 case EXPR_LIST:
2893 /* An EXPR_LIST is used to represent a function call. This
2894 certainly may trap. */
2895 return 1;
2896
2897 case GE:
2898 case GT:
2899 case LE:
2900 case LT:
2901 case LTGT:
2902 case COMPARE:
2903 /* Some floating point comparisons may trap. */
2904 if (!flag_trapping_math)
2905 break;
2906 /* ??? There is no machine independent way to check for tests that trap
2907 when COMPARE is used, though many targets do make this distinction.
2908 For instance, sparc uses CCFPE for compares which generate exceptions
2909 and CCFP for compares which do not generate exceptions. */
2910 if (HONOR_NANS (x))
2911 return 1;
2912 /* But often the compare has some CC mode, so check operand
2913 modes as well. */
2914 if (HONOR_NANS (XEXP (x, 0))
2915 || HONOR_NANS (XEXP (x, 1)))
2916 return 1;
2917 break;
2918
2919 case EQ:
2920 case NE:
2921 if (HONOR_SNANS (x))
2922 return 1;
2923 /* Often comparison is CC mode, so check operand modes. */
2924 if (HONOR_SNANS (XEXP (x, 0))
2925 || HONOR_SNANS (XEXP (x, 1)))
2926 return 1;
2927 break;
2928
2929 case FIX:
2930 /* Conversion of floating point might trap. */
2931 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2932 return 1;
2933 break;
2934
2935 case NEG:
2936 case ABS:
2937 case SUBREG:
2938 case VEC_MERGE:
2939 case VEC_SELECT:
2940 case VEC_CONCAT:
2941 case VEC_DUPLICATE:
2942 /* These operations don't trap even with floating point. */
2943 break;
2944
2945 default:
2946 /* Any floating arithmetic may trap. */
2947 if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2948 return 1;
2949 }
2950
2951 fmt = GET_RTX_FORMAT (code);
2952 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2953 {
2954 if (fmt[i] == 'e')
2955 {
2956 if (may_trap_p_1 (XEXP (x, i), flags))
2957 return 1;
2958 }
2959 else if (fmt[i] == 'E')
2960 {
2961 int j;
2962 for (j = 0; j < XVECLEN (x, i); j++)
2963 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2964 return 1;
2965 }
2966 }
2967 return 0;
2968 }
2969
2970 /* Return nonzero if evaluating rtx X might cause a trap. */
2971
2972 int
2973 may_trap_p (const_rtx x)
2974 {
2975 return may_trap_p_1 (x, 0);
2976 }
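/* Illustrative sketch (hypothetical caller code): a code-motion pass
   deciding whether the source of a single_set INSN may be evaluated
   speculatively; hoist_insn_before and loop_preheader are invented names.

     rtx set = single_set (insn);
     if (set
         && !may_trap_p (SET_SRC (set))
         && !side_effects_p (SET_SRC (set)))
       hoist_insn_before (insn, loop_preheader);  */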
2977
2978 /* Same as above, but additionally return nonzero if evaluating rtx X might
2979    cause a fault.  We define a fault for the purpose of this function as an
2980 erroneous execution condition that cannot be encountered during the normal
2981 execution of a valid program; the typical example is an unaligned memory
2982 access on a strict alignment machine. The compiler guarantees that it
2983 doesn't generate code that will fault from a valid program, but this
2984 guarantee doesn't mean anything for individual instructions. Consider
2985 the following example:
2986
2987 struct S { int d; union { char *cp; int *ip; }; };
2988
2989 int foo(struct S *s)
2990 {
2991 if (s->d == 1)
2992 return *s->ip;
2993 else
2994 return *s->cp;
2995 }
2996
2997 on a strict alignment machine. In a valid program, foo will never be
2998 invoked on a structure for which d is equal to 1 and the underlying
2999 unique field of the union not aligned on a 4-byte boundary, but the
3000 expression *s->ip might cause a fault if considered individually.
3001
3002 At the RTL level, potentially problematic expressions will almost always
3003 verify may_trap_p; for example, the above dereference can be emitted as
3004 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
3005 However, suppose that foo is inlined in a caller that causes s->cp to
3006 point to a local character variable and guarantees that s->d is not set
3007 to 1; foo may have been effectively translated into pseudo-RTL as:
3008
3009 if ((reg:SI) == 1)
3010 (set (reg:SI) (mem:SI (%fp - 7)))
3011 else
3012 (set (reg:QI) (mem:QI (%fp - 7)))
3013
3014 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
3015 memory reference to a stack slot, but it will certainly cause a fault
3016 on a strict alignment machine. */
3017
3018 int
3019 may_trap_or_fault_p (const_rtx x)
3020 {
3021 return may_trap_p_1 (x, 1);
3022 }
3023 \f
3024 /* Return nonzero if X contains a comparison that is not either EQ or NE,
3025 i.e., an inequality. */
3026
3027 int
3028 inequality_comparisons_p (const_rtx x)
3029 {
3030 const char *fmt;
3031 int len, i;
3032 const enum rtx_code code = GET_CODE (x);
3033
3034 switch (code)
3035 {
3036 case REG:
3037 case SCRATCH:
3038 case PC:
3039 case CC0:
3040 CASE_CONST_ANY:
3041 case CONST:
3042 case LABEL_REF:
3043 case SYMBOL_REF:
3044 return 0;
3045
3046 case LT:
3047 case LTU:
3048 case GT:
3049 case GTU:
3050 case LE:
3051 case LEU:
3052 case GE:
3053 case GEU:
3054 return 1;
3055
3056 default:
3057 break;
3058 }
3059
3060 len = GET_RTX_LENGTH (code);
3061 fmt = GET_RTX_FORMAT (code);
3062
3063 for (i = 0; i < len; i++)
3064 {
3065 if (fmt[i] == 'e')
3066 {
3067 if (inequality_comparisons_p (XEXP (x, i)))
3068 return 1;
3069 }
3070 else if (fmt[i] == 'E')
3071 {
3072 int j;
3073 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3074 if (inequality_comparisons_p (XVECEXP (x, i, j)))
3075 return 1;
3076 }
3077 }
3078
3079 return 0;
3080 }
3081 \f
3082 /* Replace any occurrence of FROM in X with TO.  The function does
3083    not descend into CONST_DOUBLE expressions when replacing.
3084
3085 Note that copying is not done so X must not be shared unless all copies
3086 are to be modified.
3087
3088 ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3089 those pointer-equal ones. */
3090
3091 rtx
3092 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3093 {
3094 int i, j;
3095 const char *fmt;
3096
3097 if (x == from)
3098 return to;
3099
3100 /* Allow this function to make replacements in EXPR_LISTs. */
3101 if (x == 0)
3102 return 0;
3103
3104 if (all_regs
3105 && REG_P (x)
3106 && REG_P (from)
3107 && REGNO (x) == REGNO (from))
3108 {
3109 gcc_assert (GET_MODE (x) == GET_MODE (from));
3110 return to;
3111 }
3112 else if (GET_CODE (x) == SUBREG)
3113 {
3114 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3115
3116 if (CONST_INT_P (new_rtx))
3117 {
3118 x = simplify_subreg (GET_MODE (x), new_rtx,
3119 GET_MODE (SUBREG_REG (x)),
3120 SUBREG_BYTE (x));
3121 gcc_assert (x);
3122 }
3123 else
3124 SUBREG_REG (x) = new_rtx;
3125
3126 return x;
3127 }
3128 else if (GET_CODE (x) == ZERO_EXTEND)
3129 {
3130 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3131
3132 if (CONST_INT_P (new_rtx))
3133 {
3134 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3135 new_rtx, GET_MODE (XEXP (x, 0)));
3136 gcc_assert (x);
3137 }
3138 else
3139 XEXP (x, 0) = new_rtx;
3140
3141 return x;
3142 }
3143
3144 fmt = GET_RTX_FORMAT (GET_CODE (x));
3145 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3146 {
3147 if (fmt[i] == 'e')
3148 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3149 else if (fmt[i] == 'E')
3150 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3151 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3152 from, to, all_regs);
3153 }
3154
3155 return x;
3156 }
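/* Illustrative sketch: rewrite every occurrence of the pseudo OLD inside a
   pattern with NEW_REG (names hypothetical).  Because replace_rtx does no
   copying, the caller copies the pattern first when the original must be
   preserved.

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, old, new_reg, false);  */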
3157 \f
3158 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3159 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3160
3161 void
3162 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3163 {
3164 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3165 rtx x = *loc;
3166 if (JUMP_TABLE_DATA_P (x))
3167 {
3168 x = PATTERN (x);
3169 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3170 int len = GET_NUM_ELEM (vec);
3171 for (int i = 0; i < len; ++i)
3172 {
3173 rtx ref = RTVEC_ELT (vec, i);
3174 if (XEXP (ref, 0) == old_label)
3175 {
3176 XEXP (ref, 0) = new_label;
3177 if (update_label_nuses)
3178 {
3179 ++LABEL_NUSES (new_label);
3180 --LABEL_NUSES (old_label);
3181 }
3182 }
3183 }
3184 return;
3185 }
3186
3187 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3188 field. This is not handled by the iterator because it doesn't
3189 handle unprinted ('0') fields. */
3190 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3191 JUMP_LABEL (x) = new_label;
3192
3193 subrtx_ptr_iterator::array_type array;
3194 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3195 {
3196 rtx *loc = *iter;
3197 if (rtx x = *loc)
3198 {
3199 if (GET_CODE (x) == SYMBOL_REF
3200 && CONSTANT_POOL_ADDRESS_P (x))
3201 {
3202 rtx c = get_pool_constant (x);
3203 if (rtx_referenced_p (old_label, c))
3204 {
3205 /* Create a copy of constant C; replace the label inside
3206 but do not update LABEL_NUSES because uses in constant pool
3207 are not counted. */
3208 rtx new_c = copy_rtx (c);
3209 replace_label (&new_c, old_label, new_label, false);
3210
3211 /* Add the new constant NEW_C to constant pool and replace
3212 the old reference to constant by new reference. */
3213 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3214 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3215 }
3216 }
3217
3218 if ((GET_CODE (x) == LABEL_REF
3219 || GET_CODE (x) == INSN_LIST)
3220 && XEXP (x, 0) == old_label)
3221 {
3222 XEXP (x, 0) = new_label;
3223 if (update_label_nuses)
3224 {
3225 ++LABEL_NUSES (new_label);
3226 --LABEL_NUSES (old_label);
3227 }
3228 }
3229 }
3230 }
3231 }
3232
3233 void
3234 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3235 rtx_insn *new_label, bool update_label_nuses)
3236 {
3237 rtx insn_as_rtx = insn;
3238 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3239 gcc_checking_assert (insn_as_rtx == insn);
3240 }
3241
3242 /* Return true if X is referenced in BODY. */
3243
3244 bool
3245 rtx_referenced_p (const_rtx x, const_rtx body)
3246 {
3247 subrtx_iterator::array_type array;
3248 FOR_EACH_SUBRTX (iter, array, body, ALL)
3249 if (const_rtx y = *iter)
3250 {
3251 /* Check if a label_ref Y refers to label X. */
3252 if (GET_CODE (y) == LABEL_REF
3253 && LABEL_P (x)
3254 && label_ref_label (y) == x)
3255 return true;
3256
3257 if (rtx_equal_p (x, y))
3258 return true;
3259
3260 /* If Y is a reference to pool constant traverse the constant. */
3261 if (GET_CODE (y) == SYMBOL_REF
3262 && CONSTANT_POOL_ADDRESS_P (y))
3263 iter.substitute (get_pool_constant (y));
3264 }
3265 return false;
3266 }
3267
3268 /* If INSN is a tablejump, return true and store the label preceding the jump
3269    table in *LABELP and the jump table in *TABLEP.  LABELP and TABLEP may be NULL.  */
3270
3271 bool
3272 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3273 rtx_jump_table_data **tablep)
3274 {
3275 if (!JUMP_P (insn))
3276 return false;
3277
3278 rtx target = JUMP_LABEL (insn);
3279 if (target == NULL_RTX || ANY_RETURN_P (target))
3280 return false;
3281
3282 rtx_insn *label = as_a<rtx_insn *> (target);
3283 rtx_insn *table = next_insn (label);
3284 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3285 return false;
3286
3287 if (labelp)
3288 *labelp = label;
3289 if (tablep)
3290 *tablep = as_a <rtx_jump_table_data *> (table);
3291 return true;
3292 }
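/* Illustrative sketch (hypothetical caller code): walk the case targets of
   a tablejump INSN; mark_label_used is an invented helper.

     rtx_insn *label;
     rtx_jump_table_data *table;
     if (tablejump_p (insn, &label, &table))
       {
         rtvec vec = table->get_labels ();
         for (int i = 0; i < GET_NUM_ELEM (vec); i++)
           // Each element is a LABEL_REF to one case target.
           mark_label_used (label_ref_label (RTVEC_ELT (vec, i)));
       }  */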
3293
3294 /* For INSN known to satisfy tablejump_p, determine if it actually is a
3295 CASESI. Return the insn pattern if so, NULL_RTX otherwise. */
3296
3297 rtx
3298 tablejump_casesi_pattern (const rtx_insn *insn)
3299 {
3300 rtx tmp;
3301
3302 if ((tmp = single_set (insn)) != NULL
3303 && SET_DEST (tmp) == pc_rtx
3304 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3305 && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
3306 return tmp;
3307
3308 return NULL_RTX;
3309 }
3310
3311 /* A subroutine of computed_jump_p.  Return 1 if X contains a REG or MEM or
3312    a constant that is not in the constant pool, ignoring the condition
3313    of an IF_THEN_ELSE.  */
3314
3315 static int
3316 computed_jump_p_1 (const_rtx x)
3317 {
3318 const enum rtx_code code = GET_CODE (x);
3319 int i, j;
3320 const char *fmt;
3321
3322 switch (code)
3323 {
3324 case LABEL_REF:
3325 case PC:
3326 return 0;
3327
3328 case CONST:
3329 CASE_CONST_ANY:
3330 case SYMBOL_REF:
3331 case REG:
3332 return 1;
3333
3334 case MEM:
3335 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3336 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3337
3338 case IF_THEN_ELSE:
3339 return (computed_jump_p_1 (XEXP (x, 1))
3340 || computed_jump_p_1 (XEXP (x, 2)));
3341
3342 default:
3343 break;
3344 }
3345
3346 fmt = GET_RTX_FORMAT (code);
3347 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3348 {
3349 if (fmt[i] == 'e'
3350 && computed_jump_p_1 (XEXP (x, i)))
3351 return 1;
3352
3353 else if (fmt[i] == 'E')
3354 for (j = 0; j < XVECLEN (x, i); j++)
3355 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3356 return 1;
3357 }
3358
3359 return 0;
3360 }
3361
3362 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3363
3364 Tablejumps and casesi insns are not considered indirect jumps;
3365 we can recognize them by a (use (label_ref)). */
3366
3367 int
3368 computed_jump_p (const rtx_insn *insn)
3369 {
3370 int i;
3371 if (JUMP_P (insn))
3372 {
3373 rtx pat = PATTERN (insn);
3374
3375 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3376 if (JUMP_LABEL (insn) != NULL)
3377 return 0;
3378
3379 if (GET_CODE (pat) == PARALLEL)
3380 {
3381 int len = XVECLEN (pat, 0);
3382 int has_use_labelref = 0;
3383
3384 for (i = len - 1; i >= 0; i--)
3385 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3386 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3387 == LABEL_REF))
3388 {
3389 has_use_labelref = 1;
3390 break;
3391 }
3392
3393 if (! has_use_labelref)
3394 for (i = len - 1; i >= 0; i--)
3395 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3396 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3397 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3398 return 1;
3399 }
3400 else if (GET_CODE (pat) == SET
3401 && SET_DEST (pat) == pc_rtx
3402 && computed_jump_p_1 (SET_SRC (pat)))
3403 return 1;
3404 }
3405 return 0;
3406 }
3407
3408 \f
3409
3410 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3411 the equivalent add insn and pass the result to FN, using DATA as the
3412 final argument. */
3413
3414 static int
3415 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3416 {
3417 rtx x = XEXP (mem, 0);
3418 switch (GET_CODE (x))
3419 {
3420 case PRE_INC:
3421 case POST_INC:
3422 {
3423 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3424 rtx r1 = XEXP (x, 0);
3425 rtx c = gen_int_mode (size, GET_MODE (r1));
3426 return fn (mem, x, r1, r1, c, data);
3427 }
3428
3429 case PRE_DEC:
3430 case POST_DEC:
3431 {
3432 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3433 rtx r1 = XEXP (x, 0);
3434 rtx c = gen_int_mode (-size, GET_MODE (r1));
3435 return fn (mem, x, r1, r1, c, data);
3436 }
3437
3438 case PRE_MODIFY:
3439 case POST_MODIFY:
3440 {
3441 rtx r1 = XEXP (x, 0);
3442 rtx add = XEXP (x, 1);
3443 return fn (mem, x, r1, add, NULL, data);
3444 }
3445
3446 default:
3447 gcc_unreachable ();
3448 }
3449 }
3450
3451 /* Traverse X looking for MEMs that have autoinc addresses.
3452 For each such autoinc operation found, call FN, passing it
3453 the innermost enclosing MEM, the operation itself, the RTX modified
3454 by the operation, two RTXs (the second may be NULL) that, once
3455 added, represent the value to be held by the modified RTX
3456 afterwards, and DATA. FN is to return 0 to continue the
3457 traversal or any other value to have it returned to the caller of
3458 for_each_inc_dec. */
3459
3460 int
3461 for_each_inc_dec (rtx x,
3462 for_each_inc_dec_fn fn,
3463 void *data)
3464 {
3465 subrtx_var_iterator::array_type array;
3466 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3467 {
3468 rtx mem = *iter;
3469 if (mem
3470 && MEM_P (mem)
3471 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3472 {
3473 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3474 if (res != 0)
3475 return res;
3476 iter.skip_subrtxes ();
3477 }
3478 }
3479 return 0;
3480 }
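/* Illustrative sketch: count the auto-increment addresses in the pattern
   of INSN (the callback and counter are names invented for this example).

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       // DEST, SRC and (if non-null) SRCOFF describe the equivalent add.
       (*(int *) data)++;
       return 0;  // zero means: keep traversing
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &n);  */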
3481
3482 \f
3483 /* Searches X for any reference to REGNO, returning the rtx of the
3484 reference found if any. Otherwise, returns NULL_RTX. */
3485
3486 rtx
3487 regno_use_in (unsigned int regno, rtx x)
3488 {
3489 const char *fmt;
3490 int i, j;
3491 rtx tem;
3492
3493 if (REG_P (x) && REGNO (x) == regno)
3494 return x;
3495
3496 fmt = GET_RTX_FORMAT (GET_CODE (x));
3497 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3498 {
3499 if (fmt[i] == 'e')
3500 {
3501 if ((tem = regno_use_in (regno, XEXP (x, i))))
3502 return tem;
3503 }
3504 else if (fmt[i] == 'E')
3505 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3506 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3507 return tem;
3508 }
3509
3510 return NULL_RTX;
3511 }
3512
3513 /* Return a value indicating whether OP, an operand of a commutative
3514 operation, is preferred as the first or second operand. The more
3515 positive the value, the stronger the preference for being the first
3516 operand. */
3517
3518 int
3519 commutative_operand_precedence (rtx op)
3520 {
3521 enum rtx_code code = GET_CODE (op);
3522
3523 /* Constants always become the second operand. Prefer "nice" constants. */
3524 if (code == CONST_INT)
3525 return -10;
3526 if (code == CONST_WIDE_INT)
3527 return -9;
3528 if (code == CONST_POLY_INT)
3529 return -8;
3530 if (code == CONST_DOUBLE)
3531 return -8;
3532 if (code == CONST_FIXED)
3533 return -8;
3534 op = avoid_constant_pool_reference (op);
3535 code = GET_CODE (op);
3536
3537 switch (GET_RTX_CLASS (code))
3538 {
3539 case RTX_CONST_OBJ:
3540 if (code == CONST_INT)
3541 return -7;
3542 if (code == CONST_WIDE_INT)
3543 return -6;
3544 if (code == CONST_POLY_INT)
3545 return -5;
3546 if (code == CONST_DOUBLE)
3547 return -5;
3548 if (code == CONST_FIXED)
3549 return -5;
3550 return -4;
3551
3552 case RTX_EXTRA:
3553 /* SUBREGs of objects should come second. */
3554 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3555 return -3;
3556 return 0;
3557
3558 case RTX_OBJ:
3559 /* Complex expressions should be the first, so decrease priority
3560 	 of objects.  Prefer pointer objects over non-pointer objects.  */
3561 if ((REG_P (op) && REG_POINTER (op))
3562 || (MEM_P (op) && MEM_POINTER (op)))
3563 return -1;
3564 return -2;
3565
3566 case RTX_COMM_ARITH:
3567 /* Prefer operands that are themselves commutative to be first.
3568 This helps to make things linear. In particular,
3569 (and (and (reg) (reg)) (not (reg))) is canonical. */
3570 return 4;
3571
3572 case RTX_BIN_ARITH:
3573 /* If only one operand is a binary expression, it will be the first
3574 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3575 is canonical, although it will usually be further simplified. */
3576 return 2;
3577
3578 case RTX_UNARY:
3579 /* Then prefer NEG and NOT. */
3580 if (code == NEG || code == NOT)
3581 return 1;
3582 /* FALLTHRU */
3583
3584 default:
3585 return 0;
3586 }
3587 }
3588
3589 /* Return 1 iff it is necessary to swap the operands of a commutative
3590    operation in order to canonicalize the expression.  */
3591
3592 bool
3593 swap_commutative_operands_p (rtx x, rtx y)
3594 {
3595 return (commutative_operand_precedence (x)
3596 < commutative_operand_precedence (y));
3597 }
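/* Illustrative sketch: callers typically canonicalize the operands of a
   commutative operation before building it, along these lines.

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);
     // Any "nicer" operand (e.g. a constant) is now the second one.  */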
3598
3599 /* Return 1 if X is an autoincrement side effect and the register is
3600 not the stack pointer. */
3601 int
3602 auto_inc_p (const_rtx x)
3603 {
3604 switch (GET_CODE (x))
3605 {
3606 case PRE_INC:
3607 case POST_INC:
3608 case PRE_DEC:
3609 case POST_DEC:
3610 case PRE_MODIFY:
3611 case POST_MODIFY:
3612 /* There are no REG_INC notes for SP. */
3613 if (XEXP (x, 0) != stack_pointer_rtx)
3614 return 1;
3615 default:
3616 break;
3617 }
3618 return 0;
3619 }
3620
3621 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3622 int
3623 loc_mentioned_in_p (rtx *loc, const_rtx in)
3624 {
3625 enum rtx_code code;
3626 const char *fmt;
3627 int i, j;
3628
3629 if (!in)
3630 return 0;
3631
3632 code = GET_CODE (in);
3633 fmt = GET_RTX_FORMAT (code);
3634 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3635 {
3636 if (fmt[i] == 'e')
3637 {
3638 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3639 return 1;
3640 }
3641 else if (fmt[i] == 'E')
3642 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3643 if (loc == &XVECEXP (in, i, j)
3644 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3645 return 1;
3646 }
3647 return 0;
3648 }
3649
3650 /* Reinterpret a subreg as a bit extraction from an integer and return
3651 the position of the least significant bit of the extracted value.
3652 In other words, if the extraction were performed as a shift right
3653 and mask, return the number of bits to shift right.
3654
3655 The outer value of the subreg has OUTER_BYTES bytes and starts at
3656 byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes. */
3657
3658 poly_uint64
3659 subreg_size_lsb (poly_uint64 outer_bytes,
3660 poly_uint64 inner_bytes,
3661 poly_uint64 subreg_byte)
3662 {
3663 poly_uint64 subreg_end, trailing_bytes, byte_pos;
3664
3665 /* A paradoxical subreg begins at bit position 0. */
3666 gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3667 if (maybe_gt (outer_bytes, inner_bytes))
3668 {
3669 gcc_checking_assert (known_eq (subreg_byte, 0U));
3670 return 0;
3671 }
3672
3673 subreg_end = subreg_byte + outer_bytes;
3674 trailing_bytes = inner_bytes - subreg_end;
3675 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3676 byte_pos = trailing_bytes;
3677 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3678 byte_pos = subreg_byte;
3679 else
3680 {
3681 /* When bytes and words have opposite endianness, we must be able
3682 to split offsets into words and bytes at compile time. */
3683 poly_uint64 leading_word_part
3684 = force_align_down (subreg_byte, UNITS_PER_WORD);
3685 poly_uint64 trailing_word_part
3686 = force_align_down (trailing_bytes, UNITS_PER_WORD);
3687 /* If the subreg crosses a word boundary ensure that
3688 it also begins and ends on a word boundary. */
3689 gcc_assert (known_le (subreg_end - leading_word_part,
3690 (unsigned int) UNITS_PER_WORD)
3691 || (known_eq (leading_word_part, subreg_byte)
3692 && known_eq (trailing_word_part, trailing_bytes)));
3693 if (WORDS_BIG_ENDIAN)
3694 byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3695 else
3696 byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3697 }
3698
3699 return byte_pos * BITS_PER_UNIT;
3700 }
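/* Worked example (assuming BITS_PER_UNIT == 8): for (subreg:HI (reg:SI R) 2)
   we have outer_bytes = 2, inner_bytes = 4 and subreg_byte = 2, so

     subreg_end     = 2 + 2 = 4
     trailing_bytes = 4 - 4 = 0
     byte_pos       = 2   (little-endian)  ->  lsb at bit 16
     byte_pos       = 0   (big-endian)     ->  lsb at bit 0  */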
3701
3702 /* Given a subreg X, return the bit offset where the subreg begins
3703 (counting from the least significant bit of the reg). */
3704
3705 poly_uint64
3706 subreg_lsb (const_rtx x)
3707 {
3708 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3709 SUBREG_BYTE (x));
3710 }
3711
3712 /* Return the subreg byte offset for a subreg whose outer value has
3713 OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3714 there are LSB_SHIFT *bits* between the lsb of the outer value and the
3715 lsb of the inner value. This is the inverse of the calculation
3716 performed by subreg_lsb_1 (which converts byte offsets to bit shifts). */
3717
3718 poly_uint64
3719 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3720 poly_uint64 lsb_shift)
3721 {
3722 /* A paradoxical subreg begins at bit position 0. */
3723 gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3724 if (maybe_gt (outer_bytes, inner_bytes))
3725 {
3726 gcc_checking_assert (known_eq (lsb_shift, 0U));
3727 return 0;
3728 }
3729
3730 poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3731 poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3732 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3733 return upper_bytes;
3734 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3735 return lower_bytes;
3736 else
3737 {
3738 /* When bytes and words have opposite endianness, we must be able
3739 to split offsets into words and bytes at compile time. */
3740 poly_uint64 lower_word_part = force_align_down (lower_bytes,
3741 UNITS_PER_WORD);
3742 poly_uint64 upper_word_part = force_align_down (upper_bytes,
3743 UNITS_PER_WORD);
3744 if (WORDS_BIG_ENDIAN)
3745 return upper_word_part + (lower_bytes - lower_word_part);
3746 else
3747 return lower_word_part + (upper_bytes - upper_word_part);
3748 }
3749 }
3750
3751 /* Fill in information about a subreg of a hard register.
3752 xregno - A regno of an inner hard subreg_reg (or what will become one).
3753 xmode - The mode of xregno.
3754 offset - The byte offset.
3755 ymode - The mode of a top level SUBREG (or what may become one).
3756 info - Pointer to structure to fill in.
3757
3758 Rather than considering one particular inner register (and thus one
3759 particular "outer" register) in isolation, this function really uses
3760 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3761 function does not check whether adding INFO->offset to XREGNO gives
3762 a valid hard register; even if INFO->offset + XREGNO is out of range,
3763 there might be another register of the same type that is in range.
3764 Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
3765 the new register, since that can depend on things like whether the final
3766 register number is even or odd. Callers that want to check whether
3767 this particular subreg can be replaced by a simple (reg ...) should
3768 use simplify_subreg_regno. */
3769
3770 void
3771 subreg_get_info (unsigned int xregno, machine_mode xmode,
3772 poly_uint64 offset, machine_mode ymode,
3773 struct subreg_info *info)
3774 {
3775 unsigned int nregs_xmode, nregs_ymode;
3776
3777 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3778
3779 poly_uint64 xsize = GET_MODE_SIZE (xmode);
3780 poly_uint64 ysize = GET_MODE_SIZE (ymode);
3781
3782 bool rknown = false;
3783
3784 /* If the register representation of a non-scalar mode has holes in it,
3785 we expect the scalar units to be concatenated together, with the holes
3786 distributed evenly among the scalar units. Each scalar unit must occupy
3787 at least one register. */
3788 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3789 {
3790 /* As a consequence, we must be dealing with a constant number of
3791 scalars, and thus a constant offset and number of units. */
3792 HOST_WIDE_INT coffset = offset.to_constant ();
3793 HOST_WIDE_INT cysize = ysize.to_constant ();
3794 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3795 unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
3796 scalar_mode xmode_unit = GET_MODE_INNER (xmode);
3797 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3798 gcc_assert (nregs_xmode
3799 == (nunits
3800 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3801 gcc_assert (hard_regno_nregs (xregno, xmode)
3802 == hard_regno_nregs (xregno, xmode_unit) * nunits);
3803
3804 /* You can only ask for a SUBREG of a value with holes in the middle
3805 if you don't cross the holes. (Such a SUBREG should be done by
3806 picking a different register class, or doing it in memory if
3807 necessary.) An example of a value with holes is XCmode on 32-bit
3808 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3809 3 for each part, but in memory it's two 128-bit parts.
3810 Padding is assumed to be at the end (not necessarily the 'high part')
3811 of each unit. */
3812 if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3813 && (coffset / GET_MODE_SIZE (xmode_unit)
3814 != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
3815 {
3816 info->representable_p = false;
3817 rknown = true;
3818 }
3819 }
3820 else
3821 nregs_xmode = hard_regno_nregs (xregno, xmode);
3822
3823 nregs_ymode = hard_regno_nregs (xregno, ymode);
3824
3825 /* Subreg sizes must be ordered, so that we can tell whether they are
3826 partial, paradoxical or complete. */
3827 gcc_checking_assert (ordered_p (xsize, ysize));
3828
3829 /* Paradoxical subregs are otherwise valid. */
3830 if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
3831 {
3832 info->representable_p = true;
3833 /* If this is a big endian paradoxical subreg, which uses more
3834 actual hard registers than the original register, we must
3835 return a negative offset so that we find the proper highpart
3836 of the register.
3837
3838 We assume that the ordering of registers within a multi-register
3839 value has a consistent endianness: if bytes and register words
3840 have different endianness, the hard registers that make up a
3841 multi-register value must be at least word-sized. */
3842 if (REG_WORDS_BIG_ENDIAN)
3843 info->offset = (int) nregs_xmode - (int) nregs_ymode;
3844 else
3845 info->offset = 0;
3846 info->nregs = nregs_ymode;
3847 return;
3848 }
3849
3850 /* If registers store different numbers of bits in the different
3851 modes, we cannot generally form this subreg. */
3852 poly_uint64 regsize_xmode, regsize_ymode;
3853 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3854 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3855 && multiple_p (xsize, nregs_xmode, &regsize_xmode)
3856 && multiple_p (ysize, nregs_ymode, &regsize_ymode))
3857 {
3858 if (!rknown
3859 && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
3860 || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
3861 {
3862 info->representable_p = false;
3863 if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
3864 || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
3865 /* Checked by validate_subreg. We must know at compile time
3866 which inner registers are being accessed. */
3867 gcc_unreachable ();
3868 return;
3869 }
3870 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3871 would go outside of XMODE. */
3872 if (!rknown && maybe_gt (ysize + offset, xsize))
3873 {
3874 info->representable_p = false;
3875 info->nregs = nregs_ymode;
3876 if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
3877 /* Checked by validate_subreg. We must know at compile time
3878 which inner registers are being accessed. */
3879 gcc_unreachable ();
3880 return;
3881 }
3882 /* Quick exit for the simple and common case of extracting whole
3883 subregisters from a multiregister value. */
3884 /* ??? It would be better to integrate this into the code below,
3885 if we can generalize the concept enough and figure out how
3886 odd-sized modes can coexist with the other weird cases we support. */
3887 HOST_WIDE_INT count;
3888 if (!rknown
3889 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3890 && known_eq (regsize_xmode, regsize_ymode)
3891 && constant_multiple_p (offset, regsize_ymode, &count))
3892 {
3893 info->representable_p = true;
3894 info->nregs = nregs_ymode;
3895 info->offset = count;
3896 gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3897 return;
3898 }
3899 }
3900
3901 /* Lowpart subregs are otherwise valid. */
3902 if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
3903 {
3904 info->representable_p = true;
3905 rknown = true;
3906
3907 if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
3908 {
3909 info->offset = 0;
3910 info->nregs = nregs_ymode;
3911 return;
3912 }
3913 }
3914
3915 /* Set NUM_BLOCKS to the number of independently-representable YMODE
3916 values there are in (reg:XMODE XREGNO). We can view the register
3917 as consisting of this number of independent "blocks", where each
3918 block occupies NREGS_YMODE registers and contains exactly one
3919 representable YMODE value. */
3920 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3921 unsigned int num_blocks = nregs_xmode / nregs_ymode;
3922
3923 /* Calculate the number of bytes in each block. This must always
3924 be exact, otherwise we don't know how to verify the constraint.
3925 These conditions may be relaxed but subreg_regno_offset would
3926 need to be redesigned. */
3927 poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
3928
3929 /* Get the number of the first block that contains the subreg and the byte
3930 offset of the subreg from the start of that block. */
3931 unsigned int block_number;
3932 poly_uint64 subblock_offset;
3933 if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
3934 &subblock_offset))
3935 /* Checked by validate_subreg. We must know at compile time which
3936 inner registers are being accessed. */
3937 gcc_unreachable ();
3938
3939 if (!rknown)
3940 {
3941 /* Only the lowpart of each block is representable. */
3942 info->representable_p
3943 = known_eq (subblock_offset,
3944 subreg_size_lowpart_offset (ysize, bytes_per_block));
3945 rknown = true;
3946 }
3947
3948 /* We assume that the ordering of registers within a multi-register
3949 value has a consistent endianness: if bytes and register words
3950 have different endianness, the hard registers that make up a
3951 multi-register value must be at least word-sized. */
3952 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3953 /* The block number we calculated above followed memory endianness.
3954 Convert it to register endianness by counting back from the end.
3955 (Note that, because of the assumption above, each block must be
3956 at least word-sized.) */
3957 info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3958 else
3959 info->offset = block_number * nregs_ymode;
3960 info->nregs = nregs_ymode;
3961 }
3962
3963 /* This function returns the regno offset of a subreg expression.
3964 xregno - A regno of an inner hard subreg_reg (or what will become one).
3965 xmode - The mode of xregno.
3966 offset - The byte offset.
3967 ymode - The mode of a top level SUBREG (or what may become one).
3968 RETURN - The regno offset which would be used. */
3969 unsigned int
3970 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3971 poly_uint64 offset, machine_mode ymode)
3972 {
3973 struct subreg_info info;
3974 subreg_get_info (xregno, xmode, offset, ymode, &info);
3975 return info.offset;
3976 }
3977
3978 /* This function returns true when the offset is representable via
3979 subreg_offset in the given regno.
3980 xregno - A regno of an inner hard subreg_reg (or what will become one).
3981 xmode - The mode of xregno.
3982 offset - The byte offset.
3983 ymode - The mode of a top level SUBREG (or what may become one).
3984 RETURN - Whether the offset is representable. */
3985 bool
3986 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3987 poly_uint64 offset, machine_mode ymode)
3988 {
3989 struct subreg_info info;
3990 subreg_get_info (xregno, xmode, offset, ymode, &info);
3991 return info.representable_p;
3992 }
3993
3994 /* Return the number of a YMODE register to which
3995
3996 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3997
3998 can be simplified. Return -1 if the subreg can't be simplified.
3999
4000 XREGNO is a hard register number. */
4001
4002 int
4003 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
4004 poly_uint64 offset, machine_mode ymode)
4005 {
4006 struct subreg_info info;
4007 unsigned int yregno;
4008
4009 /* Give the backend a chance to disallow the mode change. */
4010 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
4011 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
4012 && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
4013 /* We can use mode change in LRA for some transformations. */
4014 && ! lra_in_progress)
4015 return -1;
4016
4017 /* We shouldn't simplify stack-related registers. */
4018 if ((!reload_completed || frame_pointer_needed)
4019 && xregno == FRAME_POINTER_REGNUM)
4020 return -1;
4021
4022 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4023 && xregno == ARG_POINTER_REGNUM)
4024 return -1;
4025
4026 if (xregno == STACK_POINTER_REGNUM
4027 /* We should convert the hard stack register in LRA if it is
4028 possible. */
4029 && ! lra_in_progress)
4030 return -1;
4031
4032 /* Try to get the register offset. */
4033 subreg_get_info (xregno, xmode, offset, ymode, &info);
4034 if (!info.representable_p)
4035 return -1;
4036
4037 /* Make sure that the offsetted register value is in range. */
4038 yregno = xregno + info.offset;
4039 if (!HARD_REGISTER_NUM_P (yregno))
4040 return -1;
4041
4042 /* See whether (reg:YMODE YREGNO) is valid.
4043
4044 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
4045 This is a kludge to work around how complex FP arguments are passed
4046 on IA-64 and should be fixed. See PR target/49226. */
4047 if (!targetm.hard_regno_mode_ok (yregno, ymode)
4048 && targetm.hard_regno_mode_ok (xregno, xmode))
4049 return -1;
4050
4051 return (int) yregno;
4052 }
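/* Editorial usage sketch (not part of the original source): a caller can
   use simplify_subreg_regno to fold a hard-register SUBREG into a plain
   REG.  The DImode/SImode pairing is hypothetical; a negative return value
   means the subreg cannot be simplified.  */
#if 0
static rtx
fold_hard_subreg_sketch (unsigned int regno)
{
  /* (subreg:SI (reg:DI REGNO) 0) -> (reg:SI new_regno), if valid.  */
  int new_regno = simplify_subreg_regno (regno, DImode, 0, SImode);
  if (new_regno < 0)
    return NULL_RTX;
  return gen_rtx_REG (SImode, new_regno);
}
#endif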
4053
4054 /* Return the final regno that a subreg expression refers to. */
4055 unsigned int
4056 subreg_regno (const_rtx x)
4057 {
4058 unsigned int ret;
4059 rtx subreg = SUBREG_REG (x);
4060 int regno = REGNO (subreg);
4061
4062 ret = regno + subreg_regno_offset (regno,
4063 GET_MODE (subreg),
4064 SUBREG_BYTE (x),
4065 GET_MODE (x));
4066 return ret;
4067
4068 }
4069
4070 /* Return the number of registers that a subreg expression refers
4071 to. */
4072 unsigned int
4073 subreg_nregs (const_rtx x)
4074 {
4075 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4076 }
4077
4078 /* Return the number of registers that the subreg expression X, whose
4079 inner register has number REGNO, refers to. This is a copy of
4080 subreg_nregs above, changed so that the regno can be passed in. */
4081
4082 unsigned int
4083 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4084 {
4085 struct subreg_info info;
4086 rtx subreg = SUBREG_REG (x);
4087
4088 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4089 &info);
4090 return info.nregs;
4091 }
4092
4093 struct parms_set_data
4094 {
4095 int nregs;
4096 HARD_REG_SET regs;
4097 };
4098
4099 /* Helper function for noticing stores to parameter registers. */
4100 static void
4101 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4102 {
4103 struct parms_set_data *const d = (struct parms_set_data *) data;
4104 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4105 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4106 {
4107 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4108 d->nregs--;
4109 }
4110 }
4111
4112 /* Look backward for the first parameter to be loaded.
4113 Note that loads of all parameters will not necessarily be
4114 found if CSE has eliminated some of them (e.g., an argument
4115 to the outer function is passed down as a parameter).
4116 The search does not continue past BOUNDARY. */
4117 rtx_insn *
4118 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4119 {
4120 struct parms_set_data parm;
4121 rtx p;
4122 rtx_insn *before, *first_set;
4123
4124 /* Since different machines initialize their parameter registers
4125 in different orders, assume nothing. Collect the set of all
4126 parameter registers. */
4127 CLEAR_HARD_REG_SET (parm.regs);
4128 parm.nregs = 0;
4129 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4130 if (GET_CODE (XEXP (p, 0)) == USE
4131 && REG_P (XEXP (XEXP (p, 0), 0))
4132 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4133 {
4134 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4135
4136 /* We only care about registers which can hold function
4137 arguments. */
4138 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4139 continue;
4140
4141 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4142 parm.nregs++;
4143 }
4144 before = call_insn;
4145 first_set = call_insn;
4146
4147 /* Search backward for the first set of a register in this set. */
4148 while (parm.nregs && before != boundary)
4149 {
4150 before = PREV_INSN (before);
4151
4152 /* It is possible that some loads got CSEed from one call to
4153 another. Stop in that case. */
4154 if (CALL_P (before))
4155 break;
4156
4157 /* Our caller must either ensure that we will find all sets
4158 (in case the code has not been optimized yet), or take care
4159 of possible labels by setting BOUNDARY to the preceding
4160 CODE_LABEL. */
4161 if (LABEL_P (before))
4162 {
4163 gcc_assert (before == boundary);
4164 break;
4165 }
4166
4167 if (INSN_P (before))
4168 {
4169 int nregs_old = parm.nregs;
4170 note_stores (before, parms_set, &parm);
4171 /* If we found something that did not set a parameter reg,
4172 we're done. Do not keep going, as that might result
4173 in hoisting an insn before the setting of a pseudo
4174 that is used by the hoisted insn. */
4175 if (nregs_old != parm.nregs)
4176 first_set = before;
4177 else
4178 break;
4179 }
4180 }
4181 return first_set;
4182 }
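/* Editorial usage sketch (not part of the original source): a typical
   caller wants to place new code before the argument set-up of a call
   rather than directly before the call itself.  The helper below is
   hypothetical; BLOCK_FOR_INSN, BB_HEAD and emit_insn_before are the
   usual GCC primitives.  */
#if 0
static void
emit_before_argument_setup_sketch (rtx seq, rtx_insn *call_insn)
{
  basic_block bb = BLOCK_FOR_INSN (call_insn);
  rtx_insn *before = find_first_parameter_load (call_insn, BB_HEAD (bb));
  emit_insn_before (seq, before);
}
#endif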
4183
4184 /* Return true if we should avoid inserting code between INSN and preceding
4185 call instruction. */
4186
4187 bool
4188 keep_with_call_p (const rtx_insn *insn)
4189 {
4190 rtx set;
4191
4192 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4193 {
4194 if (REG_P (SET_DEST (set))
4195 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4196 && fixed_regs[REGNO (SET_DEST (set))]
4197 && general_operand (SET_SRC (set), VOIDmode))
4198 return true;
4199 if (REG_P (SET_SRC (set))
4200 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4201 && REG_P (SET_DEST (set))
4202 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4203 return true;
4204 /* There may be a stack pop just after the call and before the store
4205 of the return register. Search for the actual store when deciding
4206 if we can break or not. */
4207 if (SET_DEST (set) == stack_pointer_rtx)
4208 {
4209 /* This CONST_CAST is okay because next_nonnote_insn just
4210 returns its argument and we assign it to a const_rtx
4211 variable. */
4212 const rtx_insn *i2
4213 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4214 if (i2 && keep_with_call_p (i2))
4215 return true;
4216 }
4217 }
4218 return false;
4219 }
4220
4221 /* Return true if LABEL is a target of JUMP_INSN. This applies only
4222 to non-complex jumps. That is, direct unconditional, conditional,
4223 and tablejumps, but not computed jumps or returns. It also does
4224 not apply to the fallthru case of a conditional jump. */
4225
4226 bool
4227 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4228 {
4229 rtx tmp = JUMP_LABEL (jump_insn);
4230 rtx_jump_table_data *table;
4231
4232 if (label == tmp)
4233 return true;
4234
4235 if (tablejump_p (jump_insn, NULL, &table))
4236 {
4237 rtvec vec = table->get_labels ();
4238 int i, veclen = GET_NUM_ELEM (vec);
4239
4240 for (i = 0; i < veclen; ++i)
4241 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4242 return true;
4243 }
4244
4245 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4246 return true;
4247
4248 return false;
4249 }
4250
4251 \f
4252 /* Return an estimate of the cost of computing rtx X.
4253 One use is in cse, to decide which expression to keep in the hash table.
4254 Another is in rtl generation, to pick the cheapest way to multiply.
4255 Other uses like the latter are expected in the future.
4256
4257 X appears as operand OPNO in an expression with code OUTER_CODE.
4258 SPEED specifies whether costs optimized for speed or size should
4259 be returned. */
4260
4261 int
4262 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4263 int opno, bool speed)
4264 {
4265 int i, j;
4266 enum rtx_code code;
4267 const char *fmt;
4268 int total;
4269 int factor;
4270
4271 if (x == 0)
4272 return 0;
4273
4274 if (GET_MODE (x) != VOIDmode)
4275 mode = GET_MODE (x);
4276
4277 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4278 many insns, taking N times as long. */
4279 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4280 if (factor == 0)
4281 factor = 1;
4282
4283 /* Compute the default costs of certain things.
4284 Note that targetm.rtx_costs can override the defaults. */
4285
4286 code = GET_CODE (x);
4287 switch (code)
4288 {
4289 case MULT:
4290 /* Multiplication has time-complexity O(N*N), where N is the
4291 number of units (translated from digits) when using
4292 schoolbook long multiplication. */
4293 total = factor * factor * COSTS_N_INSNS (5);
4294 break;
4295 case DIV:
4296 case UDIV:
4297 case MOD:
4298 case UMOD:
4299 /* Similarly, complexity for schoolbook long division. */
4300 total = factor * factor * COSTS_N_INSNS (7);
4301 break;
4302 case USE:
4303 /* Used in combine.c as a marker. */
4304 total = 0;
4305 break;
4306 case SET:
4307 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4308 the mode for the factor. */
4309 mode = GET_MODE (SET_DEST (x));
4310 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4311 if (factor == 0)
4312 factor = 1;
4313 /* FALLTHRU */
4314 default:
4315 total = factor * COSTS_N_INSNS (1);
4316 }
4317
4318 switch (code)
4319 {
4320 case REG:
4321 return 0;
4322
4323 case SUBREG:
4324 total = 0;
4325 /* If we can't tie these modes, make this expensive. The larger
4326 the mode, the more expensive it is. */
4327 if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4328 return COSTS_N_INSNS (2 + factor);
4329 break;
4330
4331 case TRUNCATE:
4332 if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4333 {
4334 total = 0;
4335 break;
4336 }
4337 /* FALLTHRU */
4338 default:
4339 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4340 return total;
4341 break;
4342 }
4343
4344 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4345 which is already in total. */
4346
4347 fmt = GET_RTX_FORMAT (code);
4348 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4349 if (fmt[i] == 'e')
4350 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4351 else if (fmt[i] == 'E')
4352 for (j = 0; j < XVECLEN (x, i); j++)
4353 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4354
4355 return total;
4356 }
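/* Editorial worked example (not part of the original source), assuming a
   word-sized mode (FACTOR == 1) and a target whose rtx_costs hook declines
   to override the defaults: the REG operands cost 0, the MULT costs
   COSTS_N_INSNS (5) and the PLUS adds COSTS_N_INSNS (1), so a multiply-add
   totals COSTS_N_INSNS (6).  */
#if 0
static void
rtx_cost_example_sketch (void)
{
  rtx a = gen_rtx_REG (word_mode, FIRST_PSEUDO_REGISTER);
  rtx b = gen_rtx_REG (word_mode, FIRST_PSEUDO_REGISTER + 1);
  rtx madd = gen_rtx_PLUS (word_mode, gen_rtx_MULT (word_mode, a, b), b);
  int cost = rtx_cost (madd, word_mode, SET, 1, /*speed=*/true);
  /* Holds only under the assumptions stated above.  */
  gcc_checking_assert (cost == COSTS_N_INSNS (6));
}
#endif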
4357
4358 /* Fill in the structure C with information about both speed and size rtx
4359 costs for X, which is operand OPNO in an expression with code OUTER. */
4360
4361 void
4362 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4363 struct full_rtx_costs *c)
4364 {
4365 c->speed = rtx_cost (x, mode, outer, opno, true);
4366 c->size = rtx_cost (x, mode, outer, opno, false);
4367 }
4368
4369 \f
4370 /* Return the cost of address expression X.
4371 X is expected to be a properly formed address reference.
4372
4373 The SPEED parameter specifies whether costs optimized for speed or for
4374 size should be returned. */
4375
4376 int
4377 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4378 {
4379 /* We may be asked for the cost of various unusual addresses, such as the
4380 operands of a push instruction. It is not worthwhile to complicate the
4381 target hook with such cases. */
4382
4383 if (!memory_address_addr_space_p (mode, x, as))
4384 return 1000;
4385
4386 return targetm.address_cost (x, mode, as, speed);
4387 }
4388
4389 /* If the target doesn't override, compute the cost as with arithmetic. */
4390
4391 int
4392 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4393 {
4394 return rtx_cost (x, Pmode, MEM, 0, speed);
4395 }
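/* Editorial usage sketch (not part of the original source): query the cost
   of a hypothetical base+offset address in the generic address space.
   Whether such an address is even valid is target-specific; invalid
   addresses get the arbitrary large cost 1000 above.  */
#if 0
static int
address_cost_example_sketch (rtx base_reg, bool speed)
{
  rtx addr = gen_rtx_PLUS (Pmode, base_reg, GEN_INT (8));
  return address_cost (addr, SImode, ADDR_SPACE_GENERIC, speed);
}
#endif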
4396 \f
4397
4398 unsigned HOST_WIDE_INT
4399 nonzero_bits (const_rtx x, machine_mode mode)
4400 {
4401 if (mode == VOIDmode)
4402 mode = GET_MODE (x);
4403 scalar_int_mode int_mode;
4404 if (!is_a <scalar_int_mode> (mode, &int_mode))
4405 return GET_MODE_MASK (mode);
4406 return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4407 }
4408
4409 unsigned int
4410 num_sign_bit_copies (const_rtx x, machine_mode mode)
4411 {
4412 if (mode == VOIDmode)
4413 mode = GET_MODE (x);
4414 scalar_int_mode int_mode;
4415 if (!is_a <scalar_int_mode> (mode, &int_mode))
4416 return 1;
4417 return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4418 }
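/* Editorial sketch (not part of the original source): two typical queries.
   For (and:SI X (const_int 255)) the AND case of nonzero_bits1 below
   guarantees that only the low 8 bits can be set, and the AND special case
   of num_sign_bit_copies1 reports 24 known sign-bit copies (the top 24
   bits are zero).  X is an arbitrary SImode rtx.  */
#if 0
static void
nonzero_bits_example_sketch (rtx x)
{
  rtx masked = gen_rtx_AND (SImode, x, GEN_INT (255));
  unsigned HOST_WIDE_INT nz = nonzero_bits (masked, SImode);
  gcc_checking_assert ((nz & ~(unsigned HOST_WIDE_INT) 255) == 0);
  gcc_checking_assert (num_sign_bit_copies (masked, SImode) >= 24);
}
#endif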
4419
4420 /* Return true if nonzero_bits1 might recurse into both operands
4421 of X. */
4422
4423 static inline bool
4424 nonzero_bits_binary_arith_p (const_rtx x)
4425 {
4426 if (!ARITHMETIC_P (x))
4427 return false;
4428 switch (GET_CODE (x))
4429 {
4430 case AND:
4431 case XOR:
4432 case IOR:
4433 case UMIN:
4434 case UMAX:
4435 case SMIN:
4436 case SMAX:
4437 case PLUS:
4438 case MINUS:
4439 case MULT:
4440 case DIV:
4441 case UDIV:
4442 case MOD:
4443 case UMOD:
4444 return true;
4445 default:
4446 return false;
4447 }
4448 }
4449
4450 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4451 It avoids exponential behavior in nonzero_bits1 when X has
4452 identical subexpressions on the first or the second level. */
4453
4454 static unsigned HOST_WIDE_INT
4455 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4456 machine_mode known_mode,
4457 unsigned HOST_WIDE_INT known_ret)
4458 {
4459 if (x == known_x && mode == known_mode)
4460 return known_ret;
4461
4462 /* Try to find identical subexpressions. If found call
4463 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4464 precomputed value for the subexpression as KNOWN_RET. */
4465
4466 if (nonzero_bits_binary_arith_p (x))
4467 {
4468 rtx x0 = XEXP (x, 0);
4469 rtx x1 = XEXP (x, 1);
4470
4471 /* Check the first level. */
4472 if (x0 == x1)
4473 return nonzero_bits1 (x, mode, x0, mode,
4474 cached_nonzero_bits (x0, mode, known_x,
4475 known_mode, known_ret));
4476
4477 /* Check the second level. */
4478 if (nonzero_bits_binary_arith_p (x0)
4479 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4480 return nonzero_bits1 (x, mode, x1, mode,
4481 cached_nonzero_bits (x1, mode, known_x,
4482 known_mode, known_ret));
4483
4484 if (nonzero_bits_binary_arith_p (x1)
4485 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4486 return nonzero_bits1 (x, mode, x0, mode,
4487 cached_nonzero_bits (x0, mode, known_x,
4488 known_mode, known_ret));
4489 }
4490
4491 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4492 }
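/* Editorial note (not part of the original comments): the second-level
   checks above fire on shared rtx objects.  For example, if A is the very
   same (and X Y) rtx in both operands of (plus A (ior A Z)), its nonzero
   bits are computed once and handed to nonzero_bits1 as KNOWN_X/KNOWN_RET,
   so the recursion does not revisit A.  Without this, expressions whose
   subtrees are shared at every level would be walked an exponential number
   of times.  */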
4493
4494 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4495 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4496 is less useful. We can't allow both, because that results in exponential
4497 run time recursion. There is a nullstone testcase that triggered
4498 this. This macro avoids accidental uses of num_sign_bit_copies. */
4499 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4500
4501 /* Given an expression, X, compute which bits in X can be nonzero.
4502 We don't care about bits outside of those defined in MODE.
4503
4504 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4505 an arithmetic operation, we can do better. */
4506
4507 static unsigned HOST_WIDE_INT
4508 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4509 machine_mode known_mode,
4510 unsigned HOST_WIDE_INT known_ret)
4511 {
4512 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4513 unsigned HOST_WIDE_INT inner_nz;
4514 enum rtx_code code = GET_CODE (x);
4515 machine_mode inner_mode;
4516 unsigned int inner_width;
4517 scalar_int_mode xmode;
4518
4519 unsigned int mode_width = GET_MODE_PRECISION (mode);
4520
4521 if (CONST_INT_P (x))
4522 {
4523 if (SHORT_IMMEDIATES_SIGN_EXTEND
4524 && INTVAL (x) > 0
4525 && mode_width < BITS_PER_WORD
4526 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4527 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4528
4529 return UINTVAL (x);
4530 }
4531
4532 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4533 return nonzero;
4534 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4535
4536 /* If X is wider than MODE, use its mode instead. */
4537 if (xmode_width > mode_width)
4538 {
4539 mode = xmode;
4540 nonzero = GET_MODE_MASK (mode);
4541 mode_width = xmode_width;
4542 }
4543
4544 if (mode_width > HOST_BITS_PER_WIDE_INT)
4545 /* Our only callers in this case look for single bit values. So
4546 just return the mode mask. Those tests will then be false. */
4547 return nonzero;
4548
4549 /* If MODE is wider than X, but both are a single word for both the host
4550 and target machines, we can compute this from which bits of the object
4551 might be nonzero in its own mode, taking into account the fact that, on
4552 CISC machines, accessing an object in a wider mode generally causes the
4553 high-order bits to become undefined, so they are not known to be zero.
4554 We extend this reasoning to RISC machines for operations that might not
4555 operate on the full registers. */
4556 if (mode_width > xmode_width
4557 && xmode_width <= BITS_PER_WORD
4558 && xmode_width <= HOST_BITS_PER_WIDE_INT
4559 && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4560 {
4561 nonzero &= cached_nonzero_bits (x, xmode,
4562 known_x, known_mode, known_ret);
4563 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4564 return nonzero;
4565 }
4566
4567 /* Please keep nonzero_bits_binary_arith_p above in sync with
4568 the code in the switch below. */
4569 switch (code)
4570 {
4571 case REG:
4572 #if defined(POINTERS_EXTEND_UNSIGNED)
4573 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4574 all the bits above ptr_mode are known to be zero. */
4575 /* As we do not know which address space the pointer is referring to,
4576 we can do this only if the target does not support different pointer
4577 or address modes depending on the address space. */
4578 if (target_default_pointer_address_modes_p ()
4579 && POINTERS_EXTEND_UNSIGNED
4580 && xmode == Pmode
4581 && REG_POINTER (x)
4582 && !targetm.have_ptr_extend ())
4583 nonzero &= GET_MODE_MASK (ptr_mode);
4584 #endif
4585
4586 /* Include declared information about alignment of pointers. */
4587 /* ??? We don't properly preserve REG_POINTER changes across
4588 pointer-to-integer casts, so we can't trust it except for
4589 things that we know must be pointers. See execute/960116-1.c. */
4590 if ((x == stack_pointer_rtx
4591 || x == frame_pointer_rtx
4592 || x == arg_pointer_rtx)
4593 && REGNO_POINTER_ALIGN (REGNO (x)))
4594 {
4595 unsigned HOST_WIDE_INT alignment
4596 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4597
4598 #ifdef PUSH_ROUNDING
4599 /* If PUSH_ROUNDING is defined, it is possible for the
4600 stack to be momentarily aligned only to that amount,
4601 so we pick the least alignment. */
4602 if (x == stack_pointer_rtx && PUSH_ARGS)
4603 {
4604 poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4605 alignment = MIN (known_alignment (rounded_1), alignment);
4606 }
4607 #endif
4608
4609 nonzero &= ~(alignment - 1);
4610 }
4611
4612 {
4613 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4614 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4615 &nonzero_for_hook);
4616
4617 if (new_rtx)
4618 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4619 known_mode, known_ret);
4620
4621 return nonzero_for_hook;
4622 }
4623
4624 case MEM:
4625 /* In many, if not most, RISC machines, reading a byte from memory
4626 zeros the rest of the register. Noticing that fact saves a lot
4627 of extra zero-extends. */
4628 if (load_extend_op (xmode) == ZERO_EXTEND)
4629 nonzero &= GET_MODE_MASK (xmode);
4630 break;
4631
4632 case EQ: case NE:
4633 case UNEQ: case LTGT:
4634 case GT: case GTU: case UNGT:
4635 case LT: case LTU: case UNLT:
4636 case GE: case GEU: case UNGE:
4637 case LE: case LEU: case UNLE:
4638 case UNORDERED: case ORDERED:
4639 /* If this produces an integer result, we know which bits are set.
4640 Code here used to clear bits outside the mode of X, but that is
4641 now done above. */
4642 /* Mind that MODE is the mode the caller wants to look at this
4643 operation in, and not the actual operation mode. We can wind
4644 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4645 that describes the results of a vector compare. */
4646 if (GET_MODE_CLASS (xmode) == MODE_INT
4647 && mode_width <= HOST_BITS_PER_WIDE_INT)
4648 nonzero = STORE_FLAG_VALUE;
4649 break;
4650
4651 case NEG:
4652 #if 0
4653 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4654 and num_sign_bit_copies. */
4655 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4656 nonzero = 1;
4657 #endif
4658
4659 if (xmode_width < mode_width)
4660 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4661 break;
4662
4663 case ABS:
4664 #if 0
4665 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4666 and num_sign_bit_copies. */
4667 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4668 nonzero = 1;
4669 #endif
4670 break;
4671
4672 case TRUNCATE:
4673 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4674 known_x, known_mode, known_ret)
4675 & GET_MODE_MASK (mode));
4676 break;
4677
4678 case ZERO_EXTEND:
4679 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4680 known_x, known_mode, known_ret);
4681 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4682 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4683 break;
4684
4685 case SIGN_EXTEND:
4686 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4687 Otherwise, show all the bits in the outer mode but not the inner
4688 may be nonzero. */
4689 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4690 known_x, known_mode, known_ret);
4691 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4692 {
4693 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4694 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4695 inner_nz |= (GET_MODE_MASK (mode)
4696 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4697 }
4698
4699 nonzero &= inner_nz;
4700 break;
4701
4702 case AND:
4703 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4704 known_x, known_mode, known_ret)
4705 & cached_nonzero_bits (XEXP (x, 1), mode,
4706 known_x, known_mode, known_ret);
4707 break;
4708
4709 case XOR: case IOR:
4710 case UMIN: case UMAX: case SMIN: case SMAX:
4711 {
4712 unsigned HOST_WIDE_INT nonzero0
4713 = cached_nonzero_bits (XEXP (x, 0), mode,
4714 known_x, known_mode, known_ret);
4715
4716 /* Don't call nonzero_bits for the second time if it cannot change
4717 anything. */
4718 if ((nonzero & nonzero0) != nonzero)
4719 nonzero &= nonzero0
4720 | cached_nonzero_bits (XEXP (x, 1), mode,
4721 known_x, known_mode, known_ret);
4722 }
4723 break;
4724
4725 case PLUS: case MINUS:
4726 case MULT:
4727 case DIV: case UDIV:
4728 case MOD: case UMOD:
4729 /* We can apply the rules of arithmetic to compute the number of
4730 high- and low-order zero bits of these operations. We start by
4731 computing the width (position of the highest-order nonzero bit)
4732 and the number of low-order zero bits for each value. */
4733 {
4734 unsigned HOST_WIDE_INT nz0
4735 = cached_nonzero_bits (XEXP (x, 0), mode,
4736 known_x, known_mode, known_ret);
4737 unsigned HOST_WIDE_INT nz1
4738 = cached_nonzero_bits (XEXP (x, 1), mode,
4739 known_x, known_mode, known_ret);
4740 int sign_index = xmode_width - 1;
4741 int width0 = floor_log2 (nz0) + 1;
4742 int width1 = floor_log2 (nz1) + 1;
4743 int low0 = ctz_or_zero (nz0);
4744 int low1 = ctz_or_zero (nz1);
4745 unsigned HOST_WIDE_INT op0_maybe_minusp
4746 = nz0 & (HOST_WIDE_INT_1U << sign_index);
4747 unsigned HOST_WIDE_INT op1_maybe_minusp
4748 = nz1 & (HOST_WIDE_INT_1U << sign_index);
4749 unsigned int result_width = mode_width;
4750 int result_low = 0;
4751
4752 switch (code)
4753 {
4754 case PLUS:
4755 result_width = MAX (width0, width1) + 1;
4756 result_low = MIN (low0, low1);
4757 break;
4758 case MINUS:
4759 result_low = MIN (low0, low1);
4760 break;
4761 case MULT:
4762 result_width = width0 + width1;
4763 result_low = low0 + low1;
4764 break;
4765 case DIV:
4766 if (width1 == 0)
4767 break;
4768 if (!op0_maybe_minusp && !op1_maybe_minusp)
4769 result_width = width0;
4770 break;
4771 case UDIV:
4772 if (width1 == 0)
4773 break;
4774 result_width = width0;
4775 break;
4776 case MOD:
4777 if (width1 == 0)
4778 break;
4779 if (!op0_maybe_minusp && !op1_maybe_minusp)
4780 result_width = MIN (width0, width1);
4781 result_low = MIN (low0, low1);
4782 break;
4783 case UMOD:
4784 if (width1 == 0)
4785 break;
4786 result_width = MIN (width0, width1);
4787 result_low = MIN (low0, low1);
4788 break;
4789 default:
4790 gcc_unreachable ();
4791 }
4792
4793 if (result_width < mode_width)
4794 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4795
4796 if (result_low > 0)
4797 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4798 }
4799 break;
4800
4801 case ZERO_EXTRACT:
4802 if (CONST_INT_P (XEXP (x, 1))
4803 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4804 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4805 break;
4806
4807 case SUBREG:
4808 /* If this is a SUBREG formed for a promoted variable that has
4809 been zero-extended, we know that at least the high-order bits
4810 are zero, though others might be too. */
4811 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4812 nonzero = GET_MODE_MASK (xmode)
4813 & cached_nonzero_bits (SUBREG_REG (x), xmode,
4814 known_x, known_mode, known_ret);
4815
4816 /* If the inner mode is a single word for both the host and target
4817 machines, we can compute this from which bits of the inner
4818 object might be nonzero. */
4819 inner_mode = GET_MODE (SUBREG_REG (x));
4820 if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
4821 && inner_width <= BITS_PER_WORD
4822 && inner_width <= HOST_BITS_PER_WIDE_INT)
4823 {
4824 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4825 known_x, known_mode, known_ret);
4826
4827 /* On a typical CISC machine, accessing an object in a wider mode
4828 causes the high-order bits to become undefined. So they are
4829 not known to be zero.
4830
4831 On a typical RISC machine, we only have to worry about the way
4832 loads are extended. Otherwise, if we get a reload for the inner
4833 part, it may be loaded from the stack, and then we may lose all
4834 the zero bits that existed before the store to the stack. */
4835 rtx_code extend_op;
4836 if ((!WORD_REGISTER_OPERATIONS
4837 || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4838 ? val_signbit_known_set_p (inner_mode, nonzero)
4839 : extend_op != ZERO_EXTEND)
4840 || !MEM_P (SUBREG_REG (x)))
4841 && xmode_width > inner_width)
4842 nonzero
4843 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4844 }
4845 break;
4846
4847 case ASHIFT:
4848 case ASHIFTRT:
4849 case LSHIFTRT:
4850 case ROTATE:
4851 case ROTATERT:
4852 /* The nonzero bits are in two classes: any bits within MODE
4853 that aren't in xmode are always significant. The rest of the
4854 nonzero bits are those that are significant in the operand of
4855 the shift when shifted the appropriate number of bits. This
4856 shows that high-order bits are cleared by the right shift and
4857 low-order bits by left shifts. */
4858 if (CONST_INT_P (XEXP (x, 1))
4859 && INTVAL (XEXP (x, 1)) >= 0
4860 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4861 && INTVAL (XEXP (x, 1)) < xmode_width)
4862 {
4863 int count = INTVAL (XEXP (x, 1));
4864 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
4865 unsigned HOST_WIDE_INT op_nonzero
4866 = cached_nonzero_bits (XEXP (x, 0), mode,
4867 known_x, known_mode, known_ret);
4868 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4869 unsigned HOST_WIDE_INT outer = 0;
4870
4871 if (mode_width > xmode_width)
4872 outer = (op_nonzero & nonzero & ~mode_mask);
4873
4874 switch (code)
4875 {
4876 case ASHIFT:
4877 inner <<= count;
4878 break;
4879
4880 case LSHIFTRT:
4881 inner >>= count;
4882 break;
4883
4884 case ASHIFTRT:
4885 inner >>= count;
4886
4887 /* If the sign bit may have been nonzero before the shift, we
4888 need to mark all the places it could have been copied to
4889 by the shift as possibly nonzero. */
4890 if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
4891 inner |= (((HOST_WIDE_INT_1U << count) - 1)
4892 << (xmode_width - count));
4893 break;
4894
4895 case ROTATE:
4896 inner = (inner << (count % xmode_width)
4897 | (inner >> (xmode_width - (count % xmode_width))))
4898 & mode_mask;
4899 break;
4900
4901 case ROTATERT:
4902 inner = (inner >> (count % xmode_width)
4903 | (inner << (xmode_width - (count % xmode_width))))
4904 & mode_mask;
4905 break;
4906
4907 default:
4908 gcc_unreachable ();
4909 }
4910
4911 nonzero &= (outer | inner);
4912 }
4913 break;
4914
4915 case FFS:
4916 case POPCOUNT:
4917 /* This is at most the number of bits in the mode. */
4918 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4919 break;
4920
4921 case CLZ:
4922 /* If CLZ has a known value at zero, then the nonzero bits are
4923 that value, plus the number of bits in the mode minus one. */
4924 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4925 nonzero
4926 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4927 else
4928 nonzero = -1;
4929 break;
4930
4931 case CTZ:
4932 /* If CTZ has a known value at zero, then the nonzero bits are
4933 that value, plus the number of bits in the mode minus one. */
4934 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4935 nonzero
4936 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4937 else
4938 nonzero = -1;
4939 break;
4940
4941 case CLRSB:
4942 /* This is at most the number of bits in the mode minus 1. */
4943 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4944 break;
4945
4946 case PARITY:
4947 nonzero = 1;
4948 break;
4949
4950 case IF_THEN_ELSE:
4951 {
4952 unsigned HOST_WIDE_INT nonzero_true
4953 = cached_nonzero_bits (XEXP (x, 1), mode,
4954 known_x, known_mode, known_ret);
4955
4956 /* Don't call nonzero_bits for the second time if it cannot change
4957 anything. */
4958 if ((nonzero & nonzero_true) != nonzero)
4959 nonzero &= nonzero_true
4960 | cached_nonzero_bits (XEXP (x, 2), mode,
4961 known_x, known_mode, known_ret);
4962 }
4963 break;
4964
4965 default:
4966 break;
4967 }
4968
4969 return nonzero;
4970 }
4971
4972 /* See the macro definition above. */
4973 #undef cached_num_sign_bit_copies
4974
4975 \f
4976 /* Return true if num_sign_bit_copies1 might recurse into both operands
4977 of X. */
4978
4979 static inline bool
4980 num_sign_bit_copies_binary_arith_p (const_rtx x)
4981 {
4982 if (!ARITHMETIC_P (x))
4983 return false;
4984 switch (GET_CODE (x))
4985 {
4986 case IOR:
4987 case AND:
4988 case XOR:
4989 case SMIN:
4990 case SMAX:
4991 case UMIN:
4992 case UMAX:
4993 case PLUS:
4994 case MINUS:
4995 case MULT:
4996 return true;
4997 default:
4998 return false;
4999 }
5000 }
5001
5002 /* The function cached_num_sign_bit_copies is a wrapper around
5003 num_sign_bit_copies1. It avoids exponential behavior in
5004 num_sign_bit_copies1 when X has identical subexpressions on the
5005 first or the second level. */
5006
5007 static unsigned int
5008 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
5009 const_rtx known_x, machine_mode known_mode,
5010 unsigned int known_ret)
5011 {
5012 if (x == known_x && mode == known_mode)
5013 return known_ret;
5014
5015 /* Try to find identical subexpressions. If found call
5016 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
5017 the precomputed value for the subexpression as KNOWN_RET. */
5018
5019 if (num_sign_bit_copies_binary_arith_p (x))
5020 {
5021 rtx x0 = XEXP (x, 0);
5022 rtx x1 = XEXP (x, 1);
5023
5024 /* Check the first level. */
5025 if (x0 == x1)
5026 return
5027 num_sign_bit_copies1 (x, mode, x0, mode,
5028 cached_num_sign_bit_copies (x0, mode, known_x,
5029 known_mode,
5030 known_ret));
5031
5032 /* Check the second level. */
5033 if (num_sign_bit_copies_binary_arith_p (x0)
5034 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
5035 return
5036 num_sign_bit_copies1 (x, mode, x1, mode,
5037 cached_num_sign_bit_copies (x1, mode, known_x,
5038 known_mode,
5039 known_ret));
5040
5041 if (num_sign_bit_copies_binary_arith_p (x1)
5042 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
5043 return
5044 num_sign_bit_copies1 (x, mode, x0, mode,
5045 cached_num_sign_bit_copies (x0, mode, known_x,
5046 known_mode,
5047 known_ret));
5048 }
5049
5050 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
5051 }
5052
5053 /* Return the number of bits at the high-order end of X that are known to
5054 be equal to the sign bit. X will be used in mode MODE. The returned
5055 value will always be between 1 and the number of bits in MODE. */
5056
5057 static unsigned int
5058 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
5059 machine_mode known_mode,
5060 unsigned int known_ret)
5061 {
5062 enum rtx_code code = GET_CODE (x);
5063 unsigned int bitwidth = GET_MODE_PRECISION (mode);
5064 int num0, num1, result;
5065 unsigned HOST_WIDE_INT nonzero;
5066
5067 if (CONST_INT_P (x))
5068 {
5069 /* If the constant is negative, take its 1's complement and remask.
5070 Then see how many zero bits we have. */
5071 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5072 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5073 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5074 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5075
5076 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5077 }
5078
5079 scalar_int_mode xmode, inner_mode;
5080 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5081 return 1;
5082
5083 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5084
5085 /* For a smaller mode, just ignore the high bits. */
5086 if (bitwidth < xmode_width)
5087 {
5088 num0 = cached_num_sign_bit_copies (x, xmode,
5089 known_x, known_mode, known_ret);
5090 return MAX (1, num0 - (int) (xmode_width - bitwidth));
5091 }
5092
5093 if (bitwidth > xmode_width)
5094 {
5095 /* If this machine does not do all register operations on the entire
5096 register and MODE is wider than the mode of X, we can say nothing
5097 at all about the high-order bits. We extend this reasoning to RISC
5098 machines for operations that might not operate on full registers. */
5099 if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
5100 return 1;
5101
5102 /* Likewise on machines that do, if the mode of the object is smaller
5103 than a word and loads of that size don't sign extend, we can say
5104 nothing about the high order bits. */
5105 if (xmode_width < BITS_PER_WORD
5106 && load_extend_op (xmode) != SIGN_EXTEND)
5107 return 1;
5108 }
5109
5110 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5111 the code in the switch below. */
5112 switch (code)
5113 {
5114 case REG:
5115
5116 #if defined(POINTERS_EXTEND_UNSIGNED)
5117 /* If pointers extend signed and this is a pointer in Pmode, say that
5118 all the bits above ptr_mode are known to be sign bit copies. */
5119 /* As we do not know which address space the pointer is referring to,
5120 we can do this only if the target does not support different pointer
5121 or address modes depending on the address space. */
5122 if (target_default_pointer_address_modes_p ()
5123 && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5124 && mode == Pmode && REG_POINTER (x)
5125 && !targetm.have_ptr_extend ())
5126 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5127 #endif
5128
5129 {
5130 unsigned int copies_for_hook = 1, copies = 1;
5131 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5132 &copies_for_hook);
5133
5134 if (new_rtx)
5135 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5136 known_mode, known_ret);
5137
5138 if (copies > 1 || copies_for_hook > 1)
5139 return MAX (copies, copies_for_hook);
5140
5141 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
5142 }
5143 break;
5144
5145 case MEM:
5146 /* Some RISC machines sign-extend all loads of smaller than a word. */
5147 if (load_extend_op (xmode) == SIGN_EXTEND)
5148 return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5149 break;
5150
5151 case SUBREG:
5152 /* If this is a SUBREG for a promoted object that is sign-extended
5153 and we are looking at it in a wider mode, we know that at least the
5154 high-order bits are known to be sign bit copies. */
5155
5156 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5157 {
5158 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5159 known_x, known_mode, known_ret);
5160 return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5161 }
5162
5163 if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5164 {
5165 /* For a smaller object, just ignore the high bits. */
5166 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5167 {
5168 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5169 known_x, known_mode,
5170 known_ret);
5171 return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5172 - bitwidth));
5173 }
5174
5175 /* For paradoxical SUBREGs on machines where all register operations
5176 affect the entire register, just look inside. Note that we are
5177 passing MODE to the recursive call, so the number of sign bit
5178 copies will remain relative to that mode, not the inner mode.
5179
5180 This works only if loads sign extend. Otherwise, if we get a
5181 reload for the inner part, it may be loaded from the stack, and
5182 then we lose all sign bit copies that existed before the store
5183 to the stack. */
5184 if (WORD_REGISTER_OPERATIONS
5185 && load_extend_op (inner_mode) == SIGN_EXTEND
5186 && paradoxical_subreg_p (x)
5187 && MEM_P (SUBREG_REG (x)))
5188 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5189 known_x, known_mode, known_ret);
5190 }
5191 break;
5192
5193 case SIGN_EXTRACT:
5194 if (CONST_INT_P (XEXP (x, 1)))
5195 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5196 break;
5197
5198 case SIGN_EXTEND:
5199 if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5200 return (bitwidth - GET_MODE_PRECISION (inner_mode)
5201 + cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5202 known_x, known_mode, known_ret));
5203 break;
5204
5205 case TRUNCATE:
5206 /* For a smaller object, just ignore the high bits. */
5207 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5208 num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5209 known_x, known_mode, known_ret);
5210 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5211 - bitwidth)));
5212
5213 case NOT:
5214 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5215 known_x, known_mode, known_ret);
5216
5217 case ROTATE: case ROTATERT:
5218 /* If we are rotating left by a number of bits less than the number
5219 of sign bit copies, we can just subtract that amount from the
5220 number. */
5221 if (CONST_INT_P (XEXP (x, 1))
5222 && INTVAL (XEXP (x, 1)) >= 0
5223 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5224 {
5225 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5226 known_x, known_mode, known_ret);
5227 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5228 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5229 }
5230 break;
5231
5232 case NEG:
5233 /* In general, this subtracts one sign bit copy. But if the value
5234 is known to be positive, the number of sign bit copies is the
5235 same as that of the input. Finally, if the input has just one bit
5236 that might be nonzero, all the bits are copies of the sign bit. */
5237 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5238 known_x, known_mode, known_ret);
5239 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5240 return num0 > 1 ? num0 - 1 : 1;
5241
5242 nonzero = nonzero_bits (XEXP (x, 0), mode);
5243 if (nonzero == 1)
5244 return bitwidth;
5245
5246 if (num0 > 1
5247 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5248 num0--;
5249
5250 return num0;
5251
5252 case IOR: case AND: case XOR:
5253 case SMIN: case SMAX: case UMIN: case UMAX:
5254 /* Logical operations will preserve the number of sign-bit copies.
5255 MIN and MAX operations always return one of the operands. */
5256 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5257 known_x, known_mode, known_ret);
5258 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5259 known_x, known_mode, known_ret);
5260
5261 /* If num1 is clearing some of the top bits then regardless of
5262 the other term, we are guaranteed to have at least that many
5263 high-order zero bits. */
5264 if (code == AND
5265 && num1 > 1
5266 && bitwidth <= HOST_BITS_PER_WIDE_INT
5267 && CONST_INT_P (XEXP (x, 1))
5268 && (UINTVAL (XEXP (x, 1))
5269 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5270 return num1;
5271
5272 /* Similarly for IOR when setting high-order bits. */
5273 if (code == IOR
5274 && num1 > 1
5275 && bitwidth <= HOST_BITS_PER_WIDE_INT
5276 && CONST_INT_P (XEXP (x, 1))
5277 && (UINTVAL (XEXP (x, 1))
5278 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5279 return num1;
5280
5281 return MIN (num0, num1);
5282
5283 case PLUS: case MINUS:
5284 /* For addition and subtraction, we can have a 1-bit carry. However,
5285 if we are subtracting 1 from a positive number, there will not
5286 be such a carry. Furthermore, if the positive number is known to
5287 be 0 or 1, we know the result is either -1 or 0. */
5288
5289 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5290 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5291 {
5292 nonzero = nonzero_bits (XEXP (x, 0), mode);
5293 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5294 return (nonzero == 1 || nonzero == 0 ? bitwidth
5295 : bitwidth - floor_log2 (nonzero) - 1);
5296 }
5297
5298 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5299 known_x, known_mode, known_ret);
5300 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5301 known_x, known_mode, known_ret);
5302 result = MAX (1, MIN (num0, num1) - 1);
5303
5304 return result;
5305
5306 case MULT:
5307 /* The number of bits of the product is the sum of the number of
5308 bits of both terms. However, unless one of the terms is known
5309 to be positive, we must allow for an additional bit since negating
5310 a negative number can remove one sign bit copy. */
5311
5312 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5313 known_x, known_mode, known_ret);
5314 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5315 known_x, known_mode, known_ret);
5316
5317 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5318 if (result > 0
5319 && (bitwidth > HOST_BITS_PER_WIDE_INT
5320 || (((nonzero_bits (XEXP (x, 0), mode)
5321 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5322 && ((nonzero_bits (XEXP (x, 1), mode)
5323 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5324 != 0))))
5325 result--;
5326
5327 return MAX (1, result);
5328
5329 case UDIV:
5330 /* The result must be <= the first operand. If the first operand
5331 has the high bit set, we know nothing about the number of sign
5332 bit copies. */
5333 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5334 return 1;
5335 else if ((nonzero_bits (XEXP (x, 0), mode)
5336 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5337 return 1;
5338 else
5339 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5340 known_x, known_mode, known_ret);
5341
5342 case UMOD:
5343 /* The result must be <= the second operand. If the second operand
5344 has (or just might have) the high bit set, we know nothing about
5345 the number of sign bit copies. */
5346 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5347 return 1;
5348 else if ((nonzero_bits (XEXP (x, 1), mode)
5349 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5350 return 1;
5351 else
5352 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5353 known_x, known_mode, known_ret);
5354
5355 case DIV:
5356 /* Similar to unsigned division, except that we have to worry about
5357 the case where the divisor is negative, in which case we have
5358 to add 1. */
5359 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5360 known_x, known_mode, known_ret);
5361 if (result > 1
5362 && (bitwidth > HOST_BITS_PER_WIDE_INT
5363 || (nonzero_bits (XEXP (x, 1), mode)
5364 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5365 result--;
5366
5367 return result;
5368
5369 case MOD:
5370 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5371 known_x, known_mode, known_ret);
5372 if (result > 1
5373 && (bitwidth > HOST_BITS_PER_WIDE_INT
5374 || (nonzero_bits (XEXP (x, 1), mode)
5375 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5376 result--;
5377
5378 return result;
5379
5380 case ASHIFTRT:
5381 /* Shifts by a constant add to the number of bits equal to the
5382 sign bit. */
5383 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5384 known_x, known_mode, known_ret);
5385 if (CONST_INT_P (XEXP (x, 1))
5386 && INTVAL (XEXP (x, 1)) > 0
5387 && INTVAL (XEXP (x, 1)) < xmode_width)
5388 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5389
5390 return num0;
5391
5392 case ASHIFT:
5393 /* Left shifts destroy copies. */
5394 if (!CONST_INT_P (XEXP (x, 1))
5395 || INTVAL (XEXP (x, 1)) < 0
5396 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5397 || INTVAL (XEXP (x, 1)) >= xmode_width)
5398 return 1;
5399
5400 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5401 known_x, known_mode, known_ret);
5402 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5403
5404 case IF_THEN_ELSE:
5405 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5406 known_x, known_mode, known_ret);
5407 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5408 known_x, known_mode, known_ret);
5409 return MIN (num0, num1);
5410
5411 case EQ: case NE: case GE: case GT: case LE: case LT:
5412 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5413 case GEU: case GTU: case LEU: case LTU:
5414 case UNORDERED: case ORDERED:
5415 /* If STORE_FLAG_VALUE is negative, take its 1's complement and remask.
5416 Then see how many zero bits we have. */
5417 nonzero = STORE_FLAG_VALUE;
5418 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5419 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5420 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5421
5422 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5423
5424 default:
5425 break;
5426 }
5427
5428 /* If we haven't been able to figure it out by one of the above rules,
5429 see if some of the high-order bits are known to be zero. If so,
5430 count those bits and return one less than that amount. If we can't
5431 safely compute the mask for this mode, always return BITWIDTH. */
5432
5433 bitwidth = GET_MODE_PRECISION (mode);
5434 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5435 return 1;
5436
5437 nonzero = nonzero_bits (x, mode);
5438 return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5439 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5440 }
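/* Editorial worked example (not part of the original comments): for
   (const_int -4) in SImode the CONST_INT case above masks the value to
   0xfffffffc, sees the sign bit set, complements and remasks to get 0x3,
   and returns 32 - floor_log2 (3) - 1 = 30: bits 31..2 are all copies of
   the sign bit.  For (const_int 5) nothing is complemented and the result
   is 32 - floor_log2 (5) - 1 = 29.  */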
5441
5442 /* Calculate the rtx_cost of a single instruction pattern. A return value of
5443 zero indicates an instruction pattern without a known cost. */
5444
5445 int
5446 pattern_cost (rtx pat, bool speed)
5447 {
5448 int i, cost;
5449 rtx set;
5450
5451 /* Extract the single set rtx from the instruction pattern. We
5452 can't use single_set since we only have the pattern. We also
5453 consider PARALLELs of a normal set and a single comparison. In
5454 that case we use the cost of the non-comparison SET operation,
5455 which is most likely to be the real cost of this operation. */
5456 if (GET_CODE (pat) == SET)
5457 set = pat;
5458 else if (GET_CODE (pat) == PARALLEL)
5459 {
5460 set = NULL_RTX;
5461 rtx comparison = NULL_RTX;
5462
5463 for (i = 0; i < XVECLEN (pat, 0); i++)
5464 {
5465 rtx x = XVECEXP (pat, 0, i);
5466 if (GET_CODE (x) == SET)
5467 {
5468 if (GET_CODE (SET_SRC (x)) == COMPARE)
5469 {
5470 if (comparison)
5471 return 0;
5472 comparison = x;
5473 }
5474 else
5475 {
5476 if (set)
5477 return 0;
5478 set = x;
5479 }
5480 }
5481 }
5482
5483 if (!set && comparison)
5484 set = comparison;
5485
5486 if (!set)
5487 return 0;
5488 }
5489 else
5490 return 0;
5491
5492 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5493 return cost > 0 ? cost : COSTS_N_INSNS (1);
5494 }
5495
5496 /* Calculate the cost of a single instruction. A return value of zero
5497 indicates an instruction pattern without a known cost. */
5498
5499 int
5500 insn_cost (rtx_insn *insn, bool speed)
5501 {
5502 if (targetm.insn_cost)
5503 return targetm.insn_cost (insn, speed);
5504
5505 return pattern_cost (PATTERN (insn), speed);
5506 }
5507
5508 /* Returns estimate on cost of computing SEQ. */
5509
5510 unsigned
5511 seq_cost (const rtx_insn *seq, bool speed)
5512 {
5513 unsigned cost = 0;
5514 rtx set;
5515
5516 for (; seq; seq = NEXT_INSN (seq))
5517 {
5518 set = single_set (seq);
5519 if (set)
5520 cost += set_rtx_cost (set, speed);
5521 else if (NONDEBUG_INSN_P (seq))
5522 {
5523 int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5524 if (this_cost > 0)
5525 cost += this_cost;
5526 else
5527 cost++;
5528 }
5529 }
5530
5531 return cost;
5532 }
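/* Editorial usage sketch (not part of the original source): a common
   pattern is to generate a candidate sequence, cost it, and keep it only
   if it is cheap enough.  The helper and its threshold are hypothetical;
   start_sequence, get_insns, end_sequence and emit_insn are the usual GCC
   primitives.  */
#if 0
static bool
emit_if_cheap_sketch (rtx target, rtx x, unsigned int max_cost, bool speed)
{
  start_sequence ();
  emit_move_insn (target, x);
  rtx_insn *seq = get_insns ();
  end_sequence ();

  if (seq_cost (seq, speed) > max_cost)
    return false;
  emit_insn (seq);
  return true;
}
#endif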
5533
5534 /* Given an insn INSN and condition COND, return the condition in a
5535 canonical form to simplify testing by callers. Specifically:
5536
5537 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5538 (2) Both operands will be machine operands; (cc0) will have been replaced.
5539 (3) If an operand is a constant, it will be the second operand.
5540 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5541 for GE, GEU, and LEU.
5542
5543 If the condition cannot be understood, or is an inequality floating-point
5544 comparison which needs to be reversed, 0 will be returned.
5545
5546 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5547
5548 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5549 insn used in locating the condition was found. If a replacement test
5550 of the condition is desired, it should be placed in front of that
5551 insn and we will be sure that the inputs are still valid.
5552
5553 If WANT_REG is nonzero, we wish the condition to be relative to that
5554 register, if possible. Therefore, do not canonicalize the condition
5555 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5556 to be a compare to a CC mode register.
5557
5558 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5559 and at INSN. */
5560
5561 rtx
5562 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5563 rtx_insn **earliest,
5564 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5565 {
5566 enum rtx_code code;
5567 rtx_insn *prev = insn;
5568 const_rtx set;
5569 rtx tem;
5570 rtx op0, op1;
5571 int reverse_code = 0;
5572 machine_mode mode;
5573 basic_block bb = BLOCK_FOR_INSN (insn);
5574
5575 code = GET_CODE (cond);
5576 mode = GET_MODE (cond);
5577 op0 = XEXP (cond, 0);
5578 op1 = XEXP (cond, 1);
5579
5580 if (reverse)
5581 code = reversed_comparison_code (cond, insn);
5582 if (code == UNKNOWN)
5583 return 0;
5584
5585 if (earliest)
5586 *earliest = insn;
5587
5588 /* If we are comparing a register with zero, see if the register is set
5589 in the previous insn to a COMPARE or a comparison operation. Perform
5590 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5591 in cse.c */
5592
5593 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5594 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5595 && op1 == CONST0_RTX (GET_MODE (op0))
5596 && op0 != want_reg)
5597 {
5598 /* Set nonzero when we find something of interest. */
5599 rtx x = 0;
5600
5601 /* If comparison with cc0, import actual comparison from compare
5602 insn. */
5603 if (op0 == cc0_rtx)
5604 {
5605 if ((prev = prev_nonnote_insn (prev)) == 0
5606 || !NONJUMP_INSN_P (prev)
5607 || (set = single_set (prev)) == 0
5608 || SET_DEST (set) != cc0_rtx)
5609 return 0;
5610
5611 op0 = SET_SRC (set);
5612 op1 = CONST0_RTX (GET_MODE (op0));
5613 if (earliest)
5614 *earliest = prev;
5615 }
5616
5617 /* If this is a COMPARE, pick up the two things being compared. */
5618 if (GET_CODE (op0) == COMPARE)
5619 {
5620 op1 = XEXP (op0, 1);
5621 op0 = XEXP (op0, 0);
5622 continue;
5623 }
5624 else if (!REG_P (op0))
5625 break;
5626
5627 /* Go back to the previous insn. Stop if it is not an INSN. We also
5628 stop if it isn't a single set or if it has a REG_INC note because
5629 we don't want to bother dealing with it. */
5630
5631 prev = prev_nonnote_nondebug_insn (prev);
5632
5633 if (prev == 0
5634 || !NONJUMP_INSN_P (prev)
5635 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5636 /* In cfglayout mode, there do not have to be labels at the
5637 beginning of a block, or jumps at the end, so the previous
5638 conditions would not stop us when we reach bb boundary. */
5639 || BLOCK_FOR_INSN (prev) != bb)
5640 break;
5641
5642 set = set_of (op0, prev);
5643
5644 if (set
5645 && (GET_CODE (set) != SET
5646 || !rtx_equal_p (SET_DEST (set), op0)))
5647 break;
5648
5649 /* If this is setting OP0, get what it sets it to if it looks
5650 relevant. */
5651 if (set)
5652 {
5653 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5654 #ifdef FLOAT_STORE_FLAG_VALUE
5655 REAL_VALUE_TYPE fsfv;
5656 #endif
5657
5658 /* ??? We may not combine comparisons done in a CCmode with
5659 comparisons not done in a CCmode. This is to aid targets
5660 like Alpha that have an IEEE compliant EQ instruction, and
5661 a non-IEEE compliant BEQ instruction. The use of CCmode is
5662 actually artificial, simply to prevent the combination, but
5663 should not affect other platforms.
5664
5665 However, we must allow VOIDmode comparisons to match either
5666 CCmode or non-CCmode comparison, because some ports have
5667 modeless comparisons inside branch patterns.
5668
5669 ??? This mode check should perhaps look more like the mode check
5670 in simplify_comparison in combine. */
5671 if (((GET_MODE_CLASS (mode) == MODE_CC)
5672 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5673 && mode != VOIDmode
5674 && inner_mode != VOIDmode)
5675 break;
5676 if (GET_CODE (SET_SRC (set)) == COMPARE
5677 || (((code == NE
5678 || (code == LT
5679 && val_signbit_known_set_p (inner_mode,
5680 STORE_FLAG_VALUE))
5681 #ifdef FLOAT_STORE_FLAG_VALUE
5682 || (code == LT
5683 && SCALAR_FLOAT_MODE_P (inner_mode)
5684 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5685 REAL_VALUE_NEGATIVE (fsfv)))
5686 #endif
5687 ))
5688 && COMPARISON_P (SET_SRC (set))))
5689 x = SET_SRC (set);
5690 else if (((code == EQ
5691 || (code == GE
5692 && val_signbit_known_set_p (inner_mode,
5693 STORE_FLAG_VALUE))
5694 #ifdef FLOAT_STORE_FLAG_VALUE
5695 || (code == GE
5696 && SCALAR_FLOAT_MODE_P (inner_mode)
5697 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5698 REAL_VALUE_NEGATIVE (fsfv)))
5699 #endif
5700 ))
5701 && COMPARISON_P (SET_SRC (set)))
5702 {
5703 reverse_code = 1;
5704 x = SET_SRC (set);
5705 }
5706 else if ((code == EQ || code == NE)
5707 && GET_CODE (SET_SRC (set)) == XOR)
5708 /* Handle sequences like:
5709
5710 (set op0 (xor X Y))
5711 ...(eq|ne op0 (const_int 0))...
5712
5713 in which case:
5714
5715 (eq op0 (const_int 0)) reduces to (eq X Y)
5716 (ne op0 (const_int 0)) reduces to (ne X Y)
5717
5718 This is the form used by MIPS16, for example. */
5719 x = SET_SRC (set);
5720 else
5721 break;
5722 }
5723
5724 else if (reg_set_p (op0, prev))
5725 /* If this sets OP0, but not directly, we have to give up. */
5726 break;
5727
5728 if (x)
5729 {
5730 /* If the caller is expecting the condition to be valid at INSN,
5731 make sure X doesn't change before INSN. */
5732 if (valid_at_insn_p)
5733 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5734 break;
5735 if (COMPARISON_P (x))
5736 code = GET_CODE (x);
5737 if (reverse_code)
5738 {
5739 code = reversed_comparison_code (x, prev);
5740 if (code == UNKNOWN)
5741 return 0;
5742 reverse_code = 0;
5743 }
5744
5745 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5746 if (earliest)
5747 *earliest = prev;
5748 }
5749 }
5750
5751 /* If constant is first, put it last. */
5752 if (CONSTANT_P (op0))
5753 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5754
5755 /* If OP0 is the result of a comparison, we weren't able to find what
5756 was really being compared, so fail. */
5757 if (!allow_cc_mode
5758 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5759 return 0;
5760
5761 /* Canonicalize any ordered comparison against an integer constant that
5762 includes equality (LE, GE, LEU, GEU) into a strict comparison, provided
5763 we can do the computation in the relevant mode without overflow. */
5764
5765 scalar_int_mode op0_mode;
5766 if (CONST_INT_P (op1)
5767 && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
5768 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
5769 {
5770 HOST_WIDE_INT const_val = INTVAL (op1);
5771 unsigned HOST_WIDE_INT uconst_val = const_val;
5772 unsigned HOST_WIDE_INT max_val
5773 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
5774
5775 switch (code)
5776 {
5777 case LE:
5778 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5779 code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
5780 break;
5781
5782 /* When cross-compiling, const_val might be sign-extended from
5783 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5784 case GE:
5785 if ((const_val & max_val)
5786 != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
5787 code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
5788 break;
5789
5790 case LEU:
5791 if (uconst_val < max_val)
5792 code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
5793 break;
5794
5795 case GEU:
5796 if (uconst_val != 0)
5797 code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
5798 break;
5799
5800 default:
5801 break;
5802 }
5803 }
5804
5805 /* Never return CC0; return zero instead. */
5806 if (CC0_P (op0))
5807 return 0;
5808
5809 /* We promised to return a comparison. */
5810 rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5811 if (COMPARISON_P (ret))
5812 return ret;
5813 return 0;
5814 }
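
/* As an illustrative sketch of the constant canonicalization above, with
   SImode operands and hypothetical register numbers:

     (le  (reg:SI 1) (const_int 4))   becomes  (lt  (reg:SI 1) (const_int 5))
     (geu (reg:SI 1) (const_int 1))   becomes  (gtu (reg:SI 1) (const_int 0))

   whereas (le (reg:SI 1) (const_int 0x7fffffff)) is left alone, since
   adding 1 to the constant would overflow SImode.  */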
5815
5816 /* Given a jump insn JUMP, return the condition that will cause it to branch
5817 to its JUMP_LABEL. If the condition cannot be understood, or is an
5818 inequality floating-point comparison which needs to be reversed, 0 will
5819 be returned.
5820
5821 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5822 insn used in locating the condition was found. If a replacement test
5823 of the condition is desired, it should be placed in front of that
5824 insn and we will be sure that the inputs are still valid. If EARLIEST
5825 is null, the returned condition will be valid at INSN.
5826
5827 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5828 compare CC mode register.
5829
5830 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5831
5832 rtx
5833 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5834 int valid_at_insn_p)
5835 {
5836 rtx cond;
5837 int reverse;
5838 rtx set;
5839
5840 /* If this is not a standard conditional jump, we can't parse it. */
5841 if (!JUMP_P (jump)
5842 || ! any_condjump_p (jump))
5843 return 0;
5844 set = pc_set (jump);
5845
5846 cond = XEXP (SET_SRC (set), 0);
5847
5848 /* If this branches to JUMP_LABEL when the condition is false, reverse
5849 the condition. */
5850 reverse
5851 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5852 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5853
5854 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5855 allow_cc_mode, valid_at_insn_p);
5856 }
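
/* A hypothetical example: for a conditional jump whose pattern is

     (set (pc) (if_then_else (ne (reg:SI 1) (const_int 0))
                             (label_ref 23)
                             (pc)))

   the label appears in the "then" arm, so REVERSE is false and
   (ne (reg:SI 1) (const_int 0)) is what gets passed to
   canonicalize_condition.  If the label instead appeared in the "else"
   arm, the condition would be reversed before being canonicalized.  */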
5857
5858 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5859 TARGET_MODE_REP_EXTENDED.
5860
5861 Note that we assume that the property of
5862 TARGET_MODE_REP_EXTENDED(B, C) also applies to the integral modes
5863 narrower than mode B. I.e., if A is a mode narrower than B then in
5864 order to be able to operate on it in mode B, mode A needs to
5865 satisfy the requirements set by the representation of mode B. */
5866
5867 static void
5868 init_num_sign_bit_copies_in_rep (void)
5869 {
5870 opt_scalar_int_mode in_mode_iter;
5871 scalar_int_mode mode;
5872
5873 FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
5874 FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
5875 {
5876 scalar_int_mode in_mode = in_mode_iter.require ();
5877 scalar_int_mode i;
5878
5879 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5880 extends to the next widest mode. */
5881 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5882 || GET_MODE_WIDER_MODE (mode).require () == in_mode);
5883
5884 /* We are in in_mode. Count how many bits outside of mode
5885 have to be copies of the sign-bit. */
5886 FOR_EACH_MODE (i, mode, in_mode)
5887 {
5888 /* This must always exist (for the last iteration it will be
5889 IN_MODE). */
5890 scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
5891
5892 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5893 /* We can only check sign-bit copies starting from the
5894 top-bit. In order to be able to check the bits we
5895 have already seen we pretend that subsequent bits
5896 have to be sign-bit copies too. */
5897 || num_sign_bit_copies_in_rep [in_mode][mode])
5898 num_sign_bit_copies_in_rep [in_mode][mode]
5899 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5900 }
5901 }
5902 }
5903
5904 /* Suppose that truncation from the machine mode of X to MODE is not a
5905 no-op. See if there is anything special about X so that we can
5906 assume it already contains a truncated value of MODE. */
5907
5908 bool
5909 truncated_to_mode (machine_mode mode, const_rtx x)
5910 {
5911 /* This register has already been used in MODE without explicit
5912 truncation. */
5913 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5914 return true;
5915
5916 /* See if we already satisfy the requirements of MODE. If yes we
5917 can just switch to MODE. */
5918 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5919 && (num_sign_bit_copies (x, GET_MODE (x))
5920 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5921 return true;
5922
5923 return false;
5924 }
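
/* As a sketch, consider a hypothetical target whose mode_rep_extended hook
   returns SIGN_EXTEND for (QImode, HImode).  Then
   num_sign_bit_copies_in_rep[HImode][QImode] is 8, and truncated_to_mode
   (QImode, X) holds for an HImode X only if X is known to have at least
   nine sign-bit copies, i.e. it already looks like a sign-extended QImode
   value.  */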
5925 \f
5926 /* Return true if RTX code CODE has a single sequence of zero or more
5927 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5928 entry in that case. */
5929
5930 static bool
5931 setup_reg_subrtx_bounds (unsigned int code)
5932 {
5933 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5934 unsigned int i = 0;
5935 for (; format[i] != 'e'; ++i)
5936 {
5937 if (!format[i])
5938 /* No subrtxes. Leave start and count as 0. */
5939 return true;
5940 if (format[i] == 'E' || format[i] == 'V')
5941 return false;
5942 }
5943
5944 /* Record the sequence of 'e's. */
5945 rtx_all_subrtx_bounds[code].start = i;
5946 do
5947 ++i;
5948 while (format[i] == 'e');
5949 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5950 /* rtl-iter.h relies on this. */
5951 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5952
5953 for (; format[i]; ++i)
5954 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5955 return false;
5956
5957 return true;
5958 }
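
/* Illustrative examples, with format strings as usually given in rtl.def:
   PLUS ("ee") gets start 0, count 2; SIGN_EXTEND ("e") gets start 0,
   count 1; ZERO_EXTRACT ("eee") gets start 0, count 3.  PARALLEL ("E")
   fails the check, so init_rtlanal below marks its count as UCHAR_MAX.  */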
5959
5960 /* Initialize rtx_all_subrtx_bounds. */
5961 void
5962 init_rtlanal (void)
5963 {
5964 int i;
5965 for (i = 0; i < NUM_RTX_CODE; i++)
5966 {
5967 if (!setup_reg_subrtx_bounds (i))
5968 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5969 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5970 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5971 }
5972
5973 init_num_sign_bit_copies_in_rep ();
5974 }
5975 \f
5976 /* Check whether this is a constant pool constant. */
5977 bool
5978 constant_pool_constant_p (rtx x)
5979 {
5980 x = avoid_constant_pool_reference (x);
5981 return CONST_DOUBLE_P (x);
5982 }
5983 \f
5984 /* If M is a bitmask that selects a field of low-order bits within an item but
5985 not the entire word, return the length of the field. Return -1 otherwise.
5986 M is used in machine mode MODE. */
5987
5988 int
5989 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5990 {
5991 if (mode != VOIDmode)
5992 {
5993 if (!HWI_COMPUTABLE_MODE_P (mode))
5994 return -1;
5995 m &= GET_MODE_MASK (mode);
5996 }
5997
5998 return exact_log2 (m + 1);
5999 }
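
/* For example, in SImode:

     low_bitmask_len (SImode, 0x3f) == 6    (0x3f + 1 is a power of two)
     low_bitmask_len (SImode, 0x18) == -1   (0x19 is not a power of two)

   A mode wider than HOST_WIDE_INT (or not a scalar integer mode at all)
   makes the function return -1 immediately.  */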
6000
6001 /* Return the mode of MEM's address. */
6002
6003 scalar_int_mode
6004 get_address_mode (rtx mem)
6005 {
6006 machine_mode mode;
6007
6008 gcc_assert (MEM_P (mem));
6009 mode = GET_MODE (XEXP (mem, 0));
6010 if (mode != VOIDmode)
6011 return as_a <scalar_int_mode> (mode);
6012 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
6013 }
6014 \f
6015 /* Split up a CONST_DOUBLE or integer constant rtx
6016 into two rtx's for single words,
6017 storing in *FIRST the word that comes first in memory in the target
6018 and in *SECOND the other.
6019
6020 TODO: This function needs to be rewritten to work on any size
6021 integer. */
6022
6023 void
6024 split_double (rtx value, rtx *first, rtx *second)
6025 {
6026 if (CONST_INT_P (value))
6027 {
6028 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
6029 {
6030 /* In this case the CONST_INT holds both target words.
6031 Extract the bits from it into two word-sized pieces.
6032 Sign extend each half to HOST_WIDE_INT. */
6033 unsigned HOST_WIDE_INT low, high;
6034 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
6035 unsigned bits_per_word = BITS_PER_WORD;
6036
6037 /* Set sign_bit to the most significant bit of a word. */
6038 sign_bit = 1;
6039 sign_bit <<= bits_per_word - 1;
6040
6041 /* Set mask so that all bits of the word are set. We could
6042 have used 1 << BITS_PER_WORD instead of basing the
6043 calculation on sign_bit. However, on machines where
6044 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
6045 compiler warning, even though the code would never be
6046 executed. */
6047 mask = sign_bit << 1;
6048 mask--;
6049
6050 /* Set sign_extend as any remaining bits. */
6051 sign_extend = ~mask;
6052
6053 /* Pick the lower word and sign-extend it. */
6054 low = INTVAL (value);
6055 low &= mask;
6056 if (low & sign_bit)
6057 low |= sign_extend;
6058
6059 /* Pick the higher word, shifted to the least significant
6060 bits, and sign-extend it. */
6061 high = INTVAL (value);
6062 high >>= bits_per_word - 1;
6063 high >>= 1;
6064 high &= mask;
6065 if (high & sign_bit)
6066 high |= sign_extend;
6067
6068 /* Store the words in the target machine order. */
6069 if (WORDS_BIG_ENDIAN)
6070 {
6071 *first = GEN_INT (high);
6072 *second = GEN_INT (low);
6073 }
6074 else
6075 {
6076 *first = GEN_INT (low);
6077 *second = GEN_INT (high);
6078 }
6079 }
6080 else
6081 {
6082 /* The rule for using CONST_INT for a wider mode
6083 is that we regard the value as signed.
6084 So sign-extend it. */
6085 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6086 if (WORDS_BIG_ENDIAN)
6087 {
6088 *first = high;
6089 *second = value;
6090 }
6091 else
6092 {
6093 *first = value;
6094 *second = high;
6095 }
6096 }
6097 }
6098 else if (GET_CODE (value) == CONST_WIDE_INT)
6099 {
6100 /* All of this is scary code and needs to be converted to
6101 properly work with any size integer. */
6102 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6103 if (WORDS_BIG_ENDIAN)
6104 {
6105 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6106 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6107 }
6108 else
6109 {
6110 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6111 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6112 }
6113 }
6114 else if (!CONST_DOUBLE_P (value))
6115 {
6116 if (WORDS_BIG_ENDIAN)
6117 {
6118 *first = const0_rtx;
6119 *second = value;
6120 }
6121 else
6122 {
6123 *first = value;
6124 *second = const0_rtx;
6125 }
6126 }
6127 else if (GET_MODE (value) == VOIDmode
6128 /* This is the old way we did CONST_DOUBLE integers. */
6129 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6130 {
6131 /* In an integer, the words are defined as most and least significant.
6132 So order them by the target's convention. */
6133 if (WORDS_BIG_ENDIAN)
6134 {
6135 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6136 *second = GEN_INT (CONST_DOUBLE_LOW (value));
6137 }
6138 else
6139 {
6140 *first = GEN_INT (CONST_DOUBLE_LOW (value));
6141 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6142 }
6143 }
6144 else
6145 {
6146 long l[2];
6147
6148 /* Note, this converts the REAL_VALUE_TYPE to the target's
6149 format, splits up the floating point double and outputs
6150 exactly 32 bits of it into each of l[0] and l[1] --
6151 not necessarily BITS_PER_WORD bits. */
6152 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6153
6154 /* If 32 bits is an entire word for the target, but not for the host,
6155 then sign-extend on the host so that the number will look the same
6156 way on the host that it would on the target. See for instance
6157 simplify_unary_operation. The #if is needed to avoid compiler
6158 warnings. */
6159
6160 #if HOST_BITS_PER_LONG > 32
6161 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6162 {
6163 if (l[0] & ((long) 1 << 31))
6164 l[0] |= ((unsigned long) (-1) << 32);
6165 if (l[1] & ((long) 1 << 31))
6166 l[1] |= ((unsigned long) (-1) << 32);
6167 }
6168 #endif
6169
6170 *first = GEN_INT (l[0]);
6171 *second = GEN_INT (l[1]);
6172 }
6173 }
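
/* A worked example, assuming a 32-bit-word target and a 64-bit
   HOST_WIDE_INT: (const_int 0x100000002) splits into a low word of
   (const_int 2) and a high word of (const_int 1), with *FIRST getting the
   high word when WORDS_BIG_ENDIAN and the low word otherwise.  A negative
   value such as (const_int -2) splits into (const_int -2) and
   (const_int -1), because each half is sign-extended to HOST_WIDE_INT.  */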
6174
6175 /* Return true if X is a sign_extract or zero_extract from the least
6176 significant bit. */
6177
6178 static bool
6179 lsb_bitfield_op_p (rtx x)
6180 {
6181 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6182 {
6183 machine_mode mode = GET_MODE (XEXP (x, 0));
6184 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6185 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6186 poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6187
6188 return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6189 }
6190 return false;
6191 }
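
/* For example, with !BITS_BIG_ENDIAN,

     (zero_extract:SI (reg:SI 1) (const_int 8) (const_int 0))

   is such an operation, whereas on a BITS_BIG_ENDIAN target the position
   would have to be 24 (the SImode precision of the operand minus the
   8-bit length) for the same to hold.  */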
6192
6193 /* Strip outer address "mutations" from LOC and return a pointer to the
6194 inner value. If OUTER_CODE is nonnull, store the code of the innermost
6195 stripped expression there.
6196
6197 "Mutations" either convert between modes or apply some kind of
6198 extension, truncation or alignment. */
6199
6200 rtx *
6201 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6202 {
6203 for (;;)
6204 {
6205 enum rtx_code code = GET_CODE (*loc);
6206 if (GET_RTX_CLASS (code) == RTX_UNARY)
6207 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6208 used to convert between pointer sizes. */
6209 loc = &XEXP (*loc, 0);
6210 else if (lsb_bitfield_op_p (*loc))
6211 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6212 acts as a combined truncation and extension. */
6213 loc = &XEXP (*loc, 0);
6214 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6215 /* (and ... (const_int -X)) is used to align to X bytes. */
6216 loc = &XEXP (*loc, 0);
6217 else if (code == SUBREG
6218 && !OBJECT_P (SUBREG_REG (*loc))
6219 && subreg_lowpart_p (*loc))
6220 /* (subreg (operator ...) ...) inside an AND is used for mode
6221 conversion too. */
6222 loc = &SUBREG_REG (*loc);
6223 else
6224 return loc;
6225 if (outer_code)
6226 *outer_code = code;
6227 }
6228 }
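
/* For instance, applied to a pointer to (zero_extend:DI (reg:SI 1)) this
   returns a pointer to (reg:SI 1) with *OUTER_CODE set to ZERO_EXTEND,
   and applied to (and:DI (reg:DI 1) (const_int -8)) it returns a pointer
   to (reg:DI 1) with *OUTER_CODE set to AND.  */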
6229
6230 /* Return true if CODE applies some kind of scale. The scaled value
6231 is the first operand and the scale is the second. */
6232
6233 static bool
6234 binary_scale_code_p (enum rtx_code code)
6235 {
6236 return (code == MULT
6237 || code == ASHIFT
6238 /* Needed by ARM targets. */
6239 || code == ASHIFTRT
6240 || code == LSHIFTRT
6241 || code == ROTATE
6242 || code == ROTATERT);
6243 }
6244
6245 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6246 (see address_info). Return null otherwise. */
6247
6248 static rtx *
6249 get_base_term (rtx *inner)
6250 {
6251 if (GET_CODE (*inner) == LO_SUM)
6252 inner = strip_address_mutations (&XEXP (*inner, 0));
6253 if (REG_P (*inner)
6254 || MEM_P (*inner)
6255 || GET_CODE (*inner) == SUBREG
6256 || GET_CODE (*inner) == SCRATCH)
6257 return inner;
6258 return 0;
6259 }
6260
6261 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6262 (see address_info). Return null otherwise. */
6263
6264 static rtx *
6265 get_index_term (rtx *inner)
6266 {
6267 /* At present, only constant scales are allowed. */
6268 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6269 inner = strip_address_mutations (&XEXP (*inner, 0));
6270 if (REG_P (*inner)
6271 || MEM_P (*inner)
6272 || GET_CODE (*inner) == SUBREG
6273 || GET_CODE (*inner) == SCRATCH)
6274 return inner;
6275 return 0;
6276 }
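
/* For example, for (mult:SI (reg:SI 1) (const_int 4)) get_index_term
   returns a pointer to (reg:SI 1) while get_base_term returns null; a
   bare (reg:SI 2) is accepted by both, which is why
   decompose_normal_address below has to disambiguate such operands
   separately.  */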
6277
6278 /* Set the segment part of address INFO to LOC, given that INNER is the
6279 unmutated value. */
6280
6281 static void
6282 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6283 {
6284 gcc_assert (!info->segment);
6285 info->segment = loc;
6286 info->segment_term = inner;
6287 }
6288
6289 /* Set the base part of address INFO to LOC, given that INNER is the
6290 unmutated value. */
6291
6292 static void
6293 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6294 {
6295 gcc_assert (!info->base);
6296 info->base = loc;
6297 info->base_term = inner;
6298 }
6299
6300 /* Set the index part of address INFO to LOC, given that INNER is the
6301 unmutated value. */
6302
6303 static void
6304 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6305 {
6306 gcc_assert (!info->index);
6307 info->index = loc;
6308 info->index_term = inner;
6309 }
6310
6311 /* Set the displacement part of address INFO to LOC, given that INNER
6312 is the constant term. */
6313
6314 static void
6315 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6316 {
6317 gcc_assert (!info->disp);
6318 info->disp = loc;
6319 info->disp_term = inner;
6320 }
6321
6322 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
6323 rest of INFO accordingly. */
6324
6325 static void
6326 decompose_incdec_address (struct address_info *info)
6327 {
6328 info->autoinc_p = true;
6329
6330 rtx *base = &XEXP (*info->inner, 0);
6331 set_address_base (info, base, base);
6332 gcc_checking_assert (info->base == info->base_term);
6333
6334 /* These addresses are only valid when the size of the addressed
6335 value is known. */
6336 gcc_checking_assert (info->mode != VOIDmode);
6337 }
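
/* For example, for the address (post_inc:SI (reg:SI 3)) of a QImode MEM
   on a hypothetical 32-bit target, INFO->base and INFO->base_term both
   point at (reg:SI 3) and INFO->autoinc_p is set; the QImode size of the
   access is what determines the increment.  */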
6338
6339 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
6340 of INFO accordingly. */
6341
6342 static void
6343 decompose_automod_address (struct address_info *info)
6344 {
6345 info->autoinc_p = true;
6346
6347 rtx *base = &XEXP (*info->inner, 0);
6348 set_address_base (info, base, base);
6349 gcc_checking_assert (info->base == info->base_term);
6350
6351 rtx plus = XEXP (*info->inner, 1);
6352 gcc_assert (GET_CODE (plus) == PLUS);
6353
6354 info->base_term2 = &XEXP (plus, 0);
6355 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6356
6357 rtx *step = &XEXP (plus, 1);
6358 rtx *inner_step = strip_address_mutations (step);
6359 if (CONSTANT_P (*inner_step))
6360 set_address_disp (info, step, inner_step);
6361 else
6362 set_address_index (info, step, inner_step);
6363 }
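
/* A sketch, using the stack pointer as a hypothetical base: for

     (pre_modify:SI (reg:SI sp) (plus:SI (reg:SI sp) (const_int -16)))

   INFO->base and INFO->base_term point at the first (reg:SI sp),
   INFO->base_term2 at the copy inside the PLUS, and the constant step is
   recorded as the displacement.  If the step were a register, it would
   be recorded as the index instead.  */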
6364
6365 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6366 values in [PTR, END). Return a pointer to the end of the used array. */
6367
6368 static rtx **
6369 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6370 {
6371 rtx x = *loc;
6372 if (GET_CODE (x) == PLUS)
6373 {
6374 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6375 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6376 }
6377 else
6378 {
6379 gcc_assert (ptr != end);
6380 *ptr++ = loc;
6381 }
6382 return ptr;
6383 }
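
/* For example, for *LOC equal to

     (plus:SI (plus:SI (reg:SI 1) (reg:SI 2)) (const_int 8))

   this stores pointers to (reg:SI 1), (reg:SI 2) and (const_int 8) and
   returns PTR + 3; it asserts rather than overflowing if the tree has
   more summands than the caller provided room for.  */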
6384
6385 /* Evaluate the likelihood of X being a base or index value, returning
6386 positive if it is likely to be a base, negative if it is likely to be
6387 an index, and 0 if we can't tell. Make the magnitude of the return
6388 value reflect the amount of confidence we have in the answer.
6389
6390 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6391
6392 static int
6393 baseness (rtx x, machine_mode mode, addr_space_t as,
6394 enum rtx_code outer_code, enum rtx_code index_code)
6395 {
6396 /* Believe *_POINTER unless the address shape requires otherwise. */
6397 if (REG_P (x) && REG_POINTER (x))
6398 return 2;
6399 if (MEM_P (x) && MEM_POINTER (x))
6400 return 2;
6401
6402 if (REG_P (x) && HARD_REGISTER_P (x))
6403 {
6404 /* X is a hard register. If it only fits one of the base
6405 or index classes, choose that interpretation. */
6406 int regno = REGNO (x);
6407 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6408 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6409 if (base_p != index_p)
6410 return base_p ? 1 : -1;
6411 }
6412 return 0;
6413 }
6414
6415 /* INFO->INNER describes a normal, non-automodified address.
6416 Fill in the rest of INFO accordingly. */
6417
6418 static void
6419 decompose_normal_address (struct address_info *info)
6420 {
6421 /* Treat the address as the sum of up to four values. */
6422 rtx *ops[4];
6423 size_t n_ops = extract_plus_operands (info->inner, ops,
6424 ops + ARRAY_SIZE (ops)) - ops;
6425
6426 /* If there is more than one component, any base component is in a PLUS. */
6427 if (n_ops > 1)
6428 info->base_outer_code = PLUS;
6429
6430 /* Try to classify each sum operand now. Leave those that could be
6431 either a base or an index in OPS. */
6432 rtx *inner_ops[4];
6433 size_t out = 0;
6434 for (size_t in = 0; in < n_ops; ++in)
6435 {
6436 rtx *loc = ops[in];
6437 rtx *inner = strip_address_mutations (loc);
6438 if (CONSTANT_P (*inner))
6439 set_address_disp (info, loc, inner);
6440 else if (GET_CODE (*inner) == UNSPEC)
6441 set_address_segment (info, loc, inner);
6442 else
6443 {
6444 /* The only other possibilities are a base or an index. */
6445 rtx *base_term = get_base_term (inner);
6446 rtx *index_term = get_index_term (inner);
6447 gcc_assert (base_term || index_term);
6448 if (!base_term)
6449 set_address_index (info, loc, index_term);
6450 else if (!index_term)
6451 set_address_base (info, loc, base_term);
6452 else
6453 {
6454 gcc_assert (base_term == index_term);
6455 ops[out] = loc;
6456 inner_ops[out] = base_term;
6457 ++out;
6458 }
6459 }
6460 }
6461
6462 /* Classify the remaining OPS members as bases and indexes. */
6463 if (out == 1)
6464 {
6465 /* If we haven't seen a base or an index yet, assume that this is
6466 the base. If we were confident that another term was the base
6467 or index, treat the remaining operand as the other kind. */
6468 if (!info->base)
6469 set_address_base (info, ops[0], inner_ops[0]);
6470 else
6471 set_address_index (info, ops[0], inner_ops[0]);
6472 }
6473 else if (out == 2)
6474 {
6475 /* In the event of a tie, assume the base comes first. */
6476 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6477 GET_CODE (*ops[1]))
6478 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6479 GET_CODE (*ops[0])))
6480 {
6481 set_address_base (info, ops[0], inner_ops[0]);
6482 set_address_index (info, ops[1], inner_ops[1]);
6483 }
6484 else
6485 {
6486 set_address_base (info, ops[1], inner_ops[1]);
6487 set_address_index (info, ops[0], inner_ops[0]);
6488 }
6489 }
6490 else
6491 gcc_assert (out == 0);
6492 }
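
/* Putting the pieces together, for an address such as

     (plus:SI (plus:SI (mult:SI (reg:SI 1) (const_int 4)) (reg:SI 2))
              (const_int 8))

   the constant becomes the displacement, the MULT is classified as the
   index (with INFO->index_term pointing at (reg:SI 1)), and (reg:SI 2),
   which on its own could be either a base or an index, ends up as the
   base because no other operand claimed that role.  */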
6493
6494 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6495 or VOIDmode if not known. AS is the address space associated with LOC.
6496 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6497
6498 void
6499 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6500 addr_space_t as, enum rtx_code outer_code)
6501 {
6502 memset (info, 0, sizeof (*info));
6503 info->mode = mode;
6504 info->as = as;
6505 info->addr_outer_code = outer_code;
6506 info->outer = loc;
6507 info->inner = strip_address_mutations (loc, &outer_code);
6508 info->base_outer_code = outer_code;
6509 switch (GET_CODE (*info->inner))
6510 {
6511 case PRE_DEC:
6512 case PRE_INC:
6513 case POST_DEC:
6514 case POST_INC:
6515 decompose_incdec_address (info);
6516 break;
6517
6518 case PRE_MODIFY:
6519 case POST_MODIFY:
6520 decompose_automod_address (info);
6521 break;
6522
6523 default:
6524 decompose_normal_address (info);
6525 break;
6526 }
6527 }
6528
6529 /* Describe address operand LOC in INFO. */
6530
6531 void
6532 decompose_lea_address (struct address_info *info, rtx *loc)
6533 {
6534 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6535 }
6536
6537 /* Describe the address of MEM X in INFO. */
6538
6539 void
6540 decompose_mem_address (struct address_info *info, rtx x)
6541 {
6542 gcc_assert (MEM_P (x));
6543 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6544 MEM_ADDR_SPACE (x), MEM);
6545 }
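
/* A minimal usage sketch; MEM, SCALE and the other locals below are
   hypothetical:

     struct address_info info;
     decompose_mem_address (&info, mem);
     if (info.index)
       scale = get_index_scale (&info);

   For (mem:SI (plus:SI (reg:SI 2) (const_int 8))) this records
   (reg:SI 2) as the base and (const_int 8) as the displacement.  */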
6546
6547 /* Update INFO after a change to the address it describes. */
6548
6549 void
6550 update_address (struct address_info *info)
6551 {
6552 decompose_address (info, info->outer, info->mode, info->as,
6553 info->addr_outer_code);
6554 }
6555
6556 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6557 more complicated than that. */
6558
6559 HOST_WIDE_INT
6560 get_index_scale (const struct address_info *info)
6561 {
6562 rtx index = *info->index;
6563 if (GET_CODE (index) == MULT
6564 && CONST_INT_P (XEXP (index, 1))
6565 && info->index_term == &XEXP (index, 0))
6566 return INTVAL (XEXP (index, 1));
6567
6568 if (GET_CODE (index) == ASHIFT
6569 && CONST_INT_P (XEXP (index, 1))
6570 && info->index_term == &XEXP (index, 0))
6571 return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6572
6573 if (info->index == info->index_term)
6574 return 1;
6575
6576 return 0;
6577 }
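
/* For example, assuming INFO->index_term points at (reg:SI 1) in each case:

     (mult:SI (reg:SI 1) (const_int 4))    -> scale 4
     (ashift:SI (reg:SI 1) (const_int 2))  -> scale 4
     (reg:SI 1) used directly as the index -> scale 1  */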
6578
6579 /* Return the "index code" of INFO, in the form required by
6580 ok_for_base_p_1. */
6581
6582 enum rtx_code
6583 get_index_code (const struct address_info *info)
6584 {
6585 if (info->index)
6586 return GET_CODE (*info->index);
6587
6588 if (info->disp)
6589 return GET_CODE (*info->disp);
6590
6591 return SCRATCH;
6592 }
6593
6594 /* Return true if RTL X contains a SYMBOL_REF. */
6595
6596 bool
6597 contains_symbol_ref_p (const_rtx x)
6598 {
6599 subrtx_iterator::array_type array;
6600 FOR_EACH_SUBRTX (iter, array, x, ALL)
6601 if (SYMBOL_REF_P (*iter))
6602 return true;
6603
6604 return false;
6605 }
6606
6607 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF. */
6608
6609 bool
6610 contains_symbolic_reference_p (const_rtx x)
6611 {
6612 subrtx_iterator::array_type array;
6613 FOR_EACH_SUBRTX (iter, array, x, ALL)
6614 if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6615 return true;
6616
6617 return false;
6618 }
6619
6620 /* Return true if RTL X contains a constant pool address. */
6621
6622 bool
6623 contains_constant_pool_address_p (const_rtx x)
6624 {
6625 subrtx_iterator::array_type array;
6626 FOR_EACH_SUBRTX (iter, array, x, ALL)
6627 if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
6628 return true;
6629
6630 return false;
6631 }
6632
6633
6634 /* Return true if X contains a thread-local symbol. */
6635
6636 bool
6637 tls_referenced_p (const_rtx x)
6638 {
6639 if (!targetm.have_tls)
6640 return false;
6641
6642 subrtx_iterator::array_type array;
6643 FOR_EACH_SUBRTX (iter, array, x, ALL)
6644 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6645 return true;
6646 return false;
6647 }