1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
33 #include "insn-config.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
47 virtual regs here because the simplify_*_operation routines are called
48 by integrate.c, which is called before virtual register instantiation.
50 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
51 a header file so that their definitions can be shared with the
52 simplification routines in simplify-rtx.c. Until then, do not
53 change these macros without also changing the copy in simplify-rtx.c. */
/* Nonzero if X is the frame pointer, arg pointer (when fixed), one of the
   virtual frame registers, an ADDRESSOF, or any of those plus a CONST_INT
   offset.

   NOTE(review): in the original, the PLUS arm tested
   `(X) == arg_pointer_rtx`, which can never be true there because X is
   already known to be a PLUS; the intended test is on the base operand,
   `XEXP (X, 0) == arg_pointer_rtx`, matching every other clause in the
   arm.  Fixed below.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
69 /* Similar, but also allows reference to the stack pointer.
71 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
72 arg_pointer_rtx by itself is nonzero, because on at least one machine,
73 the i960, the arg pointer is zero when it is unused. */
/* Nonzero if X is a base register (or such a base plus a CONST_INT) that
   is known to be nonzero.  Unlike FIXED_BASE_PLUS_P this also accepts the
   stack pointer and the dynamic/outgoing virtual registers, but it does
   NOT accept a bare arg pointer (which can be zero on the i960).

   NOTE(review): as in FIXED_BASE_PLUS_P, the first PLUS arm originally
   tested `(X) == arg_pointer_rtx`, which is dead code when X is a PLUS;
   the base operand `XEXP (X, 0)` is what was meant.  Fixed below.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
96 static rtx simplify_plus_minus
PARAMS ((enum rtx_code
,
97 enum machine_mode
, rtx
, rtx
));
98 static void check_fold_consts
PARAMS ((PTR
));
99 static int entry_and_rtx_equal_p
PARAMS ((const void *, const void *));
100 static unsigned int get_value_hash
PARAMS ((const void *));
101 static struct elt_list
*new_elt_list
PARAMS ((struct elt_list
*,
103 static struct elt_loc_list
*new_elt_loc_list
PARAMS ((struct elt_loc_list
*,
105 static void unchain_one_value
PARAMS ((cselib_val
*));
106 static void unchain_one_elt_list
PARAMS ((struct elt_list
**));
107 static void unchain_one_elt_loc_list
PARAMS ((struct elt_loc_list
**));
108 static void clear_table
PARAMS ((void));
109 static int check_value_useless
PARAMS ((cselib_val
*));
110 static int discard_useless_locs
PARAMS ((void **, void *));
111 static int discard_useless_values
PARAMS ((void **, void *));
112 static void remove_useless_values
PARAMS ((void));
113 static unsigned int hash_rtx
PARAMS ((rtx
, enum machine_mode
, int));
114 static cselib_val
*new_cselib_val
PARAMS ((unsigned int,
116 static void add_mem_for_addr
PARAMS ((cselib_val
*, cselib_val
*,
118 static cselib_val
*cselib_lookup_mem
PARAMS ((rtx
, int));
119 static rtx cselib_subst_to_values
PARAMS ((rtx
));
120 static void cselib_invalidate_regno
PARAMS ((unsigned int,
122 static int cselib_mem_conflict_p
PARAMS ((rtx
, rtx
));
123 static int cselib_invalidate_mem_1
PARAMS ((void **, void *));
124 static void cselib_invalidate_mem
PARAMS ((rtx
));
125 static void cselib_invalidate_rtx
PARAMS ((rtx
, rtx
, void *));
126 static void cselib_record_set
PARAMS ((rtx
, cselib_val
*,
128 static void cselib_record_sets
PARAMS ((rtx
));
130 /* There are three ways in which cselib can look up an rtx:
131 - for a REG, the reg_values table (which is indexed by regno) is used
132 - for a MEM, we recursively look up its address and then follow the
133 addr_list of that value
134 - for everything else, we compute a hash value and go through the hash
135 table. Since different rtx's can still have the same hash value,
136 this involves walking the table entries for a given value and comparing
137 the locations of the entries with the rtx we are looking up. */
139 /* A table that enables us to look up elts by their value. */
140 static htab_t hash_table
;
142 /* This is a global so we don't have to pass this through every function.
143 It is used in new_elt_loc_list to set SETTING_INSN. */
144 static rtx cselib_current_insn
;
146 /* Every new unknown value gets a unique number. */
147 static unsigned int next_unknown_value
;
149 /* The number of registers we had when the varrays were last resized. */
150 static unsigned int cselib_nregs
;
152 /* Count values without known locations. Whenever this grows too big, we
153 remove these useless values from the table. */
154 static int n_useless_values
;
156 /* Number of useless values before we remove them from the hash table. */
157 #define MAX_USELESS_VALUES 32
159 /* This table maps from register number to values. It does not contain
160 pointers to cselib_val structures, but rather elt_lists. The purpose is
161 to be able to refer to the same register in different modes. */
162 static varray_type reg_values
;
163 #define REG_VALUES(I) VARRAY_ELT_LIST (reg_values, (I))
165 /* We pass this to cselib_invalidate_mem to invalidate all of
166 memory for a non-const call instruction. */
169 /* Memory for our structures is allocated from this obstack. */
170 static struct obstack cselib_obstack
;
172 /* Used to quickly free all memory. */
173 static char *cselib_startobj
;
175 /* Caches for unused structures. */
176 static cselib_val
*empty_vals
;
177 static struct elt_list
*empty_elt_lists
;
178 static struct elt_loc_list
*empty_elt_loc_lists
;
180 /* Set by discard_useless_locs if it deleted the last location of any
182 static int values_became_useless
;
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
188 simplify_gen_binary (code
, mode
, op0
, op1
)
190 enum machine_mode mode
;
195 /* Put complex operands first and constants second if commutative. */
196 if (GET_RTX_CLASS (code
) == 'c'
197 && ((CONSTANT_P (op0
) && GET_CODE (op1
) != CONST_INT
)
198 || (GET_RTX_CLASS (GET_CODE (op0
)) == 'o'
199 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')
200 || (GET_CODE (op0
) == SUBREG
201 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0
))) == 'o'
202 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')))
203 tem
= op0
, op0
= op1
, op1
= tem
;
205 /* If this simplifies, do it. */
206 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
211 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
212 just form the operation. */
214 if (code
== PLUS
&& GET_CODE (op1
) == CONST_INT
215 && GET_MODE (op0
) != VOIDmode
)
216 return plus_constant (op0
, INTVAL (op1
));
217 else if (code
== MINUS
&& GET_CODE (op1
) == CONST_INT
218 && GET_MODE (op0
) != VOIDmode
)
219 return plus_constant (op0
, - INTVAL (op1
));
221 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
224 /* Try to simplify a unary operation CODE whose output mode is to be
225 MODE with input operand OP whose mode was originally OP_MODE.
226 Return zero if no simplification can be made. */
229 simplify_unary_operation (code
, mode
, op
, op_mode
)
231 enum machine_mode mode
;
233 enum machine_mode op_mode
;
235 unsigned int width
= GET_MODE_BITSIZE (mode
);
237 /* The order of these tests is critical so that, for example, we don't
238 check the wrong mode (input vs. output) for a conversion operation,
239 such as FIX. At some point, this should be simplified. */
241 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
243 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
244 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
246 HOST_WIDE_INT hv
, lv
;
249 if (GET_CODE (op
) == CONST_INT
)
250 lv
= INTVAL (op
), hv
= INTVAL (op
) < 0 ? -1 : 0;
252 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
254 #ifdef REAL_ARITHMETIC
255 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
260 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
261 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
262 d
+= (double) (unsigned HOST_WIDE_INT
) (~ lv
);
268 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
269 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
270 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
272 #endif /* REAL_ARITHMETIC */
273 d
= real_value_truncate (mode
, d
);
274 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
276 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
277 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
279 HOST_WIDE_INT hv
, lv
;
282 if (GET_CODE (op
) == CONST_INT
)
283 lv
= INTVAL (op
), hv
= INTVAL (op
) < 0 ? -1 : 0;
285 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
287 if (op_mode
== VOIDmode
)
289 /* We don't know how to interpret negative-looking numbers in
290 this case, so don't try to fold those. */
294 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
297 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
299 #ifdef REAL_ARITHMETIC
300 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
303 d
= (double) (unsigned HOST_WIDE_INT
) hv
;
304 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
305 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
306 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
307 #endif /* REAL_ARITHMETIC */
308 d
= real_value_truncate (mode
, d
);
309 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
313 if (GET_CODE (op
) == CONST_INT
314 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
316 register HOST_WIDE_INT arg0
= INTVAL (op
);
317 register HOST_WIDE_INT val
;
330 val
= (arg0
>= 0 ? arg0
: - arg0
);
334 /* Don't use ffs here. Instead, get low order bit and then its
335 number. If arg0 is zero, this will return 0, as desired. */
336 arg0
&= GET_MODE_MASK (mode
);
337 val
= exact_log2 (arg0
& (- arg0
)) + 1;
345 if (op_mode
== VOIDmode
)
347 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
349 /* If we were really extending the mode,
350 we would have to distinguish between zero-extension
351 and sign-extension. */
352 if (width
!= GET_MODE_BITSIZE (op_mode
))
356 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
357 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
363 if (op_mode
== VOIDmode
)
365 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
367 /* If we were really extending the mode,
368 we would have to distinguish between zero-extension
369 and sign-extension. */
370 if (width
!= GET_MODE_BITSIZE (op_mode
))
374 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
377 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
379 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
380 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
393 val
= trunc_int_for_mode (val
, mode
);
395 return GEN_INT (val
);
398 /* We can do some operations on integer CONST_DOUBLEs. Also allow
399 for a DImode operation on a CONST_INT. */
400 else if (GET_MODE (op
) == VOIDmode
&& width
<= HOST_BITS_PER_INT
* 2
401 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
403 HOST_WIDE_INT l1
, h1
, lv
, hv
;
405 if (GET_CODE (op
) == CONST_DOUBLE
)
406 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
408 l1
= INTVAL (op
), h1
= l1
< 0 ? -1 : 0;
418 neg_double (l1
, h1
, &lv
, &hv
);
423 neg_double (l1
, h1
, &lv
, &hv
);
431 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& (-h1
)) + 1;
433 lv
= exact_log2 (l1
& (-l1
)) + 1;
437 /* This is just a change-of-mode, so do nothing. */
442 if (op_mode
== VOIDmode
443 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
447 lv
= l1
& GET_MODE_MASK (op_mode
);
451 if (op_mode
== VOIDmode
452 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
456 lv
= l1
& GET_MODE_MASK (op_mode
);
457 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
458 && (lv
& ((HOST_WIDE_INT
) 1
459 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
460 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
462 hv
= (lv
< 0) ? ~ (HOST_WIDE_INT
) 0 : 0;
473 return immed_double_const (lv
, hv
, mode
);
476 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
477 else if (GET_CODE (op
) == CONST_DOUBLE
478 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
484 if (setjmp (handler
))
485 /* There used to be a warning here, but that is inadvisable.
486 People may want to cause traps, and the natural way
487 to do it should not get a warning. */
490 set_float_handler (handler
);
492 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
497 d
= REAL_VALUE_NEGATE (d
);
501 if (REAL_VALUE_NEGATIVE (d
))
502 d
= REAL_VALUE_NEGATE (d
);
506 d
= real_value_truncate (mode
, d
);
510 /* All this does is change the mode. */
514 d
= REAL_VALUE_RNDZINT (d
);
518 d
= REAL_VALUE_UNSIGNED_RNDZINT (d
);
528 x
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
529 set_float_handler (NULL_PTR
);
533 else if (GET_CODE (op
) == CONST_DOUBLE
534 && GET_MODE_CLASS (GET_MODE (op
)) == MODE_FLOAT
535 && GET_MODE_CLASS (mode
) == MODE_INT
536 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
542 if (setjmp (handler
))
545 set_float_handler (handler
);
547 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
552 val
= REAL_VALUE_FIX (d
);
556 val
= REAL_VALUE_UNSIGNED_FIX (d
);
563 set_float_handler (NULL_PTR
);
565 val
= trunc_int_for_mode (val
, mode
);
567 return GEN_INT (val
);
570 /* This was formerly used only for non-IEEE float.
571 eggert@twinsun.com says it is safe for IEEE also. */
574 /* There are some simplifications we can do even if the operands
580 /* (not (not X)) == X, similarly for NEG. */
581 if (GET_CODE (op
) == code
)
586 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
587 becomes just the MINUS if its mode is MODE. This allows
588 folding switch statements on machines using casesi (such as
590 if (GET_CODE (op
) == TRUNCATE
591 && GET_MODE (XEXP (op
, 0)) == mode
592 && GET_CODE (XEXP (op
, 0)) == MINUS
593 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
594 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
597 #ifdef POINTERS_EXTEND_UNSIGNED
598 if (! POINTERS_EXTEND_UNSIGNED
599 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
601 return convert_memory_address (Pmode
, op
);
605 #ifdef POINTERS_EXTEND_UNSIGNED
607 if (POINTERS_EXTEND_UNSIGNED
608 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
610 return convert_memory_address (Pmode
, op
);
622 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
623 and OP1. Return 0 if no simplification is possible.
625 Don't use this for relational operations such as EQ or LT.
626 Use simplify_relational_operation instead. */
629 simplify_binary_operation (code
, mode
, op0
, op1
)
631 enum machine_mode mode
;
634 register HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
636 unsigned int width
= GET_MODE_BITSIZE (mode
);
639 /* Relational operations don't work here. We must know the mode
640 of the operands in order to do the comparison correctly.
641 Assuming a full word can give incorrect results.
642 Consider comparing 128 with -128 in QImode. */
644 if (GET_RTX_CLASS (code
) == '<')
647 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
648 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
649 && GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
650 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
652 REAL_VALUE_TYPE f0
, f1
, value
;
655 if (setjmp (handler
))
658 set_float_handler (handler
);
660 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
661 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
662 f0
= real_value_truncate (mode
, f0
);
663 f1
= real_value_truncate (mode
, f1
);
665 #ifdef REAL_ARITHMETIC
666 #ifndef REAL_INFINITY
667 if (code
== DIV
&& REAL_VALUES_EQUAL (f1
, dconst0
))
670 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
684 #ifndef REAL_INFINITY
691 value
= MIN (f0
, f1
);
694 value
= MAX (f0
, f1
);
701 value
= real_value_truncate (mode
, value
);
702 set_float_handler (NULL_PTR
);
703 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
705 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
707 /* We can fold some multi-word operations. */
708 if (GET_MODE_CLASS (mode
) == MODE_INT
709 && width
== HOST_BITS_PER_WIDE_INT
* 2
710 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
711 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
713 HOST_WIDE_INT l1
, l2
, h1
, h2
, lv
, hv
;
715 if (GET_CODE (op0
) == CONST_DOUBLE
)
716 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
718 l1
= INTVAL (op0
), h1
= l1
< 0 ? -1 : 0;
720 if (GET_CODE (op1
) == CONST_DOUBLE
)
721 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
723 l2
= INTVAL (op1
), h2
= l2
< 0 ? -1 : 0;
728 /* A - B == A + (-B). */
729 neg_double (l2
, h2
, &lv
, &hv
);
732 /* .. fall through ... */
735 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
739 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
742 case DIV
: case MOD
: case UDIV
: case UMOD
:
743 /* We'd need to include tree.h to do this and it doesn't seem worth
748 lv
= l1
& l2
, hv
= h1
& h2
;
752 lv
= l1
| l2
, hv
= h1
| h2
;
756 lv
= l1
^ l2
, hv
= h1
^ h2
;
762 && ((unsigned HOST_WIDE_INT
) l1
763 < (unsigned HOST_WIDE_INT
) l2
)))
772 && ((unsigned HOST_WIDE_INT
) l1
773 > (unsigned HOST_WIDE_INT
) l2
)))
780 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
782 && ((unsigned HOST_WIDE_INT
) l1
783 < (unsigned HOST_WIDE_INT
) l2
)))
790 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
792 && ((unsigned HOST_WIDE_INT
) l1
793 > (unsigned HOST_WIDE_INT
) l2
)))
799 case LSHIFTRT
: case ASHIFTRT
:
801 case ROTATE
: case ROTATERT
:
802 #ifdef SHIFT_COUNT_TRUNCATED
803 if (SHIFT_COUNT_TRUNCATED
)
804 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
807 if (h2
!= 0 || l2
< 0 || l2
>= GET_MODE_BITSIZE (mode
))
810 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
811 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
813 else if (code
== ASHIFT
)
814 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
815 else if (code
== ROTATE
)
816 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
817 else /* code == ROTATERT */
818 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
825 return immed_double_const (lv
, hv
, mode
);
828 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
829 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
831 /* Even if we can't compute a constant result,
832 there are some cases worth simplifying. */
837 /* In IEEE floating point, x+0 is not the same as x. Similarly
838 for the other optimizations below. */
839 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
840 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
843 if (op1
== CONST0_RTX (mode
))
846 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
847 if (GET_CODE (op0
) == NEG
)
848 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
849 else if (GET_CODE (op1
) == NEG
)
850 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
852 /* Handle both-operands-constant cases. We can only add
853 CONST_INTs to constants since the sum of relocatable symbols
854 can't be handled by most assemblers. Don't add CONST_INT
855 to CONST_INT since overflow won't be computed properly if wider
856 than HOST_BITS_PER_WIDE_INT. */
858 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
859 && GET_CODE (op1
) == CONST_INT
)
860 return plus_constant (op0
, INTVAL (op1
));
861 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
862 && GET_CODE (op0
) == CONST_INT
)
863 return plus_constant (op1
, INTVAL (op0
));
865 /* See if this is something like X * C - X or vice versa or
866 if the multiplication is written as a shift. If so, we can
867 distribute and make a new multiply, shift, or maybe just
868 have X (if C is 2 in the example above). But don't make
869 real multiply if we didn't have one before. */
871 if (! FLOAT_MODE_P (mode
))
873 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
874 rtx lhs
= op0
, rhs
= op1
;
877 if (GET_CODE (lhs
) == NEG
)
878 coeff0
= -1, lhs
= XEXP (lhs
, 0);
879 else if (GET_CODE (lhs
) == MULT
880 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
882 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
885 else if (GET_CODE (lhs
) == ASHIFT
886 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
887 && INTVAL (XEXP (lhs
, 1)) >= 0
888 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
890 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
894 if (GET_CODE (rhs
) == NEG
)
895 coeff1
= -1, rhs
= XEXP (rhs
, 0);
896 else if (GET_CODE (rhs
) == MULT
897 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
899 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
902 else if (GET_CODE (rhs
) == ASHIFT
903 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
904 && INTVAL (XEXP (rhs
, 1)) >= 0
905 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
907 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
911 if (rtx_equal_p (lhs
, rhs
))
913 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
914 GEN_INT (coeff0
+ coeff1
));
915 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
919 /* If one of the operands is a PLUS or a MINUS, see if we can
920 simplify this by the associative law.
921 Don't use the associative law for floating point.
922 The inaccuracy makes it nonassociative,
923 and subtle programs can break if operations are associated. */
925 if (INTEGRAL_MODE_P (mode
)
926 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
927 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
928 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
934 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
935 using cc0, in which case we want to leave it as a COMPARE
936 so we can distinguish it from a register-register-copy.
938 In IEEE floating point, x-0 is not the same as x. */
940 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
941 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
942 && op1
== CONST0_RTX (mode
))
945 /* Do nothing here. */
950 /* None of these optimizations can be done for IEEE
952 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
953 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
956 /* We can't assume x-x is 0 even with non-IEEE floating point,
957 but since it is zero except in very strange circumstances, we
958 will treat it as zero with -ffast-math. */
959 if (rtx_equal_p (op0
, op1
)
960 && ! side_effects_p (op0
)
961 && (! FLOAT_MODE_P (mode
) || flag_fast_math
))
962 return CONST0_RTX (mode
);
964 /* Change subtraction from zero into negation. */
965 if (op0
== CONST0_RTX (mode
))
966 return gen_rtx_NEG (mode
, op1
);
968 /* (-1 - a) is ~a. */
969 if (op0
== constm1_rtx
)
970 return gen_rtx_NOT (mode
, op1
);
972 /* Subtracting 0 has no effect. */
973 if (op1
== CONST0_RTX (mode
))
976 /* See if this is something like X * C - X or vice versa or
977 if the multiplication is written as a shift. If so, we can
978 distribute and make a new multiply, shift, or maybe just
979 have X (if C is 2 in the example above). But don't make
980 real multiply if we didn't have one before. */
982 if (! FLOAT_MODE_P (mode
))
984 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
985 rtx lhs
= op0
, rhs
= op1
;
988 if (GET_CODE (lhs
) == NEG
)
989 coeff0
= -1, lhs
= XEXP (lhs
, 0);
990 else if (GET_CODE (lhs
) == MULT
991 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
993 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
996 else if (GET_CODE (lhs
) == ASHIFT
997 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
998 && INTVAL (XEXP (lhs
, 1)) >= 0
999 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1001 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1002 lhs
= XEXP (lhs
, 0);
1005 if (GET_CODE (rhs
) == NEG
)
1006 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1007 else if (GET_CODE (rhs
) == MULT
1008 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1010 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1013 else if (GET_CODE (rhs
) == ASHIFT
1014 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1015 && INTVAL (XEXP (rhs
, 1)) >= 0
1016 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1018 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1019 rhs
= XEXP (rhs
, 0);
1022 if (rtx_equal_p (lhs
, rhs
))
1024 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1025 GEN_INT (coeff0
- coeff1
));
1026 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1030 /* (a - (-b)) -> (a + b). */
1031 if (GET_CODE (op1
) == NEG
)
1032 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1034 /* If one of the operands is a PLUS or a MINUS, see if we can
1035 simplify this by the associative law.
1036 Don't use the associative law for floating point.
1037 The inaccuracy makes it nonassociative,
1038 and subtle programs can break if operations are associated. */
1040 if (INTEGRAL_MODE_P (mode
)
1041 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1042 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
1043 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1046 /* Don't let a relocatable value get a negative coeff. */
1047 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1048 return plus_constant (op0
, - INTVAL (op1
));
1050 /* (x - (x & y)) -> (x & ~y) */
1051 if (GET_CODE (op1
) == AND
)
1053 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1054 return simplify_gen_binary (AND
, mode
, op0
,
1055 gen_rtx_NOT (mode
, XEXP (op1
, 1)));
1056 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1057 return simplify_gen_binary (AND
, mode
, op0
,
1058 gen_rtx_NOT (mode
, XEXP (op1
, 0)));
1063 if (op1
== constm1_rtx
)
1065 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
1067 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
1070 /* In IEEE floating point, x*0 is not always 0. */
1071 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1072 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
1073 && op1
== CONST0_RTX (mode
)
1074 && ! side_effects_p (op0
))
1077 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1078 However, ANSI says we can drop signals,
1079 so we can do this anyway. */
1080 if (op1
== CONST1_RTX (mode
))
1083 /* Convert multiply by constant power of two into shift unless
1084 we are still generating RTL. This test is a kludge. */
1085 if (GET_CODE (op1
) == CONST_INT
1086 && (val
= exact_log2 (INTVAL (op1
))) >= 0
1087 /* If the mode is larger than the host word size, and the
1088 uppermost bit is set, then this isn't a power of two due
1089 to implicit sign extension. */
1090 && (width
<= HOST_BITS_PER_WIDE_INT
1091 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1092 && ! rtx_equal_function_value_matters
)
1093 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1095 if (GET_CODE (op1
) == CONST_DOUBLE
1096 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
)
1100 int op1is2
, op1ism1
;
1102 if (setjmp (handler
))
1105 set_float_handler (handler
);
1106 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1107 op1is2
= REAL_VALUES_EQUAL (d
, dconst2
);
1108 op1ism1
= REAL_VALUES_EQUAL (d
, dconstm1
);
1109 set_float_handler (NULL_PTR
);
1111 /* x*2 is x+x and x*(-1) is -x */
1112 if (op1is2
&& GET_MODE (op0
) == mode
)
1113 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1115 else if (op1ism1
&& GET_MODE (op0
) == mode
)
1116 return gen_rtx_NEG (mode
, op0
);
1121 if (op1
== const0_rtx
)
1123 if (GET_CODE (op1
) == CONST_INT
1124 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1126 if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1128 /* A | (~A) -> -1 */
1129 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1130 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1131 && ! side_effects_p (op0
)
1132 && GET_MODE_CLASS (mode
) != MODE_CC
)
1137 if (op1
== const0_rtx
)
1139 if (GET_CODE (op1
) == CONST_INT
1140 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1141 return gen_rtx_NOT (mode
, op0
);
1142 if (op0
== op1
&& ! side_effects_p (op0
)
1143 && GET_MODE_CLASS (mode
) != MODE_CC
)
1148 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1150 if (GET_CODE (op1
) == CONST_INT
1151 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1153 if (op0
== op1
&& ! side_effects_p (op0
)
1154 && GET_MODE_CLASS (mode
) != MODE_CC
)
1157 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1158 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1159 && ! side_effects_p (op0
)
1160 && GET_MODE_CLASS (mode
) != MODE_CC
)
1165 /* Convert divide by power of two into shift (divide by 1 handled
1167 if (GET_CODE (op1
) == CONST_INT
1168 && (arg1
= exact_log2 (INTVAL (op1
))) > 0)
1169 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1171 /* ... fall through ... */
1174 if (op1
== CONST1_RTX (mode
))
1177 /* In IEEE floating point, 0/x is not always 0. */
1178 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1179 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
1180 && op0
== CONST0_RTX (mode
)
1181 && ! side_effects_p (op1
))
1184 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1185 /* Change division by a constant into multiplication. Only do
1186 this with -ffast-math until an expert says it is safe in
1188 else if (GET_CODE (op1
) == CONST_DOUBLE
1189 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
1190 && op1
!= CONST0_RTX (mode
)
1194 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1196 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1198 #if defined (REAL_ARITHMETIC)
1199 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1200 return gen_rtx_MULT (mode
, op0
,
1201 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1204 gen_rtx_MULT (mode
, op0
,
1205 CONST_DOUBLE_FROM_REAL_VALUE (1./d
, mode
));
1213 /* Handle modulus by power of two (mod with 1 handled below). */
1214 if (GET_CODE (op1
) == CONST_INT
1215 && exact_log2 (INTVAL (op1
)) > 0)
1216 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1218 /* ... fall through ... */
1221 if ((op0
== const0_rtx
|| op1
== const1_rtx
)
1222 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1228 /* Rotating ~0 always results in ~0. */
1229 if (GET_CODE (op0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1230 && (unsigned HOST_WIDE_INT
) INTVAL (op0
) == GET_MODE_MASK (mode
)
1231 && ! side_effects_p (op1
))
1234 /* ... fall through ... */
1239 if (op1
== const0_rtx
)
1241 if (op0
== const0_rtx
&& ! side_effects_p (op1
))
1246 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1247 && INTVAL (op1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1248 && ! side_effects_p (op0
))
1250 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1255 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1256 && ((unsigned HOST_WIDE_INT
) INTVAL (op1
)
1257 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1258 && ! side_effects_p (op0
))
1260 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1265 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1267 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1272 if (op1
== constm1_rtx
&& ! side_effects_p (op0
))
1274 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1285 /* Get the integer argument values in two forms:
1286 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1288 arg0
= INTVAL (op0
);
1289 arg1
= INTVAL (op1
);
1291 if (width
< HOST_BITS_PER_WIDE_INT
)
1293 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1294 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1297 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1298 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1301 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1302 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1310 /* Compute the value of the arithmetic. */
1315 val
= arg0s
+ arg1s
;
1319 val
= arg0s
- arg1s
;
1323 val
= arg0s
* arg1s
;
1329 val
= arg0s
/ arg1s
;
1335 val
= arg0s
% arg1s
;
1341 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1347 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1363 /* If shift count is undefined, don't fold it; let the machine do
1364 what it wants. But truncate it if the machine will do that. */
1368 #ifdef SHIFT_COUNT_TRUNCATED
1369 if (SHIFT_COUNT_TRUNCATED
)
1373 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1380 #ifdef SHIFT_COUNT_TRUNCATED
1381 if (SHIFT_COUNT_TRUNCATED
)
1385 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1392 #ifdef SHIFT_COUNT_TRUNCATED
1393 if (SHIFT_COUNT_TRUNCATED
)
1397 val
= arg0s
>> arg1
;
1399 /* Bootstrap compiler may not have sign extended the right shift.
1400 Manually extend the sign to insure bootstrap cc matches gcc. */
1401 if (arg0s
< 0 && arg1
> 0)
1402 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1411 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1412 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1420 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1421 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1425 /* Do nothing here. */
1429 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1433 val
= ((unsigned HOST_WIDE_INT
) arg0
1434 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1438 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1442 val
= ((unsigned HOST_WIDE_INT
) arg0
1443 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1450 val
= trunc_int_for_mode (val
, mode
);
1452 return GEN_INT (val
);
1455 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1458 Rather than test for specific case, we do this by a brute-force method
1459 and do all possible simplifications until no more changes occur. Then
1460 we rebuild the operation. */
1463 simplify_plus_minus (code
, mode
, op0
, op1
)
1465 enum machine_mode mode
;
1471 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
= 0;
1472 int first
= 1, negate
= 0, changed
;
1475 bzero ((char *) ops
, sizeof ops
);
1477 /* Set up the two operands and then expand them until nothing has been
1478 changed. If we run out of room in our array, give up; this should
1479 almost never happen. */
1481 ops
[0] = op0
, ops
[1] = op1
, negs
[0] = 0, negs
[1] = (code
== MINUS
);
1488 for (i
= 0; i
< n_ops
; i
++)
1489 switch (GET_CODE (ops
[i
]))
1496 ops
[n_ops
] = XEXP (ops
[i
], 1);
1497 negs
[n_ops
++] = GET_CODE (ops
[i
]) == MINUS
? !negs
[i
] : negs
[i
];
1498 ops
[i
] = XEXP (ops
[i
], 0);
1504 ops
[i
] = XEXP (ops
[i
], 0);
1505 negs
[i
] = ! negs
[i
];
1510 ops
[i
] = XEXP (ops
[i
], 0);
1516 /* ~a -> (-a - 1) */
1519 ops
[n_ops
] = constm1_rtx
;
1520 negs
[n_ops
++] = negs
[i
];
1521 ops
[i
] = XEXP (ops
[i
], 0);
1522 negs
[i
] = ! negs
[i
];
1529 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0, changed
= 1;
1537 /* If we only have two operands, we can't do anything. */
1541 /* Now simplify each pair of operands until nothing changes. The first
1542 time through just simplify constants against each other. */
1549 for (i
= 0; i
< n_ops
- 1; i
++)
1550 for (j
= i
+ 1; j
< n_ops
; j
++)
1551 if (ops
[i
] != 0 && ops
[j
] != 0
1552 && (! first
|| (CONSTANT_P (ops
[i
]) && CONSTANT_P (ops
[j
]))))
1554 rtx lhs
= ops
[i
], rhs
= ops
[j
];
1555 enum rtx_code ncode
= PLUS
;
1557 if (negs
[i
] && ! negs
[j
])
1558 lhs
= ops
[j
], rhs
= ops
[i
], ncode
= MINUS
;
1559 else if (! negs
[i
] && negs
[j
])
1562 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
1565 ops
[i
] = tem
, ops
[j
] = 0;
1566 negs
[i
] = negs
[i
] && negs
[j
];
1567 if (GET_CODE (tem
) == NEG
)
1568 ops
[i
] = XEXP (tem
, 0), negs
[i
] = ! negs
[i
];
1570 if (GET_CODE (ops
[i
]) == CONST_INT
&& negs
[i
])
1571 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0;
1579 /* Pack all the operands to the lower-numbered entries and give up if
1580 we didn't reduce the number of operands we had. Make sure we
1581 count a CONST as two operands. If we have the same number of
1582 operands, but have made more CONSTs than we had, this is also
1583 an improvement, so accept it. */
1585 for (i
= 0, j
= 0; j
< n_ops
; j
++)
1588 ops
[i
] = ops
[j
], negs
[i
++] = negs
[j
];
1589 if (GET_CODE (ops
[j
]) == CONST
)
1593 if (i
+ n_consts
> input_ops
1594 || (i
+ n_consts
== input_ops
&& n_consts
<= input_consts
))
1599 /* If we have a CONST_INT, put it last. */
1600 for (i
= 0; i
< n_ops
- 1; i
++)
1601 if (GET_CODE (ops
[i
]) == CONST_INT
)
1603 tem
= ops
[n_ops
- 1], ops
[n_ops
- 1] = ops
[i
] , ops
[i
] = tem
;
1604 j
= negs
[n_ops
- 1], negs
[n_ops
- 1] = negs
[i
], negs
[i
] = j
;
1607 /* Put a non-negated operand first. If there aren't any, make all
1608 operands positive and negate the whole thing later. */
1609 for (i
= 0; i
< n_ops
&& negs
[i
]; i
++)
1614 for (i
= 0; i
< n_ops
; i
++)
1620 tem
= ops
[0], ops
[0] = ops
[i
], ops
[i
] = tem
;
1621 j
= negs
[0], negs
[0] = negs
[i
], negs
[i
] = j
;
1624 /* Now make the result by performing the requested operations. */
1626 for (i
= 1; i
< n_ops
; i
++)
1627 result
= simplify_gen_binary (negs
[i
] ? MINUS
: PLUS
, mode
, result
, ops
[i
]);
1629 return negate
? gen_rtx_NEG (mode
, result
) : result
;
1634 rtx op0
, op1
; /* Input */
1635 int equal
, op0lt
, op1lt
; /* Output */
1639 check_fold_consts (data
)
1642 struct cfc_args
*args
= (struct cfc_args
*) data
;
1643 REAL_VALUE_TYPE d0
, d1
;
1645 REAL_VALUE_FROM_CONST_DOUBLE (d0
, args
->op0
);
1646 REAL_VALUE_FROM_CONST_DOUBLE (d1
, args
->op1
);
1647 args
->equal
= REAL_VALUES_EQUAL (d0
, d1
);
1648 args
->op0lt
= REAL_VALUES_LESS (d0
, d1
);
1649 args
->op1lt
= REAL_VALUES_LESS (d1
, d0
);
1652 /* Like simplify_binary_operation except used for relational operators.
1653 MODE is the mode of the operands, not that of the result. If MODE
1654 is VOIDmode, both operands must also be VOIDmode and we compare the
1655 operands in "infinite precision".
1657 If no simplification is possible, this function returns zero. Otherwise,
1658 it returns either const_true_rtx or const0_rtx. */
1661 simplify_relational_operation (code
, mode
, op0
, op1
)
1663 enum machine_mode mode
;
1666 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
1669 /* If op0 is a compare, extract the comparison arguments from it. */
1670 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
1671 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
1673 /* We can't simplify MODE_CC values since we don't know what the
1674 actual comparison is. */
1675 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
1682 /* Make sure the constant is second. */
1683 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
1684 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
1686 tem
= op0
, op0
= op1
, op1
= tem
;
1687 code
= swap_condition (code
);
1690 /* For integer comparisons of A and B maybe we can simplify A - B and can
1691 then simplify a comparison of that with zero. If A and B are both either
1692 a register or a CONST_INT, this can't help; testing for these cases will
1693 prevent infinite recursion here and speed things up.
1695 If CODE is an unsigned comparison, then we can never do this optimization,
1696 because it gives an incorrect result if the subtraction wraps around zero.
1697 ANSI C defines unsigned operations such that they never overflow, and
1698 thus such cases can not be ignored. */
1700 if (INTEGRAL_MODE_P (mode
) && op1
!= const0_rtx
1701 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (op0
) == CONST_INT
)
1702 && (GET_CODE (op1
) == REG
|| GET_CODE (op1
) == CONST_INT
))
1703 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
1704 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
1705 return simplify_relational_operation (signed_condition (code
),
1706 mode
, tem
, const0_rtx
);
1708 /* For non-IEEE floating-point, if the two operands are equal, we know the
1710 if (rtx_equal_p (op0
, op1
)
1711 && (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1712 || ! FLOAT_MODE_P (GET_MODE (op0
)) || flag_fast_math
))
1713 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
1715 /* If the operands are floating-point constants, see if we can fold
1717 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1718 else if (GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
1719 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_FLOAT
)
1721 struct cfc_args args
;
1723 /* Setup input for check_fold_consts() */
1727 if (do_float_handler(check_fold_consts
, (PTR
) &args
) == 0)
1728 /* We got an exception from check_fold_consts() */
1731 /* Receive output from check_fold_consts() */
1733 op0lt
= op0ltu
= args
.op0lt
;
1734 op1lt
= op1ltu
= args
.op1lt
;
1736 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1738 /* Otherwise, see if the operands are both integers. */
1739 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
1740 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
1741 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
1743 int width
= GET_MODE_BITSIZE (mode
);
1744 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
1745 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
1747 /* Get the two words comprising each integer constant. */
1748 if (GET_CODE (op0
) == CONST_DOUBLE
)
1750 l0u
= l0s
= CONST_DOUBLE_LOW (op0
);
1751 h0u
= h0s
= CONST_DOUBLE_HIGH (op0
);
1755 l0u
= l0s
= INTVAL (op0
);
1756 h0u
= h0s
= l0s
< 0 ? -1 : 0;
1759 if (GET_CODE (op1
) == CONST_DOUBLE
)
1761 l1u
= l1s
= CONST_DOUBLE_LOW (op1
);
1762 h1u
= h1s
= CONST_DOUBLE_HIGH (op1
);
1766 l1u
= l1s
= INTVAL (op1
);
1767 h1u
= h1s
= l1s
< 0 ? -1 : 0;
1770 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1771 we have to sign or zero-extend the values. */
1772 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
1773 h0u
= h1u
= 0, h0s
= l0s
< 0 ? -1 : 0, h1s
= l1s
< 0 ? -1 : 0;
1775 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
1777 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1778 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1780 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1781 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1783 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1784 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1787 equal
= (h0u
== h1u
&& l0u
== l1u
);
1788 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0s
< l1s
));
1789 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1s
< l0s
));
1790 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
1791 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
1794 /* Otherwise, there are some code-specific tests we can make. */
1800 /* References to the frame plus a constant or labels cannot
1801 be zero, but a SYMBOL_REF can due to #pragma weak. */
1802 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1803 || GET_CODE (op0
) == LABEL_REF
)
1804 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1805 /* On some machines, the ap reg can be 0 sometimes. */
1806 && op0
!= arg_pointer_rtx
1813 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1814 || GET_CODE (op0
) == LABEL_REF
)
1815 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1816 && op0
!= arg_pointer_rtx
1819 return const_true_rtx
;
1823 /* Unsigned values are never negative. */
1824 if (op1
== const0_rtx
)
1825 return const_true_rtx
;
1829 if (op1
== const0_rtx
)
1834 /* Unsigned values are never greater than the largest
1836 if (GET_CODE (op1
) == CONST_INT
1837 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1838 && INTEGRAL_MODE_P (mode
))
1839 return const_true_rtx
;
1843 if (GET_CODE (op1
) == CONST_INT
1844 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1845 && INTEGRAL_MODE_P (mode
))
1856 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
1861 return equal
? const_true_rtx
: const0_rtx
;
1863 return ! equal
? const_true_rtx
: const0_rtx
;
1865 return op0lt
? const_true_rtx
: const0_rtx
;
1867 return op1lt
? const_true_rtx
: const0_rtx
;
1869 return op0ltu
? const_true_rtx
: const0_rtx
;
1871 return op1ltu
? const_true_rtx
: const0_rtx
;
1873 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
1875 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
1877 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
1879 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
1885 /* Simplify CODE, an operation with result mode MODE and three operands,
1886 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
1887 a constant. Return 0 if no simplifications is possible. */
1890 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
1892 enum machine_mode mode
, op0_mode
;
1895 unsigned int width
= GET_MODE_BITSIZE (mode
);
1897 /* VOIDmode means "infinite" precision. */
1899 width
= HOST_BITS_PER_WIDE_INT
;
1905 if (GET_CODE (op0
) == CONST_INT
1906 && GET_CODE (op1
) == CONST_INT
1907 && GET_CODE (op2
) == CONST_INT
1908 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_BITSIZE (op0_mode
)
1909 && width
<= HOST_BITS_PER_WIDE_INT
)
1911 /* Extracting a bit-field from a constant */
1912 HOST_WIDE_INT val
= INTVAL (op0
);
1914 if (BITS_BIG_ENDIAN
)
1915 val
>>= (GET_MODE_BITSIZE (op0_mode
)
1916 - INTVAL (op2
) - INTVAL (op1
));
1918 val
>>= INTVAL (op2
);
1920 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
1922 /* First zero-extend. */
1923 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
1924 /* If desired, propagate sign bit. */
1925 if (code
== SIGN_EXTRACT
1926 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
1927 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
1930 /* Clear the bits that don't belong in our mode,
1931 unless they and our sign bit are all one.
1932 So we get either a reasonable negative value or a reasonable
1933 unsigned value for this mode. */
1934 if (width
< HOST_BITS_PER_WIDE_INT
1935 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
1936 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
1937 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1939 return GEN_INT (val
);
1944 if (GET_CODE (op0
) == CONST_INT
)
1945 return op0
!= const0_rtx
? op1
: op2
;
1947 /* Convert a == b ? b : a to "a". */
1948 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
1949 && rtx_equal_p (XEXP (op0
, 0), op1
)
1950 && rtx_equal_p (XEXP (op0
, 1), op2
))
1952 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
1953 && rtx_equal_p (XEXP (op0
, 1), op1
)
1954 && rtx_equal_p (XEXP (op0
, 0), op2
))
1956 else if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
1959 = simplify_relational_operation (GET_CODE (op0
), op0_mode
,
1960 XEXP (op0
, 0), XEXP (op0
, 1));
1962 /* See if any simplifications were possible. */
1963 if (temp
== const0_rtx
)
1965 else if (temp
== const1_rtx
)
1977 /* Simplify X, an rtx expression.
1979 Return the simplified expression or NULL if no simplifications
1982 This is the preferred entry point into the simplification routines;
1983 however, we still allow passes to call the more specific routines.
1985    Right now GCC has three (yes, three) major bodies of RTL simplification
1986 code that need to be unified.
1988 1. fold_rtx in cse.c. This code uses various CSE specific
1989 information to aid in RTL simplification.
1991 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
1992 it uses combine specific information to aid in RTL
1995 3. The routines in this file.
1998 Long term we want to only have one body of simplification code; to
1999 get to that state I recommend the following steps:
2001     1. Pore over fold_rtx & simplify_rtx and move any simplifications
2002 which are not pass dependent state into these routines.
2004 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2005 use this routine whenever possible.
2007 3. Allow for pass dependent state to be provided to these
2008 routines and add simplifications based on the pass dependent
2009 state. Remove code from cse.c & combine.c that becomes
2012 It will take time, but ultimately the compiler will be easier to
2013 maintain and improve. It's totally silly that when we add a
2014 simplification that it needs to be added to 4 places (3 for RTL
2015    simplification and 1 for tree simplification).  */
2022 enum machine_mode mode
;
2024 mode
= GET_MODE (x
);
2025 code
= GET_CODE (x
);
2027 switch (GET_RTX_CLASS (code
))
2030 return simplify_unary_operation (code
, mode
,
2031 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
2034 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
2038 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
2039 XEXP (x
, 0), XEXP (x
, 1), XEXP (x
, 2));
2042 return simplify_relational_operation (code
, GET_MODE (XEXP (x
, 0)),
2043 XEXP (x
, 0), XEXP (x
, 1));
2050 /* Allocate a struct elt_list and fill in its two elements with the
2053 static struct elt_list
*
2054 new_elt_list (next
, elt
)
2055 struct elt_list
*next
;
2058 struct elt_list
*el
= empty_elt_lists
;
2061 empty_elt_lists
= el
->next
;
2063 el
= (struct elt_list
*) obstack_alloc (&cselib_obstack
,
2064 sizeof (struct elt_list
));
2070 /* Allocate a struct elt_loc_list and fill in its two elements with the
2073 static struct elt_loc_list
*
2074 new_elt_loc_list (next
, loc
)
2075 struct elt_loc_list
*next
;
2078 struct elt_loc_list
*el
= empty_elt_loc_lists
;
2081 empty_elt_loc_lists
= el
->next
;
2083 el
= (struct elt_loc_list
*) obstack_alloc (&cselib_obstack
,
2084 sizeof (struct elt_loc_list
));
2087 el
->setting_insn
= cselib_current_insn
;
2091 /* The elt_list at *PL is no longer needed. Unchain it and free its
2095 unchain_one_elt_list (pl
)
2096 struct elt_list
**pl
;
2098 struct elt_list
*l
= *pl
;
2101 l
->next
= empty_elt_lists
;
2102 empty_elt_lists
= l
;
2105 /* Likewise for elt_loc_lists. */
2108 unchain_one_elt_loc_list (pl
)
2109 struct elt_loc_list
**pl
;
2111 struct elt_loc_list
*l
= *pl
;
2114 l
->next
= empty_elt_loc_lists
;
2115 empty_elt_loc_lists
= l
;
2118 /* Likewise for cselib_vals. This also frees the addr_list associated with
2122 unchain_one_value (v
)
2125 while (v
->addr_list
)
2126 unchain_one_elt_list (&v
->addr_list
);
2128 v
->u
.next_free
= empty_vals
;
2132 /* Remove all entries from the hash table. Also used during
2140 for (i
= 0; i
< cselib_nregs
; i
++)
2143 htab_empty (hash_table
);
2144 obstack_free (&cselib_obstack
, cselib_startobj
);
2147 empty_elt_lists
= 0;
2148 empty_elt_loc_lists
= 0;
2149 n_useless_values
= 0;
2151 next_unknown_value
= 0;
2154 /* The equality test for our hash table. The first argument ENTRY is a table
2155 element (i.e. a cselib_val), while the second arg X is an rtx. */
2158 entry_and_rtx_equal_p (entry
, x_arg
)
2159 const void *entry
, *x_arg
;
2161 struct elt_loc_list
*l
;
2162 const cselib_val
*v
= (const cselib_val
*) entry
;
2163 rtx x
= (rtx
) x_arg
;
2165 /* We don't guarantee that distinct rtx's have different hash values,
2166 so we need to do a comparison. */
2167 for (l
= v
->locs
; l
; l
= l
->next
)
2168 if (rtx_equal_for_cselib_p (l
->loc
, x
))
2174 /* The hash function for our hash table. The value is always computed with
2175 hash_rtx when adding an element; this function just extracts the hash
2176 value from a cselib_val structure. */
2179 get_value_hash (entry
)
2182 const cselib_val
*v
= (const cselib_val
*) entry
;
2186 /* If there are no more locations that hold a value, the value has become
2187 useless. See whether that is the case for V. Return 1 if this has
2188 just become useless. */
2191 check_value_useless (v
)
2200 /* This is a marker to indicate that the value will be reclaimed. */
2206 /* Return true if X contains a VALUE rtx. If ONLY_USELESS is set, we
2207 only return true for values which point to a cselib_val whose value
2208 element has been set to zero, which implies the cselib_val will be
2212 references_value_p (x
, only_useless
)
2216 enum rtx_code code
= GET_CODE (x
);
2217 const char *fmt
= GET_RTX_FORMAT (code
);
2220 if (GET_CODE (x
) == VALUE
2221 && (! only_useless
|| CSELIB_VAL_PTR (x
)->value
== 0))
2224 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2226 if (fmt
[i
] == 'e' && references_value_p (XEXP (x
, i
), only_useless
))
2228 else if (fmt
[i
] == 'E')
2229 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2230 if (references_value_p (XVECEXP (x
, i
, j
), only_useless
))
2237 /* For all locations found in X, delete locations that reference useless
2238 values (i.e. values without any location). Called through
2242 discard_useless_locs (x
, info
)
2244 void *info ATTRIBUTE_UNUSED
;
2246 cselib_val
*v
= (cselib_val
*)*x
;
2247 struct elt_loc_list
**p
= &v
->locs
;
2251 if (references_value_p ((*p
)->loc
, 1))
2252 unchain_one_elt_loc_list (p
);
2257 if (check_value_useless (v
))
2258 values_became_useless
= 1;
2263 /* If X is a value with no locations, remove it from the hashtable. */
2266 discard_useless_values (x
, info
)
2268 void *info ATTRIBUTE_UNUSED
;
2270 cselib_val
*v
= (cselib_val
*)*x
;
2274 htab_clear_slot (hash_table
, x
);
2275 unchain_one_value (v
);
2282 /* Clean out useless values (i.e. those which no longer have locations
2283 associated with them) from the hash table. */
2286 remove_useless_values ()
2288 /* First pass: eliminate locations that reference the value. That in
2289 turn can make more values useless. */
2292 values_became_useless
= 0;
2293 htab_traverse (hash_table
, discard_useless_locs
, 0);
2295 while (values_became_useless
);
2297 /* Second pass: actually remove the values. */
2298 htab_traverse (hash_table
, discard_useless_values
, 0);
2300 if (n_useless_values
!= 0)
2304 /* Return nonzero if we can prove that X and Y contain the same value, taking
2305 our gathered information into account. */
2308 rtx_equal_for_cselib_p (x
, y
)
2315 if (GET_CODE (x
) == REG
|| GET_CODE (x
) == MEM
)
2317 cselib_val
*e
= cselib_lookup (x
, VOIDmode
, 0);
2323 if (GET_CODE (y
) == REG
|| GET_CODE (y
) == MEM
)
2325 cselib_val
*e
= cselib_lookup (y
, VOIDmode
, 0);
2334 if (GET_CODE (x
) == VALUE
&& GET_CODE (y
) == VALUE
)
2335 return CSELIB_VAL_PTR (x
) == CSELIB_VAL_PTR (y
);
2337 if (GET_CODE (x
) == VALUE
)
2339 cselib_val
*e
= CSELIB_VAL_PTR (x
);
2340 struct elt_loc_list
*l
;
2342 for (l
= e
->locs
; l
; l
= l
->next
)
2346 /* Avoid infinite recursion. */
2347 if (GET_CODE (t
) == REG
|| GET_CODE (t
) == MEM
)
2349 else if (rtx_equal_for_cselib_p (t
, y
))
2356 if (GET_CODE (y
) == VALUE
)
2358 cselib_val
*e
= CSELIB_VAL_PTR (y
);
2359 struct elt_loc_list
*l
;
2361 for (l
= e
->locs
; l
; l
= l
->next
)
2365 if (GET_CODE (t
) == REG
|| GET_CODE (t
) == MEM
)
2367 else if (rtx_equal_for_cselib_p (x
, t
))
2374 if (GET_CODE (x
) != GET_CODE (y
) || GET_MODE (x
) != GET_MODE (y
))
2377 /* This won't be handled correctly by the code below. */
2378 if (GET_CODE (x
) == LABEL_REF
)
2379 return XEXP (x
, 0) == XEXP (y
, 0);
2381 code
= GET_CODE (x
);
2382 fmt
= GET_RTX_FORMAT (code
);
2384 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2391 if (XWINT (x
, i
) != XWINT (y
, i
))
2397 if (XINT (x
, i
) != XINT (y
, i
))
2403 /* Two vectors must have the same length. */
2404 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
2407 /* And the corresponding elements must match. */
2408 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2409 if (! rtx_equal_for_cselib_p (XVECEXP (x
, i
, j
),
2415 if (! rtx_equal_for_cselib_p (XEXP (x
, i
), XEXP (y
, i
)))
2421 if (strcmp (XSTR (x
, i
), XSTR (y
, i
)))
2426 /* These are just backpointers, so they don't matter. */
2433 /* It is believed that rtx's at this level will never
2434 contain anything but integers and other rtx's,
2435 except for within LABEL_REFs and SYMBOL_REFs. */
2443 /* Hash an rtx. Return 0 if we couldn't hash the rtx.
2444 For registers and memory locations, we look up their cselib_val structure
2445 and return its VALUE element.
2446 Possible reasons for return 0 are: the object is volatile, or we couldn't
2447 find a register or memory location in the table and CREATE is zero. If
2448 CREATE is nonzero, table elts are created for regs and mem.
2449 MODE is used in hashing for CONST_INTs only;
2450 otherwise the mode of X is used. */
2453 hash_rtx (x
, mode
, create
)
2455 enum machine_mode mode
;
2462 unsigned int hash
= 0;
2464 /* repeat is used to turn tail-recursion into iteration. */
2466 code
= GET_CODE (x
);
2467 hash
+= (unsigned) code
+ (unsigned) GET_MODE (x
);
2473 e
= cselib_lookup (x
, GET_MODE (x
), create
);
2481 hash
+= ((unsigned) CONST_INT
<< 7) + (unsigned) mode
+ INTVAL (x
);
2482 return hash
? hash
: CONST_INT
;
2485 /* This is like the general case, except that it only counts
2486 the integers representing the constant. */
2487 hash
+= (unsigned) code
+ (unsigned) GET_MODE (x
);
2488 if (GET_MODE (x
) != VOIDmode
)
2489 for (i
= 2; i
< GET_RTX_LENGTH (CONST_DOUBLE
); i
++)
2490 hash
+= XWINT (x
, i
);
2492 hash
+= ((unsigned) CONST_DOUBLE_LOW (x
)
2493 + (unsigned) CONST_DOUBLE_HIGH (x
));
2494 return hash
? hash
: CONST_DOUBLE
;
2496 /* Assume there is only one rtx object for any given label. */
2499 += ((unsigned) LABEL_REF
<< 7) + (unsigned long) XEXP (x
, 0);
2500 return hash
? hash
: LABEL_REF
;
2504 += ((unsigned) SYMBOL_REF
<< 7) + (unsigned long) XSTR (x
, 0);
2505 return hash
? hash
: SYMBOL_REF
;
2514 case UNSPEC_VOLATILE
:
2518 if (MEM_VOLATILE_P (x
))
2527 i
= GET_RTX_LENGTH (code
) - 1;
2528 fmt
= GET_RTX_FORMAT (code
);
2533 rtx tem
= XEXP (x
, i
);
2534 unsigned int tem_hash
;
2536 /* If we are about to do the last recursive call
2537 needed at this level, change it into iteration.
2538 This function is called enough to be worth it. */
2545 tem_hash
= hash_rtx (tem
, 0, create
);
2551 else if (fmt
[i
] == 'E')
2552 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2554 unsigned int tem_hash
= hash_rtx (XVECEXP (x
, i
, j
), 0, create
);
2561 else if (fmt
[i
] == 's')
2563 const unsigned char *p
= (const unsigned char *) XSTR (x
, i
);
2569 else if (fmt
[i
] == 'i')
2570 hash
+= XINT (x
, i
);
2571 else if (fmt
[i
] == '0' || fmt
[i
] == 't')
2577 return hash
? hash
: 1 + GET_CODE (x
);
2580 /* Create a new value structure for VALUE and initialize it. The mode of the
2584 new_cselib_val (value
, mode
)
2586 enum machine_mode mode
;
2588 cselib_val
*e
= empty_vals
;
2591 empty_vals
= e
->u
.next_free
;
2593 e
= (cselib_val
*) obstack_alloc (&cselib_obstack
, sizeof (cselib_val
));
2599 e
->u
.val_rtx
= gen_rtx_VALUE (mode
);
2600 CSELIB_VAL_PTR (e
->u
.val_rtx
) = e
;
2606 /* ADDR_ELT is a value that is used as address. MEM_ELT is the value that
2607 contains the data at this address. X is a MEM that represents the
2608 value. Update the two value structures to represent this situation. */
2611 add_mem_for_addr (addr_elt
, mem_elt
, x
)
2612 cselib_val
*addr_elt
, *mem_elt
;
2616 struct elt_loc_list
*l
;
2618 /* Avoid duplicates. */
2619 for (l
= mem_elt
->locs
; l
; l
= l
->next
)
2620 if (GET_CODE (l
->loc
) == MEM
2621 && CSELIB_VAL_PTR (XEXP (l
->loc
, 0)) == addr_elt
)
2624 new = gen_rtx_MEM (GET_MODE (x
), addr_elt
->u
.val_rtx
);
2625 addr_elt
->addr_list
= new_elt_list (addr_elt
->addr_list
, mem_elt
);
2627 RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x
);
2628 MEM_COPY_ATTRIBUTES (new, x
);
2630 mem_elt
->locs
= new_elt_loc_list (mem_elt
->locs
, new);
2633 /* Subroutine of cselib_lookup. Return a value for X, which is a MEM rtx.
2634 If CREATE, make a new one if we haven't seen it before. */
2637 cselib_lookup_mem (x
, create
)
2643 cselib_val
*mem_elt
;
2646 if (MEM_VOLATILE_P (x
) || GET_MODE (x
) == BLKmode
2647 || (FLOAT_MODE_P (GET_MODE (x
)) && flag_float_store
))
2650 /* Look up the value for the address. */
2651 addr
= cselib_lookup (XEXP (x
, 0), GET_MODE (x
), create
);
2655 /* Find a value that describes a value of our mode at that address. */
2656 for (l
= addr
->addr_list
; l
; l
= l
->next
)
2657 if (GET_MODE (l
->elt
->u
.val_rtx
) == GET_MODE (x
))
2663 mem_elt
= new_cselib_val (++next_unknown_value
, GET_MODE (x
));
2664 add_mem_for_addr (addr
, mem_elt
, x
);
2665 slot
= htab_find_slot_with_hash (hash_table
, x
, mem_elt
->value
, INSERT
);
2670 /* Walk rtx X and replace all occurrences of REG and MEM subexpressions
2671 with VALUE expressions. This way, it becomes independent of changes
2672 to registers and memory.
2673 X isn't actually modified; if modifications are needed, new rtl is
2674 allocated. However, the return value can share rtl with X. */
2677 cselib_subst_to_values (x
)
2680 enum rtx_code code
= GET_CODE (x
);
2681 const char *fmt
= GET_RTX_FORMAT (code
);
2690 for (l
= REG_VALUES (REGNO (x
)); l
; l
= l
->next
)
2691 if (GET_MODE (l
->elt
->u
.val_rtx
) == GET_MODE (x
))
2692 return l
->elt
->u
.val_rtx
;
2697 e
= cselib_lookup_mem (x
, 0);
2700 return e
->u
.val_rtx
;
2702 /* CONST_DOUBLEs must be special-cased here so that we won't try to
2703 look up the CONST_DOUBLE_MEM inside. */
2712 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2716 rtx t
= cselib_subst_to_values (XEXP (x
, i
));
2718 if (t
!= XEXP (x
, i
) && x
== copy
)
2719 copy
= shallow_copy_rtx (x
);
2723 else if (fmt
[i
] == 'E')
2727 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2729 rtx t
= cselib_subst_to_values (XVECEXP (x
, i
, j
));
2731 if (t
!= XVECEXP (x
, i
, j
) && XVEC (x
, i
) == XVEC (copy
, i
))
2734 copy
= shallow_copy_rtx (x
);
2736 XVEC (copy
, i
) = rtvec_alloc (XVECLEN (x
, i
));
2737 for (k
= 0; k
< j
; k
++)
2738 XVECEXP (copy
, i
, k
) = XVECEXP (x
, i
, k
);
2741 XVECEXP (copy
, i
, j
) = t
;
2749 /* Look up the rtl expression X in our tables and return the value it has.
2750 If CREATE is zero, we return NULL if we don't know the value. Otherwise,
2751 we create a new one if possible, using mode MODE if X doesn't have a mode
2752 (i.e. because it's a constant). */
2755 cselib_lookup (x
, mode
, create
)
2757 enum machine_mode mode
;
2762 unsigned int hashval
;
2764 if (GET_MODE (x
) != VOIDmode
)
2765 mode
= GET_MODE (x
);
2767 if (GET_CODE (x
) == VALUE
)
2768 return CSELIB_VAL_PTR (x
);
2770 if (GET_CODE (x
) == REG
)
2773 unsigned int i
= REGNO (x
);
2775 for (l
= REG_VALUES (i
); l
; l
= l
->next
)
2776 if (mode
== GET_MODE (l
->elt
->u
.val_rtx
))
2782 e
= new_cselib_val (++next_unknown_value
, GET_MODE (x
));
2783 e
->locs
= new_elt_loc_list (e
->locs
, x
);
2784 REG_VALUES (i
) = new_elt_list (REG_VALUES (i
), e
);
2785 slot
= htab_find_slot_with_hash (hash_table
, x
, e
->value
, INSERT
);
2790 if (GET_CODE (x
) == MEM
)
2791 return cselib_lookup_mem (x
, create
);
2793 hashval
= hash_rtx (x
, mode
, create
);
2794 /* Can't even create if hashing is not possible. */
2798 slot
= htab_find_slot_with_hash (hash_table
, x
, hashval
,
2799 create
? INSERT
: NO_INSERT
);
2803 e
= (cselib_val
*) *slot
;
2807 e
= new_cselib_val (hashval
, mode
);
2809 /* We have to fill the slot before calling cselib_subst_to_values:
2810 the hash table is inconsistent until we do so, and
2811 cselib_subst_to_values will need to do lookups. */
2813 e
->locs
= new_elt_loc_list (e
->locs
, cselib_subst_to_values (x
));
2817 /* Invalidate any entries in reg_values that overlap REGNO. This is called
2818 if REGNO is changing. MODE is the mode of the assignment to REGNO, which
2819 is used to determine how many hard registers are being changed. If MODE
2820 is VOIDmode, then only REGNO is being changed; this is used when
2821 invalidating call clobbered registers across a call. */
2824 cselib_invalidate_regno (regno
, mode
)
2826 enum machine_mode mode
;
2828 unsigned int endregno
;
2831 /* If we see pseudos after reload, something is _wrong_. */
2832 if (reload_completed
&& regno
>= FIRST_PSEUDO_REGISTER
2833 && reg_renumber
[regno
] >= 0)
2836 /* Determine the range of registers that must be invalidated. For
2837 pseudos, only REGNO is affected. For hard regs, we must take MODE
2838 into account, and we must also invalidate lower register numbers
2839 if they contain values that overlap REGNO. */
2840 endregno
= regno
+ 1;
2841 if (regno
< FIRST_PSEUDO_REGISTER
&& mode
!= VOIDmode
)
2842 endregno
= regno
+ HARD_REGNO_NREGS (regno
, mode
);
2844 for (i
= 0; i
< endregno
; i
++)
2846 struct elt_list
**l
= ®_VALUES (i
);
2848 /* Go through all known values for this reg; if it overlaps the range
2849 we're invalidating, remove the value. */
2852 cselib_val
*v
= (*l
)->elt
;
2853 struct elt_loc_list
**p
;
2854 unsigned int this_last
= i
;
2856 if (i
< FIRST_PSEUDO_REGISTER
)
2857 this_last
+= HARD_REGNO_NREGS (i
, GET_MODE (v
->u
.val_rtx
)) - 1;
2859 if (this_last
< regno
)
2865 /* We have an overlap. */
2866 unchain_one_elt_list (l
);
2868 /* Now, we clear the mapping from value to reg. It must exist, so
2869 this code will crash intentionally if it doesn't. */
2870 for (p
= &v
->locs
; ; p
= &(*p
)->next
)
2874 if (GET_CODE (x
) == REG
&& REGNO (x
) == i
)
2876 unchain_one_elt_loc_list (p
);
2881 check_value_useless (v
);
2886 /* The memory at address MEM_BASE is being changed.
2887 Return whether this change will invalidate VAL. */
2890 cselib_mem_conflict_p (mem_base
, val
)
2898 code
= GET_CODE (val
);
2901 /* Get rid of a few simple cases quickly. */
2914 if (GET_MODE (mem_base
) == BLKmode
2915 || GET_MODE (val
) == BLKmode
2916 || anti_dependence (val
, mem_base
))
2919 /* The address may contain nested MEMs. */
2926 fmt
= GET_RTX_FORMAT (code
);
2927 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2931 if (cselib_mem_conflict_p (mem_base
, XEXP (val
, i
)))
2934 else if (fmt
[i
] == 'E')
2935 for (j
= 0; j
< XVECLEN (val
, i
); j
++)
2936 if (cselib_mem_conflict_p (mem_base
, XVECEXP (val
, i
, j
)))
2943 /* For the value found in SLOT, walk its locations to determine if any overlap
2944 INFO (which is a MEM rtx). */
2947 cselib_invalidate_mem_1 (slot
, info
)
2951 cselib_val
*v
= (cselib_val
*) *slot
;
2952 rtx mem_rtx
= (rtx
) info
;
2953 struct elt_loc_list
**p
= &v
->locs
;
2959 struct elt_list
**mem_chain
;
2961 /* MEMs may occur in locations only at the top level; below
2962 that every MEM or REG is substituted by its VALUE. */
2963 if (GET_CODE (x
) != MEM
2964 || ! cselib_mem_conflict_p (mem_rtx
, x
))
2970 /* This one overlaps. */
2971 /* We must have a mapping from this MEM's address to the
2972 value (E). Remove that, too. */
2973 addr
= cselib_lookup (XEXP (x
, 0), VOIDmode
, 0);
2974 mem_chain
= &addr
->addr_list
;
2977 if ((*mem_chain
)->elt
== v
)
2979 unchain_one_elt_list (mem_chain
);
2983 mem_chain
= &(*mem_chain
)->next
;
2986 unchain_one_elt_loc_list (p
);
2989 check_value_useless (v
);
2993 /* Invalidate any locations in the table which are changed because of a
2994 store to MEM_RTX. If this is called because of a non-const call
2995 instruction, MEM_RTX is (mem:BLK const0_rtx). */
2998 cselib_invalidate_mem (mem_rtx
)
3001 htab_traverse (hash_table
, cselib_invalidate_mem_1
, mem_rtx
);
3004 /* Invalidate DEST, which is being assigned to or clobbered. The second and
3005 the third parameter exist so that this function can be passed to
3006 note_stores; they are ignored. */
3009 cselib_invalidate_rtx (dest
, ignore
, data
)
3011 rtx ignore ATTRIBUTE_UNUSED
;
3012 void *data ATTRIBUTE_UNUSED
;
3014 while (GET_CODE (dest
) == STRICT_LOW_PART
|| GET_CODE (dest
) == SIGN_EXTRACT
3015 || GET_CODE (dest
) == ZERO_EXTRACT
|| GET_CODE (dest
) == SUBREG
)
3016 dest
= XEXP (dest
, 0);
3018 if (GET_CODE (dest
) == REG
)
3019 cselib_invalidate_regno (REGNO (dest
), GET_MODE (dest
));
3020 else if (GET_CODE (dest
) == MEM
)
3021 cselib_invalidate_mem (dest
);
3023 /* Some machines don't define AUTO_INC_DEC, but they still use push
3024 instructions. We need to catch that case here in order to
3025 invalidate the stack pointer correctly. Note that invalidating
3026 the stack pointer is different from invalidating DEST. */
3027 if (push_operand (dest
, GET_MODE (dest
)))
3028 cselib_invalidate_rtx (stack_pointer_rtx
, NULL_RTX
, NULL
);
3031 /* Record the result of a SET instruction. DEST is being set; the source
3032 contains the value described by SRC_ELT. If DEST is a MEM, DEST_ADDR_ELT
3033 describes its address. */
3036 cselib_record_set (dest
, src_elt
, dest_addr_elt
)
3038 cselib_val
*src_elt
, *dest_addr_elt
;
3040 int dreg
= GET_CODE (dest
) == REG
? (int) REGNO (dest
) : -1;
3042 if (src_elt
== 0 || side_effects_p (dest
))
3047 REG_VALUES (dreg
) = new_elt_list (REG_VALUES (dreg
), src_elt
);
3048 src_elt
->locs
= new_elt_loc_list (src_elt
->locs
, dest
);
3050 else if (GET_CODE (dest
) == MEM
&& dest_addr_elt
!= 0)
3051 add_mem_for_addr (dest_addr_elt
, src_elt
, dest
);
3054 /* Describe a single set that is part of an insn. */
3059 cselib_val
*src_elt
;
3060 cselib_val
*dest_addr_elt
;
3063 /* There is no good way to determine how many elements there can be
3064 in a PARALLEL. Since it's fairly cheap, use a really large number. */
3065 #define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
3067 /* Record the effects of any sets in INSN. */
3069 cselib_record_sets (insn
)
3074 struct set sets
[MAX_SETS
];
3075 rtx body
= PATTERN (insn
);
3077 body
= PATTERN (insn
);
3078 /* Find all sets. */
3079 if (GET_CODE (body
) == SET
)
3081 sets
[0].src
= SET_SRC (body
);
3082 sets
[0].dest
= SET_DEST (body
);
3085 else if (GET_CODE (body
) == PARALLEL
)
3087 /* Look through the PARALLEL and record the values being
3088 set, if possible. Also handle any CLOBBERs. */
3089 for (i
= XVECLEN (body
, 0) - 1; i
>= 0; --i
)
3091 rtx x
= XVECEXP (body
, 0, i
);
3093 if (GET_CODE (x
) == SET
)
3095 sets
[n_sets
].src
= SET_SRC (x
);
3096 sets
[n_sets
].dest
= SET_DEST (x
);
3102 /* Look up the values that are read. Do this before invalidating the
3103 locations that are written. */
3104 for (i
= 0; i
< n_sets
; i
++)
3106 sets
[i
].src_elt
= cselib_lookup (sets
[i
].src
, GET_MODE (sets
[i
].dest
),
3108 if (GET_CODE (sets
[i
].dest
) == MEM
)
3109 sets
[i
].dest_addr_elt
= cselib_lookup (XEXP (sets
[i
].dest
, 0), Pmode
,
3112 sets
[i
].dest_addr_elt
= 0;
3115 /* Invalidate all locations written by this insn. Note that the elts we
3116 looked up in the previous loop aren't affected, just some of their
3117 locations may go away. */
3118 note_stores (body
, cselib_invalidate_rtx
, NULL
);
3120 /* Now enter the equivalences in our tables. */
3121 for (i
= 0; i
< n_sets
; i
++)
3122 cselib_record_set (sets
[i
].dest
, sets
[i
].src_elt
, sets
[i
].dest_addr_elt
);
3125 /* Record the effects of INSN. */
3128 cselib_process_insn (insn
)
3134 cselib_current_insn
= insn
;
3136 /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp. */
3137 if (GET_CODE (insn
) == CODE_LABEL
3138 || (GET_CODE (insn
) == NOTE
3139 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_SETJMP
)
3140 || (GET_CODE (insn
) == INSN
3141 && GET_CODE (PATTERN (insn
)) == ASM_OPERANDS
3142 && MEM_VOLATILE_P (PATTERN (insn
))))
3148 if (GET_RTX_CLASS (GET_CODE (insn
)) != 'i')
3150 cselib_current_insn
= 0;
3154 /* If this is a call instruction, forget anything stored in a
3155 call clobbered register, or, if this is not a const call, in
3157 if (GET_CODE (insn
) == CALL_INSN
)
3159 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
3160 if (call_used_regs
[i
])
3161 cselib_invalidate_regno (i
, VOIDmode
);
3163 if (! CONST_CALL_P (insn
))
3164 cselib_invalidate_mem (callmem
);
3167 cselib_record_sets (insn
);
3170 /* Clobber any registers which appear in REG_INC notes. We
3171 could keep track of the changes to their values, but it is
3172 unlikely to help. */
3173 for (x
= REG_NOTES (insn
); x
; x
= XEXP (x
, 1))
3174 if (REG_NOTE_KIND (x
) == REG_INC
)
3175 cselib_invalidate_rtx (XEXP (x
, 0), NULL_RTX
, NULL
);
3178 /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
3179 after we have processed the insn. */
3180 if (GET_CODE (insn
) == CALL_INSN
)
3181 for (x
= CALL_INSN_FUNCTION_USAGE (insn
); x
; x
= XEXP (x
, 1))
3182 if (GET_CODE (XEXP (x
, 0)) == CLOBBER
)
3183 cselib_invalidate_rtx (XEXP (XEXP (x
, 0), 0), NULL_RTX
, NULL
);
3185 cselib_current_insn
= 0;
3187 if (n_useless_values
> MAX_USELESS_VALUES
)
3188 remove_useless_values ();
3191 /* Make sure our varrays are big enough. Not called from any cselib routines;
3192 it must be called by the user if it allocated new registers. */
3195 cselib_update_varray_sizes ()
3197 unsigned int nregs
= max_reg_num ();
3199 if (nregs
== cselib_nregs
)
3202 cselib_nregs
= nregs
;
3203 VARRAY_GROW (reg_values
, nregs
);
3206 /* Initialize cselib for one pass. The caller must also call
3207 init_alias_analysis. */
3212 /* These are only created once. */
3215 extern struct obstack permanent_obstack
;
3217 gcc_obstack_init (&cselib_obstack
);
3218 cselib_startobj
= obstack_alloc (&cselib_obstack
, 0);
3220 push_obstacks (&permanent_obstack
, &permanent_obstack
);
3221 callmem
= gen_rtx_MEM (BLKmode
, const0_rtx
);
3223 ggc_add_rtx_root (&callmem
, 1);
3226 cselib_nregs
= max_reg_num ();
3227 VARRAY_ELT_LIST_INIT (reg_values
, cselib_nregs
, "reg_values");
3228 hash_table
= htab_create (31, get_value_hash
, entry_and_rtx_equal_p
, NULL
);
3232 /* Called when the current user is done with cselib. */
3238 htab_delete (hash_table
);