1 /* Code for range operators.
2 Copyright (C) 2017-2024 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
26 #include "insn-codes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
44 #include "gimple-walk.h"
47 #include "value-relation.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
52 // Instantiate the operators which apply to multiple types here.
// Each object below is a global singleton; they are registered with the
// dispatch table in range_op_table::range_op_table () below.
54 operator_equal op_equal
;
55 operator_not_equal op_not_equal
;
60 operator_identity op_ident
;
62 operator_cast op_cast
;
63 operator_plus op_plus
;
65 operator_minus op_minus
;
66 operator_negate op_negate
;
67 operator_mult op_mult
;
68 operator_addr_expr op_addr
;
69 operator_bitwise_not op_bitwise_not
;
70 operator_bitwise_xor op_bitwise_xor
;
71 operator_bitwise_and op_bitwise_and
;
72 operator_bitwise_or op_bitwise_or
;
76 // Instantiate a range operator table.
77 range_op_table operator_table
;
79 // Invoke the initialization routines for each class of range.
// Constructor: populate the opcode -> range_operator dispatch table with
// the shared (multi-type) operator singletons declared above.
// NOTE(review): opening/closing braces appear to have been lost in this
// copy of the file — confirm against upstream before building.
81 range_op_table::range_op_table ()
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
87 set (EQ_EXPR
, op_equal
);
88 set (NE_EXPR
, op_not_equal
);
93 set (SSA_NAME
, op_ident
);
94 set (PAREN_EXPR
, op_ident
);
95 set (OBJ_TYPE_REF
, op_ident
);
96 set (REAL_CST
, op_cst
);
97 set (INTEGER_CST
, op_cst
);
98 set (NOP_EXPR
, op_cast
);
99 set (CONVERT_EXPR
, op_cast
);
100 set (PLUS_EXPR
, op_plus
);
101 set (ABS_EXPR
, op_abs
);
102 set (MINUS_EXPR
, op_minus
);
103 set (NEGATE_EXPR
, op_negate
);
104 set (MULT_EXPR
, op_mult
);
106 // Occur in both integer and pointer tables, but currently share
107 // integral implementation.
108 set (ADDR_EXPR
, op_addr
);
109 set (BIT_NOT_EXPR
, op_bitwise_not
);
110 set (BIT_XOR_EXPR
, op_bitwise_xor
);
112 // These are in both integer and pointer tables, but pointer has a different
114 // If commented out, there is a hybrid version in range-op-ptr.cc which
115 // is used until there is a pointer range class.  Then we can simply
116 // uncomment the operator here and use the unified version.
118 // set (BIT_AND_EXPR, op_bitwise_and);
119 // set (BIT_IOR_EXPR, op_bitwise_or);
120 // set (MIN_EXPR, op_min);
121 // set (MAX_EXPR, op_max);
124 // Instantiate a default range operator for opcodes with no entry.
// Its virtual methods are the base-class defaults defined later in this
// file (return false / VREL_VARYING / varying range).
126 range_operator default_operator
;
128 // Create a default range_op_handler.
// Default-constructed handlers point at default_operator so m_operator
// is never NULL; operator bool () distinguishes this case.
130 range_op_handler::range_op_handler ()
132 m_operator
= &default_operator
;
135 // Create a range_op_handler for CODE. Use a default operatoer if CODE
136 // does not have an entry.
138 range_op_handler::range_op_handler (unsigned code
)
140 m_operator
= operator_table
[code
];
142 m_operator
= &default_operator
;
145 // Return TRUE if this handler has a non-default operator.
147 range_op_handler::operator bool () const
149 return m_operator
!= &default_operator
;
152 // Return a pointer to the range operator assocaited with this handler.
153 // If it is a default operator, return NULL.
154 // This is the equivalent of indexing the range table.
157 range_op_handler::range_op () const
159 if (m_operator
!= &default_operator
)
// Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
// This is used to produce a unique value for each dispatch pattern.  Shift
// values are based on the size of the m_discriminator field in value_range.h.

static inline unsigned
dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
{
  // Pack the three discriminators into disjoint bit fields of one word.
  unsigned lhs_field = lhs << 8;
  unsigned op1_field = op1 << 4;
  return lhs_field + op1_field + op2;
}
174 // These are the supported dispatch patterns.  These map to the parameter list
175 // of the routines in range_operator.  Note the last 3 characters are
176 // shorthand for the LHS, OP1, and OP2 range discriminator class.
// I = irange, F = frange, P = prange; e.g. RO_IFI means an irange LHS
// with an frange OP1 and an irange OP2.
178 const unsigned RO_III
= dispatch_trio (VR_IRANGE
, VR_IRANGE
, VR_IRANGE
);
179 const unsigned RO_IFI
= dispatch_trio (VR_IRANGE
, VR_FRANGE
, VR_IRANGE
);
180 const unsigned RO_IFF
= dispatch_trio (VR_IRANGE
, VR_FRANGE
, VR_FRANGE
);
181 const unsigned RO_FFF
= dispatch_trio (VR_FRANGE
, VR_FRANGE
, VR_FRANGE
);
182 const unsigned RO_FIF
= dispatch_trio (VR_FRANGE
, VR_IRANGE
, VR_FRANGE
);
183 const unsigned RO_FII
= dispatch_trio (VR_FRANGE
, VR_IRANGE
, VR_IRANGE
);
184 const unsigned RO_PPP
= dispatch_trio (VR_PRANGE
, VR_PRANGE
, VR_PRANGE
);
185 const unsigned RO_PPI
= dispatch_trio (VR_PRANGE
, VR_PRANGE
, VR_IRANGE
);
186 const unsigned RO_IPP
= dispatch_trio (VR_IRANGE
, VR_PRANGE
, VR_PRANGE
);
187 const unsigned RO_IPI
= dispatch_trio (VR_IRANGE
, VR_PRANGE
, VR_IRANGE
);
188 const unsigned RO_PIP
= dispatch_trio (VR_PRANGE
, VR_IRANGE
, VR_PRANGE
);
189 const unsigned RO_PII
= dispatch_trio (VR_PRANGE
, VR_IRANGE
, VR_IRANGE
);
191 // Return a dispatch value for parameter types LHS, OP1 and OP2.
194 range_op_handler::dispatch_kind (const vrange
&lhs
, const vrange
&op1
,
195 const vrange
& op2
) const
197 return dispatch_trio (lhs
.m_discriminator
, op1
.m_discriminator
,
198 op2
.m_discriminator
);
202 range_op_handler::discriminator_fail (const vrange
&r1
,
204 const vrange
&r3
) const
206 const char name
[] = "IPF";
207 gcc_checking_assert (r1
.m_discriminator
< sizeof (name
) - 1);
208 gcc_checking_assert (r2
.m_discriminator
< sizeof (name
) - 1);
209 gcc_checking_assert (r3
.m_discriminator
< sizeof (name
) - 1);
211 "Unsupported operand combination in dispatch: RO_%c%c%c\n",
212 name
[r1
.m_discriminator
],
213 name
[r2
.m_discriminator
],
214 name
[r3
.m_discriminator
]);
219 has_pointer_operand_p (const vrange
&r1
, const vrange
&r2
, const vrange
&r3
)
221 return is_a
<prange
> (r1
) || is_a
<prange
> (r2
) || is_a
<prange
> (r3
);
224 // Dispatch a call to fold_range based on the types of R, LH and RH.
// Each return below forwards to the strongly-typed fold_range overload
// for one RO_* discriminator pattern.
// NOTE(review): the case labels (RO_III, RO_IFI, ...), braces and the
// LH/RH parameter lines appear to have been dropped from this copy of
// the file — restore from upstream before building.
227 range_op_handler::fold_range (vrange
&r
, tree type
,
230 relation_trio rel
) const
232 gcc_checking_assert (m_operator
);
234 if (!lh
.undefined_p () && !rh
.undefined_p ())
235 gcc_assert (m_operator
->operand_check_p (type
, lh
.type (), rh
.type ()));
236 if (has_pointer_operand_p (r
, lh
, rh
)
237 && !m_operator
->pointers_handled_p (DISPATCH_FOLD_RANGE
,
238 dispatch_kind (r
, lh
, rh
)))
239 discriminator_fail (r
, lh
, rh
);
241 switch (dispatch_kind (r
, lh
, rh
))
244 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
246 as_a
<irange
> (rh
), rel
);
248 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
250 as_a
<irange
> (rh
), rel
);
252 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
254 as_a
<frange
> (rh
), rel
);
256 return m_operator
->fold_range (as_a
<frange
> (r
), type
,
258 as_a
<frange
> (rh
), rel
);
260 return m_operator
->fold_range (as_a
<frange
> (r
), type
,
262 as_a
<irange
> (rh
), rel
);
264 return m_operator
->fold_range (as_a
<prange
> (r
), type
,
266 as_a
<prange
> (rh
), rel
);
268 return m_operator
->fold_range (as_a
<prange
> (r
), type
,
270 as_a
<irange
> (rh
), rel
);
272 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
274 as_a
<prange
> (rh
), rel
);
276 return m_operator
->fold_range (as_a
<prange
> (r
), type
,
278 as_a
<prange
> (rh
), rel
);
280 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
282 as_a
<irange
> (rh
), rel
);
288 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
// Computes the range of operand 1 given the LHS result and operand 2.
// NOTE(review): case labels, braces and the LHS/OP2 parameter lines are
// missing from this copy — restore from upstream before building.
291 range_op_handler::op1_range (vrange
&r
, tree type
,
294 relation_trio rel
) const
296 gcc_checking_assert (m_operator
);
297 if (lhs
.undefined_p ())
300 if (!op2
.undefined_p ())
301 gcc_assert (m_operator
->operand_check_p (lhs
.type (), type
, op2
.type ()));
302 if (has_pointer_operand_p (r
, lhs
, op2
)
303 && !m_operator
->pointers_handled_p (DISPATCH_OP1_RANGE
,
304 dispatch_kind (r
, lhs
, op2
)))
305 discriminator_fail (r
, lhs
, op2
);
307 switch (dispatch_kind (r
, lhs
, op2
))
310 return m_operator
->op1_range (as_a
<irange
> (r
), type
,
312 as_a
<irange
> (op2
), rel
);
314 return m_operator
->op1_range (as_a
<prange
> (r
), type
,
316 as_a
<prange
> (op2
), rel
);
318 return m_operator
->op1_range (as_a
<prange
> (r
), type
,
320 as_a
<prange
> (op2
), rel
);
322 return m_operator
->op1_range (as_a
<prange
> (r
), type
,
324 as_a
<irange
> (op2
), rel
);
326 return m_operator
->op1_range (as_a
<irange
> (r
), type
,
328 as_a
<irange
> (op2
), rel
);
330 return m_operator
->op1_range (as_a
<frange
> (r
), type
,
332 as_a
<frange
> (op2
), rel
);
334 return m_operator
->op1_range (as_a
<frange
> (r
), type
,
336 as_a
<frange
> (op2
), rel
);
342 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
// Computes the range of operand 2 given the LHS result and operand 1.
// NOTE(review): case labels, braces and the LHS/OP1 parameter lines are
// missing from this copy — restore from upstream before building.
345 range_op_handler::op2_range (vrange
&r
, tree type
,
348 relation_trio rel
) const
350 gcc_checking_assert (m_operator
);
351 if (lhs
.undefined_p ())
354 if (!op1
.undefined_p ())
355 gcc_assert (m_operator
->operand_check_p (lhs
.type (), op1
.type (), type
));
356 if (has_pointer_operand_p (r
, lhs
, op1
)
357 && !m_operator
->pointers_handled_p (DISPATCH_OP2_RANGE
,
358 dispatch_kind (r
, lhs
, op1
)))
359 discriminator_fail (r
, lhs
, op1
);
361 switch (dispatch_kind (r
, lhs
, op1
))
364 return m_operator
->op2_range (as_a
<irange
> (r
), type
,
366 as_a
<irange
> (op1
), rel
);
368 return m_operator
->op2_range (as_a
<prange
> (r
), type
,
370 as_a
<prange
> (op1
), rel
);
372 return m_operator
->op2_range (as_a
<irange
> (r
), type
,
374 as_a
<prange
> (op1
), rel
);
376 return m_operator
->op2_range (as_a
<frange
> (r
), type
,
378 as_a
<frange
> (op1
), rel
);
380 return m_operator
->op2_range (as_a
<frange
> (r
), type
,
382 as_a
<frange
> (op1
), rel
);
388 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
// Returns any relation known between the LHS and operand 1.
// NOTE(review): case labels, braces and the OP1/OP2 parameter lines are
// missing from this copy — restore from upstream before building.
391 range_op_handler::lhs_op1_relation (const vrange
&lhs
,
394 relation_kind rel
) const
396 gcc_checking_assert (m_operator
);
398 if (has_pointer_operand_p (lhs
, op1
, op2
)
399 && !m_operator
->pointers_handled_p (DISPATCH_LHS_OP1_RELATION
,
400 dispatch_kind (lhs
, op1
, op2
)))
401 discriminator_fail (lhs
, op1
, op2
);
404 switch (dispatch_kind (lhs
, op1
, op2
))
407 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
409 as_a
<irange
> (op2
), rel
);
411 return m_operator
->lhs_op1_relation (as_a
<prange
> (lhs
),
413 as_a
<prange
> (op2
), rel
);
415 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
417 as_a
<prange
> (op2
), rel
);
419 return m_operator
->lhs_op1_relation (as_a
<prange
> (lhs
),
421 as_a
<irange
> (op2
), rel
);
423 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
425 as_a
<frange
> (op2
), rel
);
427 return m_operator
->lhs_op1_relation (as_a
<frange
> (lhs
),
429 as_a
<frange
> (op2
), rel
);
435 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
// Returns any relation known between the LHS and operand 2.
// NOTE(review): case labels, braces and the OP1/OP2 parameter lines are
// missing from this copy — restore from upstream before building.
438 range_op_handler::lhs_op2_relation (const vrange
&lhs
,
441 relation_kind rel
) const
443 gcc_checking_assert (m_operator
);
445 if (has_pointer_operand_p (lhs
, op1
, op2
)
446 && !m_operator
->pointers_handled_p (DISPATCH_LHS_OP2_RELATION
,
447 dispatch_kind (lhs
, op1
, op2
)))
448 discriminator_fail (lhs
, op1
, op2
);
450 switch (dispatch_kind (lhs
, op1
, op2
))
453 return m_operator
->lhs_op2_relation (as_a
<irange
> (lhs
),
455 as_a
<irange
> (op2
), rel
);
457 return m_operator
->lhs_op2_relation (as_a
<irange
> (lhs
),
459 as_a
<frange
> (op2
), rel
);
461 return m_operator
->lhs_op2_relation (as_a
<frange
> (lhs
),
463 as_a
<frange
> (op2
), rel
);
469 // Dispatch a call to op1_op2_relation based on the type of LHS.
// Returns any relation between operands 1 and 2 implied by the LHS.
// NOTE(review): case labels, braces and the OP1 parameter line are
// missing from this copy — restore from upstream before building.
472 range_op_handler::op1_op2_relation (const vrange
&lhs
,
474 const vrange
&op2
) const
476 gcc_checking_assert (m_operator
);
478 if (has_pointer_operand_p (lhs
, op1
, op2
)
479 && !m_operator
->pointers_handled_p (DISPATCH_OP1_OP2_RELATION
,
480 dispatch_kind (lhs
, op1
, op2
)))
481 discriminator_fail (lhs
, op1
, op2
);
483 switch (dispatch_kind (lhs
, op1
, op2
))
486 return m_operator
->op1_op2_relation (as_a
<irange
> (lhs
),
488 as_a
<irange
> (op2
));
491 return m_operator
->op1_op2_relation (as_a
<irange
> (lhs
),
493 as_a
<prange
> (op2
));
496 return m_operator
->op1_op2_relation (as_a
<irange
> (lhs
),
498 as_a
<frange
> (op2
));
501 return m_operator
->op1_op2_relation (as_a
<frange
> (lhs
),
503 as_a
<frange
> (op2
));
// Dispatch overflow_free_p to the irange overload; only the integral
// pattern is handled.
// NOTE(review): the RH parameter line, case label, the second argument
// of the forwarded call and the trailing default/return were lost in
// this copy — restore from upstream before building.
511 range_op_handler::overflow_free_p (const vrange
&lh
,
513 relation_trio rel
) const
515 gcc_checking_assert (m_operator
);
516 switch (dispatch_kind (lh
, lh
, rh
))
519 return m_operator
->overflow_free_p(as_a
<irange
> (lh
),
528 range_op_handler::operand_check_p (tree t1
, tree t2
, tree t3
) const
530 gcc_checking_assert (m_operator
);
531 return m_operator
->operand_check_p (t1
, t2
, t3
);
534 // Update the known bitmasks in R when applying the operation CODE to
// LH and RH, using the CCP bit-value machinery to track known-zero and
// known-one bits through the operation.
// NOTE(review): the early-return guard tail, the function head, braces,
// case breaks and the default case appear truncated in this copy —
// restore from upstream before building.
538 update_known_bitmask (vrange
&r
, tree_code code
,
539 const vrange
&lh
, const vrange
&rh
)
541 if (r
.undefined_p () || lh
.undefined_p () || rh
.undefined_p ()
545 widest_int widest_value
, widest_mask
;
546 tree type
= r
.type ();
547 signop sign
= TYPE_SIGN (type
);
548 int prec
= TYPE_PRECISION (type
);
549 irange_bitmask lh_bits
= lh
.get_bitmask ();
550 irange_bitmask rh_bits
= rh
.get_bitmask ();
552 switch (get_gimple_rhs_class (code
))
554 case GIMPLE_UNARY_RHS
:
555 bit_value_unop (code
, sign
, prec
, &widest_value
, &widest_mask
,
556 TYPE_SIGN (lh
.type ()),
557 TYPE_PRECISION (lh
.type ()),
558 widest_int::from (lh_bits
.value (),
559 TYPE_SIGN (lh
.type ())),
560 widest_int::from (lh_bits
.mask (),
561 TYPE_SIGN (lh
.type ())));
563 case GIMPLE_BINARY_RHS
:
564 bit_value_binop (code
, sign
, prec
, &widest_value
, &widest_mask
,
565 TYPE_SIGN (lh
.type ()),
566 TYPE_PRECISION (lh
.type ()),
567 widest_int::from (lh_bits
.value (), sign
),
568 widest_int::from (lh_bits
.mask (), sign
),
569 TYPE_SIGN (rh
.type ()),
570 TYPE_PRECISION (rh
.type ()),
571 widest_int::from (rh_bits
.value (), sign
),
572 widest_int::from (rh_bits
.mask (), sign
));
578 wide_int mask
= wide_int::from (widest_mask
, prec
, sign
);
579 wide_int value
= wide_int::from (widest_value
, prec
, sign
);
580 // Bitmasks must have the unknown value bits cleared.
582 irange_bitmask
bm (value
, mask
);
583 r
.update_bitmask (bm
);
586 // Return the upper limit for a type.
// Thin wrapper over irange_val_max for symmetry with min_limit below.
588 static inline wide_int
589 max_limit (const_tree type
)
591 return irange_val_max (type
);
594 // Return the lower limit for a type.
// Thin wrapper over irange_val_min for symmetry with max_limit above.
596 static inline wide_int
597 min_limit (const_tree type
)
599 return irange_val_min (type
);
602 // Return false if shifting by OP is undefined behavior.  Otherwise, return
603 // true and the range it is to be shifted by.  This allows trimming out of
604 // undefined ranges, leaving only valid ranges if there are any.
// Valid shift counts are [0, TYPE_PRECISION (type) - 1] in OP's type.
// NOTE(review): the early return, the intersect call and the final
// returns were lost in this copy — restore from upstream.
607 get_shift_range (irange
&r
, tree type
, const irange
&op
)
609 if (op
.undefined_p ())
612 // Build valid range and intersect it with the shift range.
613 r
= value_range (op
.type (),
614 wi::shwi (0, TYPE_PRECISION (op
.type ())),
615 wi::shwi (TYPE_PRECISION (type
) - 1, TYPE_PRECISION (op
.type ())));
618 // If there are no valid ranges in the shift range, returned false.
619 if (r
.undefined_p ())
624 // Default wide_int fold operation returns [MIN, MAX].
627 range_operator::wi_fold (irange
&r
, tree type
,
628 const wide_int
&lh_lb ATTRIBUTE_UNUSED
,
629 const wide_int
&lh_ub ATTRIBUTE_UNUSED
,
630 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
631 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
633 gcc_checking_assert (r
.supports_type_p (type
));
634 r
.set_varying (type
);
637 // Call wi_fold when both op1 and op2 are equivalent. Further split small
638 // subranges into constants. This can provide better precision.
639 // For x + y, when x == y with a range of [0,4] instead of [0, 8] produce
640 // [0,0][2, 2][4,4][6, 6][8, 8]
641 // LIMIT is the maximum number of elements in range allowed before we
642 // do not process them individually.
// NOTE(review): the tmp declaration, the union_ accumulation calls,
// braces and the else keyword were lost in this copy — restore from
// upstream before building.
645 range_operator::wi_fold_in_parts_equiv (irange
&r
, tree type
,
646 const wide_int
&lh_lb
,
647 const wide_int
&lh_ub
,
648 unsigned limit
) const
651 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
652 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
653 // if there are 1 to 8 values in the LH range, split them up.
655 if (lh_range
>= 0 && lh_range
< limit
)
657 for (unsigned x
= 0; x
<= lh_range
; x
++)
659 wide_int val
= lh_lb
+ x
;
// Fold each value against itself (op1 == op2 equivalence).
660 wi_fold (tmp
, type
, val
, val
, val
, val
);
664 // Otherwise just call wi_fold.
666 wi_fold (r
, type
, lh_lb
, lh_ub
, lh_lb
, lh_ub
);
669 // Call wi_fold, except further split small subranges into constants.
670 // This can provide better precision. For something 8 >> [0,1]
671 // Instead of [8, 16], we will produce [8,8][16,16]
// NOTE(review): the tmp declarations, union_ accumulation calls, the
// conditional guards between the split calls, braces and the final else
// were lost in this copy — restore from upstream before building.
674 range_operator::wi_fold_in_parts (irange
&r
, tree type
,
675 const wide_int
&lh_lb
,
676 const wide_int
&lh_ub
,
677 const wide_int
&rh_lb
,
678 const wide_int
&rh_ub
) const
681 widest_int rh_range
= wi::sub (widest_int::from (rh_ub
, TYPE_SIGN (type
)),
682 widest_int::from (rh_lb
, TYPE_SIGN (type
)));
683 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
684 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
685 // If there are 2, 3, or 4 values in the RH range, do them separately.
686 // Call wi_fold_in_parts to check the RH side.
687 if (rh_range
> 0 && rh_range
< 4)
689 wi_fold_in_parts (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_lb
);
692 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 1, rh_lb
+ 1);
696 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 2, rh_lb
+ 2);
700 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_ub
, rh_ub
);
703 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
704 // The RH side has been checked, so no recursion needed.
705 else if (lh_range
> 0 && lh_range
< 4)
707 wi_fold (r
, type
, lh_lb
, lh_lb
, rh_lb
, rh_ub
);
710 wi_fold (tmp
, type
, lh_lb
+ 1, lh_lb
+ 1, rh_lb
, rh_ub
);
714 wi_fold (tmp
, type
, lh_lb
+ 2, lh_lb
+ 2, rh_lb
, rh_ub
);
718 wi_fold (tmp
, type
, lh_ub
, lh_ub
, rh_lb
, rh_ub
);
721 // Otherwise just call wi_fold.
723 wi_fold (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
726 // The default for fold is to break all ranges into sub-ranges and
727 // invoke the wi_fold method on each sub-range pair.
// NOTE(review): parameter lines for LH/RH, tmp declarations, union_
// accumulation, early returns and braces were lost in this copy —
// restore from upstream before building.
730 range_operator::fold_range (irange
&r
, tree type
,
733 relation_trio trio
) const
735 gcc_checking_assert (r
.supports_type_p (type
));
736 if (empty_range_varying (r
, type
, lh
, rh
))
739 relation_kind rel
= trio
.op1_op2 ();
740 unsigned num_lh
= lh
.num_pairs ();
741 unsigned num_rh
= rh
.num_pairs ();
743 // If op1 and op2 are equivalences, then we don't need a complete cross
744 // product, just pairs of matching elements.
745 if (relation_equiv_p (rel
) && lh
== rh
)
749 for (unsigned x
= 0; x
< num_lh
; ++x
)
751 // If the number of subranges is too high, limit subrange creation.
752 unsigned limit
= (r
.num_pairs () > 32) ? 0 : 8;
753 wide_int lh_lb
= lh
.lower_bound (x
);
754 wide_int lh_ub
= lh
.upper_bound (x
);
755 wi_fold_in_parts_equiv (tmp
, type
, lh_lb
, lh_ub
, limit
);
760 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
761 update_bitmask (r
, lh
, rh
);
765 // If both ranges are single pairs, fold directly into the result range.
766 // If the number of subranges grows too high, produce a summary result as the
767 // loop becomes exponential with little benefit. See PR 103821.
768 if ((num_lh
== 1 && num_rh
== 1) || num_lh
* num_rh
> 12)
770 wi_fold_in_parts (r
, type
, lh
.lower_bound (), lh
.upper_bound (),
771 rh
.lower_bound (), rh
.upper_bound ());
772 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
773 update_bitmask (r
, lh
, rh
);
// Full cross product of the LH and RH subrange pairs.
779 for (unsigned x
= 0; x
< num_lh
; ++x
)
780 for (unsigned y
= 0; y
< num_rh
; ++y
)
782 wide_int lh_lb
= lh
.lower_bound (x
);
783 wide_int lh_ub
= lh
.upper_bound (x
);
784 wide_int rh_lb
= rh
.lower_bound (y
);
785 wide_int rh_ub
= rh
.upper_bound (y
);
786 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
790 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
791 update_bitmask (r
, lh
, rh
);
795 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
796 update_bitmask (r
, lh
, rh
);
800 // The default for op1_range is to return false.
803 range_operator::op1_range (irange
&r ATTRIBUTE_UNUSED
,
804 tree type ATTRIBUTE_UNUSED
,
805 const irange
&lhs ATTRIBUTE_UNUSED
,
806 const irange
&op2 ATTRIBUTE_UNUSED
,
812 // The default for op2_range is to return false.
815 range_operator::op2_range (irange
&r ATTRIBUTE_UNUSED
,
816 tree type ATTRIBUTE_UNUSED
,
817 const irange
&lhs ATTRIBUTE_UNUSED
,
818 const irange
&op1 ATTRIBUTE_UNUSED
,
824 // The default relation routines return VREL_VARYING.
827 range_operator::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
828 const irange
&op1 ATTRIBUTE_UNUSED
,
829 const irange
&op2 ATTRIBUTE_UNUSED
,
830 relation_kind rel ATTRIBUTE_UNUSED
) const
836 range_operator::lhs_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
837 const irange
&op1 ATTRIBUTE_UNUSED
,
838 const irange
&op2 ATTRIBUTE_UNUSED
,
839 relation_kind rel ATTRIBUTE_UNUSED
) const
845 range_operator::op1_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
846 const irange
&op1 ATTRIBUTE_UNUSED
,
847 const irange
&op2 ATTRIBUTE_UNUSED
) const
852 // Default is no relation affects the LHS.
855 range_operator::op1_op2_relation_effect (irange
&lhs_range ATTRIBUTE_UNUSED
,
856 tree type ATTRIBUTE_UNUSED
,
857 const irange
&op1_range ATTRIBUTE_UNUSED
,
858 const irange
&op2_range ATTRIBUTE_UNUSED
,
859 relation_kind rel ATTRIBUTE_UNUSED
) const
865 range_operator::overflow_free_p (const irange
&, const irange
&,
871 // Apply any known bitmask updates based on this operator.
874 range_operator::update_bitmask (irange
&, const irange
&,
875 const irange
&) const
879 // Check that operand types are OK. Default to always OK.
882 range_operator::operand_check_p (tree
, tree
, tree
) const
887 // Create and return a range from a pair of wide-ints that are known
888 // to have overflowed (or underflowed).
// Builds an anti-range around the wrapped bounds when possible,
// otherwise drops to VARYING.
// NOTE(review): the function head, the covers/tem setup lines, braces
// and the else keyword were lost in this copy — restore from upstream.
891 value_range_from_overflowed_bounds (irange
&r
, tree type
,
892 const wide_int
&wmin
,
893 const wide_int
&wmax
)
895 const signop sgn
= TYPE_SIGN (type
);
896 const unsigned int prec
= TYPE_PRECISION (type
);
898 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
899 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
904 if (wi::cmp (tmin
, tmax
, sgn
) < 0)
907 if (wi::cmp (tmax
, tem
, sgn
) > 0)
910 // If the anti-range would cover nothing, drop to varying.
911 // Likewise if the anti-range bounds are outside of the types
913 if (covers
|| wi::cmp (tmin
, tmax
, sgn
) > 0)
914 r
.set_varying (type
);
916 r
.set (type
, tmin
, tmax
, VR_ANTI_RANGE
);
919 // Create and return a range from a pair of wide-ints. MIN_OVF and
920 // MAX_OVF describe any overflow that might have occurred while
921 // calculating WMIN and WMAX respectively.
// NOTE(review): the function head, early returns, the overflow_wraps
// branch structure, braces and several else keywords were lost in this
// copy — restore from upstream before building.
924 value_range_with_overflow (irange
&r
, tree type
,
925 const wide_int
&wmin
, const wide_int
&wmax
,
926 wi::overflow_type min_ovf
= wi::OVF_NONE
,
927 wi::overflow_type max_ovf
= wi::OVF_NONE
)
929 const signop sgn
= TYPE_SIGN (type
);
930 const unsigned int prec
= TYPE_PRECISION (type
);
931 const bool overflow_wraps
= TYPE_OVERFLOW_WRAPS (type
);
933 // For one bit precision if max != min, then the range covers all
935 if (prec
== 1 && wi::ne_p (wmax
, wmin
))
937 r
.set_varying (type
);
943 // If overflow wraps, truncate the values and adjust the range,
944 // kind, and bounds appropriately.
945 if ((min_ovf
!= wi::OVF_NONE
) == (max_ovf
!= wi::OVF_NONE
))
947 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
948 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
949 // If the limits are swapped, we wrapped around and cover
951 if (wi::gt_p (tmin
, tmax
, sgn
))
952 r
.set_varying (type
);
954 // No overflow or both overflow or underflow. The range
955 // kind stays normal.
956 r
.set (type
, tmin
, tmax
);
960 if ((min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_NONE
)
961 || (max_ovf
== wi::OVF_OVERFLOW
&& min_ovf
== wi::OVF_NONE
))
962 value_range_from_overflowed_bounds (r
, type
, wmin
, wmax
);
964 // Other underflow and/or overflow, drop to VR_VARYING.
965 r
.set_varying (type
);
969 // If both bounds either underflowed or overflowed, then the result
971 if ((min_ovf
== wi::OVF_OVERFLOW
&& max_ovf
== wi::OVF_OVERFLOW
)
972 || (min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_UNDERFLOW
))
978 // If overflow does not wrap, saturate to [MIN, MAX].
979 wide_int new_lb
, new_ub
;
980 if (min_ovf
== wi::OVF_UNDERFLOW
)
981 new_lb
= wi::min_value (prec
, sgn
);
982 else if (min_ovf
== wi::OVF_OVERFLOW
)
983 new_lb
= wi::max_value (prec
, sgn
);
987 if (max_ovf
== wi::OVF_UNDERFLOW
)
988 new_ub
= wi::min_value (prec
, sgn
);
989 else if (max_ovf
== wi::OVF_OVERFLOW
)
990 new_ub
= wi::max_value (prec
, sgn
);
994 r
.set (type
, new_lb
, new_ub
);
998 // Create and return a range from a pair of wide-ints. Canonicalize
999 // the case where the bounds are swapped. In which case, we transform
1000 // [10,5] into [MIN,5][10,MAX].
1003 create_possibly_reversed_range (irange
&r
, tree type
,
1004 const wide_int
&new_lb
, const wide_int
&new_ub
)
1006 signop s
= TYPE_SIGN (type
);
1007 // If the bounds are swapped, treat the result as if an overflow occurred.
1008 if (wi::gt_p (new_lb
, new_ub
, s
))
1009 value_range_from_overflowed_bounds (r
, type
, new_lb
, new_ub
);
1011 // Otherwise it's just a normal range.
1012 r
.set (type
, new_lb
, new_ub
);
1015 // Return the summary information about boolean range LHS. If EMPTY/FULL,
1016 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
// NOTE(review): the return type/head, the FALSE/TRUE summary returns
// and braces were lost in this copy — restore from upstream.
1019 get_bool_state (vrange
&r
, const vrange
&lhs
, tree val_type
)
1021 // If there is no result, then this is unexecutable.
1022 if (lhs
.undefined_p ())
1031 // For TRUE, we can't just test for [1,1] because Ada can have
1032 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
1033 if (lhs
.contains_p (build_zero_cst (lhs
.type ())))
1035 r
.set_varying (val_type
);
1042 // ------------------------------------------------------------------------
1045 operator_equal::update_bitmask (irange
&r
, const irange
&lh
,
1046 const irange
&rh
) const
1048 update_known_bitmask (r
, EQ_EXPR
, lh
, rh
);
1051 // Check if the LHS range indicates a relation between OP1 and OP2.
// NOTE(review): the return type/head, the VREL_NE/VREL_EQ returns under
// the FALSE/TRUE branches and braces were lost in this copy — restore
// from upstream.
1054 operator_equal::op1_op2_relation (const irange
&lhs
, const irange
&,
1055 const irange
&) const
1057 if (lhs
.undefined_p ())
1058 return VREL_UNDEFINED
;
1060 // FALSE = op1 == op2 indicates NE_EXPR.
1064 // TRUE = op1 == op2 indicates EQ_EXPR.
1065 if (!contains_zero_p (lhs
))
1067 return VREL_VARYING
;
// Fold op1 == op2 into a boolean range: true when both are the same
// singleton, false when the ranges cannot intersect or a constant fails
// the other side's bitmask, otherwise [false, true].
// NOTE(review): the OP1/OP2 parameter lines, else keywords, braces and
// the final return were lost in this copy — restore from upstream.
1071 operator_equal::fold_range (irange
&r
, tree type
,
1074 relation_trio rel
) const
1076 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_EQ
))
1079 // We can be sure the values are always equal or not if both ranges
1080 // consist of a single value, and then compare them.
1081 bool op1_const
= wi::eq_p (op1
.lower_bound (), op1
.upper_bound ());
1082 bool op2_const
= wi::eq_p (op2
.lower_bound (), op2
.upper_bound ());
1083 if (op1_const
&& op2_const
)
1085 if (wi::eq_p (op1
.lower_bound (), op2
.upper_bound()))
1086 r
= range_true (type
);
1088 r
= range_false (type
);
1092 // If ranges do not intersect, we know the range is not equal,
1093 // otherwise we don't know anything for sure.
1094 int_range_max tmp
= op1
;
1095 tmp
.intersect (op2
);
1096 if (tmp
.undefined_p ())
1097 r
= range_false (type
);
1098 // Check if a constant cannot satisfy the bitmask requirements.
1099 else if (op2_const
&& !op1
.get_bitmask ().member_p (op2
.lower_bound ()))
1100 r
= range_false (type
);
1101 else if (op1_const
&& !op2
.get_bitmask ().member_p (op1
.lower_bound ()))
1102 r
= range_false (type
);
1104 r
= range_true_and_false (type
);
// Given an EQ_EXPR result LHS and OP2, compute the range of OP1.
// NOTE(review): the LHS/OP2 parameter lines, case labels (BRS_TRUE /
// BRS_FALSE), the r = op2 assignment, the invert call and the final
// return were lost in this copy — restore from upstream.
1110 operator_equal::op1_range (irange
&r
, tree type
,
1113 relation_trio
) const
1115 switch (get_bool_state (r
, lhs
, type
))
1118 // If it's true, the result is the same as OP2.
1123 // If the result is false, the only time we know anything is
1124 // if OP2 is a constant.
1125 if (!op2
.undefined_p ()
1126 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
1132 r
.set_varying (type
);
1142 operator_equal::op2_range (irange
&r
, tree type
,
1145 relation_trio rel
) const
1147 return operator_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1150 // -------------------------------------------------------------------------
1153 operator_not_equal::update_bitmask (irange
&r
, const irange
&lh
,
1154 const irange
&rh
) const
1156 update_known_bitmask (r
, NE_EXPR
, lh
, rh
);
1159 // Check if the LHS range indicates a relation between OP1 and OP2.
// NOTE(review): the return type/head, the VREL_EQ/VREL_NE returns under
// the FALSE/TRUE branches and braces were lost in this copy — restore
// from upstream.
1162 operator_not_equal::op1_op2_relation (const irange
&lhs
, const irange
&,
1163 const irange
&) const
1165 if (lhs
.undefined_p ())
1166 return VREL_UNDEFINED
;
1168 // FALSE = op1 != op2 indicates EQ_EXPR.
1172 // TRUE = op1 != op2 indicates NE_EXPR.
1173 if (!contains_zero_p (lhs
))
1175 return VREL_VARYING
;
// Fold op1 != op2 into a boolean range: the mirror of
// operator_equal::fold_range with true/false swapped.
// NOTE(review): the OP1/OP2 parameter lines, else keywords, braces and
// the final return were lost in this copy — restore from upstream.
1179 operator_not_equal::fold_range (irange
&r
, tree type
,
1182 relation_trio rel
) const
1184 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_NE
))
1187 // We can be sure the values are always equal or not if both ranges
1188 // consist of a single value, and then compare them.
1189 bool op1_const
= wi::eq_p (op1
.lower_bound (), op1
.upper_bound ());
1190 bool op2_const
= wi::eq_p (op2
.lower_bound (), op2
.upper_bound ());
1191 if (op1_const
&& op2_const
)
1193 if (wi::ne_p (op1
.lower_bound (), op2
.upper_bound()))
1194 r
= range_true (type
);
1196 r
= range_false (type
);
1200 // If ranges do not intersect, we know the range is not equal,
1201 // otherwise we don't know anything for sure.
1202 int_range_max tmp
= op1
;
1203 tmp
.intersect (op2
);
1204 if (tmp
.undefined_p ())
1205 r
= range_true (type
);
1206 // Check if a constant cannot satisfy the bitmask requirements.
1207 else if (op2_const
&& !op1
.get_bitmask ().member_p (op2
.lower_bound ()))
1208 r
= range_true (type
);
1209 else if (op1_const
&& !op2
.get_bitmask ().member_p (op1
.lower_bound ()))
1210 r
= range_true (type
);
1212 r
= range_true_and_false (type
);
// Given an NE_EXPR result LHS and OP2, compute the range of OP1.
// NOTE(review): the LHS/OP2 parameter lines, case labels, the invert
// call, the r = op2 assignment and the final return were lost in this
// copy — restore from upstream.
1218 operator_not_equal::op1_range (irange
&r
, tree type
,
1221 relation_trio
) const
1223 switch (get_bool_state (r
, lhs
, type
))
1226 // If the result is true, the only time we know anything is if
1227 // OP2 is a constant.
1228 if (!op2
.undefined_p ()
1229 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
1235 r
.set_varying (type
);
1239 // If it's false, the result is the same as OP2.
1251 operator_not_equal::op2_range (irange
&r
, tree type
,
1254 relation_trio rel
) const
1256 return operator_not_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1259 // (X < VAL) produces the range of [MIN, VAL - 1].
// NOTE(review): the function head, the lim declaration, the signed
// 1-bit if/else condition, the underflow check and braces were lost in
// this copy — restore from upstream.
1262 build_lt (irange
&r
, tree type
, const wide_int
&val
)
1264 wi::overflow_type ov
;
1266 signop sgn
= TYPE_SIGN (type
);
1268 // Signed 1 bit cannot represent 1 for subtraction.
1270 lim
= wi::add (val
, -1, sgn
, &ov
);
1272 lim
= wi::sub (val
, 1, sgn
, &ov
);
1274 // If val - 1 underflows, check if X < MIN, which is an empty range.
1278 r
= int_range
<1> (type
, min_limit (type
), lim
);
1281 // (X <= VAL) produces the range of [MIN, VAL].
1284 build_le (irange
&r
, tree type
, const wide_int
&val
)
1286 r
= int_range
<1> (type
, min_limit (type
), val
);
1289 // (X > VAL) produces the range of [VAL + 1, MAX].
// NOTE(review): the function head, the lim declaration, the signed
// 1-bit if/else condition, the overflow check and braces were lost in
// this copy — restore from upstream.
1292 build_gt (irange
&r
, tree type
, const wide_int
&val
)
1294 wi::overflow_type ov
;
1296 signop sgn
= TYPE_SIGN (type
);
1298 // Signed 1 bit cannot represent 1 for addition.
1300 lim
= wi::sub (val
, -1, sgn
, &ov
);
1302 lim
= wi::add (val
, 1, sgn
, &ov
);
1303 // If val + 1 overflows, check is for X > MAX, which is an empty range.
1307 r
= int_range
<1> (type
, lim
, max_limit (type
));
1310 // (X >= val) produces the range of [VAL, MAX].
1313 build_ge (irange
&r
, tree type
, const wide_int
&val
)
1315 r
= int_range
<1> (type
, val
, max_limit (type
));
1320 operator_lt::update_bitmask (irange
&r
, const irange
&lh
,
1321 const irange
&rh
) const
1323 update_known_bitmask (r
, LT_EXPR
, lh
, rh
);
1326 // Check if the LHS range indicates a relation between OP1 and OP2.
1329 operator_lt::op1_op2_relation (const irange
&lhs
, const irange
&,
1330 const irange
&) const
1332 if (lhs
.undefined_p ())
1333 return VREL_UNDEFINED
;
1335 // FALSE = op1 < op2 indicates GE_EXPR.
1339 // TRUE = op1 < op2 indicates LT_EXPR.
1340 if (!contains_zero_p (lhs
))
1342 return VREL_VARYING
;
1346 operator_lt::fold_range (irange
&r
, tree type
,
1349 relation_trio rel
) const
1351 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LT
))
1354 signop sign
= TYPE_SIGN (op1
.type ());
1355 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1357 if (wi::lt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1358 r
= range_true (type
);
1359 else if (!wi::lt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1360 r
= range_false (type
);
1361 // Use nonzero bits to determine if < 0 is false.
1362 else if (op2
.zero_p () && !wi::neg_p (op1
.get_nonzero_bits (), sign
))
1363 r
= range_false (type
);
1365 r
= range_true_and_false (type
);
1370 operator_lt::op1_range (irange
&r
, tree type
,
1373 relation_trio
) const
1375 if (op2
.undefined_p ())
1378 switch (get_bool_state (r
, lhs
, type
))
1381 build_lt (r
, type
, op2
.upper_bound ());
1385 build_ge (r
, type
, op2
.lower_bound ());
1395 operator_lt::op2_range (irange
&r
, tree type
,
1398 relation_trio
) const
1400 if (op1
.undefined_p ())
1403 switch (get_bool_state (r
, lhs
, type
))
1406 build_gt (r
, type
, op1
.lower_bound ());
1410 build_le (r
, type
, op1
.upper_bound ());
1421 operator_le::update_bitmask (irange
&r
, const irange
&lh
,
1422 const irange
&rh
) const
1424 update_known_bitmask (r
, LE_EXPR
, lh
, rh
);
1427 // Check if the LHS range indicates a relation between OP1 and OP2.
1430 operator_le::op1_op2_relation (const irange
&lhs
, const irange
&,
1431 const irange
&) const
1433 if (lhs
.undefined_p ())
1434 return VREL_UNDEFINED
;
1436 // FALSE = op1 <= op2 indicates GT_EXPR.
1440 // TRUE = op1 <= op2 indicates LE_EXPR.
1441 if (!contains_zero_p (lhs
))
1443 return VREL_VARYING
;
1447 operator_le::fold_range (irange
&r
, tree type
,
1450 relation_trio rel
) const
1452 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LE
))
1455 signop sign
= TYPE_SIGN (op1
.type ());
1456 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1458 if (wi::le_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1459 r
= range_true (type
);
1460 else if (!wi::le_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1461 r
= range_false (type
);
1463 r
= range_true_and_false (type
);
1468 operator_le::op1_range (irange
&r
, tree type
,
1471 relation_trio
) const
1473 if (op2
.undefined_p ())
1476 switch (get_bool_state (r
, lhs
, type
))
1479 build_le (r
, type
, op2
.upper_bound ());
1483 build_gt (r
, type
, op2
.lower_bound ());
1493 operator_le::op2_range (irange
&r
, tree type
,
1496 relation_trio
) const
1498 if (op1
.undefined_p ())
1501 switch (get_bool_state (r
, lhs
, type
))
1504 build_ge (r
, type
, op1
.lower_bound ());
1508 build_lt (r
, type
, op1
.upper_bound ());
1519 operator_gt::update_bitmask (irange
&r
, const irange
&lh
,
1520 const irange
&rh
) const
1522 update_known_bitmask (r
, GT_EXPR
, lh
, rh
);
1525 // Check if the LHS range indicates a relation between OP1 and OP2.
1528 operator_gt::op1_op2_relation (const irange
&lhs
, const irange
&,
1529 const irange
&) const
1531 if (lhs
.undefined_p ())
1532 return VREL_UNDEFINED
;
1534 // FALSE = op1 > op2 indicates LE_EXPR.
1538 // TRUE = op1 > op2 indicates GT_EXPR.
1539 if (!contains_zero_p (lhs
))
1541 return VREL_VARYING
;
1545 operator_gt::fold_range (irange
&r
, tree type
,
1546 const irange
&op1
, const irange
&op2
,
1547 relation_trio rel
) const
1549 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GT
))
1552 signop sign
= TYPE_SIGN (op1
.type ());
1553 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1555 if (wi::gt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1556 r
= range_true (type
);
1557 else if (!wi::gt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1558 r
= range_false (type
);
1560 r
= range_true_and_false (type
);
1565 operator_gt::op1_range (irange
&r
, tree type
,
1566 const irange
&lhs
, const irange
&op2
,
1567 relation_trio
) const
1569 if (op2
.undefined_p ())
1572 switch (get_bool_state (r
, lhs
, type
))
1575 build_gt (r
, type
, op2
.lower_bound ());
1579 build_le (r
, type
, op2
.upper_bound ());
1589 operator_gt::op2_range (irange
&r
, tree type
,
1592 relation_trio
) const
1594 if (op1
.undefined_p ())
1597 switch (get_bool_state (r
, lhs
, type
))
1600 build_lt (r
, type
, op1
.upper_bound ());
1604 build_ge (r
, type
, op1
.lower_bound ());
1615 operator_ge::update_bitmask (irange
&r
, const irange
&lh
,
1616 const irange
&rh
) const
1618 update_known_bitmask (r
, GE_EXPR
, lh
, rh
);
1621 // Check if the LHS range indicates a relation between OP1 and OP2.
1624 operator_ge::op1_op2_relation (const irange
&lhs
, const irange
&,
1625 const irange
&) const
1627 if (lhs
.undefined_p ())
1628 return VREL_UNDEFINED
;
1630 // FALSE = op1 >= op2 indicates LT_EXPR.
1634 // TRUE = op1 >= op2 indicates GE_EXPR.
1635 if (!contains_zero_p (lhs
))
1637 return VREL_VARYING
;
1641 operator_ge::fold_range (irange
&r
, tree type
,
1644 relation_trio rel
) const
1646 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GE
))
1649 signop sign
= TYPE_SIGN (op1
.type ());
1650 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1652 if (wi::ge_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1653 r
= range_true (type
);
1654 else if (!wi::ge_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1655 r
= range_false (type
);
1657 r
= range_true_and_false (type
);
1662 operator_ge::op1_range (irange
&r
, tree type
,
1665 relation_trio
) const
1667 if (op2
.undefined_p ())
1670 switch (get_bool_state (r
, lhs
, type
))
1673 build_ge (r
, type
, op2
.lower_bound ());
1677 build_lt (r
, type
, op2
.upper_bound ());
1687 operator_ge::op2_range (irange
&r
, tree type
,
1690 relation_trio
) const
1692 if (op1
.undefined_p ())
1695 switch (get_bool_state (r
, lhs
, type
))
1698 build_le (r
, type
, op1
.upper_bound ());
1702 build_gt (r
, type
, op1
.lower_bound ());
1713 operator_plus::update_bitmask (irange
&r
, const irange
&lh
,
1714 const irange
&rh
) const
1716 update_known_bitmask (r
, PLUS_EXPR
, lh
, rh
);
1719 // Check to see if the range of OP2 indicates anything about the relation
1720 // between LHS and OP1.
1723 operator_plus::lhs_op1_relation (const irange
&lhs
,
1726 relation_kind
) const
1728 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
1729 return VREL_VARYING
;
1731 tree type
= lhs
.type ();
1732 unsigned prec
= TYPE_PRECISION (type
);
1733 wi::overflow_type ovf1
, ovf2
;
1734 signop sign
= TYPE_SIGN (type
);
1736 // LHS = OP1 + 0 indicates LHS == OP1.
1740 if (TYPE_OVERFLOW_WRAPS (type
))
1742 wi::add (op1
.lower_bound (), op2
.lower_bound (), sign
, &ovf1
);
1743 wi::add (op1
.upper_bound (), op2
.upper_bound (), sign
, &ovf2
);
1746 ovf1
= ovf2
= wi::OVF_NONE
;
1748 // Never wrapping additions.
1751 // Positive op2 means lhs > op1.
1752 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1754 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1757 // Negative op2 means lhs < op1.
1758 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1760 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1763 // Always wrapping additions.
1764 else if (ovf1
&& ovf1
== ovf2
)
1766 // Positive op2 means lhs < op1.
1767 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1769 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1772 // Negative op2 means lhs > op1.
1773 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1775 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1779 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1780 if (!range_includes_zero_p (op2
))
1783 return VREL_VARYING
;
1786 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1790 operator_plus::lhs_op2_relation (const irange
&lhs
, const irange
&op1
,
1791 const irange
&op2
, relation_kind rel
) const
1793 return lhs_op1_relation (lhs
, op2
, op1
, rel
);
1797 operator_plus::wi_fold (irange
&r
, tree type
,
1798 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1799 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1801 wi::overflow_type ov_lb
, ov_ub
;
1802 signop s
= TYPE_SIGN (type
);
1803 wide_int new_lb
= wi::add (lh_lb
, rh_lb
, s
, &ov_lb
);
1804 wide_int new_ub
= wi::add (lh_ub
, rh_ub
, s
, &ov_ub
);
1805 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
1808 // Given addition or subtraction, determine the possible NORMAL ranges and
1809 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1810 // Return the relation that exists between the LHS and OP1 in order for the
1811 // NORMAL range to apply.
1812 // a return value of VREL_VARYING means no ranges were applicable.
1814 static relation_kind
1815 plus_minus_ranges (irange
&r_ov
, irange
&r_normal
, const irange
&offset
,
1818 relation_kind kind
= VREL_VARYING
;
1819 // For now, only deal with constant adds. This could be extended to ranges
1820 // when someone is so motivated.
1821 if (!offset
.singleton_p () || offset
.zero_p ())
1824 // Always work with a positive offset. ie a+ -2 -> a-2 and a- -2 > a+2
1825 wide_int off
= offset
.lower_bound ();
1826 if (wi::neg_p (off
, SIGNED
))
1829 off
= wi::neg (off
);
1832 wi::overflow_type ov
;
1833 tree type
= offset
.type ();
1834 unsigned prec
= TYPE_PRECISION (type
);
1837 // calculate the normal range and relation for the operation.
1841 lb
= wi::zero (prec
);
1842 ub
= wi::sub (irange_val_max (type
), off
, UNSIGNED
, &ov
);
1849 ub
= irange_val_max (type
);
1852 int_range
<2> normal_range (type
, lb
, ub
);
1853 int_range
<2> ov_range (type
, lb
, ub
, VR_ANTI_RANGE
);
1856 r_normal
= normal_range
;
1860 // Once op1 has been calculated by operator_plus or operator_minus, check
1861 // to see if the relation passed causes any part of the calculation to
1862 // be not possible. ie
1863 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1864 // and that further refines a_2 to [0, 0].
1865 // R is the value of op1, OP2 is the offset being added/subtracted, REL is the
1866 // relation between LHS relation OP1 and ADD_P is true for PLUS, false for
1867 // MINUS. IF any adjustment can be made, R will reflect it.
1870 adjust_op1_for_overflow (irange
&r
, const irange
&op2
, relation_kind rel
,
1873 if (r
.undefined_p ())
1875 tree type
= r
.type ();
1876 // Check for unsigned overflow and calculate the overflow part.
1877 signop s
= TYPE_SIGN (type
);
1878 if (!TYPE_OVERFLOW_WRAPS (type
) || s
== SIGNED
)
1881 // Only work with <, <=, >, >= relations.
1882 if (!relation_lt_le_gt_ge_p (rel
))
1885 // Get the ranges for this offset.
1886 int_range_max normal
, overflow
;
1887 relation_kind k
= plus_minus_ranges (overflow
, normal
, op2
, add_p
);
1889 // VREL_VARYING means there are no adjustments.
1890 if (k
== VREL_VARYING
)
1893 // If the relations match use the normal range, otherwise use overflow range.
1894 if (relation_intersect (k
, rel
) == k
)
1895 r
.intersect (normal
);
1897 r
.intersect (overflow
);
1902 operator_plus::op1_range (irange
&r
, tree type
,
1905 relation_trio trio
) const
1907 if (lhs
.undefined_p ())
1909 // Start with the default operation.
1910 range_op_handler
minus (MINUS_EXPR
);
1913 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
1914 relation_kind rel
= trio
.lhs_op1 ();
1915 // Check for a relation refinement.
1917 adjust_op1_for_overflow (r
, op2
, rel
, true /* PLUS_EXPR */);
1922 operator_plus::op2_range (irange
&r
, tree type
,
1925 relation_trio rel
) const
1927 return op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1930 class operator_widen_plus_signed
: public range_operator
1933 virtual void wi_fold (irange
&r
, tree type
,
1934 const wide_int
&lh_lb
,
1935 const wide_int
&lh_ub
,
1936 const wide_int
&rh_lb
,
1937 const wide_int
&rh_ub
) const;
1938 } op_widen_plus_signed
;
1941 operator_widen_plus_signed::wi_fold (irange
&r
, tree type
,
1942 const wide_int
&lh_lb
,
1943 const wide_int
&lh_ub
,
1944 const wide_int
&rh_lb
,
1945 const wide_int
&rh_ub
) const
1947 wi::overflow_type ov_lb
, ov_ub
;
1948 signop s
= TYPE_SIGN (type
);
1951 = wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, SIGNED
);
1953 = wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, SIGNED
);
1954 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
1955 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
1957 wide_int new_lb
= wi::add (lh_wlb
, rh_wlb
, s
, &ov_lb
);
1958 wide_int new_ub
= wi::add (lh_wub
, rh_wub
, s
, &ov_ub
);
1960 r
= int_range
<2> (type
, new_lb
, new_ub
);
1963 class operator_widen_plus_unsigned
: public range_operator
1966 virtual void wi_fold (irange
&r
, tree type
,
1967 const wide_int
&lh_lb
,
1968 const wide_int
&lh_ub
,
1969 const wide_int
&rh_lb
,
1970 const wide_int
&rh_ub
) const;
1971 } op_widen_plus_unsigned
;
1974 operator_widen_plus_unsigned::wi_fold (irange
&r
, tree type
,
1975 const wide_int
&lh_lb
,
1976 const wide_int
&lh_ub
,
1977 const wide_int
&rh_lb
,
1978 const wide_int
&rh_ub
) const
1980 wi::overflow_type ov_lb
, ov_ub
;
1981 signop s
= TYPE_SIGN (type
);
1984 = wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, UNSIGNED
);
1986 = wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, UNSIGNED
);
1987 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
1988 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
1990 wide_int new_lb
= wi::add (lh_wlb
, rh_wlb
, s
, &ov_lb
);
1991 wide_int new_ub
= wi::add (lh_wub
, rh_wub
, s
, &ov_ub
);
1993 r
= int_range
<2> (type
, new_lb
, new_ub
);
1997 operator_minus::update_bitmask (irange
&r
, const irange
&lh
,
1998 const irange
&rh
) const
2000 update_known_bitmask (r
, MINUS_EXPR
, lh
, rh
);
2004 operator_minus::wi_fold (irange
&r
, tree type
,
2005 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2006 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2008 wi::overflow_type ov_lb
, ov_ub
;
2009 signop s
= TYPE_SIGN (type
);
2010 wide_int new_lb
= wi::sub (lh_lb
, rh_ub
, s
, &ov_lb
);
2011 wide_int new_ub
= wi::sub (lh_ub
, rh_lb
, s
, &ov_ub
);
2012 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
2016 // Return the relation between LHS and OP1 based on the relation between
2020 operator_minus::lhs_op1_relation (const irange
&, const irange
&op1
,
2021 const irange
&, relation_kind rel
) const
2023 if (!op1
.undefined_p () && TYPE_SIGN (op1
.type ()) == UNSIGNED
)
2032 return VREL_VARYING
;
2035 // Check to see if the relation REL between OP1 and OP2 has any effect on the
2036 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
2037 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
2040 minus_op1_op2_relation_effect (irange
&lhs_range
, tree type
,
2041 const irange
&op1_range ATTRIBUTE_UNUSED
,
2042 const irange
&op2_range ATTRIBUTE_UNUSED
,
2045 if (rel
== VREL_VARYING
)
2048 int_range
<2> rel_range
;
2049 unsigned prec
= TYPE_PRECISION (type
);
2050 signop sgn
= TYPE_SIGN (type
);
2052 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
2054 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
));
2055 else if (rel
== VREL_NE
)
2056 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
2058 else if (TYPE_OVERFLOW_WRAPS (type
))
2062 // For wrapping signed values and unsigned, if op1 > op2 or
2063 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
2066 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
2077 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
2079 rel_range
= int_range
<2> (type
, wi::one (prec
),
2080 wi::max_value (prec
, sgn
));
2082 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
2084 rel_range
= int_range
<2> (type
, wi::zero (prec
),
2085 wi::max_value (prec
, sgn
));
2087 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
2089 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
2090 wi::minus_one (prec
));
2092 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
2094 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
2101 lhs_range
.intersect (rel_range
);
2106 operator_minus::op1_op2_relation_effect (irange
&lhs_range
, tree type
,
2107 const irange
&op1_range
,
2108 const irange
&op2_range
,
2109 relation_kind rel
) const
2111 return minus_op1_op2_relation_effect (lhs_range
, type
, op1_range
, op2_range
,
2116 operator_minus::op1_range (irange
&r
, tree type
,
2119 relation_trio trio
) const
2121 if (lhs
.undefined_p ())
2123 // Start with the default operation.
2124 range_op_handler
minus (PLUS_EXPR
);
2127 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
2128 relation_kind rel
= trio
.lhs_op1 ();
2130 adjust_op1_for_overflow (r
, op2
, rel
, false /* PLUS_EXPR */);
2136 operator_minus::op2_range (irange
&r
, tree type
,
2139 relation_trio
) const
2141 if (lhs
.undefined_p ())
2143 return fold_range (r
, type
, op1
, lhs
);
2147 operator_min::update_bitmask (irange
&r
, const irange
&lh
,
2148 const irange
&rh
) const
2150 update_known_bitmask (r
, MIN_EXPR
, lh
, rh
);
2154 operator_min::wi_fold (irange
&r
, tree type
,
2155 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2156 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2158 signop s
= TYPE_SIGN (type
);
2159 wide_int new_lb
= wi::min (lh_lb
, rh_lb
, s
);
2160 wide_int new_ub
= wi::min (lh_ub
, rh_ub
, s
);
2161 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2166 operator_max::update_bitmask (irange
&r
, const irange
&lh
,
2167 const irange
&rh
) const
2169 update_known_bitmask (r
, MAX_EXPR
, lh
, rh
);
2173 operator_max::wi_fold (irange
&r
, tree type
,
2174 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2175 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2177 signop s
= TYPE_SIGN (type
);
2178 wide_int new_lb
= wi::max (lh_lb
, rh_lb
, s
);
2179 wide_int new_ub
= wi::max (lh_ub
, rh_ub
, s
);
2180 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2184 // Calculate the cross product of two sets of ranges and return it.
2186 // Multiplications, divisions and shifts are a bit tricky to handle,
2187 // depending on the mix of signs we have in the two ranges, we need to
2188 // operate on different values to get the minimum and maximum values
2189 // for the new range. One approach is to figure out all the
2190 // variations of range combinations and do the operations.
2192 // However, this involves several calls to compare_values and it is
2193 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
2194 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
2195 // figure the smallest and largest values to form the new range.
2198 cross_product_operator::wi_cross_product (irange
&r
, tree type
,
2199 const wide_int
&lh_lb
,
2200 const wide_int
&lh_ub
,
2201 const wide_int
&rh_lb
,
2202 const wide_int
&rh_ub
) const
2204 wide_int cp1
, cp2
, cp3
, cp4
;
2205 // Default to varying.
2206 r
.set_varying (type
);
2208 // Compute the 4 cross operations, bailing if we get an overflow we
2210 if (wi_op_overflows (cp1
, type
, lh_lb
, rh_lb
))
2212 if (wi::eq_p (lh_lb
, lh_ub
))
2214 else if (wi_op_overflows (cp3
, type
, lh_ub
, rh_lb
))
2216 if (wi::eq_p (rh_lb
, rh_ub
))
2218 else if (wi_op_overflows (cp2
, type
, lh_lb
, rh_ub
))
2220 if (wi::eq_p (lh_lb
, lh_ub
))
2222 else if (wi_op_overflows (cp4
, type
, lh_ub
, rh_ub
))
2226 signop sign
= TYPE_SIGN (type
);
2227 if (wi::gt_p (cp1
, cp2
, sign
))
2228 std::swap (cp1
, cp2
);
2229 if (wi::gt_p (cp3
, cp4
, sign
))
2230 std::swap (cp3
, cp4
);
2232 // Choose min and max from the ordered pairs.
2233 wide_int res_lb
= wi::min (cp1
, cp3
, sign
);
2234 wide_int res_ub
= wi::max (cp2
, cp4
, sign
);
2235 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
2240 operator_mult::update_bitmask (irange
&r
, const irange
&lh
,
2241 const irange
&rh
) const
2243 update_known_bitmask (r
, MULT_EXPR
, lh
, rh
);
2247 operator_mult::op1_range (irange
&r
, tree type
,
2248 const irange
&lhs
, const irange
&op2
,
2249 relation_trio
) const
2251 if (lhs
.undefined_p ())
2254 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2255 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
2256 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
2257 if (TYPE_OVERFLOW_WRAPS (type
))
2261 if (op2
.singleton_p (offset
) && offset
!= 0)
2262 return range_op_handler (TRUNC_DIV_EXPR
).fold_range (r
, type
, lhs
, op2
);
2267 operator_mult::op2_range (irange
&r
, tree type
,
2268 const irange
&lhs
, const irange
&op1
,
2269 relation_trio rel
) const
2271 return operator_mult::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
2275 operator_mult::wi_op_overflows (wide_int
&res
, tree type
,
2276 const wide_int
&w0
, const wide_int
&w1
) const
2278 wi::overflow_type overflow
= wi::OVF_NONE
;
2279 signop sign
= TYPE_SIGN (type
);
2280 res
= wi::mul (w0
, w1
, sign
, &overflow
);
2281 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2283 // For multiplication, the sign of the overflow is given
2284 // by the comparison of the signs of the operands.
2285 if (sign
== UNSIGNED
|| w0
.sign_mask () == w1
.sign_mask ())
2286 res
= wi::max_value (w0
.get_precision (), sign
);
2288 res
= wi::min_value (w0
.get_precision (), sign
);
2295 operator_mult::wi_fold (irange
&r
, tree type
,
2296 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2297 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2299 if (TYPE_OVERFLOW_UNDEFINED (type
))
2301 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2305 // Multiply the ranges when overflow wraps. This is basically fancy
2306 // code so we don't drop to varying with an unsigned
2309 // This test requires 2*prec bits if both operands are signed and
2310 // 2*prec + 2 bits if either is not. Therefore, extend the values
2311 // using the sign of the result to PREC2. From here on out,
2312 // everything is just signed math no matter what the input types
2315 signop sign
= TYPE_SIGN (type
);
2316 unsigned prec
= TYPE_PRECISION (type
);
2317 widest2_int min0
= widest2_int::from (lh_lb
, sign
);
2318 widest2_int max0
= widest2_int::from (lh_ub
, sign
);
2319 widest2_int min1
= widest2_int::from (rh_lb
, sign
);
2320 widest2_int max1
= widest2_int::from (rh_ub
, sign
);
2321 widest2_int sizem1
= wi::mask
<widest2_int
> (prec
, false);
2322 widest2_int size
= sizem1
+ 1;
2324 // Canonicalize the intervals.
2325 if (sign
== UNSIGNED
)
2327 if (wi::ltu_p (size
, min0
+ max0
))
2332 if (wi::ltu_p (size
, min1
+ max1
))
2339 // Sort the 4 products so that min is in prod0 and max is in
2341 widest2_int prod0
= min0
* min1
;
2342 widest2_int prod1
= min0
* max1
;
2343 widest2_int prod2
= max0
* min1
;
2344 widest2_int prod3
= max0
* max1
;
2346 // min0min1 > max0max1
2348 std::swap (prod0
, prod3
);
2350 // min0max1 > max0min1
2352 std::swap (prod1
, prod2
);
2355 std::swap (prod0
, prod1
);
2358 std::swap (prod2
, prod3
);
2361 prod2
= prod3
- prod0
;
2362 if (wi::geu_p (prod2
, sizem1
))
2364 // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
2365 if (TYPE_UNSIGNED (type
) && rh_lb
== rh_ub
2366 && wi::exact_log2 (rh_lb
) != -1 && prec
> 1)
2368 r
.set (type
, rh_lb
, wi::max_value (prec
, sign
));
2370 zero
.set_zero (type
);
2374 // The range covers all values.
2375 r
.set_varying (type
);
2379 wide_int new_lb
= wide_int::from (prod0
, prec
, sign
);
2380 wide_int new_ub
= wide_int::from (prod3
, prec
, sign
);
2381 create_possibly_reversed_range (r
, type
, new_lb
, new_ub
);
2385 class operator_widen_mult_signed
: public range_operator
2388 virtual void wi_fold (irange
&r
, tree type
,
2389 const wide_int
&lh_lb
,
2390 const wide_int
&lh_ub
,
2391 const wide_int
&rh_lb
,
2392 const wide_int
&rh_ub
)
2394 } op_widen_mult_signed
;
2397 operator_widen_mult_signed::wi_fold (irange
&r
, tree type
,
2398 const wide_int
&lh_lb
,
2399 const wide_int
&lh_ub
,
2400 const wide_int
&rh_lb
,
2401 const wide_int
&rh_ub
) const
2403 signop s
= TYPE_SIGN (type
);
2405 wide_int lh_wlb
= wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, SIGNED
);
2406 wide_int lh_wub
= wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, SIGNED
);
2407 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
2408 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
2410 /* We don't expect a widening multiplication to be able to overflow but range
2411 calculations for multiplications are complicated. After widening the
2412 operands lets call the base class. */
2413 return op_mult
.wi_fold (r
, type
, lh_wlb
, lh_wub
, rh_wlb
, rh_wub
);
2417 class operator_widen_mult_unsigned
: public range_operator
2420 virtual void wi_fold (irange
&r
, tree type
,
2421 const wide_int
&lh_lb
,
2422 const wide_int
&lh_ub
,
2423 const wide_int
&rh_lb
,
2424 const wide_int
&rh_ub
)
2426 } op_widen_mult_unsigned
;
2429 operator_widen_mult_unsigned::wi_fold (irange
&r
, tree type
,
2430 const wide_int
&lh_lb
,
2431 const wide_int
&lh_ub
,
2432 const wide_int
&rh_lb
,
2433 const wide_int
&rh_ub
) const
2435 signop s
= TYPE_SIGN (type
);
2437 wide_int lh_wlb
= wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, UNSIGNED
);
2438 wide_int lh_wub
= wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, UNSIGNED
);
2439 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
2440 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
2442 /* We don't expect a widening multiplication to be able to overflow but range
2443 calculations for multiplications are complicated. After widening the
2444 operands lets call the base class. */
2445 return op_mult
.wi_fold (r
, type
, lh_wlb
, lh_wub
, rh_wlb
, rh_wub
);
2448 class operator_div
: public cross_product_operator
2450 using range_operator::update_bitmask
;
2452 operator_div (tree_code div_kind
) { m_code
= div_kind
; }
2453 virtual void wi_fold (irange
&r
, tree type
,
2454 const wide_int
&lh_lb
,
2455 const wide_int
&lh_ub
,
2456 const wide_int
&rh_lb
,
2457 const wide_int
&rh_ub
) const final override
;
2458 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
2459 const wide_int
&, const wide_int
&)
2460 const final override
;
2461 void update_bitmask (irange
&r
, const irange
&lh
, const irange
&rh
) const
2462 { update_known_bitmask (r
, m_code
, lh
, rh
); }
2467 static operator_div
op_trunc_div (TRUNC_DIV_EXPR
);
2468 static operator_div
op_floor_div (FLOOR_DIV_EXPR
);
2469 static operator_div
op_round_div (ROUND_DIV_EXPR
);
2470 static operator_div
op_ceil_div (CEIL_DIV_EXPR
);
2473 operator_div::wi_op_overflows (wide_int
&res
, tree type
,
2474 const wide_int
&w0
, const wide_int
&w1
) const
2479 wi::overflow_type overflow
= wi::OVF_NONE
;
2480 signop sign
= TYPE_SIGN (type
);
2484 case EXACT_DIV_EXPR
:
2485 case TRUNC_DIV_EXPR
:
2486 res
= wi::div_trunc (w0
, w1
, sign
, &overflow
);
2488 case FLOOR_DIV_EXPR
:
2489 res
= wi::div_floor (w0
, w1
, sign
, &overflow
);
2491 case ROUND_DIV_EXPR
:
2492 res
= wi::div_round (w0
, w1
, sign
, &overflow
);
2495 res
= wi::div_ceil (w0
, w1
, sign
, &overflow
);
2501 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2503 // For division, the only case is -INF / -1 = +INF.
2504 res
= wi::max_value (w0
.get_precision (), sign
);
2511 operator_div::wi_fold (irange
&r
, tree type
,
2512 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2513 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2515 const wide_int dividend_min
= lh_lb
;
2516 const wide_int dividend_max
= lh_ub
;
2517 const wide_int divisor_min
= rh_lb
;
2518 const wide_int divisor_max
= rh_ub
;
2519 signop sign
= TYPE_SIGN (type
);
2520 unsigned prec
= TYPE_PRECISION (type
);
2521 wide_int extra_min
, extra_max
;
2523 // If we know we won't divide by zero, just do the division.
2524 if (!wi_includes_zero_p (type
, divisor_min
, divisor_max
))
2526 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2527 divisor_min
, divisor_max
);
2531 // If we're definitely dividing by zero, there's nothing to do.
2532 if (wi_zero_p (type
, divisor_min
, divisor_max
))
2538 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2539 // skip any division by zero.
2541 // First divide by the negative numbers, if any.
2542 if (wi::neg_p (divisor_min
, sign
))
2543 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2544 divisor_min
, wi::minus_one (prec
));
2548 // Then divide by the non-zero positive numbers, if any.
2549 if (wi::gt_p (divisor_max
, wi::zero (prec
), sign
))
2552 wi_cross_product (tmp
, type
, dividend_min
, dividend_max
,
2553 wi::one (prec
), divisor_max
);
2556 // We shouldn't still have undefined here.
2557 gcc_checking_assert (!r
.undefined_p ());
2561 class operator_exact_divide
: public operator_div
2563 using range_operator::op1_range
;
2565 operator_exact_divide () : operator_div (EXACT_DIV_EXPR
) { }
2566 virtual bool op1_range (irange
&r
, tree type
,
2569 relation_trio
) const;
2574 operator_exact_divide::op1_range (irange
&r
, tree type
,
2577 relation_trio
) const
2579 if (lhs
.undefined_p ())
2582 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
2583 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2584 // We wont bother trying to enumerate all the in between stuff :-P
2585 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2586 // the time however.
2587 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2588 if (op2
.singleton_p (offset
) && offset
!= 0)
2589 return range_op_handler (MULT_EXPR
).fold_range (r
, type
, lhs
, op2
);
2594 class operator_lshift
: public cross_product_operator
2596 using range_operator::fold_range
;
2597 using range_operator::op1_range
;
2598 using range_operator::update_bitmask
;
2600 virtual bool op1_range (irange
&r
, tree type
, const irange
&lhs
,
2601 const irange
&op2
, relation_trio rel
= TRIO_VARYING
)
2602 const final override
;
2603 virtual bool fold_range (irange
&r
, tree type
, const irange
&op1
,
2604 const irange
&op2
, relation_trio rel
= TRIO_VARYING
)
2605 const final override
;
2607 virtual void wi_fold (irange
&r
, tree type
,
2608 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2609 const wide_int
&rh_lb
,
2610 const wide_int
&rh_ub
) const final override
;
2611 virtual bool wi_op_overflows (wide_int
&res
,
2614 const wide_int
&) const final override
;
2615 void update_bitmask (irange
&r
, const irange
&lh
,
2616 const irange
&rh
) const final override
2617 { update_known_bitmask (r
, LSHIFT_EXPR
, lh
, rh
); }
2618 // Check compatibility of LHS and op1.
2619 bool operand_check_p (tree t1
, tree t2
, tree
) const final override
2620 { return range_compatible_p (t1
, t2
); }
2623 class operator_rshift
: public cross_product_operator
2625 using range_operator::fold_range
;
2626 using range_operator::op1_range
;
2627 using range_operator::lhs_op1_relation
;
2628 using range_operator::update_bitmask
;
2630 virtual bool fold_range (irange
&r
, tree type
, const irange
&op1
,
2631 const irange
&op2
, relation_trio rel
= TRIO_VARYING
)
2632 const final override
;
2633 virtual void wi_fold (irange
&r
, tree type
,
2634 const wide_int
&lh_lb
,
2635 const wide_int
&lh_ub
,
2636 const wide_int
&rh_lb
,
2637 const wide_int
&rh_ub
) const final override
;
2638 virtual bool wi_op_overflows (wide_int
&res
,
2641 const wide_int
&w1
) const final override
;
2642 virtual bool op1_range (irange
&, tree type
, const irange
&lhs
,
2643 const irange
&op2
, relation_trio rel
= TRIO_VARYING
)
2644 const final override
;
2645 virtual relation_kind
lhs_op1_relation (const irange
&lhs
, const irange
&op1
,
2646 const irange
&op2
, relation_kind rel
)
2647 const final override
;
2648 void update_bitmask (irange
&r
, const irange
&lh
,
2649 const irange
&rh
) const final override
2650 { update_known_bitmask (r
, RSHIFT_EXPR
, lh
, rh
); }
2651 // Check compatibility of LHS and op1.
2652 bool operand_check_p (tree t1
, tree t2
, tree
) const final override
2653 { return range_compatible_p (t1
, t2
); }
2658 operator_rshift::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
2661 relation_kind
) const
2663 // If both operands range are >= 0, then the LHS <= op1.
2664 if (!op1
.undefined_p () && !op2
.undefined_p ()
2665 && wi::ge_p (op1
.lower_bound (), 0, TYPE_SIGN (op1
.type ()))
2666 && wi::ge_p (op2
.lower_bound (), 0, TYPE_SIGN (op2
.type ())))
2668 return VREL_VARYING
;
2672 operator_lshift::fold_range (irange
&r
, tree type
,
2675 relation_trio rel
) const
2677 int_range_max shift_range
;
2678 if (!get_shift_range (shift_range
, type
, op2
))
2680 if (op2
.undefined_p ())
2687 // Transform left shifts by constants into multiplies.
2688 if (shift_range
.singleton_p ())
2690 unsigned shift
= shift_range
.lower_bound ().to_uhwi ();
2691 wide_int tmp
= wi::set_bit_in_zero (shift
, TYPE_PRECISION (type
));
2692 int_range
<1> mult (type
, tmp
, tmp
);
2694 // Force wrapping multiplication.
2695 bool saved_flag_wrapv
= flag_wrapv
;
2696 bool saved_flag_wrapv_pointer
= flag_wrapv_pointer
;
2698 flag_wrapv_pointer
= 1;
2699 bool b
= op_mult
.fold_range (r
, type
, op1
, mult
);
2700 flag_wrapv
= saved_flag_wrapv
;
2701 flag_wrapv_pointer
= saved_flag_wrapv_pointer
;
2705 // Otherwise, invoke the generic fold routine.
2706 return range_operator::fold_range (r
, type
, op1
, shift_range
, rel
);
2710 operator_lshift::wi_fold (irange
&r
, tree type
,
2711 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2712 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2714 signop sign
= TYPE_SIGN (type
);
2715 unsigned prec
= TYPE_PRECISION (type
);
2716 int overflow_pos
= sign
== SIGNED
? prec
- 1 : prec
;
2717 int bound_shift
= overflow_pos
- rh_ub
.to_shwi ();
2718 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2719 // overflow. However, for that to happen, rh.max needs to be zero,
2720 // which means rh is a singleton range of zero, which means we simply return
2721 // [lh_lb, lh_ub] as the range.
2722 if (wi::eq_p (rh_ub
, rh_lb
) && wi::eq_p (rh_ub
, 0))
2724 r
= int_range
<2> (type
, lh_lb
, lh_ub
);
2728 wide_int bound
= wi::set_bit_in_zero (bound_shift
, prec
);
2729 wide_int complement
= ~(bound
- 1);
2730 wide_int low_bound
, high_bound
;
2731 bool in_bounds
= false;
2733 if (sign
== UNSIGNED
)
2736 high_bound
= complement
;
2737 if (wi::ltu_p (lh_ub
, low_bound
))
2739 // [5, 6] << [1, 2] == [10, 24].
2740 // We're shifting out only zeroes, the value increases
2744 else if (wi::ltu_p (high_bound
, lh_lb
))
2746 // [0xffffff00, 0xffffffff] << [1, 2]
2747 // == [0xfffffc00, 0xfffffffe].
2748 // We're shifting out only ones, the value decreases
2755 // [-1, 1] << [1, 2] == [-4, 4]
2756 low_bound
= complement
;
2758 if (wi::lts_p (lh_ub
, high_bound
)
2759 && wi::lts_p (low_bound
, lh_lb
))
2761 // For non-negative numbers, we're shifting out only zeroes,
2762 // the value increases monotonically. For negative numbers,
2763 // we're shifting out only ones, the value decreases
2770 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2772 r
.set_varying (type
);
2776 operator_lshift::wi_op_overflows (wide_int
&res
, tree type
,
2777 const wide_int
&w0
, const wide_int
&w1
) const
2779 signop sign
= TYPE_SIGN (type
);
2782 // It's unclear from the C standard whether shifts can overflow.
2783 // The following code ignores overflow; perhaps a C standard
2784 // interpretation ruling is needed.
2785 res
= wi::rshift (w0
, -w1
, sign
);
2788 res
= wi::lshift (w0
, w1
);
2793 operator_lshift::op1_range (irange
&r
,
2797 relation_trio
) const
2799 if (lhs
.undefined_p ())
2802 if (!contains_zero_p (lhs
))
2803 r
.set_nonzero (type
);
2805 r
.set_varying (type
);
2808 if (op2
.singleton_p (shift
))
2810 if (wi::lt_p (shift
, 0, SIGNED
))
2812 if (wi::ge_p (shift
, wi::uhwi (TYPE_PRECISION (type
),
2813 TYPE_PRECISION (op2
.type ())),
2822 // Work completely in unsigned mode to start.
2824 int_range_max tmp_range
;
2825 if (TYPE_SIGN (type
) == SIGNED
)
2827 int_range_max tmp
= lhs
;
2828 utype
= unsigned_type_for (type
);
2829 range_cast (tmp
, utype
);
2830 op_rshift
.fold_range (tmp_range
, utype
, tmp
, op2
);
2833 op_rshift
.fold_range (tmp_range
, utype
, lhs
, op2
);
2835 // Start with ranges which can produce the LHS by right shifting the
2836 // result by the shift amount.
2837 // ie [0x08, 0xF0] = op1 << 2 will start with
2838 // [00001000, 11110000] = op1 << 2
2839 // [0x02, 0x4C] aka [00000010, 00111100]
2841 // Then create a range from the LB with the least significant upper bit
2842 // set, to the upper bound with all the bits set.
2843 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2845 // Ideally we do this for each subrange, but just lump them all for now.
2846 unsigned low_bits
= TYPE_PRECISION (utype
) - shift
.to_uhwi ();
2847 wide_int up_mask
= wi::mask (low_bits
, true, TYPE_PRECISION (utype
));
2848 wide_int new_ub
= wi::bit_or (up_mask
, tmp_range
.upper_bound ());
2849 wide_int new_lb
= wi::set_bit (tmp_range
.lower_bound (), low_bits
);
2850 int_range
<2> fill_range (utype
, new_lb
, new_ub
);
2851 tmp_range
.union_ (fill_range
);
2854 range_cast (tmp_range
, type
);
2856 r
.intersect (tmp_range
);
2860 return !r
.varying_p ();
2864 operator_rshift::op1_range (irange
&r
,
2868 relation_trio
) const
2870 if (lhs
.undefined_p ())
2873 if (op2
.singleton_p (shift
))
2875 // Ignore nonsensical shifts.
2876 unsigned prec
= TYPE_PRECISION (type
);
2877 if (wi::ge_p (shift
,
2878 wi::uhwi (prec
, TYPE_PRECISION (op2
.type ())),
2887 // Folding the original operation may discard some impossible
2888 // ranges from the LHS.
2889 int_range_max lhs_refined
;
2890 op_rshift
.fold_range (lhs_refined
, type
, int_range
<1> (type
), op2
);
2891 lhs_refined
.intersect (lhs
);
2892 if (lhs_refined
.undefined_p ())
2897 int_range_max
shift_range (op2
.type (), shift
, shift
);
2898 int_range_max lb
, ub
;
2899 op_lshift
.fold_range (lb
, type
, lhs_refined
, shift_range
);
2901 // 0000 0111 = OP1 >> 3
2903 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2904 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2905 // right hand side (0x07).
2906 wide_int mask
= wi::bit_not (wi::lshift (wi::minus_one (prec
), shift
));
2907 int_range_max
mask_range (type
,
2908 wi::zero (TYPE_PRECISION (type
)),
2910 op_plus
.fold_range (ub
, type
, lb
, mask_range
);
2913 if (!contains_zero_p (lhs_refined
))
2915 mask_range
.invert ();
2916 r
.intersect (mask_range
);
2924 operator_rshift::wi_op_overflows (wide_int
&res
,
2927 const wide_int
&w1
) const
2929 signop sign
= TYPE_SIGN (type
);
2931 res
= wi::lshift (w0
, -w1
);
2934 // It's unclear from the C standard whether shifts can overflow.
2935 // The following code ignores overflow; perhaps a C standard
2936 // interpretation ruling is needed.
2937 res
= wi::rshift (w0
, w1
, sign
);
2943 operator_rshift::fold_range (irange
&r
, tree type
,
2946 relation_trio rel
) const
2948 int_range_max shift
;
2949 if (!get_shift_range (shift
, type
, op2
))
2951 if (op2
.undefined_p ())
2958 return range_operator::fold_range (r
, type
, op1
, shift
, rel
);
2962 operator_rshift::wi_fold (irange
&r
, tree type
,
2963 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2964 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2966 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2970 // Add a partial equivalence between the LHS and op1 for casts.
2973 operator_cast::lhs_op1_relation (const irange
&lhs
,
2975 const irange
&op2 ATTRIBUTE_UNUSED
,
2976 relation_kind
) const
2978 if (lhs
.undefined_p () || op1
.undefined_p ())
2979 return VREL_VARYING
;
2980 unsigned lhs_prec
= TYPE_PRECISION (lhs
.type ());
2981 unsigned op1_prec
= TYPE_PRECISION (op1
.type ());
2982 // If the result gets sign extended into a larger type check first if this
2983 // qualifies as a partial equivalence.
2984 if (TYPE_SIGN (op1
.type ()) == SIGNED
&& lhs_prec
> op1_prec
)
2986 // If the result is sign extended, and the LHS is larger than op1,
2987 // check if op1's range can be negative as the sign extension will
2988 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2989 int_range
<3> negs
= range_negatives (op1
.type ());
2990 negs
.intersect (op1
);
2991 if (!negs
.undefined_p ())
2992 return VREL_VARYING
;
2995 unsigned prec
= MIN (lhs_prec
, op1_prec
);
2996 return bits_to_pe (prec
);
2999 // Return TRUE if casting from INNER to OUTER is a truncating cast.
3002 operator_cast::truncating_cast_p (const irange
&inner
,
3003 const irange
&outer
) const
3005 return TYPE_PRECISION (outer
.type ()) < TYPE_PRECISION (inner
.type ());
3008 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
3011 operator_cast::inside_domain_p (const wide_int
&min
,
3012 const wide_int
&max
,
3013 const irange
&range
) const
3015 wide_int domain_min
= irange_val_min (range
.type ());
3016 wide_int domain_max
= irange_val_max (range
.type ());
3017 signop domain_sign
= TYPE_SIGN (range
.type ());
3018 return (wi::le_p (min
, domain_max
, domain_sign
)
3019 && wi::le_p (max
, domain_max
, domain_sign
)
3020 && wi::ge_p (min
, domain_min
, domain_sign
)
3021 && wi::ge_p (max
, domain_min
, domain_sign
));
3025 // Helper for fold_range which work on a pair at a time.
3028 operator_cast::fold_pair (irange
&r
, unsigned index
,
3029 const irange
&inner
,
3030 const irange
&outer
) const
3032 tree inner_type
= inner
.type ();
3033 tree outer_type
= outer
.type ();
3034 signop inner_sign
= TYPE_SIGN (inner_type
);
3035 unsigned outer_prec
= TYPE_PRECISION (outer_type
);
3037 // check to see if casting from INNER to OUTER is a conversion that
3038 // fits in the resulting OUTER type.
3039 wide_int inner_lb
= inner
.lower_bound (index
);
3040 wide_int inner_ub
= inner
.upper_bound (index
);
3041 if (truncating_cast_p (inner
, outer
))
3043 // We may be able to accommodate a truncating cast if the
3044 // resulting range can be represented in the target type...
3045 if (wi::rshift (wi::sub (inner_ub
, inner_lb
),
3046 wi::uhwi (outer_prec
, TYPE_PRECISION (inner
.type ())),
3049 r
.set_varying (outer_type
);
3053 // ...but we must still verify that the final range fits in the
3054 // domain. This catches -fstrict-enum restrictions where the domain
3055 // range is smaller than what fits in the underlying type.
3056 wide_int min
= wide_int::from (inner_lb
, outer_prec
, inner_sign
);
3057 wide_int max
= wide_int::from (inner_ub
, outer_prec
, inner_sign
);
3058 if (inside_domain_p (min
, max
, outer
))
3059 create_possibly_reversed_range (r
, outer_type
, min
, max
);
3061 r
.set_varying (outer_type
);
3066 operator_cast::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3067 const irange
&inner
,
3068 const irange
&outer
,
3069 relation_trio
) const
3071 if (empty_range_varying (r
, type
, inner
, outer
))
3074 gcc_checking_assert (outer
.varying_p ());
3075 gcc_checking_assert (inner
.num_pairs () > 0);
3077 // Avoid a temporary by folding the first pair directly into the result.
3078 fold_pair (r
, 0, inner
, outer
);
3080 // Then process any additional pairs by unioning with their results.
3081 for (unsigned x
= 1; x
< inner
.num_pairs (); ++x
)
3084 fold_pair (tmp
, x
, inner
, outer
);
3090 update_bitmask (r
, inner
, outer
);
3095 operator_cast::update_bitmask (irange
&r
, const irange
&lh
,
3096 const irange
&rh
) const
3098 update_known_bitmask (r
, CONVERT_EXPR
, lh
, rh
);
3102 operator_cast::op1_range (irange
&r
, tree type
,
3105 relation_trio
) const
3107 if (lhs
.undefined_p ())
3109 tree lhs_type
= lhs
.type ();
3110 gcc_checking_assert (types_compatible_p (op2
.type(), type
));
3112 // If we are calculating a pointer, shortcut to what we really care about.
3113 if (POINTER_TYPE_P (type
))
3115 // Conversion from other pointers or a constant (including 0/NULL)
3116 // are straightforward.
3117 if (POINTER_TYPE_P (lhs
.type ())
3118 || (lhs
.singleton_p ()
3119 && TYPE_PRECISION (lhs
.type ()) >= TYPE_PRECISION (type
)))
3122 range_cast (r
, type
);
3126 // If the LHS is not a pointer nor a singleton, then it is
3127 // either VARYING or non-zero.
3128 if (!lhs
.undefined_p () && !contains_zero_p (lhs
))
3129 r
.set_nonzero (type
);
3131 r
.set_varying (type
);
3137 if (truncating_cast_p (op2
, lhs
))
3139 if (lhs
.varying_p ())
3140 r
.set_varying (type
);
3143 // We want to insert the LHS as an unsigned value since it
3144 // would not trigger the signed bit of the larger type.
3145 int_range_max converted_lhs
= lhs
;
3146 range_cast (converted_lhs
, unsigned_type_for (lhs_type
));
3147 range_cast (converted_lhs
, type
);
3148 // Start by building the positive signed outer range for the type.
3149 wide_int lim
= wi::set_bit_in_zero (TYPE_PRECISION (lhs_type
),
3150 TYPE_PRECISION (type
));
3151 create_possibly_reversed_range (r
, type
, lim
,
3152 wi::max_value (TYPE_PRECISION (type
),
3154 // For the signed part, we need to simply union the 2 ranges now.
3155 r
.union_ (converted_lhs
);
3157 // Create maximal negative number outside of LHS bits.
3158 lim
= wi::mask (TYPE_PRECISION (lhs_type
), true,
3159 TYPE_PRECISION (type
));
3160 // Add this to the unsigned LHS range(s).
3161 int_range_max
lim_range (type
, lim
, lim
);
3162 int_range_max lhs_neg
;
3163 range_op_handler (PLUS_EXPR
).fold_range (lhs_neg
, type
,
3164 converted_lhs
, lim_range
);
3165 // lhs_neg now has all the negative versions of the LHS.
3166 // Now union in all the values from SIGNED MIN (0x80000) to
3167 // lim-1 in order to fill in all the ranges with the upper
3170 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
3171 // we don't need to create a range from min to lim-1
3172 // calculate neg range traps trying to create [lim, lim - 1].
3173 wide_int min_val
= wi::min_value (TYPE_PRECISION (type
), SIGNED
);
3176 int_range_max
neg (type
,
3177 wi::min_value (TYPE_PRECISION (type
),
3180 lhs_neg
.union_ (neg
);
3182 // And finally, munge the signed and unsigned portions.
3185 // And intersect with any known value passed in the extra operand.
3191 if (TYPE_PRECISION (lhs_type
) == TYPE_PRECISION (type
))
3195 // The cast is not truncating, and the range is restricted to
3196 // the range of the RHS by this assignment.
3198 // Cast the range of the RHS to the type of the LHS.
3199 fold_range (tmp
, lhs_type
, int_range
<1> (type
), int_range
<1> (lhs_type
));
3200 // Intersect this with the LHS range will produce the range,
3201 // which will be cast to the RHS type before returning.
3202 tmp
.intersect (lhs
);
3205 // Cast the calculated range to the type of the RHS.
3206 fold_range (r
, type
, tmp
, int_range
<1> (type
));
3211 class operator_logical_and
: public range_operator
3213 using range_operator::fold_range
;
3214 using range_operator::op1_range
;
3215 using range_operator::op2_range
;
3217 virtual bool fold_range (irange
&r
, tree type
,
3220 relation_trio rel
= TRIO_VARYING
) const;
3221 virtual bool op1_range (irange
&r
, tree type
,
3224 relation_trio rel
= TRIO_VARYING
) const;
3225 virtual bool op2_range (irange
&r
, tree type
,
3228 relation_trio rel
= TRIO_VARYING
) const;
3229 // Check compatibility of all operands.
3230 bool operand_check_p (tree t1
, tree t2
, tree t3
) const final override
3231 { return range_compatible_p (t1
, t2
) && range_compatible_p (t1
, t3
); }
3235 operator_logical_and::fold_range (irange
&r
, tree type
,
3238 relation_trio
) const
3240 if (empty_range_varying (r
, type
, lh
, rh
))
3243 // Precision of LHS and both operands must match.
3244 if (TYPE_PRECISION (lh
.type ()) != TYPE_PRECISION (type
)
3245 || TYPE_PRECISION (type
) != TYPE_PRECISION (rh
.type ()))
3248 // 0 && anything is 0.
3249 if ((wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (lh
.upper_bound (), 0))
3250 || (wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (rh
.upper_bound (), 0)))
3251 r
= range_false (type
);
3252 else if (contains_zero_p (lh
) || contains_zero_p (rh
))
3253 // To reach this point, there must be a logical 1 on each side, and
3254 // the only remaining question is whether there is a zero or not.
3255 r
= range_true_and_false (type
);
3257 r
= range_true (type
);
3262 operator_logical_and::op1_range (irange
&r
, tree type
,
3264 const irange
&op2 ATTRIBUTE_UNUSED
,
3265 relation_trio
) const
3267 switch (get_bool_state (r
, lhs
, type
))
3270 // A true result means both sides of the AND must be true.
3271 r
= range_true (type
);
3274 // Any other result means only one side has to be false, the
3275 // other side can be anything. So we cannot be sure of any
3277 r
= range_true_and_false (type
);
3284 operator_logical_and::op2_range (irange
&r
, tree type
,
3287 relation_trio
) const
3289 return operator_logical_and::op1_range (r
, type
, lhs
, op1
);
3294 operator_bitwise_and::update_bitmask (irange
&r
, const irange
&lh
,
3295 const irange
&rh
) const
3297 update_known_bitmask (r
, BIT_AND_EXPR
, lh
, rh
);
3300 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3301 // by considering the number of leading redundant sign bit copies.
3302 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3303 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
3305 wi_optimize_signed_bitwise_op (irange
&r
, tree type
,
3306 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3307 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3309 int lh_clrsb
= MIN (wi::clrsb (lh_lb
), wi::clrsb (lh_ub
));
3310 int rh_clrsb
= MIN (wi::clrsb (rh_lb
), wi::clrsb (rh_ub
));
3311 int new_clrsb
= MIN (lh_clrsb
, rh_clrsb
);
3314 int type_prec
= TYPE_PRECISION (type
);
3315 int rprec
= (type_prec
- new_clrsb
) - 1;
3316 value_range_with_overflow (r
, type
,
3317 wi::mask (rprec
, true, type_prec
),
3318 wi::mask (rprec
, false, type_prec
));
3322 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
3326 operator_bitwise_and::lhs_op1_relation (const irange
&lhs
,
3329 relation_kind
) const
3331 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
3332 return VREL_VARYING
;
3333 if (!op2
.singleton_p ())
3334 return VREL_VARYING
;
3335 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
3336 int prec1
= TYPE_PRECISION (op1
.type ());
3337 int prec2
= TYPE_PRECISION (op2
.type ());
3339 wide_int mask
= op2
.lower_bound ();
3340 if (wi::eq_p (mask
, wi::mask (8, false, prec2
)))
3342 else if (wi::eq_p (mask
, wi::mask (16, false, prec2
)))
3344 else if (wi::eq_p (mask
, wi::mask (32, false, prec2
)))
3346 else if (wi::eq_p (mask
, wi::mask (64, false, prec2
)))
3348 return bits_to_pe (MIN (prec1
, mask_prec
));
3351 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3352 // possible. Basically, see if we can optimize:
3356 // [LB op Z, UB op Z]
3358 // If the optimization was successful, accumulate the range in R and
3362 wi_optimize_and_or (irange
&r
,
3363 enum tree_code code
,
3365 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3366 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3368 // Calculate the singleton mask among the ranges, if any.
3369 wide_int lower_bound
, upper_bound
, mask
;
3370 if (wi::eq_p (rh_lb
, rh_ub
))
3373 lower_bound
= lh_lb
;
3374 upper_bound
= lh_ub
;
3376 else if (wi::eq_p (lh_lb
, lh_ub
))
3379 lower_bound
= rh_lb
;
3380 upper_bound
= rh_ub
;
3385 // If Z is a constant which (for op | its bitwise not) has n
3386 // consecutive least significant bits cleared followed by m 1
3387 // consecutive bits set immediately above it and either
3388 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
3390 // The least significant n bits of all the values in the range are
3391 // cleared or set, the m bits above it are preserved and any bits
3392 // above these are required to be the same for all values in the
3396 if (code
== BIT_IOR_EXPR
)
3398 if (wi::eq_p (w
, 0))
3399 n
= w
.get_precision ();
3403 w
= ~(w
| wi::mask (n
, false, w
.get_precision ()));
3404 if (wi::eq_p (w
, 0))
3405 m
= w
.get_precision () - n
;
3407 m
= wi::ctz (w
) - n
;
3409 wide_int new_mask
= wi::mask (m
+ n
, true, w
.get_precision ());
3410 if ((new_mask
& lower_bound
) != (new_mask
& upper_bound
))
3413 wide_int res_lb
, res_ub
;
3414 if (code
== BIT_AND_EXPR
)
3416 res_lb
= wi::bit_and (lower_bound
, mask
);
3417 res_ub
= wi::bit_and (upper_bound
, mask
);
3419 else if (code
== BIT_IOR_EXPR
)
3421 res_lb
= wi::bit_or (lower_bound
, mask
);
3422 res_ub
= wi::bit_or (upper_bound
, mask
);
3426 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
3428 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3429 if (code
== BIT_IOR_EXPR
&& wi::ne_p (mask
, 0))
3432 tmp
.set_nonzero (type
);
3438 // For range [LB, UB] compute two wide_int bit masks.
3440 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3441 // for all numbers in the range the bit is 0, otherwise it might be 0
3444 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3445 // for all numbers in the range the bit is 1, otherwise it might be 0
3449 wi_set_zero_nonzero_bits (tree type
,
3450 const wide_int
&lb
, const wide_int
&ub
,
3451 wide_int
&maybe_nonzero
,
3452 wide_int
&mustbe_nonzero
)
3454 signop sign
= TYPE_SIGN (type
);
3456 if (wi::eq_p (lb
, ub
))
3457 maybe_nonzero
= mustbe_nonzero
= lb
;
3458 else if (wi::ge_p (lb
, 0, sign
) || wi::lt_p (ub
, 0, sign
))
3460 wide_int xor_mask
= lb
^ ub
;
3461 maybe_nonzero
= lb
| ub
;
3462 mustbe_nonzero
= lb
& ub
;
3465 wide_int mask
= wi::mask (wi::floor_log2 (xor_mask
), false,
3466 maybe_nonzero
.get_precision ());
3467 maybe_nonzero
= maybe_nonzero
| mask
;
3468 mustbe_nonzero
= wi::bit_and_not (mustbe_nonzero
, mask
);
3473 maybe_nonzero
= wi::minus_one (lb
.get_precision ());
3474 mustbe_nonzero
= wi::zero (lb
.get_precision ());
3479 operator_bitwise_and::wi_fold (irange
&r
, tree type
,
3480 const wide_int
&lh_lb
,
3481 const wide_int
&lh_ub
,
3482 const wide_int
&rh_lb
,
3483 const wide_int
&rh_ub
) const
3485 if (wi_optimize_and_or (r
, BIT_AND_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3488 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3489 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3490 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3491 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3492 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3493 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3495 wide_int new_lb
= mustbe_nonzero_lh
& mustbe_nonzero_rh
;
3496 wide_int new_ub
= maybe_nonzero_lh
& maybe_nonzero_rh
;
3497 signop sign
= TYPE_SIGN (type
);
3498 unsigned prec
= TYPE_PRECISION (type
);
3499 // If both input ranges contain only negative values, we can
3500 // truncate the result range maximum to the minimum of the
3501 // input range maxima.
3502 if (wi::lt_p (lh_ub
, 0, sign
) && wi::lt_p (rh_ub
, 0, sign
))
3504 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3505 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3507 // If either input range contains only non-negative values
3508 // we can truncate the result range maximum to the respective
3509 // maximum of the input range.
3510 if (wi::ge_p (lh_lb
, 0, sign
))
3511 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3512 if (wi::ge_p (rh_lb
, 0, sign
))
3513 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3514 // PR68217: In case of signed & sign-bit-CST should
3515 // result in [-INF, 0] instead of [-INF, INF].
3516 if (wi::gt_p (new_lb
, new_ub
, sign
))
3518 wide_int sign_bit
= wi::set_bit_in_zero (prec
- 1, prec
);
3520 && ((wi::eq_p (lh_lb
, lh_ub
)
3521 && !wi::cmps (lh_lb
, sign_bit
))
3522 || (wi::eq_p (rh_lb
, rh_ub
)
3523 && !wi::cmps (rh_lb
, sign_bit
))))
3525 new_lb
= wi::min_value (prec
, sign
);
3526 new_ub
= wi::zero (prec
);
3529 // If the limits got swapped around, return varying.
3530 if (wi::gt_p (new_lb
, new_ub
,sign
))
3533 && wi_optimize_signed_bitwise_op (r
, type
,
3537 r
.set_varying (type
);
3540 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3544 set_nonzero_range_from_mask (irange
&r
, tree type
, const irange
&lhs
)
3546 if (lhs
.undefined_p () || contains_zero_p (lhs
))
3547 r
.set_varying (type
);
3549 r
.set_nonzero (type
);
3552 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3553 (otherwise return VAL). VAL and MASK must be zero-extended for
3554 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3555 (to transform signed values into unsigned) and at the end xor
3559 masked_increment (const wide_int
&val_in
, const wide_int
&mask
,
3560 const wide_int
&sgnbit
, unsigned int prec
)
3562 wide_int bit
= wi::one (prec
), res
;
3565 wide_int val
= val_in
^ sgnbit
;
3566 for (i
= 0; i
< prec
; i
++, bit
+= bit
)
3569 if ((res
& bit
) == 0)
3572 res
= wi::bit_and_not (val
+ bit
, res
);
3574 if (wi::gtu_p (res
, val
))
3575 return res
^ sgnbit
;
3577 return val
^ sgnbit
;
3580 // This was shamelessly stolen from register_edge_assert_for_2 and
3581 // adjusted to work with iranges.
3584 operator_bitwise_and::simple_op1_range_solver (irange
&r
, tree type
,
3586 const irange
&op2
) const
3588 if (!op2
.singleton_p ())
3590 set_nonzero_range_from_mask (r
, type
, lhs
);
3593 unsigned int nprec
= TYPE_PRECISION (type
);
3594 wide_int cst2v
= op2
.lower_bound ();
3595 bool cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (type
));
3598 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
3600 sgnbit
= wi::zero (nprec
);
3602 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3604 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3605 // maximum unsigned value is ~0. For signed comparison, if CST2
3606 // doesn't have the most significant bit set, handle it similarly. If
3607 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3608 wide_int valv
= lhs
.lower_bound ();
3609 wide_int minv
= valv
& cst2v
, maxv
;
3610 bool we_know_nothing
= false;
3613 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3614 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3617 // If we can't determine anything on this bound, fall
3618 // through and conservatively solve for the other end point.
3619 we_know_nothing
= true;
3622 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
3623 if (we_know_nothing
)
3624 r
.set_varying (type
);
3626 create_possibly_reversed_range (r
, type
, minv
, maxv
);
3628 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3630 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3631 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3633 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3635 // For signed comparison, if CST2 doesn't have most significant bit
3636 // set, handle it similarly. If CST2 has MSB set, the maximum is
3637 // the same and minimum is INT_MIN.
3638 valv
= lhs
.upper_bound ();
3639 minv
= valv
& cst2v
;
3644 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3647 // If we couldn't determine anything on either bound, return
3649 if (we_know_nothing
)
3657 int_range
<2> upper_bits
;
3658 create_possibly_reversed_range (upper_bits
, type
, minv
, maxv
);
3659 r
.intersect (upper_bits
);
3663 operator_bitwise_and::op1_range (irange
&r
, tree type
,
3666 relation_trio
) const
3668 if (lhs
.undefined_p ())
3670 if (types_compatible_p (type
, boolean_type_node
))
3671 return op_logical_and
.op1_range (r
, type
, lhs
, op2
);
3674 for (unsigned i
= 0; i
< lhs
.num_pairs (); ++i
)
3676 int_range_max
chunk (lhs
.type (),
3677 lhs
.lower_bound (i
),
3678 lhs
.upper_bound (i
));
3680 simple_op1_range_solver (res
, type
, chunk
, op2
);
3683 if (r
.undefined_p ())
3684 set_nonzero_range_from_mask (r
, type
, lhs
);
3686 // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
3688 if (lhs
== op2
&& lhs
.singleton_p (mask
))
3690 r
.update_bitmask (irange_bitmask (mask
, ~mask
));
3694 // For 0 = op1 & MASK, op1 is ~MASK.
3695 if (lhs
.zero_p () && op2
.singleton_p ())
3697 wide_int nz
= wi::bit_not (op2
.get_nonzero_bits ());
3698 int_range
<2> tmp (type
);
3699 tmp
.set_nonzero_bits (nz
);
3706 operator_bitwise_and::op2_range (irange
&r
, tree type
,
3709 relation_trio
) const
3711 return operator_bitwise_and::op1_range (r
, type
, lhs
, op1
);
3715 class operator_logical_or
: public range_operator
3717 using range_operator::fold_range
;
3718 using range_operator::op1_range
;
3719 using range_operator::op2_range
;
3721 virtual bool fold_range (irange
&r
, tree type
,
3724 relation_trio rel
= TRIO_VARYING
) const;
3725 virtual bool op1_range (irange
&r
, tree type
,
3728 relation_trio rel
= TRIO_VARYING
) const;
3729 virtual bool op2_range (irange
&r
, tree type
,
3732 relation_trio rel
= TRIO_VARYING
) const;
3733 // Check compatibility of all operands.
3734 bool operand_check_p (tree t1
, tree t2
, tree t3
) const final override
3735 { return range_compatible_p (t1
, t2
) && range_compatible_p (t1
, t3
); }
3739 operator_logical_or::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3742 relation_trio
) const
3744 if (empty_range_varying (r
, type
, lh
, rh
))
3753 operator_logical_or::op1_range (irange
&r
, tree type
,
3755 const irange
&op2 ATTRIBUTE_UNUSED
,
3756 relation_trio
) const
3758 switch (get_bool_state (r
, lhs
, type
))
3761 // A false result means both sides of the OR must be false.
3762 r
= range_false (type
);
3765 // Any other result means only one side has to be true, the
3766 // other side can be anything. so we can't be sure of any result
3768 r
= range_true_and_false (type
);
3775 operator_logical_or::op2_range (irange
&r
, tree type
,
3778 relation_trio
) const
3780 return operator_logical_or::op1_range (r
, type
, lhs
, op1
);
3785 operator_bitwise_or::update_bitmask (irange
&r
, const irange
&lh
,
3786 const irange
&rh
) const
3788 update_known_bitmask (r
, BIT_IOR_EXPR
, lh
, rh
);
3792 operator_bitwise_or::wi_fold (irange
&r
, tree type
,
3793 const wide_int
&lh_lb
,
3794 const wide_int
&lh_ub
,
3795 const wide_int
&rh_lb
,
3796 const wide_int
&rh_ub
) const
3798 if (wi_optimize_and_or (r
, BIT_IOR_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3801 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3802 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3803 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3804 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3805 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3806 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3807 wide_int new_lb
= mustbe_nonzero_lh
| mustbe_nonzero_rh
;
3808 wide_int new_ub
= maybe_nonzero_lh
| maybe_nonzero_rh
;
3809 signop sign
= TYPE_SIGN (type
);
3810 // If the input ranges contain only positive values we can
3811 // truncate the minimum of the result range to the maximum
3812 // of the input range minima.
3813 if (wi::ge_p (lh_lb
, 0, sign
)
3814 && wi::ge_p (rh_lb
, 0, sign
))
3816 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3817 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3819 // If either input range contains only negative values
3820 // we can truncate the minimum of the result range to the
3821 // respective minimum range.
3822 if (wi::lt_p (lh_ub
, 0, sign
))
3823 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3824 if (wi::lt_p (rh_ub
, 0, sign
))
3825 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3826 // If the limits got swapped around, return a conservative range.
3827 if (wi::gt_p (new_lb
, new_ub
, sign
))
3829 // Make sure that nonzero|X is nonzero.
3830 if (wi::gt_p (lh_lb
, 0, sign
)
3831 || wi::gt_p (rh_lb
, 0, sign
)
3832 || wi::lt_p (lh_ub
, 0, sign
)
3833 || wi::lt_p (rh_ub
, 0, sign
))
3834 r
.set_nonzero (type
);
3835 else if (sign
== SIGNED
3836 && wi_optimize_signed_bitwise_op (r
, type
,
3841 r
.set_varying (type
);
3844 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3848 operator_bitwise_or::op1_range (irange
&r
, tree type
,
3851 relation_trio
) const
3853 if (lhs
.undefined_p ())
3855 // If this is really a logical wi_fold, call that.
3856 if (types_compatible_p (type
, boolean_type_node
))
3857 return op_logical_or
.op1_range (r
, type
, lhs
, op2
);
3864 r
.set_varying (type
);
3869 operator_bitwise_or::op2_range (irange
&r
, tree type
,
3872 relation_trio
) const
3874 return operator_bitwise_or::op1_range (r
, type
, lhs
, op1
);
3878 operator_bitwise_xor::update_bitmask (irange
&r
, const irange
&lh
,
3879 const irange
&rh
) const
3881 update_known_bitmask (r
, BIT_XOR_EXPR
, lh
, rh
);
3885 operator_bitwise_xor::wi_fold (irange
&r
, tree type
,
3886 const wide_int
&lh_lb
,
3887 const wide_int
&lh_ub
,
3888 const wide_int
&rh_lb
,
3889 const wide_int
&rh_ub
) const
3891 signop sign
= TYPE_SIGN (type
);
3892 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3893 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3894 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3895 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3896 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3897 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3899 wide_int result_zero_bits
= ((mustbe_nonzero_lh
& mustbe_nonzero_rh
)
3900 | ~(maybe_nonzero_lh
| maybe_nonzero_rh
));
3901 wide_int result_one_bits
3902 = (wi::bit_and_not (mustbe_nonzero_lh
, maybe_nonzero_rh
)
3903 | wi::bit_and_not (mustbe_nonzero_rh
, maybe_nonzero_lh
));
3904 wide_int new_ub
= ~result_zero_bits
;
3905 wide_int new_lb
= result_one_bits
;
3907 // If the range has all positive or all negative values, the result
3908 // is better than VARYING.
3909 if (wi::lt_p (new_lb
, 0, sign
) || wi::ge_p (new_ub
, 0, sign
))
3910 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3911 else if (sign
== SIGNED
3912 && wi_optimize_signed_bitwise_op (r
, type
,
3917 r
.set_varying (type
);
3919 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3920 if (wi::lt_p (lh_ub
, rh_lb
, sign
)
3921 || wi::lt_p (rh_ub
, lh_lb
, sign
)
3922 || wi::ne_p (result_one_bits
, 0))
3925 tmp
.set_nonzero (type
);
3931 operator_bitwise_xor::op1_op2_relation_effect (irange
&lhs_range
,
3935 relation_kind rel
) const
3937 if (rel
== VREL_VARYING
)
3940 int_range
<2> rel_range
;
3945 rel_range
.set_zero (type
);
3948 rel_range
.set_nonzero (type
);
3954 lhs_range
.intersect (rel_range
);
3959 operator_bitwise_xor::op1_range (irange
&r
, tree type
,
3962 relation_trio
) const
3964 if (lhs
.undefined_p () || lhs
.varying_p ())
3969 if (types_compatible_p (type
, boolean_type_node
))
3971 switch (get_bool_state (r
, lhs
, type
))
3974 if (op2
.varying_p ())
3975 r
.set_varying (type
);
3976 else if (op2
.zero_p ())
3977 r
= range_true (type
);
3978 // See get_bool_state for the rationale
3979 else if (op2
.undefined_p () || contains_zero_p (op2
))
3980 r
= range_true_and_false (type
);
3982 r
= range_false (type
);
3992 r
.set_varying (type
);
3997 operator_bitwise_xor::op2_range (irange
&r
, tree type
,
4000 relation_trio
) const
4002 return operator_bitwise_xor::op1_range (r
, type
, lhs
, op1
);
4005 class operator_trunc_mod
: public range_operator
4007 using range_operator::op1_range
;
4008 using range_operator::op2_range
;
4009 using range_operator::update_bitmask
;
4011 virtual void wi_fold (irange
&r
, tree type
,
4012 const wide_int
&lh_lb
,
4013 const wide_int
&lh_ub
,
4014 const wide_int
&rh_lb
,
4015 const wide_int
&rh_ub
) const;
4016 virtual bool op1_range (irange
&r
, tree type
,
4019 relation_trio
) const;
4020 virtual bool op2_range (irange
&r
, tree type
,
4023 relation_trio
) const;
4024 void update_bitmask (irange
&r
, const irange
&lh
, const irange
&rh
) const
4025 { update_known_bitmask (r
, TRUNC_MOD_EXPR
, lh
, rh
); }
4029 operator_trunc_mod::wi_fold (irange
&r
, tree type
,
4030 const wide_int
&lh_lb
,
4031 const wide_int
&lh_ub
,
4032 const wide_int
&rh_lb
,
4033 const wide_int
&rh_ub
) const
4035 wide_int new_lb
, new_ub
, tmp
;
4036 signop sign
= TYPE_SIGN (type
);
4037 unsigned prec
= TYPE_PRECISION (type
);
4039 // Mod 0 is undefined.
4040 if (wi_zero_p (type
, rh_lb
, rh_ub
))
4046 // Check for constant and try to fold.
4047 if (lh_lb
== lh_ub
&& rh_lb
== rh_ub
)
4049 wi::overflow_type ov
= wi::OVF_NONE
;
4050 tmp
= wi::mod_trunc (lh_lb
, rh_lb
, sign
, &ov
);
4051 if (ov
== wi::OVF_NONE
)
4053 r
= int_range
<2> (type
, tmp
, tmp
);
4058 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
4063 new_ub
= wi::smax (new_ub
, tmp
);
4066 if (sign
== UNSIGNED
)
4067 new_lb
= wi::zero (prec
);
4072 if (wi::gts_p (tmp
, 0))
4073 tmp
= wi::zero (prec
);
4074 new_lb
= wi::smax (new_lb
, tmp
);
4077 if (sign
== SIGNED
&& wi::neg_p (tmp
))
4078 tmp
= wi::zero (prec
);
4079 new_ub
= wi::min (new_ub
, tmp
, sign
);
4081 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
4085 operator_trunc_mod::op1_range (irange
&r
, tree type
,
4088 relation_trio
) const
4090 if (lhs
.undefined_p ())
4093 signop sign
= TYPE_SIGN (type
);
4094 unsigned prec
= TYPE_PRECISION (type
);
4095 // (a % b) >= x && x > 0 , then a >= x.
4096 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
4098 r
= value_range (type
, lhs
.lower_bound (), wi::max_value (prec
, sign
));
4101 // (a % b) <= x && x < 0 , then a <= x.
4102 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
4104 r
= value_range (type
, wi::min_value (prec
, sign
), lhs
.upper_bound ());
4111 operator_trunc_mod::op2_range (irange
&r
, tree type
,
4114 relation_trio
) const
4116 if (lhs
.undefined_p ())
4119 signop sign
= TYPE_SIGN (type
);
4120 unsigned prec
= TYPE_PRECISION (type
);
4121 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
4122 // or b > x for unsigned.
4123 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
4126 r
= value_range (type
, wi::neg (lhs
.lower_bound ()),
4127 lhs
.lower_bound (), VR_ANTI_RANGE
);
4128 else if (wi::lt_p (lhs
.lower_bound (), wi::max_value (prec
, sign
),
4130 r
= value_range (type
, lhs
.lower_bound () + 1,
4131 wi::max_value (prec
, sign
));
4136 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
4137 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
4139 if (wi::gt_p (lhs
.upper_bound (), wi::min_value (prec
, sign
), sign
))
4140 r
= value_range (type
, lhs
.upper_bound (),
4141 wi::neg (lhs
.upper_bound ()), VR_ANTI_RANGE
);
4150 class operator_logical_not
: public range_operator
4152 using range_operator::fold_range
;
4153 using range_operator::op1_range
;
4155 virtual bool fold_range (irange
&r
, tree type
,
4158 relation_trio rel
= TRIO_VARYING
) const;
4159 virtual bool op1_range (irange
&r
, tree type
,
4162 relation_trio rel
= TRIO_VARYING
) const;
4163 // Check compatibility of LHS and op1.
4164 bool operand_check_p (tree t1
, tree t2
, tree
) const final override
4165 { return range_compatible_p (t1
, t2
); }
4168 // Folding a logical NOT, oddly enough, involves doing nothing on the
4169 // forward pass through. During the initial walk backwards, the
4170 // logical NOT reversed the desired outcome on the way back, so on the
4171 // way forward all we do is pass the range forward.
4176 // to determine the TRUE branch, walking backward
4177 // if (b_3) if ([1,1])
4178 // b_3 = !b_2 [1,1] = ![0,0]
4179 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
4180 // which is the result we are looking for.. so.. pass it through.
4183 operator_logical_not::fold_range (irange
&r
, tree type
,
4185 const irange
&rh ATTRIBUTE_UNUSED
,
4186 relation_trio
) const
4188 if (empty_range_varying (r
, type
, lh
, rh
))
4192 if (!lh
.varying_p () && !lh
.undefined_p ())
4199 operator_logical_not::op1_range (irange
&r
,
4203 relation_trio
) const
4205 // Logical NOT is involutary...do it again.
4206 return fold_range (r
, type
, lhs
, op2
);
4210 operator_bitwise_not::fold_range (irange
&r
, tree type
,
4213 relation_trio
) const
4215 if (empty_range_varying (r
, type
, lh
, rh
))
4218 if (types_compatible_p (type
, boolean_type_node
))
4219 return op_logical_not
.fold_range (r
, type
, lh
, rh
);
4221 // ~X is simply -1 - X.
4222 int_range
<1> minusone (type
, wi::minus_one (TYPE_PRECISION (type
)),
4223 wi::minus_one (TYPE_PRECISION (type
)));
4224 return range_op_handler (MINUS_EXPR
).fold_range (r
, type
, minusone
, lh
);
4228 operator_bitwise_not::op1_range (irange
&r
, tree type
,
4231 relation_trio
) const
4233 if (lhs
.undefined_p ())
4235 if (types_compatible_p (type
, boolean_type_node
))
4236 return op_logical_not
.op1_range (r
, type
, lhs
, op2
);
4238 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
4239 return fold_range (r
, type
, lhs
, op2
);
4243 operator_bitwise_not::update_bitmask (irange
&r
, const irange
&lh
,
4244 const irange
&rh
) const
4246 update_known_bitmask (r
, BIT_NOT_EXPR
, lh
, rh
);
4251 operator_cst::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4253 const irange
&rh ATTRIBUTE_UNUSED
,
4254 relation_trio
) const
4261 // Determine if there is a relationship between LHS and OP1.
4264 operator_identity::lhs_op1_relation (const irange
&lhs
,
4265 const irange
&op1 ATTRIBUTE_UNUSED
,
4266 const irange
&op2 ATTRIBUTE_UNUSED
,
4267 relation_kind
) const
4269 if (lhs
.undefined_p ())
4270 return VREL_VARYING
;
4271 // Simply a copy, so they are equivalent.
4276 operator_identity::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4278 const irange
&rh ATTRIBUTE_UNUSED
,
4279 relation_trio
) const
4286 operator_identity::op1_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4288 const irange
&op2 ATTRIBUTE_UNUSED
,
4289 relation_trio
) const
4296 class operator_unknown
: public range_operator
4298 using range_operator::fold_range
;
4300 virtual bool fold_range (irange
&r
, tree type
,
4303 relation_trio rel
= TRIO_VARYING
) const;
4307 operator_unknown::fold_range (irange
&r
, tree type
,
4308 const irange
&lh ATTRIBUTE_UNUSED
,
4309 const irange
&rh ATTRIBUTE_UNUSED
,
4310 relation_trio
) const
4312 r
.set_varying (type
);
4318 operator_abs::wi_fold (irange
&r
, tree type
,
4319 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4320 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4321 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4324 signop sign
= TYPE_SIGN (type
);
4325 unsigned prec
= TYPE_PRECISION (type
);
4327 // Pass through LH for the easy cases.
4328 if (sign
== UNSIGNED
|| wi::ge_p (lh_lb
, 0, sign
))
4330 r
= int_range
<1> (type
, lh_lb
, lh_ub
);
4334 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4336 wide_int min_value
= wi::min_value (prec
, sign
);
4337 wide_int max_value
= wi::max_value (prec
, sign
);
4338 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lh_lb
, min_value
))
4340 r
.set_varying (type
);
4344 // ABS_EXPR may flip the range around, if the original range
4345 // included negative values.
4346 if (wi::eq_p (lh_lb
, min_value
))
4348 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4349 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4350 if (wi::eq_p (lh_ub
, min_value
))
4352 r
= int_range
<1> (type
, min_value
, min_value
);
4358 min
= wi::abs (lh_lb
);
4360 if (wi::eq_p (lh_ub
, min_value
))
4363 max
= wi::abs (lh_ub
);
4365 // If the range contains zero then we know that the minimum value in the
4366 // range will be zero.
4367 if (wi::le_p (lh_lb
, 0, sign
) && wi::ge_p (lh_ub
, 0, sign
))
4369 if (wi::gt_p (min
, max
, sign
))
4371 min
= wi::zero (prec
);
4375 // If the range was reversed, swap MIN and MAX.
4376 if (wi::gt_p (min
, max
, sign
))
4377 std::swap (min
, max
);
4380 // If the new range has its limits swapped around (MIN > MAX), then
4381 // the operation caused one of them to wrap around. The only thing
4382 // we know is that the result is positive.
4383 if (wi::gt_p (min
, max
, sign
))
4385 min
= wi::zero (prec
);
4388 r
= int_range
<1> (type
, min
, max
);
4392 operator_abs::op1_range (irange
&r
, tree type
,
4395 relation_trio
) const
4397 if (empty_range_varying (r
, type
, lhs
, op2
))
4399 if (TYPE_UNSIGNED (type
))
4404 // Start with the positives because negatives are an impossible result.
4405 int_range_max positives
= range_positives (type
);
4406 positives
.intersect (lhs
);
4408 // Then add the negative of each pair:
4409 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4410 for (unsigned i
= 0; i
< positives
.num_pairs (); ++i
)
4411 r
.union_ (int_range
<1> (type
,
4412 -positives
.upper_bound (i
),
4413 -positives
.lower_bound (i
)));
4414 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4415 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4416 wide_int min_value
= wi::min_value (TYPE_PRECISION (type
), TYPE_SIGN (type
));
4417 wide_int lb
= lhs
.lower_bound ();
4418 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lb
, min_value
))
4419 r
.union_ (int_range
<2> (type
, lb
, lb
));
4424 operator_abs::update_bitmask (irange
&r
, const irange
&lh
,
4425 const irange
&rh
) const
4427 update_known_bitmask (r
, ABS_EXPR
, lh
, rh
);
4430 class operator_absu
: public range_operator
4432 using range_operator::update_bitmask
;
4434 virtual void wi_fold (irange
&r
, tree type
,
4435 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4436 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4437 virtual void update_bitmask (irange
&r
, const irange
&lh
,
4438 const irange
&rh
) const final override
;
4442 operator_absu::wi_fold (irange
&r
, tree type
,
4443 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4444 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4445 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4447 wide_int new_lb
, new_ub
;
4449 // Pass through VR0 the easy cases.
4450 if (wi::ges_p (lh_lb
, 0))
4457 new_lb
= wi::abs (lh_lb
);
4458 new_ub
= wi::abs (lh_ub
);
4460 // If the range contains zero then we know that the minimum
4461 // value in the range will be zero.
4462 if (wi::ges_p (lh_ub
, 0))
4464 if (wi::gtu_p (new_lb
, new_ub
))
4466 new_lb
= wi::zero (TYPE_PRECISION (type
));
4469 std::swap (new_lb
, new_ub
);
4472 gcc_checking_assert (TYPE_UNSIGNED (type
));
4473 r
= int_range
<1> (type
, new_lb
, new_ub
);
4477 operator_absu::update_bitmask (irange
&r
, const irange
&lh
,
4478 const irange
&rh
) const
4480 update_known_bitmask (r
, ABSU_EXPR
, lh
, rh
);
4485 operator_negate::fold_range (irange
&r
, tree type
,
4488 relation_trio
) const
4490 if (empty_range_varying (r
, type
, lh
, rh
))
4493 // -X is simply 0 - X.
4495 zero
.set_zero (type
);
4496 return range_op_handler (MINUS_EXPR
).fold_range (r
, type
, zero
, lh
);
4500 operator_negate::op1_range (irange
&r
, tree type
,
4503 relation_trio
) const
4505 // NEGATE is involutory.
4506 return fold_range (r
, type
, lhs
, op2
);
4511 operator_addr_expr::fold_range (irange
&r
, tree type
,
4514 relation_trio
) const
4516 if (empty_range_varying (r
, type
, lh
, rh
))
4519 // Return a non-null pointer of the LHS type (passed in op2).
4522 else if (lh
.undefined_p () || contains_zero_p (lh
))
4523 r
.set_varying (type
);
4525 r
.set_nonzero (type
);
4530 operator_addr_expr::op1_range (irange
&r
, tree type
,
4533 relation_trio
) const
4535 if (empty_range_varying (r
, type
, lhs
, op2
))
4538 // Return a non-null pointer of the LHS type (passed in op2), but only
4539 // if we cant overflow, eitherwise a no-zero offset could wrap to zero.
4541 if (!lhs
.undefined_p () && !contains_zero_p (lhs
) && TYPE_OVERFLOW_UNDEFINED (type
))
4542 r
.set_nonzero (type
);
4544 r
.set_varying (type
);
4548 // Initialize any integral operators to the primary table
4551 range_op_table::initialize_integral_ops ()
4553 set (TRUNC_DIV_EXPR
, op_trunc_div
);
4554 set (FLOOR_DIV_EXPR
, op_floor_div
);
4555 set (ROUND_DIV_EXPR
, op_round_div
);
4556 set (CEIL_DIV_EXPR
, op_ceil_div
);
4557 set (EXACT_DIV_EXPR
, op_exact_div
);
4558 set (LSHIFT_EXPR
, op_lshift
);
4559 set (RSHIFT_EXPR
, op_rshift
);
4560 set (TRUTH_AND_EXPR
, op_logical_and
);
4561 set (TRUTH_OR_EXPR
, op_logical_or
);
4562 set (TRUNC_MOD_EXPR
, op_trunc_mod
);
4563 set (TRUTH_NOT_EXPR
, op_logical_not
);
4564 set (IMAGPART_EXPR
, op_unknown
);
4565 set (REALPART_EXPR
, op_unknown
);
4566 set (ABSU_EXPR
, op_absu
);
4567 set (OP_WIDEN_MULT_SIGNED
, op_widen_mult_signed
);
4568 set (OP_WIDEN_MULT_UNSIGNED
, op_widen_mult_unsigned
);
4569 set (OP_WIDEN_PLUS_SIGNED
, op_widen_plus_signed
);
4570 set (OP_WIDEN_PLUS_UNSIGNED
, op_widen_plus_unsigned
);
4575 operator_plus::overflow_free_p (const irange
&lh
, const irange
&rh
,
4576 relation_trio
) const
4578 if (lh
.undefined_p () || rh
.undefined_p ())
4581 tree type
= lh
.type ();
4582 if (TYPE_OVERFLOW_UNDEFINED (type
))
4585 wi::overflow_type ovf
;
4586 signop sgn
= TYPE_SIGN (type
);
4587 wide_int wmax0
= lh
.upper_bound ();
4588 wide_int wmax1
= rh
.upper_bound ();
4589 wi::add (wmax0
, wmax1
, sgn
, &ovf
);
4590 if (ovf
!= wi::OVF_NONE
)
4593 if (TYPE_UNSIGNED (type
))
4596 wide_int wmin0
= lh
.lower_bound ();
4597 wide_int wmin1
= rh
.lower_bound ();
4598 wi::add (wmin0
, wmin1
, sgn
, &ovf
);
4599 if (ovf
!= wi::OVF_NONE
)
4606 operator_minus::overflow_free_p (const irange
&lh
, const irange
&rh
,
4607 relation_trio
) const
4609 if (lh
.undefined_p () || rh
.undefined_p ())
4612 tree type
= lh
.type ();
4613 if (TYPE_OVERFLOW_UNDEFINED (type
))
4616 wi::overflow_type ovf
;
4617 signop sgn
= TYPE_SIGN (type
);
4618 wide_int wmin0
= lh
.lower_bound ();
4619 wide_int wmax1
= rh
.upper_bound ();
4620 wi::sub (wmin0
, wmax1
, sgn
, &ovf
);
4621 if (ovf
!= wi::OVF_NONE
)
4624 if (TYPE_UNSIGNED (type
))
4627 wide_int wmax0
= lh
.upper_bound ();
4628 wide_int wmin1
= rh
.lower_bound ();
4629 wi::sub (wmax0
, wmin1
, sgn
, &ovf
);
4630 if (ovf
!= wi::OVF_NONE
)
4637 operator_mult::overflow_free_p (const irange
&lh
, const irange
&rh
,
4638 relation_trio
) const
4640 if (lh
.undefined_p () || rh
.undefined_p ())
4643 tree type
= lh
.type ();
4644 if (TYPE_OVERFLOW_UNDEFINED (type
))
4647 wi::overflow_type ovf
;
4648 signop sgn
= TYPE_SIGN (type
);
4649 wide_int wmax0
= lh
.upper_bound ();
4650 wide_int wmax1
= rh
.upper_bound ();
4651 wi::mul (wmax0
, wmax1
, sgn
, &ovf
);
4652 if (ovf
!= wi::OVF_NONE
)
4655 if (TYPE_UNSIGNED (type
))
4658 wide_int wmin0
= lh
.lower_bound ();
4659 wide_int wmin1
= rh
.lower_bound ();
4660 wi::mul (wmin0
, wmin1
, sgn
, &ovf
);
4661 if (ovf
!= wi::OVF_NONE
)
4664 wi::mul (wmin0
, wmax1
, sgn
, &ovf
);
4665 if (ovf
!= wi::OVF_NONE
)
4668 wi::mul (wmax0
, wmin1
, sgn
, &ovf
);
4669 if (ovf
!= wi::OVF_NONE
)
4676 #include "selftest.h"
4680 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4681 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4682 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4683 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4684 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4685 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4688 range_op_cast_tests ()
4690 int_range
<2> r0
, r1
, r2
, rold
;
4691 r0
.set_varying (integer_type_node
);
4692 wide_int maxint
= r0
.upper_bound ();
4694 // If a range is in any way outside of the range for the converted
4695 // to range, default to the range for the new type.
4696 r0
.set_varying (short_integer_type_node
);
4697 wide_int minshort
= r0
.lower_bound ();
4698 wide_int maxshort
= r0
.upper_bound ();
4699 if (TYPE_PRECISION (integer_type_node
)
4700 > TYPE_PRECISION (short_integer_type_node
))
4702 r1
= int_range
<1> (integer_type_node
,
4703 wi::zero (TYPE_PRECISION (integer_type_node
)),
4705 range_cast (r1
, short_integer_type_node
);
4706 ASSERT_TRUE (r1
.lower_bound () == minshort
4707 && r1
.upper_bound() == maxshort
);
4710 // (unsigned char)[-5,-1] => [251,255].
4711 r0
= rold
= int_range
<1> (signed_char_type_node
, SCHAR (-5), SCHAR (-1));
4712 range_cast (r0
, unsigned_char_type_node
);
4713 ASSERT_TRUE (r0
== int_range
<1> (unsigned_char_type_node
,
4714 UCHAR (251), UCHAR (255)));
4715 range_cast (r0
, signed_char_type_node
);
4716 ASSERT_TRUE (r0
== rold
);
4718 // (signed char)[15, 150] => [-128,-106][15,127].
4719 r0
= rold
= int_range
<1> (unsigned_char_type_node
, UCHAR (15), UCHAR (150));
4720 range_cast (r0
, signed_char_type_node
);
4721 r1
= int_range
<1> (signed_char_type_node
, SCHAR (15), SCHAR (127));
4722 r2
= int_range
<1> (signed_char_type_node
, SCHAR (-128), SCHAR (-106));
4724 ASSERT_TRUE (r1
== r0
);
4725 range_cast (r0
, unsigned_char_type_node
);
4726 ASSERT_TRUE (r0
== rold
);
4728 // (unsigned char)[-5, 5] => [0,5][251,255].
4729 r0
= rold
= int_range
<1> (signed_char_type_node
, SCHAR (-5), SCHAR (5));
4730 range_cast (r0
, unsigned_char_type_node
);
4731 r1
= int_range
<1> (unsigned_char_type_node
, UCHAR (251), UCHAR (255));
4732 r2
= int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (5));
4734 ASSERT_TRUE (r0
== r1
);
4735 range_cast (r0
, signed_char_type_node
);
4736 ASSERT_TRUE (r0
== rold
);
4738 // (unsigned char)[-5,5] => [0,5][251,255].
4739 r0
= int_range
<1> (integer_type_node
, INT (-5), INT (5));
4740 range_cast (r0
, unsigned_char_type_node
);
4741 r1
= int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (5));
4742 r1
.union_ (int_range
<1> (unsigned_char_type_node
, UCHAR (251), UCHAR (255)));
4743 ASSERT_TRUE (r0
== r1
);
4745 // (unsigned char)[5U,1974U] => [0,255].
4746 r0
= int_range
<1> (unsigned_type_node
, UINT (5), UINT (1974));
4747 range_cast (r0
, unsigned_char_type_node
);
4748 ASSERT_TRUE (r0
== int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (255)));
4749 range_cast (r0
, integer_type_node
);
4750 // Going to a wider range should not sign extend.
4751 ASSERT_TRUE (r0
== int_range
<1> (integer_type_node
, INT (0), INT (255)));
4753 // (unsigned char)[-350,15] => [0,255].
4754 r0
= int_range
<1> (integer_type_node
, INT (-350), INT (15));
4755 range_cast (r0
, unsigned_char_type_node
);
4756 ASSERT_TRUE (r0
== (int_range
<1>
4757 (unsigned_char_type_node
,
4758 min_limit (unsigned_char_type_node
),
4759 max_limit (unsigned_char_type_node
))));
4761 // Casting [-120,20] from signed char to unsigned short.
4762 // => [0, 20][0xff88, 0xffff].
4763 r0
= int_range
<1> (signed_char_type_node
, SCHAR (-120), SCHAR (20));
4764 range_cast (r0
, short_unsigned_type_node
);
4765 r1
= int_range
<1> (short_unsigned_type_node
, UINT16 (0), UINT16 (20));
4766 r2
= int_range
<1> (short_unsigned_type_node
,
4767 UINT16 (0xff88), UINT16 (0xffff));
4769 ASSERT_TRUE (r0
== r1
);
4770 // A truncating cast back to signed char will work because [-120, 20]
4771 // is representable in signed char.
4772 range_cast (r0
, signed_char_type_node
);
4773 ASSERT_TRUE (r0
== int_range
<1> (signed_char_type_node
,
4774 SCHAR (-120), SCHAR (20)));
4776 // unsigned char -> signed short
4777 // (signed short)[(unsigned char)25, (unsigned char)250]
4778 // => [(signed short)25, (signed short)250]
4779 r0
= rold
= int_range
<1> (unsigned_char_type_node
, UCHAR (25), UCHAR (250));
4780 range_cast (r0
, short_integer_type_node
);
4781 r1
= int_range
<1> (short_integer_type_node
, INT16 (25), INT16 (250));
4782 ASSERT_TRUE (r0
== r1
);
4783 range_cast (r0
, unsigned_char_type_node
);
4784 ASSERT_TRUE (r0
== rold
);
4786 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4787 r0
= int_range
<1> (long_long_integer_type_node
,
4788 min_limit (long_long_integer_type_node
),
4789 max_limit (long_long_integer_type_node
));
4790 range_cast (r0
, short_unsigned_type_node
);
4791 r1
= int_range
<1> (short_unsigned_type_node
,
4792 min_limit (short_unsigned_type_node
),
4793 max_limit (short_unsigned_type_node
));
4794 ASSERT_TRUE (r0
== r1
);
4796 // Casting NONZERO to a narrower type will wrap/overflow so
4797 // it's just the entire range for the narrower type.
4799 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4800 // is outside of the range of a smaller range, return the full
4802 if (TYPE_PRECISION (integer_type_node
)
4803 > TYPE_PRECISION (short_integer_type_node
))
4805 r0
.set_nonzero (integer_type_node
);
4806 range_cast (r0
, short_integer_type_node
);
4807 r1
= int_range
<1> (short_integer_type_node
,
4808 min_limit (short_integer_type_node
),
4809 max_limit (short_integer_type_node
));
4810 ASSERT_TRUE (r0
== r1
);
4813 // Casting NONZERO from a narrower signed to a wider signed.
4815 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4816 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4817 r0
.set_nonzero (short_integer_type_node
);
4818 range_cast (r0
, integer_type_node
);
4819 r1
= int_range
<1> (integer_type_node
, INT (-32768), INT (-1));
4820 r2
= int_range
<1> (integer_type_node
, INT (1), INT (32767));
4822 ASSERT_TRUE (r0
== r1
);
4826 range_op_lshift_tests ()
4828 // Test that 0x808.... & 0x8.... still contains 0x8....
4829 // for a large set of numbers.
4832 tree big_type
= long_long_unsigned_type_node
;
4833 unsigned big_prec
= TYPE_PRECISION (big_type
);
4834 // big_num = 0x808,0000,0000,0000
4835 wide_int big_num
= wi::lshift (wi::uhwi (0x808, big_prec
),
4836 wi::uhwi (48, big_prec
));
4837 op_bitwise_and
.fold_range (res
, big_type
,
4838 int_range
<1> (big_type
),
4839 int_range
<1> (big_type
, big_num
, big_num
));
4840 // val = 0x8,0000,0000,0000
4841 wide_int val
= wi::lshift (wi::uhwi (8, big_prec
),
4842 wi::uhwi (48, big_prec
));
4843 ASSERT_TRUE (res
.contains_p (val
));
4846 if (TYPE_PRECISION (unsigned_type_node
) > 31)
4848 // unsigned VARYING = op1 << 1 should be VARYING.
4849 int_range
<2> lhs (unsigned_type_node
);
4850 int_range
<2> shift (unsigned_type_node
, INT (1), INT (1));
4852 op_lshift
.op1_range (op1
, unsigned_type_node
, lhs
, shift
);
4853 ASSERT_TRUE (op1
.varying_p ());
4855 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4856 int_range
<2> zero (unsigned_type_node
, UINT (0), UINT (0));
4857 op_lshift
.op1_range (op1
, unsigned_type_node
, zero
, shift
);
4858 ASSERT_TRUE (op1
.num_pairs () == 2);
4859 // Remove the [0,0] range.
4860 op1
.intersect (zero
);
4861 ASSERT_TRUE (op1
.num_pairs () == 1);
4862 // op1 << 1 should be [0x8000,0x8000] << 1,
4863 // which should result in [0,0].
4864 int_range_max result
;
4865 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4866 ASSERT_TRUE (result
== zero
);
4868 // signed VARYING = op1 << 1 should be VARYING.
4869 if (TYPE_PRECISION (integer_type_node
) > 31)
4871 // unsigned VARYING = op1 << 1 should be VARYING.
4872 int_range
<2> lhs (integer_type_node
);
4873 int_range
<2> shift (integer_type_node
, INT (1), INT (1));
4875 op_lshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4876 ASSERT_TRUE (op1
.varying_p ());
4878 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4879 int_range
<2> zero (integer_type_node
, INT (0), INT (0));
4880 op_lshift
.op1_range (op1
, integer_type_node
, zero
, shift
);
4881 ASSERT_TRUE (op1
.num_pairs () == 2);
4882 // Remove the [0,0] range.
4883 op1
.intersect (zero
);
4884 ASSERT_TRUE (op1
.num_pairs () == 1);
4885 // op1 << 1 should be [0x8000,0x8000] << 1,
4886 // which should result in [0,0].
4887 int_range_max result
;
4888 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4889 ASSERT_TRUE (result
== zero
);
4894 range_op_rshift_tests ()
4896 // unsigned: [3, MAX] = OP1 >> 1
4898 int_range_max
lhs (unsigned_type_node
,
4899 UINT (3), max_limit (unsigned_type_node
));
4900 int_range_max
one (unsigned_type_node
,
4901 wi::one (TYPE_PRECISION (unsigned_type_node
)),
4902 wi::one (TYPE_PRECISION (unsigned_type_node
)));
4904 op_rshift
.op1_range (op1
, unsigned_type_node
, lhs
, one
);
4905 ASSERT_FALSE (op1
.contains_p (UINT (3)));
4908 // signed: [3, MAX] = OP1 >> 1
4910 int_range_max
lhs (integer_type_node
,
4911 INT (3), max_limit (integer_type_node
));
4912 int_range_max
one (integer_type_node
, INT (1), INT (1));
4914 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4915 ASSERT_FALSE (op1
.contains_p (INT (-2)));
4918 // This is impossible, so OP1 should be [].
4919 // signed: [MIN, MIN] = OP1 >> 1
4921 int_range_max
lhs (integer_type_node
,
4922 min_limit (integer_type_node
),
4923 min_limit (integer_type_node
));
4924 int_range_max
one (integer_type_node
, INT (1), INT (1));
4926 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4927 ASSERT_TRUE (op1
.undefined_p ());
4930 // signed: ~[-1] = OP1 >> 31
4931 if (TYPE_PRECISION (integer_type_node
) > 31)
4933 int_range_max
lhs (integer_type_node
, INT (-1), INT (-1), VR_ANTI_RANGE
);
4934 int_range_max
shift (integer_type_node
, INT (31), INT (31));
4936 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4937 int_range_max negatives
= range_negatives (integer_type_node
);
4938 negatives
.intersect (op1
);
4939 ASSERT_TRUE (negatives
.undefined_p ());
4944 range_op_bitwise_and_tests ()
4947 wide_int min
= min_limit (integer_type_node
);
4948 wide_int max
= max_limit (integer_type_node
);
4949 wide_int tiny
= wi::add (min
, wi::one (TYPE_PRECISION (integer_type_node
)));
4950 int_range_max
i1 (integer_type_node
, tiny
, max
);
4951 int_range_max
i2 (integer_type_node
, INT (255), INT (255));
4953 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4954 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4955 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4957 // VARYING = OP1 & 255: OP1 is VARYING
4958 i1
= int_range
<1> (integer_type_node
);
4959 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4960 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4962 // For 0 = x & MASK, x is ~MASK.
4964 int_range
<2> zero (integer_type_node
, INT (0), INT (0));
4965 int_range
<2> mask
= int_range
<2> (integer_type_node
, INT (7), INT (7));
4966 op_bitwise_and
.op1_range (res
, integer_type_node
, zero
, mask
);
4967 wide_int inv
= wi::shwi (~7U, TYPE_PRECISION (integer_type_node
));
4968 ASSERT_TRUE (res
.get_nonzero_bits () == inv
);
4971 // (NONZERO | X) is nonzero.
4972 i1
.set_nonzero (integer_type_node
);
4973 i2
.set_varying (integer_type_node
);
4974 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4975 ASSERT_TRUE (res
.nonzero_p ());
4977 // (NEGATIVE | X) is nonzero.
4978 i1
= int_range
<1> (integer_type_node
, INT (-5), INT (-3));
4979 i2
.set_varying (integer_type_node
);
4980 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4981 ASSERT_FALSE (res
.contains_p (INT (0)));
4985 range_relational_tests ()
4987 int_range
<2> lhs (unsigned_char_type_node
);
4988 int_range
<2> op1 (unsigned_char_type_node
, UCHAR (8), UCHAR (10));
4989 int_range
<2> op2 (unsigned_char_type_node
, UCHAR (20), UCHAR (20));
4991 // Never wrapping additions mean LHS > OP1.
4992 relation_kind code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4993 ASSERT_TRUE (code
== VREL_GT
);
4995 // Most wrapping additions mean nothing...
4996 op1
= int_range
<2> (unsigned_char_type_node
, UCHAR (8), UCHAR (10));
4997 op2
= int_range
<2> (unsigned_char_type_node
, UCHAR (0), UCHAR (255));
4998 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4999 ASSERT_TRUE (code
== VREL_VARYING
);
5001 // However, always wrapping additions mean LHS < OP1.
5002 op1
= int_range
<2> (unsigned_char_type_node
, UCHAR (1), UCHAR (255));
5003 op2
= int_range
<2> (unsigned_char_type_node
, UCHAR (255), UCHAR (255));
5004 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
5005 ASSERT_TRUE (code
== VREL_LT
);
5011 range_op_rshift_tests ();
5012 range_op_lshift_tests ();
5013 range_op_bitwise_and_tests ();
5014 range_op_cast_tests ();
5015 range_relational_tests ();
5017 extern void range_op_float_tests ();
5018 range_op_float_tests ();
5021 } // namespace selftest
5023 #endif // CHECKING_P