1 /* Code for range operators.
2 Copyright (C) 2017-2024 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
26 #include "insn-codes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
44 #include "gimple-walk.h"
47 #include "value-relation.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
52 // Instantiate the operators which apply to multiple types here.
54 operator_equal op_equal
;
55 operator_not_equal op_not_equal
;
60 operator_identity op_ident
;
62 operator_cast op_cast
;
63 operator_plus op_plus
;
65 operator_minus op_minus
;
66 operator_negate op_negate
;
67 operator_mult op_mult
;
68 operator_addr_expr op_addr
;
69 operator_bitwise_not op_bitwise_not
;
70 operator_bitwise_xor op_bitwise_xor
;
71 operator_bitwise_and op_bitwise_and
;
72 operator_bitwise_or op_bitwise_or
;
76 // Instantaite a range operator table.
77 range_op_table operator_table
;
79 // Invoke the initialization routines for each class of range.
81 range_op_table::range_op_table ()
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
87 set (EQ_EXPR
, op_equal
);
88 set (NE_EXPR
, op_not_equal
);
93 set (SSA_NAME
, op_ident
);
94 set (PAREN_EXPR
, op_ident
);
95 set (OBJ_TYPE_REF
, op_ident
);
96 set (REAL_CST
, op_cst
);
97 set (INTEGER_CST
, op_cst
);
98 set (NOP_EXPR
, op_cast
);
99 set (CONVERT_EXPR
, op_cast
);
100 set (PLUS_EXPR
, op_plus
);
101 set (ABS_EXPR
, op_abs
);
102 set (MINUS_EXPR
, op_minus
);
103 set (NEGATE_EXPR
, op_negate
);
104 set (MULT_EXPR
, op_mult
);
105 set (ADDR_EXPR
, op_addr
);
106 set (BIT_NOT_EXPR
, op_bitwise_not
);
107 set (BIT_XOR_EXPR
, op_bitwise_xor
);
108 set (BIT_AND_EXPR
, op_bitwise_and
);
109 set (BIT_IOR_EXPR
, op_bitwise_or
);
110 set (MIN_EXPR
, op_min
);
111 set (MAX_EXPR
, op_max
);
114 // Instantiate a default range operator for opcodes with no entry.
116 range_operator default_operator
;
118 // Create a default range_op_handler.
120 range_op_handler::range_op_handler ()
122 m_operator
= &default_operator
;
125 // Create a range_op_handler for CODE. Use a default operatoer if CODE
126 // does not have an entry.
128 range_op_handler::range_op_handler (unsigned code
)
130 m_operator
= operator_table
[code
];
132 m_operator
= &default_operator
;
135 // Return TRUE if this handler has a non-default operator.
137 range_op_handler::operator bool () const
139 return m_operator
!= &default_operator
;
142 // Return a pointer to the range operator assocaited with this handler.
143 // If it is a default operator, return NULL.
144 // This is the equivalent of indexing the range table.
147 range_op_handler::range_op () const
149 if (m_operator
!= &default_operator
)
// Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
// This is used to produce a unique value for each dispatch pattern.  Shift
// values are based on the size of the m_discriminator field in value_range.h.

constexpr unsigned
dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
{
  return ((lhs << 8) + (op1 << 4) + (op2));
}
164 // These are the supported dispatch patterns. These map to the parameter list
165 // of the routines in range_operator. Note the last 3 characters are
166 // shorthand for the LHS, OP1, and OP2 range discriminator class.
168 const unsigned RO_III
= dispatch_trio (VR_IRANGE
, VR_IRANGE
, VR_IRANGE
);
169 const unsigned RO_IFI
= dispatch_trio (VR_IRANGE
, VR_FRANGE
, VR_IRANGE
);
170 const unsigned RO_IFF
= dispatch_trio (VR_IRANGE
, VR_FRANGE
, VR_FRANGE
);
171 const unsigned RO_FFF
= dispatch_trio (VR_FRANGE
, VR_FRANGE
, VR_FRANGE
);
172 const unsigned RO_FIF
= dispatch_trio (VR_FRANGE
, VR_IRANGE
, VR_FRANGE
);
173 const unsigned RO_FII
= dispatch_trio (VR_FRANGE
, VR_IRANGE
, VR_IRANGE
);
174 const unsigned RO_PPP
= dispatch_trio (VR_PRANGE
, VR_PRANGE
, VR_PRANGE
);
175 const unsigned RO_PPI
= dispatch_trio (VR_PRANGE
, VR_PRANGE
, VR_IRANGE
);
176 const unsigned RO_IPP
= dispatch_trio (VR_IRANGE
, VR_PRANGE
, VR_PRANGE
);
177 const unsigned RO_IPI
= dispatch_trio (VR_IRANGE
, VR_PRANGE
, VR_IRANGE
);
178 const unsigned RO_PIP
= dispatch_trio (VR_PRANGE
, VR_IRANGE
, VR_PRANGE
);
179 const unsigned RO_PII
= dispatch_trio (VR_PRANGE
, VR_IRANGE
, VR_IRANGE
);
181 // Return a dispatch value for parameter types LHS, OP1 and OP2.
184 range_op_handler::dispatch_kind (const vrange
&lhs
, const vrange
&op1
,
185 const vrange
& op2
) const
187 return dispatch_trio (lhs
.m_discriminator
, op1
.m_discriminator
,
188 op2
.m_discriminator
);
192 range_op_handler::discriminator_fail (const vrange
&r1
,
194 const vrange
&r3
) const
196 const char name
[] = "IPF";
197 gcc_checking_assert (r1
.m_discriminator
< sizeof (name
) - 1);
198 gcc_checking_assert (r2
.m_discriminator
< sizeof (name
) - 1);
199 gcc_checking_assert (r3
.m_discriminator
< sizeof (name
) - 1);
200 fprintf (stderr
, "DISCRIMINATOR FAIL. Dispatch ====> RO_%c%c%c <====\n",
201 name
[r1
.m_discriminator
],
202 name
[r2
.m_discriminator
],
203 name
[r3
.m_discriminator
]);
208 has_pointer_operand_p (const vrange
&r1
, const vrange
&r2
, const vrange
&r3
)
210 return is_a
<prange
> (r1
) || is_a
<prange
> (r2
) || is_a
<prange
> (r3
);
213 // Dispatch a call to fold_range based on the types of R, LH and RH.
216 range_op_handler::fold_range (vrange
&r
, tree type
,
219 relation_trio rel
) const
221 gcc_checking_assert (m_operator
);
223 if (!lh
.undefined_p () && !rh
.undefined_p ())
224 gcc_assert (m_operator
->operand_check_p (type
, lh
.type (), rh
.type ()));
225 if (has_pointer_operand_p (r
, lh
, rh
)
226 && !m_operator
->pointers_handled_p (DISPATCH_FOLD_RANGE
,
227 dispatch_kind (r
, lh
, rh
)))
228 discriminator_fail (r
, lh
, rh
);
230 switch (dispatch_kind (r
, lh
, rh
))
233 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
235 as_a
<irange
> (rh
), rel
);
237 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
239 as_a
<irange
> (rh
), rel
);
241 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
243 as_a
<frange
> (rh
), rel
);
245 return m_operator
->fold_range (as_a
<frange
> (r
), type
,
247 as_a
<frange
> (rh
), rel
);
249 return m_operator
->fold_range (as_a
<frange
> (r
), type
,
251 as_a
<irange
> (rh
), rel
);
253 return m_operator
->fold_range (as_a
<prange
> (r
), type
,
255 as_a
<prange
> (rh
), rel
);
257 return m_operator
->fold_range (as_a
<prange
> (r
), type
,
259 as_a
<irange
> (rh
), rel
);
261 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
263 as_a
<prange
> (rh
), rel
);
265 return m_operator
->fold_range (as_a
<prange
> (r
), type
,
267 as_a
<prange
> (rh
), rel
);
269 return m_operator
->fold_range (as_a
<irange
> (r
), type
,
271 as_a
<irange
> (rh
), rel
);
277 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
280 range_op_handler::op1_range (vrange
&r
, tree type
,
283 relation_trio rel
) const
285 gcc_checking_assert (m_operator
);
286 if (lhs
.undefined_p ())
289 if (!op2
.undefined_p ())
290 gcc_assert (m_operator
->operand_check_p (lhs
.type (), type
, op2
.type ()));
291 if (has_pointer_operand_p (r
, lhs
, op2
)
292 && !m_operator
->pointers_handled_p (DISPATCH_OP1_RANGE
,
293 dispatch_kind (r
, lhs
, op2
)))
294 discriminator_fail (r
, lhs
, op2
);
296 switch (dispatch_kind (r
, lhs
, op2
))
299 return m_operator
->op1_range (as_a
<irange
> (r
), type
,
301 as_a
<irange
> (op2
), rel
);
303 return m_operator
->op1_range (as_a
<prange
> (r
), type
,
305 as_a
<prange
> (op2
), rel
);
307 return m_operator
->op1_range (as_a
<prange
> (r
), type
,
309 as_a
<prange
> (op2
), rel
);
311 return m_operator
->op1_range (as_a
<prange
> (r
), type
,
313 as_a
<irange
> (op2
), rel
);
315 return m_operator
->op1_range (as_a
<irange
> (r
), type
,
317 as_a
<irange
> (op2
), rel
);
319 return m_operator
->op1_range (as_a
<frange
> (r
), type
,
321 as_a
<frange
> (op2
), rel
);
323 return m_operator
->op1_range (as_a
<frange
> (r
), type
,
325 as_a
<frange
> (op2
), rel
);
331 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
334 range_op_handler::op2_range (vrange
&r
, tree type
,
337 relation_trio rel
) const
339 gcc_checking_assert (m_operator
);
340 if (lhs
.undefined_p ())
343 if (!op1
.undefined_p ())
344 gcc_assert (m_operator
->operand_check_p (lhs
.type (), op1
.type (), type
));
345 if (has_pointer_operand_p (r
, lhs
, op1
)
346 && !m_operator
->pointers_handled_p (DISPATCH_OP2_RANGE
,
347 dispatch_kind (r
, lhs
, op1
)))
348 discriminator_fail (r
, lhs
, op1
);
350 switch (dispatch_kind (r
, lhs
, op1
))
353 return m_operator
->op2_range (as_a
<irange
> (r
), type
,
355 as_a
<irange
> (op1
), rel
);
357 return m_operator
->op2_range (as_a
<prange
> (r
), type
,
359 as_a
<prange
> (op1
), rel
);
361 return m_operator
->op2_range (as_a
<irange
> (r
), type
,
363 as_a
<prange
> (op1
), rel
);
365 return m_operator
->op2_range (as_a
<frange
> (r
), type
,
367 as_a
<frange
> (op1
), rel
);
369 return m_operator
->op2_range (as_a
<frange
> (r
), type
,
371 as_a
<frange
> (op1
), rel
);
377 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
380 range_op_handler::lhs_op1_relation (const vrange
&lhs
,
383 relation_kind rel
) const
385 gcc_checking_assert (m_operator
);
387 if (has_pointer_operand_p (lhs
, op1
, op2
)
388 && !m_operator
->pointers_handled_p (DISPATCH_LHS_OP1_RELATION
,
389 dispatch_kind (lhs
, op1
, op2
)))
390 discriminator_fail (lhs
, op1
, op2
);
393 switch (dispatch_kind (lhs
, op1
, op2
))
396 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
398 as_a
<irange
> (op2
), rel
);
400 return m_operator
->lhs_op1_relation (as_a
<prange
> (lhs
),
402 as_a
<prange
> (op2
), rel
);
404 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
406 as_a
<prange
> (op2
), rel
);
408 return m_operator
->lhs_op1_relation (as_a
<prange
> (lhs
),
410 as_a
<irange
> (op2
), rel
);
412 return m_operator
->lhs_op1_relation (as_a
<irange
> (lhs
),
414 as_a
<frange
> (op2
), rel
);
416 return m_operator
->lhs_op1_relation (as_a
<frange
> (lhs
),
418 as_a
<frange
> (op2
), rel
);
424 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
427 range_op_handler::lhs_op2_relation (const vrange
&lhs
,
430 relation_kind rel
) const
432 gcc_checking_assert (m_operator
);
434 if (has_pointer_operand_p (lhs
, op1
, op2
)
435 && !m_operator
->pointers_handled_p (DISPATCH_LHS_OP2_RELATION
,
436 dispatch_kind (lhs
, op1
, op2
)))
437 discriminator_fail (lhs
, op1
, op2
);
439 switch (dispatch_kind (lhs
, op1
, op2
))
442 return m_operator
->lhs_op2_relation (as_a
<irange
> (lhs
),
444 as_a
<irange
> (op2
), rel
);
446 return m_operator
->lhs_op2_relation (as_a
<irange
> (lhs
),
448 as_a
<frange
> (op2
), rel
);
450 return m_operator
->lhs_op2_relation (as_a
<frange
> (lhs
),
452 as_a
<frange
> (op2
), rel
);
458 // Dispatch a call to op1_op2_relation based on the type of LHS.
461 range_op_handler::op1_op2_relation (const vrange
&lhs
,
463 const vrange
&op2
) const
465 gcc_checking_assert (m_operator
);
467 if (has_pointer_operand_p (lhs
, op1
, op2
)
468 && !m_operator
->pointers_handled_p (DISPATCH_OP1_OP2_RELATION
,
469 dispatch_kind (lhs
, op1
, op2
)))
470 discriminator_fail (lhs
, op1
, op2
);
472 switch (dispatch_kind (lhs
, op1
, op2
))
475 return m_operator
->op1_op2_relation (as_a
<irange
> (lhs
),
477 as_a
<irange
> (op2
));
480 return m_operator
->op1_op2_relation (as_a
<irange
> (lhs
),
482 as_a
<prange
> (op2
));
485 return m_operator
->op1_op2_relation (as_a
<irange
> (lhs
),
487 as_a
<frange
> (op2
));
490 return m_operator
->op1_op2_relation (as_a
<frange
> (lhs
),
492 as_a
<frange
> (op2
));
500 range_op_handler::overflow_free_p (const vrange
&lh
,
502 relation_trio rel
) const
504 gcc_checking_assert (m_operator
);
505 switch (dispatch_kind (lh
, lh
, rh
))
508 return m_operator
->overflow_free_p(as_a
<irange
> (lh
),
517 range_op_handler::operand_check_p (tree t1
, tree t2
, tree t3
) const
519 gcc_checking_assert (m_operator
);
520 return m_operator
->operand_check_p (t1
, t2
, t3
);
523 // Update the known bitmasks in R when applying the operation CODE to
527 update_known_bitmask (vrange
&r
, tree_code code
,
528 const vrange
&lh
, const vrange
&rh
)
530 if (r
.undefined_p () || lh
.undefined_p () || rh
.undefined_p ()
534 widest_int widest_value
, widest_mask
;
535 tree type
= r
.type ();
536 signop sign
= TYPE_SIGN (type
);
537 int prec
= TYPE_PRECISION (type
);
538 irange_bitmask lh_bits
= lh
.get_bitmask ();
539 irange_bitmask rh_bits
= rh
.get_bitmask ();
541 switch (get_gimple_rhs_class (code
))
543 case GIMPLE_UNARY_RHS
:
544 bit_value_unop (code
, sign
, prec
, &widest_value
, &widest_mask
,
545 TYPE_SIGN (lh
.type ()),
546 TYPE_PRECISION (lh
.type ()),
547 widest_int::from (lh_bits
.value (),
548 TYPE_SIGN (lh
.type ())),
549 widest_int::from (lh_bits
.mask (),
550 TYPE_SIGN (lh
.type ())));
552 case GIMPLE_BINARY_RHS
:
553 bit_value_binop (code
, sign
, prec
, &widest_value
, &widest_mask
,
554 TYPE_SIGN (lh
.type ()),
555 TYPE_PRECISION (lh
.type ()),
556 widest_int::from (lh_bits
.value (), sign
),
557 widest_int::from (lh_bits
.mask (), sign
),
558 TYPE_SIGN (rh
.type ()),
559 TYPE_PRECISION (rh
.type ()),
560 widest_int::from (rh_bits
.value (), sign
),
561 widest_int::from (rh_bits
.mask (), sign
));
567 wide_int mask
= wide_int::from (widest_mask
, prec
, sign
);
568 wide_int value
= wide_int::from (widest_value
, prec
, sign
);
569 // Bitmasks must have the unknown value bits cleared.
571 irange_bitmask
bm (value
, mask
);
572 r
.update_bitmask (bm
);
575 // Return the upper limit for a type.
577 static inline wide_int
578 max_limit (const_tree type
)
580 return irange_val_max (type
);
583 // Return the lower limit for a type.
585 static inline wide_int
586 min_limit (const_tree type
)
588 return irange_val_min (type
);
591 // Return false if shifting by OP is undefined behavior. Otherwise, return
592 // true and the range it is to be shifted by. This allows trimming out of
593 // undefined ranges, leaving only valid ranges if there are any.
596 get_shift_range (irange
&r
, tree type
, const irange
&op
)
598 if (op
.undefined_p ())
601 // Build valid range and intersect it with the shift range.
602 r
= value_range (op
.type (),
603 wi::shwi (0, TYPE_PRECISION (op
.type ())),
604 wi::shwi (TYPE_PRECISION (type
) - 1, TYPE_PRECISION (op
.type ())));
607 // If there are no valid ranges in the shift range, returned false.
608 if (r
.undefined_p ())
613 // Default wide_int fold operation returns [MIN, MAX].
616 range_operator::wi_fold (irange
&r
, tree type
,
617 const wide_int
&lh_lb ATTRIBUTE_UNUSED
,
618 const wide_int
&lh_ub ATTRIBUTE_UNUSED
,
619 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
620 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
622 gcc_checking_assert (r
.supports_type_p (type
));
623 r
.set_varying (type
);
626 // Call wi_fold when both op1 and op2 are equivalent. Further split small
627 // subranges into constants. This can provide better precision.
628 // For x + y, when x == y with a range of [0,4] instead of [0, 8] produce
629 // [0,0][2, 2][4,4][6, 6][8, 8]
630 // LIMIT is the maximum number of elements in range allowed before we
631 // do not process them individually.
634 range_operator::wi_fold_in_parts_equiv (irange
&r
, tree type
,
635 const wide_int
&lh_lb
,
636 const wide_int
&lh_ub
,
637 unsigned limit
) const
640 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
641 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
642 // if there are 1 to 8 values in the LH range, split them up.
644 if (lh_range
>= 0 && lh_range
< limit
)
646 for (unsigned x
= 0; x
<= lh_range
; x
++)
648 wide_int val
= lh_lb
+ x
;
649 wi_fold (tmp
, type
, val
, val
, val
, val
);
653 // Otherwise just call wi_fold.
655 wi_fold (r
, type
, lh_lb
, lh_ub
, lh_lb
, lh_ub
);
658 // Call wi_fold, except further split small subranges into constants.
659 // This can provide better precision. For something 8 >> [0,1]
660 // Instead of [8, 16], we will produce [8,8][16,16]
663 range_operator::wi_fold_in_parts (irange
&r
, tree type
,
664 const wide_int
&lh_lb
,
665 const wide_int
&lh_ub
,
666 const wide_int
&rh_lb
,
667 const wide_int
&rh_ub
) const
670 widest_int rh_range
= wi::sub (widest_int::from (rh_ub
, TYPE_SIGN (type
)),
671 widest_int::from (rh_lb
, TYPE_SIGN (type
)));
672 widest_int lh_range
= wi::sub (widest_int::from (lh_ub
, TYPE_SIGN (type
)),
673 widest_int::from (lh_lb
, TYPE_SIGN (type
)));
674 // If there are 2, 3, or 4 values in the RH range, do them separately.
675 // Call wi_fold_in_parts to check the RH side.
676 if (rh_range
> 0 && rh_range
< 4)
678 wi_fold_in_parts (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_lb
);
681 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 1, rh_lb
+ 1);
685 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
+ 2, rh_lb
+ 2);
689 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_ub
, rh_ub
);
692 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
693 // The RH side has been checked, so no recursion needed.
694 else if (lh_range
> 0 && lh_range
< 4)
696 wi_fold (r
, type
, lh_lb
, lh_lb
, rh_lb
, rh_ub
);
699 wi_fold (tmp
, type
, lh_lb
+ 1, lh_lb
+ 1, rh_lb
, rh_ub
);
703 wi_fold (tmp
, type
, lh_lb
+ 2, lh_lb
+ 2, rh_lb
, rh_ub
);
707 wi_fold (tmp
, type
, lh_ub
, lh_ub
, rh_lb
, rh_ub
);
710 // Otherwise just call wi_fold.
712 wi_fold (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
715 // The default for fold is to break all ranges into sub-ranges and
716 // invoke the wi_fold method on each sub-range pair.
719 range_operator::fold_range (irange
&r
, tree type
,
722 relation_trio trio
) const
724 gcc_checking_assert (r
.supports_type_p (type
));
725 if (empty_range_varying (r
, type
, lh
, rh
))
728 relation_kind rel
= trio
.op1_op2 ();
729 unsigned num_lh
= lh
.num_pairs ();
730 unsigned num_rh
= rh
.num_pairs ();
732 // If op1 and op2 are equivalences, then we don't need a complete cross
733 // product, just pairs of matching elements.
734 if (relation_equiv_p (rel
) && lh
== rh
)
738 for (unsigned x
= 0; x
< num_lh
; ++x
)
740 // If the number of subranges is too high, limit subrange creation.
741 unsigned limit
= (r
.num_pairs () > 32) ? 0 : 8;
742 wide_int lh_lb
= lh
.lower_bound (x
);
743 wide_int lh_ub
= lh
.upper_bound (x
);
744 wi_fold_in_parts_equiv (tmp
, type
, lh_lb
, lh_ub
, limit
);
749 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
750 update_bitmask (r
, lh
, rh
);
754 // If both ranges are single pairs, fold directly into the result range.
755 // If the number of subranges grows too high, produce a summary result as the
756 // loop becomes exponential with little benefit. See PR 103821.
757 if ((num_lh
== 1 && num_rh
== 1) || num_lh
* num_rh
> 12)
759 wi_fold_in_parts (r
, type
, lh
.lower_bound (), lh
.upper_bound (),
760 rh
.lower_bound (), rh
.upper_bound ());
761 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
762 update_bitmask (r
, lh
, rh
);
768 for (unsigned x
= 0; x
< num_lh
; ++x
)
769 for (unsigned y
= 0; y
< num_rh
; ++y
)
771 wide_int lh_lb
= lh
.lower_bound (x
);
772 wide_int lh_ub
= lh
.upper_bound (x
);
773 wide_int rh_lb
= rh
.lower_bound (y
);
774 wide_int rh_ub
= rh
.upper_bound (y
);
775 wi_fold_in_parts (tmp
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
779 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
780 update_bitmask (r
, lh
, rh
);
784 op1_op2_relation_effect (r
, type
, lh
, rh
, rel
);
785 update_bitmask (r
, lh
, rh
);
789 // The default for op1_range is to return false.
792 range_operator::op1_range (irange
&r ATTRIBUTE_UNUSED
,
793 tree type ATTRIBUTE_UNUSED
,
794 const irange
&lhs ATTRIBUTE_UNUSED
,
795 const irange
&op2 ATTRIBUTE_UNUSED
,
801 // The default for op2_range is to return false.
804 range_operator::op2_range (irange
&r ATTRIBUTE_UNUSED
,
805 tree type ATTRIBUTE_UNUSED
,
806 const irange
&lhs ATTRIBUTE_UNUSED
,
807 const irange
&op1 ATTRIBUTE_UNUSED
,
813 // The default relation routines return VREL_VARYING.
816 range_operator::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
817 const irange
&op1 ATTRIBUTE_UNUSED
,
818 const irange
&op2 ATTRIBUTE_UNUSED
,
819 relation_kind rel ATTRIBUTE_UNUSED
) const
825 range_operator::lhs_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
826 const irange
&op1 ATTRIBUTE_UNUSED
,
827 const irange
&op2 ATTRIBUTE_UNUSED
,
828 relation_kind rel ATTRIBUTE_UNUSED
) const
834 range_operator::op1_op2_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
835 const irange
&op1 ATTRIBUTE_UNUSED
,
836 const irange
&op2 ATTRIBUTE_UNUSED
) const
841 // Default is no relation affects the LHS.
844 range_operator::op1_op2_relation_effect (irange
&lhs_range ATTRIBUTE_UNUSED
,
845 tree type ATTRIBUTE_UNUSED
,
846 const irange
&op1_range ATTRIBUTE_UNUSED
,
847 const irange
&op2_range ATTRIBUTE_UNUSED
,
848 relation_kind rel ATTRIBUTE_UNUSED
) const
854 range_operator::overflow_free_p (const irange
&, const irange
&,
860 // Apply any known bitmask updates based on this operator.
863 range_operator::update_bitmask (irange
&, const irange
&,
864 const irange
&) const
868 // Check that operand types are OK. Default to always OK.
871 range_operator::operand_check_p (tree
, tree
, tree
) const
876 // Create and return a range from a pair of wide-ints that are known
877 // to have overflowed (or underflowed).
880 value_range_from_overflowed_bounds (irange
&r
, tree type
,
881 const wide_int
&wmin
,
882 const wide_int
&wmax
)
884 const signop sgn
= TYPE_SIGN (type
);
885 const unsigned int prec
= TYPE_PRECISION (type
);
887 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
888 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
893 if (wi::cmp (tmin
, tmax
, sgn
) < 0)
896 if (wi::cmp (tmax
, tem
, sgn
) > 0)
899 // If the anti-range would cover nothing, drop to varying.
900 // Likewise if the anti-range bounds are outside of the types
902 if (covers
|| wi::cmp (tmin
, tmax
, sgn
) > 0)
903 r
.set_varying (type
);
905 r
.set (type
, tmin
, tmax
, VR_ANTI_RANGE
);
908 // Create and return a range from a pair of wide-ints. MIN_OVF and
909 // MAX_OVF describe any overflow that might have occurred while
910 // calculating WMIN and WMAX respectively.
913 value_range_with_overflow (irange
&r
, tree type
,
914 const wide_int
&wmin
, const wide_int
&wmax
,
915 wi::overflow_type min_ovf
= wi::OVF_NONE
,
916 wi::overflow_type max_ovf
= wi::OVF_NONE
)
918 const signop sgn
= TYPE_SIGN (type
);
919 const unsigned int prec
= TYPE_PRECISION (type
);
920 const bool overflow_wraps
= TYPE_OVERFLOW_WRAPS (type
);
922 // For one bit precision if max != min, then the range covers all
924 if (prec
== 1 && wi::ne_p (wmax
, wmin
))
926 r
.set_varying (type
);
932 // If overflow wraps, truncate the values and adjust the range,
933 // kind, and bounds appropriately.
934 if ((min_ovf
!= wi::OVF_NONE
) == (max_ovf
!= wi::OVF_NONE
))
936 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
937 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
938 // If the limits are swapped, we wrapped around and cover
940 if (wi::gt_p (tmin
, tmax
, sgn
))
941 r
.set_varying (type
);
943 // No overflow or both overflow or underflow. The range
944 // kind stays normal.
945 r
.set (type
, tmin
, tmax
);
949 if ((min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_NONE
)
950 || (max_ovf
== wi::OVF_OVERFLOW
&& min_ovf
== wi::OVF_NONE
))
951 value_range_from_overflowed_bounds (r
, type
, wmin
, wmax
);
953 // Other underflow and/or overflow, drop to VR_VARYING.
954 r
.set_varying (type
);
958 // If both bounds either underflowed or overflowed, then the result
960 if ((min_ovf
== wi::OVF_OVERFLOW
&& max_ovf
== wi::OVF_OVERFLOW
)
961 || (min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_UNDERFLOW
))
967 // If overflow does not wrap, saturate to [MIN, MAX].
968 wide_int new_lb
, new_ub
;
969 if (min_ovf
== wi::OVF_UNDERFLOW
)
970 new_lb
= wi::min_value (prec
, sgn
);
971 else if (min_ovf
== wi::OVF_OVERFLOW
)
972 new_lb
= wi::max_value (prec
, sgn
);
976 if (max_ovf
== wi::OVF_UNDERFLOW
)
977 new_ub
= wi::min_value (prec
, sgn
);
978 else if (max_ovf
== wi::OVF_OVERFLOW
)
979 new_ub
= wi::max_value (prec
, sgn
);
983 r
.set (type
, new_lb
, new_ub
);
987 // Create and return a range from a pair of wide-ints. Canonicalize
988 // the case where the bounds are swapped. In which case, we transform
989 // [10,5] into [MIN,5][10,MAX].
992 create_possibly_reversed_range (irange
&r
, tree type
,
993 const wide_int
&new_lb
, const wide_int
&new_ub
)
995 signop s
= TYPE_SIGN (type
);
996 // If the bounds are swapped, treat the result as if an overflow occurred.
997 if (wi::gt_p (new_lb
, new_ub
, s
))
998 value_range_from_overflowed_bounds (r
, type
, new_lb
, new_ub
);
1000 // Otherwise it's just a normal range.
1001 r
.set (type
, new_lb
, new_ub
);
1004 // Return the summary information about boolean range LHS. If EMPTY/FULL,
1005 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
1008 get_bool_state (vrange
&r
, const vrange
&lhs
, tree val_type
)
1010 // If there is no result, then this is unexecutable.
1011 if (lhs
.undefined_p ())
1020 // For TRUE, we can't just test for [1,1] because Ada can have
1021 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
1022 if (lhs
.contains_p (build_zero_cst (lhs
.type ())))
1024 r
.set_varying (val_type
);
1031 // ------------------------------------------------------------------------
1034 operator_equal::update_bitmask (irange
&r
, const irange
&lh
,
1035 const irange
&rh
) const
1037 update_known_bitmask (r
, EQ_EXPR
, lh
, rh
);
1040 // Check if the LHS range indicates a relation between OP1 and OP2.
1043 operator_equal::op1_op2_relation (const irange
&lhs
, const irange
&,
1044 const irange
&) const
1046 if (lhs
.undefined_p ())
1047 return VREL_UNDEFINED
;
1049 // FALSE = op1 == op2 indicates NE_EXPR.
1053 // TRUE = op1 == op2 indicates EQ_EXPR.
1054 if (!contains_zero_p (lhs
))
1056 return VREL_VARYING
;
1060 operator_equal::fold_range (irange
&r
, tree type
,
1063 relation_trio rel
) const
1065 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_EQ
))
1068 // We can be sure the values are always equal or not if both ranges
1069 // consist of a single value, and then compare them.
1070 bool op1_const
= wi::eq_p (op1
.lower_bound (), op1
.upper_bound ());
1071 bool op2_const
= wi::eq_p (op2
.lower_bound (), op2
.upper_bound ());
1072 if (op1_const
&& op2_const
)
1074 if (wi::eq_p (op1
.lower_bound (), op2
.upper_bound()))
1075 r
= range_true (type
);
1077 r
= range_false (type
);
1081 // If ranges do not intersect, we know the range is not equal,
1082 // otherwise we don't know anything for sure.
1083 int_range_max tmp
= op1
;
1084 tmp
.intersect (op2
);
1085 if (tmp
.undefined_p ())
1086 r
= range_false (type
);
1087 // Check if a constant cannot satisfy the bitmask requirements.
1088 else if (op2_const
&& !op1
.get_bitmask ().member_p (op2
.lower_bound ()))
1089 r
= range_false (type
);
1090 else if (op1_const
&& !op2
.get_bitmask ().member_p (op1
.lower_bound ()))
1091 r
= range_false (type
);
1093 r
= range_true_and_false (type
);
1099 operator_equal::op1_range (irange
&r
, tree type
,
1102 relation_trio
) const
1104 switch (get_bool_state (r
, lhs
, type
))
1107 // If it's true, the result is the same as OP2.
1112 // If the result is false, the only time we know anything is
1113 // if OP2 is a constant.
1114 if (!op2
.undefined_p ()
1115 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
1121 r
.set_varying (type
);
1131 operator_equal::op2_range (irange
&r
, tree type
,
1134 relation_trio rel
) const
1136 return operator_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1139 // -------------------------------------------------------------------------
1142 operator_not_equal::update_bitmask (irange
&r
, const irange
&lh
,
1143 const irange
&rh
) const
1145 update_known_bitmask (r
, NE_EXPR
, lh
, rh
);
1148 // Check if the LHS range indicates a relation between OP1 and OP2.
1151 operator_not_equal::op1_op2_relation (const irange
&lhs
, const irange
&,
1152 const irange
&) const
1154 if (lhs
.undefined_p ())
1155 return VREL_UNDEFINED
;
1157 // FALSE = op1 != op2 indicates EQ_EXPR.
1161 // TRUE = op1 != op2 indicates NE_EXPR.
1162 if (!contains_zero_p (lhs
))
1164 return VREL_VARYING
;
1168 operator_not_equal::fold_range (irange
&r
, tree type
,
1171 relation_trio rel
) const
1173 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_NE
))
1176 // We can be sure the values are always equal or not if both ranges
1177 // consist of a single value, and then compare them.
1178 bool op1_const
= wi::eq_p (op1
.lower_bound (), op1
.upper_bound ());
1179 bool op2_const
= wi::eq_p (op2
.lower_bound (), op2
.upper_bound ());
1180 if (op1_const
&& op2_const
)
1182 if (wi::ne_p (op1
.lower_bound (), op2
.upper_bound()))
1183 r
= range_true (type
);
1185 r
= range_false (type
);
1189 // If ranges do not intersect, we know the range is not equal,
1190 // otherwise we don't know anything for sure.
1191 int_range_max tmp
= op1
;
1192 tmp
.intersect (op2
);
1193 if (tmp
.undefined_p ())
1194 r
= range_true (type
);
1195 // Check if a constant cannot satisfy the bitmask requirements.
1196 else if (op2_const
&& !op1
.get_bitmask ().member_p (op2
.lower_bound ()))
1197 r
= range_true (type
);
1198 else if (op1_const
&& !op2
.get_bitmask ().member_p (op1
.lower_bound ()))
1199 r
= range_true (type
);
1201 r
= range_true_and_false (type
);
1207 operator_not_equal::op1_range (irange
&r
, tree type
,
1210 relation_trio
) const
1212 switch (get_bool_state (r
, lhs
, type
))
1215 // If the result is true, the only time we know anything is if
1216 // OP2 is a constant.
1217 if (!op2
.undefined_p ()
1218 && wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
1224 r
.set_varying (type
);
1228 // If it's false, the result is the same as OP2.
1240 operator_not_equal::op2_range (irange
&r
, tree type
,
1243 relation_trio rel
) const
1245 return operator_not_equal::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1248 // (X < VAL) produces the range of [MIN, VAL - 1].
1251 build_lt (irange
&r
, tree type
, const wide_int
&val
)
1253 wi::overflow_type ov
;
1255 signop sgn
= TYPE_SIGN (type
);
1257 // Signed 1 bit cannot represent 1 for subtraction.
1259 lim
= wi::add (val
, -1, sgn
, &ov
);
1261 lim
= wi::sub (val
, 1, sgn
, &ov
);
1263 // If val - 1 underflows, check if X < MIN, which is an empty range.
1267 r
= int_range
<1> (type
, min_limit (type
), lim
);
1270 // (X <= VAL) produces the range of [MIN, VAL].
1273 build_le (irange
&r
, tree type
, const wide_int
&val
)
1275 r
= int_range
<1> (type
, min_limit (type
), val
);
1278 // (X > VAL) produces the range of [VAL + 1, MAX].
1281 build_gt (irange
&r
, tree type
, const wide_int
&val
)
1283 wi::overflow_type ov
;
1285 signop sgn
= TYPE_SIGN (type
);
1287 // Signed 1 bit cannot represent 1 for addition.
1289 lim
= wi::sub (val
, -1, sgn
, &ov
);
1291 lim
= wi::add (val
, 1, sgn
, &ov
);
1292 // If val + 1 overflows, check is for X > MAX, which is an empty range.
1296 r
= int_range
<1> (type
, lim
, max_limit (type
));
1299 // (X >= val) produces the range of [VAL, MAX].
1302 build_ge (irange
&r
, tree type
, const wide_int
&val
)
1304 r
= int_range
<1> (type
, val
, max_limit (type
));
1309 operator_lt::update_bitmask (irange
&r
, const irange
&lh
,
1310 const irange
&rh
) const
1312 update_known_bitmask (r
, LT_EXPR
, lh
, rh
);
1315 // Check if the LHS range indicates a relation between OP1 and OP2.
1318 operator_lt::op1_op2_relation (const irange
&lhs
, const irange
&,
1319 const irange
&) const
1321 if (lhs
.undefined_p ())
1322 return VREL_UNDEFINED
;
1324 // FALSE = op1 < op2 indicates GE_EXPR.
1328 // TRUE = op1 < op2 indicates LT_EXPR.
1329 if (!contains_zero_p (lhs
))
1331 return VREL_VARYING
;
1335 operator_lt::fold_range (irange
&r
, tree type
,
1338 relation_trio rel
) const
1340 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LT
))
1343 signop sign
= TYPE_SIGN (op1
.type ());
1344 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1346 if (wi::lt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1347 r
= range_true (type
);
1348 else if (!wi::lt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1349 r
= range_false (type
);
1350 // Use nonzero bits to determine if < 0 is false.
1351 else if (op2
.zero_p () && !wi::neg_p (op1
.get_nonzero_bits (), sign
))
1352 r
= range_false (type
);
1354 r
= range_true_and_false (type
);
1359 operator_lt::op1_range (irange
&r
, tree type
,
1362 relation_trio
) const
1364 if (op2
.undefined_p ())
1367 switch (get_bool_state (r
, lhs
, type
))
1370 build_lt (r
, type
, op2
.upper_bound ());
1374 build_ge (r
, type
, op2
.lower_bound ());
1384 operator_lt::op2_range (irange
&r
, tree type
,
1387 relation_trio
) const
1389 if (op1
.undefined_p ())
1392 switch (get_bool_state (r
, lhs
, type
))
1395 build_gt (r
, type
, op1
.lower_bound ());
1399 build_le (r
, type
, op1
.upper_bound ());
1410 operator_le::update_bitmask (irange
&r
, const irange
&lh
,
1411 const irange
&rh
) const
1413 update_known_bitmask (r
, LE_EXPR
, lh
, rh
);
1416 // Check if the LHS range indicates a relation between OP1 and OP2.
1419 operator_le::op1_op2_relation (const irange
&lhs
, const irange
&,
1420 const irange
&) const
1422 if (lhs
.undefined_p ())
1423 return VREL_UNDEFINED
;
1425 // FALSE = op1 <= op2 indicates GT_EXPR.
1429 // TRUE = op1 <= op2 indicates LE_EXPR.
1430 if (!contains_zero_p (lhs
))
1432 return VREL_VARYING
;
1436 operator_le::fold_range (irange
&r
, tree type
,
1439 relation_trio rel
) const
1441 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_LE
))
1444 signop sign
= TYPE_SIGN (op1
.type ());
1445 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1447 if (wi::le_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1448 r
= range_true (type
);
1449 else if (!wi::le_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1450 r
= range_false (type
);
1452 r
= range_true_and_false (type
);
1457 operator_le::op1_range (irange
&r
, tree type
,
1460 relation_trio
) const
1462 if (op2
.undefined_p ())
1465 switch (get_bool_state (r
, lhs
, type
))
1468 build_le (r
, type
, op2
.upper_bound ());
1472 build_gt (r
, type
, op2
.lower_bound ());
1482 operator_le::op2_range (irange
&r
, tree type
,
1485 relation_trio
) const
1487 if (op1
.undefined_p ())
1490 switch (get_bool_state (r
, lhs
, type
))
1493 build_ge (r
, type
, op1
.lower_bound ());
1497 build_lt (r
, type
, op1
.upper_bound ());
1508 operator_gt::update_bitmask (irange
&r
, const irange
&lh
,
1509 const irange
&rh
) const
1511 update_known_bitmask (r
, GT_EXPR
, lh
, rh
);
1514 // Check if the LHS range indicates a relation between OP1 and OP2.
1517 operator_gt::op1_op2_relation (const irange
&lhs
, const irange
&,
1518 const irange
&) const
1520 if (lhs
.undefined_p ())
1521 return VREL_UNDEFINED
;
1523 // FALSE = op1 > op2 indicates LE_EXPR.
1527 // TRUE = op1 > op2 indicates GT_EXPR.
1528 if (!contains_zero_p (lhs
))
1530 return VREL_VARYING
;
1534 operator_gt::fold_range (irange
&r
, tree type
,
1535 const irange
&op1
, const irange
&op2
,
1536 relation_trio rel
) const
1538 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GT
))
1541 signop sign
= TYPE_SIGN (op1
.type ());
1542 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1544 if (wi::gt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1545 r
= range_true (type
);
1546 else if (!wi::gt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1547 r
= range_false (type
);
1549 r
= range_true_and_false (type
);
1554 operator_gt::op1_range (irange
&r
, tree type
,
1555 const irange
&lhs
, const irange
&op2
,
1556 relation_trio
) const
1558 if (op2
.undefined_p ())
1561 switch (get_bool_state (r
, lhs
, type
))
1564 build_gt (r
, type
, op2
.lower_bound ());
1568 build_le (r
, type
, op2
.upper_bound ());
1578 operator_gt::op2_range (irange
&r
, tree type
,
1581 relation_trio
) const
1583 if (op1
.undefined_p ())
1586 switch (get_bool_state (r
, lhs
, type
))
1589 build_lt (r
, type
, op1
.upper_bound ());
1593 build_ge (r
, type
, op1
.lower_bound ());
1604 operator_ge::update_bitmask (irange
&r
, const irange
&lh
,
1605 const irange
&rh
) const
1607 update_known_bitmask (r
, GE_EXPR
, lh
, rh
);
1610 // Check if the LHS range indicates a relation between OP1 and OP2.
1613 operator_ge::op1_op2_relation (const irange
&lhs
, const irange
&,
1614 const irange
&) const
1616 if (lhs
.undefined_p ())
1617 return VREL_UNDEFINED
;
1619 // FALSE = op1 >= op2 indicates LT_EXPR.
1623 // TRUE = op1 >= op2 indicates GE_EXPR.
1624 if (!contains_zero_p (lhs
))
1626 return VREL_VARYING
;
1630 operator_ge::fold_range (irange
&r
, tree type
,
1633 relation_trio rel
) const
1635 if (relop_early_resolve (r
, type
, op1
, op2
, rel
, VREL_GE
))
1638 signop sign
= TYPE_SIGN (op1
.type ());
1639 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
1641 if (wi::ge_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
1642 r
= range_true (type
);
1643 else if (!wi::ge_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
1644 r
= range_false (type
);
1646 r
= range_true_and_false (type
);
1651 operator_ge::op1_range (irange
&r
, tree type
,
1654 relation_trio
) const
1656 if (op2
.undefined_p ())
1659 switch (get_bool_state (r
, lhs
, type
))
1662 build_ge (r
, type
, op2
.lower_bound ());
1666 build_lt (r
, type
, op2
.upper_bound ());
1676 operator_ge::op2_range (irange
&r
, tree type
,
1679 relation_trio
) const
1681 if (op1
.undefined_p ())
1684 switch (get_bool_state (r
, lhs
, type
))
1687 build_le (r
, type
, op1
.upper_bound ());
1691 build_gt (r
, type
, op1
.lower_bound ());
1702 operator_plus::update_bitmask (irange
&r
, const irange
&lh
,
1703 const irange
&rh
) const
1705 update_known_bitmask (r
, PLUS_EXPR
, lh
, rh
);
1708 // Check to see if the range of OP2 indicates anything about the relation
1709 // between LHS and OP1.
1712 operator_plus::lhs_op1_relation (const irange
&lhs
,
1715 relation_kind
) const
1717 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
1718 return VREL_VARYING
;
1720 tree type
= lhs
.type ();
1721 unsigned prec
= TYPE_PRECISION (type
);
1722 wi::overflow_type ovf1
, ovf2
;
1723 signop sign
= TYPE_SIGN (type
);
1725 // LHS = OP1 + 0 indicates LHS == OP1.
1729 if (TYPE_OVERFLOW_WRAPS (type
))
1731 wi::add (op1
.lower_bound (), op2
.lower_bound (), sign
, &ovf1
);
1732 wi::add (op1
.upper_bound (), op2
.upper_bound (), sign
, &ovf2
);
1735 ovf1
= ovf2
= wi::OVF_NONE
;
1737 // Never wrapping additions.
1740 // Positive op2 means lhs > op1.
1741 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1743 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1746 // Negative op2 means lhs < op1.
1747 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1749 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1752 // Always wrapping additions.
1753 else if (ovf1
&& ovf1
== ovf2
)
1755 // Positive op2 means lhs < op1.
1756 if (wi::gt_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1758 if (wi::ge_p (op2
.lower_bound (), wi::zero (prec
), sign
))
1761 // Negative op2 means lhs > op1.
1762 if (wi::lt_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1764 if (wi::le_p (op2
.upper_bound (), wi::zero (prec
), sign
))
1768 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1769 if (!range_includes_zero_p (op2
))
1772 return VREL_VARYING
;
1775 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1779 operator_plus::lhs_op2_relation (const irange
&lhs
, const irange
&op1
,
1780 const irange
&op2
, relation_kind rel
) const
1782 return lhs_op1_relation (lhs
, op2
, op1
, rel
);
1786 operator_plus::wi_fold (irange
&r
, tree type
,
1787 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1788 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1790 wi::overflow_type ov_lb
, ov_ub
;
1791 signop s
= TYPE_SIGN (type
);
1792 wide_int new_lb
= wi::add (lh_lb
, rh_lb
, s
, &ov_lb
);
1793 wide_int new_ub
= wi::add (lh_ub
, rh_ub
, s
, &ov_ub
);
1794 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
1797 // Given addition or subtraction, determine the possible NORMAL ranges and
1798 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1799 // Return the relation that exists between the LHS and OP1 in order for the
1800 // NORMAL range to apply.
1801 // a return value of VREL_VARYING means no ranges were applicable.
1803 static relation_kind
1804 plus_minus_ranges (irange
&r_ov
, irange
&r_normal
, const irange
&offset
,
1807 relation_kind kind
= VREL_VARYING
;
1808 // For now, only deal with constant adds. This could be extended to ranges
1809 // when someone is so motivated.
1810 if (!offset
.singleton_p () || offset
.zero_p ())
1813 // Always work with a positive offset. ie a+ -2 -> a-2 and a- -2 > a+2
1814 wide_int off
= offset
.lower_bound ();
1815 if (wi::neg_p (off
, SIGNED
))
1818 off
= wi::neg (off
);
1821 wi::overflow_type ov
;
1822 tree type
= offset
.type ();
1823 unsigned prec
= TYPE_PRECISION (type
);
1826 // calculate the normal range and relation for the operation.
1830 lb
= wi::zero (prec
);
1831 ub
= wi::sub (irange_val_max (type
), off
, UNSIGNED
, &ov
);
1838 ub
= irange_val_max (type
);
1841 int_range
<2> normal_range (type
, lb
, ub
);
1842 int_range
<2> ov_range (type
, lb
, ub
, VR_ANTI_RANGE
);
1845 r_normal
= normal_range
;
1849 // Once op1 has been calculated by operator_plus or operator_minus, check
1850 // to see if the relation passed causes any part of the calculation to
1851 // be not possible. ie
1852 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1853 // and that further refines a_2 to [0, 0].
1854 // R is the value of op1, OP2 is the offset being added/subtracted, REL is the
1855 // relation between LHS relation OP1 and ADD_P is true for PLUS, false for
1856 // MINUS. IF any adjustment can be made, R will reflect it.
1859 adjust_op1_for_overflow (irange
&r
, const irange
&op2
, relation_kind rel
,
1862 if (r
.undefined_p ())
1864 tree type
= r
.type ();
1865 // Check for unsigned overflow and calculate the overflow part.
1866 signop s
= TYPE_SIGN (type
);
1867 if (!TYPE_OVERFLOW_WRAPS (type
) || s
== SIGNED
)
1870 // Only work with <, <=, >, >= relations.
1871 if (!relation_lt_le_gt_ge_p (rel
))
1874 // Get the ranges for this offset.
1875 int_range_max normal
, overflow
;
1876 relation_kind k
= plus_minus_ranges (overflow
, normal
, op2
, add_p
);
1878 // VREL_VARYING means there are no adjustments.
1879 if (k
== VREL_VARYING
)
1882 // If the relations match use the normal range, otherwise use overflow range.
1883 if (relation_intersect (k
, rel
) == k
)
1884 r
.intersect (normal
);
1886 r
.intersect (overflow
);
1891 operator_plus::op1_range (irange
&r
, tree type
,
1894 relation_trio trio
) const
1896 if (lhs
.undefined_p ())
1898 // Start with the default operation.
1899 range_op_handler
minus (MINUS_EXPR
);
1902 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
1903 relation_kind rel
= trio
.lhs_op1 ();
1904 // Check for a relation refinement.
1906 adjust_op1_for_overflow (r
, op2
, rel
, true /* PLUS_EXPR */);
1911 operator_plus::op2_range (irange
&r
, tree type
,
1914 relation_trio rel
) const
1916 return op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
1919 class operator_widen_plus_signed
: public range_operator
1922 virtual void wi_fold (irange
&r
, tree type
,
1923 const wide_int
&lh_lb
,
1924 const wide_int
&lh_ub
,
1925 const wide_int
&rh_lb
,
1926 const wide_int
&rh_ub
) const;
1927 } op_widen_plus_signed
;
1930 operator_widen_plus_signed::wi_fold (irange
&r
, tree type
,
1931 const wide_int
&lh_lb
,
1932 const wide_int
&lh_ub
,
1933 const wide_int
&rh_lb
,
1934 const wide_int
&rh_ub
) const
1936 wi::overflow_type ov_lb
, ov_ub
;
1937 signop s
= TYPE_SIGN (type
);
1940 = wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, SIGNED
);
1942 = wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, SIGNED
);
1943 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
1944 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
1946 wide_int new_lb
= wi::add (lh_wlb
, rh_wlb
, s
, &ov_lb
);
1947 wide_int new_ub
= wi::add (lh_wub
, rh_wub
, s
, &ov_ub
);
1949 r
= int_range
<2> (type
, new_lb
, new_ub
);
1952 class operator_widen_plus_unsigned
: public range_operator
1955 virtual void wi_fold (irange
&r
, tree type
,
1956 const wide_int
&lh_lb
,
1957 const wide_int
&lh_ub
,
1958 const wide_int
&rh_lb
,
1959 const wide_int
&rh_ub
) const;
1960 } op_widen_plus_unsigned
;
1963 operator_widen_plus_unsigned::wi_fold (irange
&r
, tree type
,
1964 const wide_int
&lh_lb
,
1965 const wide_int
&lh_ub
,
1966 const wide_int
&rh_lb
,
1967 const wide_int
&rh_ub
) const
1969 wi::overflow_type ov_lb
, ov_ub
;
1970 signop s
= TYPE_SIGN (type
);
1973 = wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, UNSIGNED
);
1975 = wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, UNSIGNED
);
1976 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
1977 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
1979 wide_int new_lb
= wi::add (lh_wlb
, rh_wlb
, s
, &ov_lb
);
1980 wide_int new_ub
= wi::add (lh_wub
, rh_wub
, s
, &ov_ub
);
1982 r
= int_range
<2> (type
, new_lb
, new_ub
);
1986 operator_minus::update_bitmask (irange
&r
, const irange
&lh
,
1987 const irange
&rh
) const
1989 update_known_bitmask (r
, MINUS_EXPR
, lh
, rh
);
1993 operator_minus::wi_fold (irange
&r
, tree type
,
1994 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1995 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1997 wi::overflow_type ov_lb
, ov_ub
;
1998 signop s
= TYPE_SIGN (type
);
1999 wide_int new_lb
= wi::sub (lh_lb
, rh_ub
, s
, &ov_lb
);
2000 wide_int new_ub
= wi::sub (lh_ub
, rh_lb
, s
, &ov_ub
);
2001 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
2005 // Return the relation between LHS and OP1 based on the relation between
2009 operator_minus::lhs_op1_relation (const irange
&, const irange
&op1
,
2010 const irange
&, relation_kind rel
) const
2012 if (!op1
.undefined_p () && TYPE_SIGN (op1
.type ()) == UNSIGNED
)
2021 return VREL_VARYING
;
2024 // Check to see if the relation REL between OP1 and OP2 has any effect on the
2025 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
2026 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
2029 minus_op1_op2_relation_effect (irange
&lhs_range
, tree type
,
2030 const irange
&op1_range ATTRIBUTE_UNUSED
,
2031 const irange
&op2_range ATTRIBUTE_UNUSED
,
2034 if (rel
== VREL_VARYING
)
2037 int_range
<2> rel_range
;
2038 unsigned prec
= TYPE_PRECISION (type
);
2039 signop sgn
= TYPE_SIGN (type
);
2041 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
2043 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
));
2044 else if (rel
== VREL_NE
)
2045 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
2047 else if (TYPE_OVERFLOW_WRAPS (type
))
2051 // For wrapping signed values and unsigned, if op1 > op2 or
2052 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
2055 rel_range
= int_range
<2> (type
, wi::zero (prec
), wi::zero (prec
),
2066 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
2068 rel_range
= int_range
<2> (type
, wi::one (prec
),
2069 wi::max_value (prec
, sgn
));
2071 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
2073 rel_range
= int_range
<2> (type
, wi::zero (prec
),
2074 wi::max_value (prec
, sgn
));
2076 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
2078 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
2079 wi::minus_one (prec
));
2081 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
2083 rel_range
= int_range
<2> (type
, wi::min_value (prec
, sgn
),
2090 lhs_range
.intersect (rel_range
);
2095 operator_minus::op1_op2_relation_effect (irange
&lhs_range
, tree type
,
2096 const irange
&op1_range
,
2097 const irange
&op2_range
,
2098 relation_kind rel
) const
2100 return minus_op1_op2_relation_effect (lhs_range
, type
, op1_range
, op2_range
,
2105 operator_minus::op1_range (irange
&r
, tree type
,
2108 relation_trio trio
) const
2110 if (lhs
.undefined_p ())
2112 // Start with the default operation.
2113 range_op_handler
minus (PLUS_EXPR
);
2116 bool res
= minus
.fold_range (r
, type
, lhs
, op2
);
2117 relation_kind rel
= trio
.lhs_op1 ();
2119 adjust_op1_for_overflow (r
, op2
, rel
, false /* PLUS_EXPR */);
2125 operator_minus::op2_range (irange
&r
, tree type
,
2128 relation_trio
) const
2130 if (lhs
.undefined_p ())
2132 return fold_range (r
, type
, op1
, lhs
);
2136 operator_min::update_bitmask (irange
&r
, const irange
&lh
,
2137 const irange
&rh
) const
2139 update_known_bitmask (r
, MIN_EXPR
, lh
, rh
);
2143 operator_min::wi_fold (irange
&r
, tree type
,
2144 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2145 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2147 signop s
= TYPE_SIGN (type
);
2148 wide_int new_lb
= wi::min (lh_lb
, rh_lb
, s
);
2149 wide_int new_ub
= wi::min (lh_ub
, rh_ub
, s
);
2150 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2155 operator_max::update_bitmask (irange
&r
, const irange
&lh
,
2156 const irange
&rh
) const
2158 update_known_bitmask (r
, MAX_EXPR
, lh
, rh
);
2162 operator_max::wi_fold (irange
&r
, tree type
,
2163 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2164 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2166 signop s
= TYPE_SIGN (type
);
2167 wide_int new_lb
= wi::max (lh_lb
, rh_lb
, s
);
2168 wide_int new_ub
= wi::max (lh_ub
, rh_ub
, s
);
2169 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2173 // Calculate the cross product of two sets of ranges and return it.
2175 // Multiplications, divisions and shifts are a bit tricky to handle,
2176 // depending on the mix of signs we have in the two ranges, we need to
2177 // operate on different values to get the minimum and maximum values
2178 // for the new range. One approach is to figure out all the
2179 // variations of range combinations and do the operations.
2181 // However, this involves several calls to compare_values and it is
2182 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
2183 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
2184 // figure the smallest and largest values to form the new range.
2187 cross_product_operator::wi_cross_product (irange
&r
, tree type
,
2188 const wide_int
&lh_lb
,
2189 const wide_int
&lh_ub
,
2190 const wide_int
&rh_lb
,
2191 const wide_int
&rh_ub
) const
2193 wide_int cp1
, cp2
, cp3
, cp4
;
2194 // Default to varying.
2195 r
.set_varying (type
);
2197 // Compute the 4 cross operations, bailing if we get an overflow we
2199 if (wi_op_overflows (cp1
, type
, lh_lb
, rh_lb
))
2201 if (wi::eq_p (lh_lb
, lh_ub
))
2203 else if (wi_op_overflows (cp3
, type
, lh_ub
, rh_lb
))
2205 if (wi::eq_p (rh_lb
, rh_ub
))
2207 else if (wi_op_overflows (cp2
, type
, lh_lb
, rh_ub
))
2209 if (wi::eq_p (lh_lb
, lh_ub
))
2211 else if (wi_op_overflows (cp4
, type
, lh_ub
, rh_ub
))
2215 signop sign
= TYPE_SIGN (type
);
2216 if (wi::gt_p (cp1
, cp2
, sign
))
2217 std::swap (cp1
, cp2
);
2218 if (wi::gt_p (cp3
, cp4
, sign
))
2219 std::swap (cp3
, cp4
);
2221 // Choose min and max from the ordered pairs.
2222 wide_int res_lb
= wi::min (cp1
, cp3
, sign
);
2223 wide_int res_ub
= wi::max (cp2
, cp4
, sign
);
2224 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
2229 operator_mult::update_bitmask (irange
&r
, const irange
&lh
,
2230 const irange
&rh
) const
2232 update_known_bitmask (r
, MULT_EXPR
, lh
, rh
);
2236 operator_mult::op1_range (irange
&r
, tree type
,
2237 const irange
&lhs
, const irange
&op2
,
2238 relation_trio
) const
2240 if (lhs
.undefined_p ())
2243 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2244 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
2245 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
2246 if (TYPE_OVERFLOW_WRAPS (type
))
2250 if (op2
.singleton_p (offset
) && offset
!= 0)
2251 return range_op_handler (TRUNC_DIV_EXPR
).fold_range (r
, type
, lhs
, op2
);
2256 operator_mult::op2_range (irange
&r
, tree type
,
2257 const irange
&lhs
, const irange
&op1
,
2258 relation_trio rel
) const
2260 return operator_mult::op1_range (r
, type
, lhs
, op1
, rel
.swap_op1_op2 ());
2264 operator_mult::wi_op_overflows (wide_int
&res
, tree type
,
2265 const wide_int
&w0
, const wide_int
&w1
) const
2267 wi::overflow_type overflow
= wi::OVF_NONE
;
2268 signop sign
= TYPE_SIGN (type
);
2269 res
= wi::mul (w0
, w1
, sign
, &overflow
);
2270 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2272 // For multiplication, the sign of the overflow is given
2273 // by the comparison of the signs of the operands.
2274 if (sign
== UNSIGNED
|| w0
.sign_mask () == w1
.sign_mask ())
2275 res
= wi::max_value (w0
.get_precision (), sign
);
2277 res
= wi::min_value (w0
.get_precision (), sign
);
2284 operator_mult::wi_fold (irange
&r
, tree type
,
2285 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2286 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2288 if (TYPE_OVERFLOW_UNDEFINED (type
))
2290 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2294 // Multiply the ranges when overflow wraps. This is basically fancy
2295 // code so we don't drop to varying with an unsigned
2298 // This test requires 2*prec bits if both operands are signed and
2299 // 2*prec + 2 bits if either is not. Therefore, extend the values
2300 // using the sign of the result to PREC2. From here on out,
2301 // everything is just signed math no matter what the input types
2304 signop sign
= TYPE_SIGN (type
);
2305 unsigned prec
= TYPE_PRECISION (type
);
2306 widest2_int min0
= widest2_int::from (lh_lb
, sign
);
2307 widest2_int max0
= widest2_int::from (lh_ub
, sign
);
2308 widest2_int min1
= widest2_int::from (rh_lb
, sign
);
2309 widest2_int max1
= widest2_int::from (rh_ub
, sign
);
2310 widest2_int sizem1
= wi::mask
<widest2_int
> (prec
, false);
2311 widest2_int size
= sizem1
+ 1;
2313 // Canonicalize the intervals.
2314 if (sign
== UNSIGNED
)
2316 if (wi::ltu_p (size
, min0
+ max0
))
2321 if (wi::ltu_p (size
, min1
+ max1
))
2328 // Sort the 4 products so that min is in prod0 and max is in
2330 widest2_int prod0
= min0
* min1
;
2331 widest2_int prod1
= min0
* max1
;
2332 widest2_int prod2
= max0
* min1
;
2333 widest2_int prod3
= max0
* max1
;
2335 // min0min1 > max0max1
2337 std::swap (prod0
, prod3
);
2339 // min0max1 > max0min1
2341 std::swap (prod1
, prod2
);
2344 std::swap (prod0
, prod1
);
2347 std::swap (prod2
, prod3
);
2350 prod2
= prod3
- prod0
;
2351 if (wi::geu_p (prod2
, sizem1
))
2353 // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
2354 if (TYPE_UNSIGNED (type
) && rh_lb
== rh_ub
2355 && wi::exact_log2 (rh_lb
) != -1 && prec
> 1)
2357 r
.set (type
, rh_lb
, wi::max_value (prec
, sign
));
2359 zero
.set_zero (type
);
2363 // The range covers all values.
2364 r
.set_varying (type
);
2368 wide_int new_lb
= wide_int::from (prod0
, prec
, sign
);
2369 wide_int new_ub
= wide_int::from (prod3
, prec
, sign
);
2370 create_possibly_reversed_range (r
, type
, new_lb
, new_ub
);
2374 class operator_widen_mult_signed
: public range_operator
2377 virtual void wi_fold (irange
&r
, tree type
,
2378 const wide_int
&lh_lb
,
2379 const wide_int
&lh_ub
,
2380 const wide_int
&rh_lb
,
2381 const wide_int
&rh_ub
)
2383 } op_widen_mult_signed
;
2386 operator_widen_mult_signed::wi_fold (irange
&r
, tree type
,
2387 const wide_int
&lh_lb
,
2388 const wide_int
&lh_ub
,
2389 const wide_int
&rh_lb
,
2390 const wide_int
&rh_ub
) const
2392 signop s
= TYPE_SIGN (type
);
2394 wide_int lh_wlb
= wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, SIGNED
);
2395 wide_int lh_wub
= wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, SIGNED
);
2396 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
2397 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
2399 /* We don't expect a widening multiplication to be able to overflow but range
2400 calculations for multiplications are complicated. After widening the
2401 operands lets call the base class. */
2402 return op_mult
.wi_fold (r
, type
, lh_wlb
, lh_wub
, rh_wlb
, rh_wub
);
2406 class operator_widen_mult_unsigned
: public range_operator
2409 virtual void wi_fold (irange
&r
, tree type
,
2410 const wide_int
&lh_lb
,
2411 const wide_int
&lh_ub
,
2412 const wide_int
&rh_lb
,
2413 const wide_int
&rh_ub
)
2415 } op_widen_mult_unsigned
;
2418 operator_widen_mult_unsigned::wi_fold (irange
&r
, tree type
,
2419 const wide_int
&lh_lb
,
2420 const wide_int
&lh_ub
,
2421 const wide_int
&rh_lb
,
2422 const wide_int
&rh_ub
) const
2424 signop s
= TYPE_SIGN (type
);
2426 wide_int lh_wlb
= wide_int::from (lh_lb
, wi::get_precision (lh_lb
) * 2, UNSIGNED
);
2427 wide_int lh_wub
= wide_int::from (lh_ub
, wi::get_precision (lh_ub
) * 2, UNSIGNED
);
2428 wide_int rh_wlb
= wide_int::from (rh_lb
, wi::get_precision (rh_lb
) * 2, s
);
2429 wide_int rh_wub
= wide_int::from (rh_ub
, wi::get_precision (rh_ub
) * 2, s
);
2431 /* We don't expect a widening multiplication to be able to overflow but range
2432 calculations for multiplications are complicated. After widening the
2433 operands lets call the base class. */
2434 return op_mult
.wi_fold (r
, type
, lh_wlb
, lh_wub
, rh_wlb
, rh_wub
);
2437 class operator_div
: public cross_product_operator
2439 using range_operator::update_bitmask
;
2441 operator_div (tree_code div_kind
) { m_code
= div_kind
; }
2442 virtual void wi_fold (irange
&r
, tree type
,
2443 const wide_int
&lh_lb
,
2444 const wide_int
&lh_ub
,
2445 const wide_int
&rh_lb
,
2446 const wide_int
&rh_ub
) const final override
;
2447 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
2448 const wide_int
&, const wide_int
&)
2449 const final override
;
2450 void update_bitmask (irange
&r
, const irange
&lh
, const irange
&rh
) const
2451 { update_known_bitmask (r
, m_code
, lh
, rh
); }
2456 static operator_div
op_trunc_div (TRUNC_DIV_EXPR
);
2457 static operator_div
op_floor_div (FLOOR_DIV_EXPR
);
2458 static operator_div
op_round_div (ROUND_DIV_EXPR
);
2459 static operator_div
op_ceil_div (CEIL_DIV_EXPR
);
2462 operator_div::wi_op_overflows (wide_int
&res
, tree type
,
2463 const wide_int
&w0
, const wide_int
&w1
) const
2468 wi::overflow_type overflow
= wi::OVF_NONE
;
2469 signop sign
= TYPE_SIGN (type
);
2473 case EXACT_DIV_EXPR
:
2474 case TRUNC_DIV_EXPR
:
2475 res
= wi::div_trunc (w0
, w1
, sign
, &overflow
);
2477 case FLOOR_DIV_EXPR
:
2478 res
= wi::div_floor (w0
, w1
, sign
, &overflow
);
2480 case ROUND_DIV_EXPR
:
2481 res
= wi::div_round (w0
, w1
, sign
, &overflow
);
2484 res
= wi::div_ceil (w0
, w1
, sign
, &overflow
);
2490 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
2492 // For division, the only case is -INF / -1 = +INF.
2493 res
= wi::max_value (w0
.get_precision (), sign
);
2500 operator_div::wi_fold (irange
&r
, tree type
,
2501 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2502 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2504 const wide_int dividend_min
= lh_lb
;
2505 const wide_int dividend_max
= lh_ub
;
2506 const wide_int divisor_min
= rh_lb
;
2507 const wide_int divisor_max
= rh_ub
;
2508 signop sign
= TYPE_SIGN (type
);
2509 unsigned prec
= TYPE_PRECISION (type
);
2510 wide_int extra_min
, extra_max
;
2512 // If we know we won't divide by zero, just do the division.
2513 if (!wi_includes_zero_p (type
, divisor_min
, divisor_max
))
2515 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2516 divisor_min
, divisor_max
);
2520 // If we're definitely dividing by zero, there's nothing to do.
2521 if (wi_zero_p (type
, divisor_min
, divisor_max
))
2527 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2528 // skip any division by zero.
2530 // First divide by the negative numbers, if any.
2531 if (wi::neg_p (divisor_min
, sign
))
2532 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
2533 divisor_min
, wi::minus_one (prec
));
2537 // Then divide by the non-zero positive numbers, if any.
2538 if (wi::gt_p (divisor_max
, wi::zero (prec
), sign
))
2541 wi_cross_product (tmp
, type
, dividend_min
, dividend_max
,
2542 wi::one (prec
), divisor_max
);
2545 // We shouldn't still have undefined here.
2546 gcc_checking_assert (!r
.undefined_p ());
2550 class operator_exact_divide
: public operator_div
2552 using range_operator::op1_range
;
2554 operator_exact_divide () : operator_div (EXACT_DIV_EXPR
) { }
2555 virtual bool op1_range (irange
&r
, tree type
,
2558 relation_trio
) const;
2563 operator_exact_divide::op1_range (irange
&r
, tree type
,
2566 relation_trio
) const
2568 if (lhs
.undefined_p ())
2571 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
2572 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2573 // We wont bother trying to enumerate all the in between stuff :-P
2574 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2575 // the time however.
2576 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2577 if (op2
.singleton_p (offset
) && offset
!= 0)
2578 return range_op_handler (MULT_EXPR
).fold_range (r
, type
, lhs
, op2
);
2583 class operator_lshift
: public cross_product_operator
2585 using range_operator::fold_range
;
2586 using range_operator::op1_range
;
2587 using range_operator::update_bitmask
;
2589 virtual bool op1_range (irange
&r
, tree type
, const irange
&lhs
,
2590 const irange
&op2
, relation_trio rel
= TRIO_VARYING
)
2591 const final override
;
2592 virtual bool fold_range (irange
&r
, tree type
, const irange
&op1
,
2593 const irange
&op2
, relation_trio rel
= TRIO_VARYING
)
2594 const final override
;
2596 virtual void wi_fold (irange
&r
, tree type
,
2597 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2598 const wide_int
&rh_lb
,
2599 const wide_int
&rh_ub
) const final override
;
2600 virtual bool wi_op_overflows (wide_int
&res
,
2603 const wide_int
&) const final override
;
2604 void update_bitmask (irange
&r
, const irange
&lh
,
2605 const irange
&rh
) const final override
2606 { update_known_bitmask (r
, LSHIFT_EXPR
, lh
, rh
); }
2607 // Check compatibility of LHS and op1.
2608 bool operand_check_p (tree t1
, tree t2
, tree
) const final override
2609 { return range_compatible_p (t1
, t2
); }
2612 class operator_rshift
: public cross_product_operator
2614 using range_operator::fold_range
;
2615 using range_operator::op1_range
;
2616 using range_operator::lhs_op1_relation
;
2617 using range_operator::update_bitmask
;
2619 virtual bool fold_range (irange
&r
, tree type
, const irange
&op1
,
2620 const irange
&op2
, relation_trio rel
= TRIO_VARYING
)
2621 const final override
;
2622 virtual void wi_fold (irange
&r
, tree type
,
2623 const wide_int
&lh_lb
,
2624 const wide_int
&lh_ub
,
2625 const wide_int
&rh_lb
,
2626 const wide_int
&rh_ub
) const final override
;
2627 virtual bool wi_op_overflows (wide_int
&res
,
2630 const wide_int
&w1
) const final override
;
2631 virtual bool op1_range (irange
&, tree type
, const irange
&lhs
,
2632 const irange
&op2
, relation_trio rel
= TRIO_VARYING
)
2633 const final override
;
2634 virtual relation_kind
lhs_op1_relation (const irange
&lhs
, const irange
&op1
,
2635 const irange
&op2
, relation_kind rel
)
2636 const final override
;
2637 void update_bitmask (irange
&r
, const irange
&lh
,
2638 const irange
&rh
) const final override
2639 { update_known_bitmask (r
, RSHIFT_EXPR
, lh
, rh
); }
2640 // Check compatibility of LHS and op1.
2641 bool operand_check_p (tree t1
, tree t2
, tree
) const final override
2642 { return range_compatible_p (t1
, t2
); }
// NOTE(review): lossy extraction of GCC range-op.cc — the `relation_kind`
// return-type line, the `const irange &op1` / `const irange &op2` parameter
// lines, the braces, and the guarded success return appear to have been
// dropped (upstream line numbers jump 2647->2650 and 2655->2657).
// TODO: restore from upstream before building.
2647 operator_rshift::lhs_op1_relation (const irange
&lhs ATTRIBUTE_UNUSED
,
2650 relation_kind
) const
2652 // If both operands range are >= 0, then the LHS <= op1.
2653 if (!op1
.undefined_p () && !op2
.undefined_p ()
2654 && wi::ge_p (op1
.lower_bound (), 0, TYPE_SIGN (op1
.type ()))
2655 && wi::ge_p (op2
.lower_bound (), 0, TYPE_SIGN (op2
.type ())))
// NOTE(review): the taken-branch return (presumably the LE relation,
// per the comment above) is missing here — only the fall-through
// VARYING return survived extraction. Confirm against upstream.
2657 return VREL_VARYING
;
2661 operator_lshift::fold_range (irange
&r
, tree type
,
2664 relation_trio rel
) const
2666 int_range_max shift_range
;
2667 if (!get_shift_range (shift_range
, type
, op2
))
2669 if (op2
.undefined_p ())
2676 // Transform left shifts by constants into multiplies.
2677 if (shift_range
.singleton_p ())
2679 unsigned shift
= shift_range
.lower_bound ().to_uhwi ();
2680 wide_int tmp
= wi::set_bit_in_zero (shift
, TYPE_PRECISION (type
));
2681 int_range
<1> mult (type
, tmp
, tmp
);
2683 // Force wrapping multiplication.
2684 bool saved_flag_wrapv
= flag_wrapv
;
2685 bool saved_flag_wrapv_pointer
= flag_wrapv_pointer
;
2687 flag_wrapv_pointer
= 1;
2688 bool b
= op_mult
.fold_range (r
, type
, op1
, mult
);
2689 flag_wrapv
= saved_flag_wrapv
;
2690 flag_wrapv_pointer
= saved_flag_wrapv_pointer
;
2694 // Otherwise, invoke the generic fold routine.
2695 return range_operator::fold_range (r
, type
, op1
, shift_range
, rel
);
2699 operator_lshift::wi_fold (irange
&r
, tree type
,
2700 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2701 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2703 signop sign
= TYPE_SIGN (type
);
2704 unsigned prec
= TYPE_PRECISION (type
);
2705 int overflow_pos
= sign
== SIGNED
? prec
- 1 : prec
;
2706 int bound_shift
= overflow_pos
- rh_ub
.to_shwi ();
2707 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2708 // overflow. However, for that to happen, rh.max needs to be zero,
2709 // which means rh is a singleton range of zero, which means we simply return
2710 // [lh_lb, lh_ub] as the range.
2711 if (wi::eq_p (rh_ub
, rh_lb
) && wi::eq_p (rh_ub
, 0))
2713 r
= int_range
<2> (type
, lh_lb
, lh_ub
);
2717 wide_int bound
= wi::set_bit_in_zero (bound_shift
, prec
);
2718 wide_int complement
= ~(bound
- 1);
2719 wide_int low_bound
, high_bound
;
2720 bool in_bounds
= false;
2722 if (sign
== UNSIGNED
)
2725 high_bound
= complement
;
2726 if (wi::ltu_p (lh_ub
, low_bound
))
2728 // [5, 6] << [1, 2] == [10, 24].
2729 // We're shifting out only zeroes, the value increases
2733 else if (wi::ltu_p (high_bound
, lh_lb
))
2735 // [0xffffff00, 0xffffffff] << [1, 2]
2736 // == [0xfffffc00, 0xfffffffe].
2737 // We're shifting out only ones, the value decreases
2744 // [-1, 1] << [1, 2] == [-4, 4]
2745 low_bound
= complement
;
2747 if (wi::lts_p (lh_ub
, high_bound
)
2748 && wi::lts_p (low_bound
, lh_lb
))
2750 // For non-negative numbers, we're shifting out only zeroes,
2751 // the value increases monotonically. For negative numbers,
2752 // we're shifting out only ones, the value decreases
2759 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2761 r
.set_varying (type
);
2765 operator_lshift::wi_op_overflows (wide_int
&res
, tree type
,
2766 const wide_int
&w0
, const wide_int
&w1
) const
2768 signop sign
= TYPE_SIGN (type
);
2771 // It's unclear from the C standard whether shifts can overflow.
2772 // The following code ignores overflow; perhaps a C standard
2773 // interpretation ruling is needed.
2774 res
= wi::rshift (w0
, -w1
, sign
);
2777 res
= wi::lshift (w0
, w1
);
2782 operator_lshift::op1_range (irange
&r
,
2786 relation_trio
) const
2788 if (lhs
.undefined_p ())
2791 if (!contains_zero_p (lhs
))
2792 r
.set_nonzero (type
);
2794 r
.set_varying (type
);
2797 if (op2
.singleton_p (shift
))
2799 if (wi::lt_p (shift
, 0, SIGNED
))
2801 if (wi::ge_p (shift
, wi::uhwi (TYPE_PRECISION (type
),
2802 TYPE_PRECISION (op2
.type ())),
2811 // Work completely in unsigned mode to start.
2813 int_range_max tmp_range
;
2814 if (TYPE_SIGN (type
) == SIGNED
)
2816 int_range_max tmp
= lhs
;
2817 utype
= unsigned_type_for (type
);
2818 range_cast (tmp
, utype
);
2819 op_rshift
.fold_range (tmp_range
, utype
, tmp
, op2
);
2822 op_rshift
.fold_range (tmp_range
, utype
, lhs
, op2
);
2824 // Start with ranges which can produce the LHS by right shifting the
2825 // result by the shift amount.
2826 // ie [0x08, 0xF0] = op1 << 2 will start with
2827 // [00001000, 11110000] = op1 << 2
2828 // [0x02, 0x4C] aka [00000010, 00111100]
2830 // Then create a range from the LB with the least significant upper bit
2831 // set, to the upper bound with all the bits set.
2832 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2834 // Ideally we do this for each subrange, but just lump them all for now.
2835 unsigned low_bits
= TYPE_PRECISION (utype
) - shift
.to_uhwi ();
2836 wide_int up_mask
= wi::mask (low_bits
, true, TYPE_PRECISION (utype
));
2837 wide_int new_ub
= wi::bit_or (up_mask
, tmp_range
.upper_bound ());
2838 wide_int new_lb
= wi::set_bit (tmp_range
.lower_bound (), low_bits
);
2839 int_range
<2> fill_range (utype
, new_lb
, new_ub
);
2840 tmp_range
.union_ (fill_range
);
2843 range_cast (tmp_range
, type
);
2845 r
.intersect (tmp_range
);
2849 return !r
.varying_p ();
2853 operator_rshift::op1_range (irange
&r
,
2857 relation_trio
) const
2859 if (lhs
.undefined_p ())
2862 if (op2
.singleton_p (shift
))
2864 // Ignore nonsensical shifts.
2865 unsigned prec
= TYPE_PRECISION (type
);
2866 if (wi::ge_p (shift
,
2867 wi::uhwi (prec
, TYPE_PRECISION (op2
.type ())),
2876 // Folding the original operation may discard some impossible
2877 // ranges from the LHS.
2878 int_range_max lhs_refined
;
2879 op_rshift
.fold_range (lhs_refined
, type
, int_range
<1> (type
), op2
);
2880 lhs_refined
.intersect (lhs
);
2881 if (lhs_refined
.undefined_p ())
2886 int_range_max
shift_range (op2
.type (), shift
, shift
);
2887 int_range_max lb
, ub
;
2888 op_lshift
.fold_range (lb
, type
, lhs_refined
, shift_range
);
2890 // 0000 0111 = OP1 >> 3
2892 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2893 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2894 // right hand side (0x07).
2895 wide_int mask
= wi::bit_not (wi::lshift (wi::minus_one (prec
), shift
));
2896 int_range_max
mask_range (type
,
2897 wi::zero (TYPE_PRECISION (type
)),
2899 op_plus
.fold_range (ub
, type
, lb
, mask_range
);
2902 if (!contains_zero_p (lhs_refined
))
2904 mask_range
.invert ();
2905 r
.intersect (mask_range
);
// Decide whether shifting W0 right by W1 overflows, storing the shifted
// value in RES.
// NOTE(review): lossy extraction — the `bool` return-type line, the
// `tree type` / `const wide_int &w0` parameter lines, braces, the
// negative-shift guard (`if (wi::neg_p (w1))` per upstream numbering gap
// 2918->2920), and the `return false;` tail were dropped. TODO: restore
// from upstream.
2913 operator_rshift::wi_op_overflows (wide_int
&res
,
2916 const wide_int
&w1
) const
2918 signop sign
= TYPE_SIGN (type
);
// A negative shift count is treated as a left shift by -W1 (guard line
// missing here — see note above).
2920 res
= wi::lshift (w0
, -w1
);
2923 // It's unclear from the C standard whether shifts can overflow.
2924 // The following code ignores overflow; perhaps a C standard
2925 // interpretation ruling is needed.
2926 res
= wi::rshift (w0
, w1
, sign
);
// Fold a right-shift: sanitize the shift amount via get_shift_range, then
// defer to the generic range_operator folder.
// NOTE(review): lossy extraction — the `bool` return-type line, the
// `const irange &op1/&op2` parameter lines, braces, and the bodies of the
// two `if` statements (the UNDEFINED/VARYING early-outs, per numbering
// gaps 2938->2940->2947) were dropped. TODO: restore from upstream.
2932 operator_rshift::fold_range (irange
&r
, tree type
,
2935 relation_trio rel
) const
2937 int_range_max shift
;
2938 if (!get_shift_range (shift
, type
, op2
))
// NOTE(review): body of this guard (and of the next) is missing.
2940 if (op2
.undefined_p ())
2947 return range_operator::fold_range (r
, type
, op1
, shift
, rel
);
// Compute the right-shift of [LH_LB, LH_UB] by [RH_LB, RH_UB] by taking the
// cross product of the endpoint combinations.
// NOTE(review): lossy extraction — the `void` return-type line and the
// surrounding braces were dropped. TODO: restore from upstream.
2951 operator_rshift::wi_fold (irange
&r
, tree type
,
2952 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2953 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
2955 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
2959 // Add a partial equivalence between the LHS and op1 for casts.
2962 operator_cast::lhs_op1_relation (const irange
&lhs
,
2964 const irange
&op2 ATTRIBUTE_UNUSED
,
2965 relation_kind
) const
2967 if (lhs
.undefined_p () || op1
.undefined_p ())
2968 return VREL_VARYING
;
2969 unsigned lhs_prec
= TYPE_PRECISION (lhs
.type ());
2970 unsigned op1_prec
= TYPE_PRECISION (op1
.type ());
2971 // If the result gets sign extended into a larger type check first if this
2972 // qualifies as a partial equivalence.
2973 if (TYPE_SIGN (op1
.type ()) == SIGNED
&& lhs_prec
> op1_prec
)
2975 // If the result is sign extended, and the LHS is larger than op1,
2976 // check if op1's range can be negative as the sign extension will
2977 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2978 int_range
<3> negs
= range_negatives (op1
.type ());
2979 negs
.intersect (op1
);
2980 if (!negs
.undefined_p ())
2981 return VREL_VARYING
;
2984 unsigned prec
= MIN (lhs_prec
, op1_prec
);
2985 return bits_to_pe (prec
);
2988 // Return TRUE if casting from INNER to OUTER is a truncating cast.
// NOTE(review): lossy extraction — the `bool` return-type line and the
// surrounding braces were dropped. TODO: restore from upstream.
2991 operator_cast::truncating_cast_p (const irange
&inner
,
2992 const irange
&outer
) const
// A cast truncates when the destination type is strictly narrower than
// the source type.
2994 return TYPE_PRECISION (outer
.type ()) < TYPE_PRECISION (inner
.type ());
2997 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
// NOTE(review): lossy extraction — the `bool` return-type line and the
// surrounding braces were dropped. TODO: restore from upstream.
3000 operator_cast::inside_domain_p (const wide_int
&min
,
3001 const wide_int
&max
,
3002 const irange
&range
) const
// Domain endpoints of RANGE's type (may be tighter than the underlying
// precision, e.g. under -fstrict-enums).
3004 wide_int domain_min
= irange_val_min (range
.type ());
3005 wide_int domain_max
= irange_val_max (range
.type ());
3006 signop domain_sign
= TYPE_SIGN (range
.type ());
// Both endpoints must lie within [domain_min, domain_max] under the
// type's signedness.
3007 return (wi::le_p (min
, domain_max
, domain_sign
)
3008 && wi::le_p (max
, domain_max
, domain_sign
)
3009 && wi::ge_p (min
, domain_min
, domain_sign
)
3010 && wi::ge_p (max
, domain_min
, domain_sign
));
3014 // Helper for fold_range which work on a pair at a time.
3017 operator_cast::fold_pair (irange
&r
, unsigned index
,
3018 const irange
&inner
,
3019 const irange
&outer
) const
3021 tree inner_type
= inner
.type ();
3022 tree outer_type
= outer
.type ();
3023 signop inner_sign
= TYPE_SIGN (inner_type
);
3024 unsigned outer_prec
= TYPE_PRECISION (outer_type
);
3026 // check to see if casting from INNER to OUTER is a conversion that
3027 // fits in the resulting OUTER type.
3028 wide_int inner_lb
= inner
.lower_bound (index
);
3029 wide_int inner_ub
= inner
.upper_bound (index
);
3030 if (truncating_cast_p (inner
, outer
))
3032 // We may be able to accommodate a truncating cast if the
3033 // resulting range can be represented in the target type...
3034 if (wi::rshift (wi::sub (inner_ub
, inner_lb
),
3035 wi::uhwi (outer_prec
, TYPE_PRECISION (inner
.type ())),
3038 r
.set_varying (outer_type
);
3042 // ...but we must still verify that the final range fits in the
3043 // domain. This catches -fstrict-enum restrictions where the domain
3044 // range is smaller than what fits in the underlying type.
3045 wide_int min
= wide_int::from (inner_lb
, outer_prec
, inner_sign
);
3046 wide_int max
= wide_int::from (inner_ub
, outer_prec
, inner_sign
);
3047 if (inside_domain_p (min
, max
, outer
))
3048 create_possibly_reversed_range (r
, outer_type
, min
, max
);
3050 r
.set_varying (outer_type
);
3055 operator_cast::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3056 const irange
&inner
,
3057 const irange
&outer
,
3058 relation_trio
) const
3060 if (empty_range_varying (r
, type
, inner
, outer
))
3063 gcc_checking_assert (outer
.varying_p ());
3064 gcc_checking_assert (inner
.num_pairs () > 0);
3066 // Avoid a temporary by folding the first pair directly into the result.
3067 fold_pair (r
, 0, inner
, outer
);
3069 // Then process any additional pairs by unioning with their results.
3070 for (unsigned x
= 1; x
< inner
.num_pairs (); ++x
)
3073 fold_pair (tmp
, x
, inner
, outer
);
3079 update_bitmask (r
, inner
, outer
);
// Refine R's known-bits mask for a conversion using the generic
// bit-CCP-based helper with CONVERT_EXPR semantics.
// NOTE(review): lossy extraction — the `void` return-type line and the
// surrounding braces were dropped. TODO: restore from upstream.
3084 operator_cast::update_bitmask (irange
&r
, const irange
&lh
,
3085 const irange
&rh
) const
3087 update_known_bitmask (r
, CONVERT_EXPR
, lh
, rh
);
3091 operator_cast::op1_range (irange
&r
, tree type
,
3094 relation_trio
) const
3096 if (lhs
.undefined_p ())
3098 tree lhs_type
= lhs
.type ();
3099 gcc_checking_assert (types_compatible_p (op2
.type(), type
));
3101 // If we are calculating a pointer, shortcut to what we really care about.
3102 if (POINTER_TYPE_P (type
))
3104 // Conversion from other pointers or a constant (including 0/NULL)
3105 // are straightforward.
3106 if (POINTER_TYPE_P (lhs
.type ())
3107 || (lhs
.singleton_p ()
3108 && TYPE_PRECISION (lhs
.type ()) >= TYPE_PRECISION (type
)))
3111 range_cast (r
, type
);
3115 // If the LHS is not a pointer nor a singleton, then it is
3116 // either VARYING or non-zero.
3117 if (!lhs
.undefined_p () && !contains_zero_p (lhs
))
3118 r
.set_nonzero (type
);
3120 r
.set_varying (type
);
3126 if (truncating_cast_p (op2
, lhs
))
3128 if (lhs
.varying_p ())
3129 r
.set_varying (type
);
3132 // We want to insert the LHS as an unsigned value since it
3133 // would not trigger the signed bit of the larger type.
3134 int_range_max converted_lhs
= lhs
;
3135 range_cast (converted_lhs
, unsigned_type_for (lhs_type
));
3136 range_cast (converted_lhs
, type
);
3137 // Start by building the positive signed outer range for the type.
3138 wide_int lim
= wi::set_bit_in_zero (TYPE_PRECISION (lhs_type
),
3139 TYPE_PRECISION (type
));
3140 create_possibly_reversed_range (r
, type
, lim
,
3141 wi::max_value (TYPE_PRECISION (type
),
3143 // For the signed part, we need to simply union the 2 ranges now.
3144 r
.union_ (converted_lhs
);
3146 // Create maximal negative number outside of LHS bits.
3147 lim
= wi::mask (TYPE_PRECISION (lhs_type
), true,
3148 TYPE_PRECISION (type
));
3149 // Add this to the unsigned LHS range(s).
3150 int_range_max
lim_range (type
, lim
, lim
);
3151 int_range_max lhs_neg
;
3152 range_op_handler (PLUS_EXPR
).fold_range (lhs_neg
, type
,
3153 converted_lhs
, lim_range
);
3154 // lhs_neg now has all the negative versions of the LHS.
3155 // Now union in all the values from SIGNED MIN (0x80000) to
3156 // lim-1 in order to fill in all the ranges with the upper
3159 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
3160 // we don't need to create a range from min to lim-1
3161 // calculate neg range traps trying to create [lim, lim - 1].
3162 wide_int min_val
= wi::min_value (TYPE_PRECISION (type
), SIGNED
);
3165 int_range_max
neg (type
,
3166 wi::min_value (TYPE_PRECISION (type
),
3169 lhs_neg
.union_ (neg
);
3171 // And finally, munge the signed and unsigned portions.
3174 // And intersect with any known value passed in the extra operand.
3180 if (TYPE_PRECISION (lhs_type
) == TYPE_PRECISION (type
))
3184 // The cast is not truncating, and the range is restricted to
3185 // the range of the RHS by this assignment.
3187 // Cast the range of the RHS to the type of the LHS.
3188 fold_range (tmp
, lhs_type
, int_range
<1> (type
), int_range
<1> (lhs_type
));
3189 // Intersect this with the LHS range will produce the range,
3190 // which will be cast to the RHS type before returning.
3191 tmp
.intersect (lhs
);
3194 // Cast the calculated range to the type of the RHS.
3195 fold_range (r
, type
, tmp
, int_range
<1> (type
));
3200 class operator_logical_and
: public range_operator
3202 using range_operator::fold_range
;
3203 using range_operator::op1_range
;
3204 using range_operator::op2_range
;
3206 virtual bool fold_range (irange
&r
, tree type
,
3209 relation_trio rel
= TRIO_VARYING
) const;
3210 virtual bool op1_range (irange
&r
, tree type
,
3213 relation_trio rel
= TRIO_VARYING
) const;
3214 virtual bool op2_range (irange
&r
, tree type
,
3217 relation_trio rel
= TRIO_VARYING
) const;
3218 // Check compatibility of all operands.
3219 bool operand_check_p (tree t1
, tree t2
, tree t3
) const final override
3220 { return range_compatible_p (t1
, t2
) && range_compatible_p (t1
, t3
); }
3224 operator_logical_and::fold_range (irange
&r
, tree type
,
3227 relation_trio
) const
3229 if (empty_range_varying (r
, type
, lh
, rh
))
3232 // Precision of LHS and both operands must match.
3233 if (TYPE_PRECISION (lh
.type ()) != TYPE_PRECISION (type
)
3234 || TYPE_PRECISION (type
) != TYPE_PRECISION (rh
.type ()))
3237 // 0 && anything is 0.
3238 if ((wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (lh
.upper_bound (), 0))
3239 || (wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (rh
.upper_bound (), 0)))
3240 r
= range_false (type
);
3241 else if (contains_zero_p (lh
) || contains_zero_p (rh
))
3242 // To reach this point, there must be a logical 1 on each side, and
3243 // the only remaining question is whether there is a zero or not.
3244 r
= range_true_and_false (type
);
3246 r
= range_true (type
);
3251 operator_logical_and::op1_range (irange
&r
, tree type
,
3253 const irange
&op2 ATTRIBUTE_UNUSED
,
3254 relation_trio
) const
3256 switch (get_bool_state (r
, lhs
, type
))
3259 // A true result means both sides of the AND must be true.
3260 r
= range_true (type
);
3263 // Any other result means only one side has to be false, the
3264 // other side can be anything. So we cannot be sure of any
3266 r
= range_true_and_false (type
);
// Logical AND is symmetric in its operands, so solving for op2 simply
// reuses the op1 solver.
// NOTE(review): lossy extraction — the `bool` return-type line, the
// `const irange &lhs/&op1` parameter lines, and the braces were dropped.
// TODO: restore from upstream.
3273 operator_logical_and::op2_range (irange
&r
, tree type
,
3276 relation_trio
) const
3278 return operator_logical_and::op1_range (r
, type
, lhs
, op1
);
// Refine R's known-bits mask for a bitwise AND via the generic helper.
// NOTE(review): lossy extraction — the `void` return-type line and the
// surrounding braces were dropped. TODO: restore from upstream.
3283 operator_bitwise_and::update_bitmask (irange
&r
, const irange
&lh
,
3284 const irange
&rh
) const
3286 update_known_bitmask (r
, BIT_AND_EXPR
, lh
, rh
);
3289 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3290 // by considering the number of leading redundant sign bit copies.
3291 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3292 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
3294 wi_optimize_signed_bitwise_op (irange
&r
, tree type
,
3295 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3296 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3298 int lh_clrsb
= MIN (wi::clrsb (lh_lb
), wi::clrsb (lh_ub
));
3299 int rh_clrsb
= MIN (wi::clrsb (rh_lb
), wi::clrsb (rh_ub
));
3300 int new_clrsb
= MIN (lh_clrsb
, rh_clrsb
);
3303 int type_prec
= TYPE_PRECISION (type
);
3304 int rprec
= (type_prec
- new_clrsb
) - 1;
3305 value_range_with_overflow (r
, type
,
3306 wi::mask (rprec
, true, type_prec
),
3307 wi::mask (rprec
, false, type_prec
));
3311 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
3315 operator_bitwise_and::lhs_op1_relation (const irange
&lhs
,
3318 relation_kind
) const
3320 if (lhs
.undefined_p () || op1
.undefined_p () || op2
.undefined_p ())
3321 return VREL_VARYING
;
3322 if (!op2
.singleton_p ())
3323 return VREL_VARYING
;
3324 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
3325 int prec1
= TYPE_PRECISION (op1
.type ());
3326 int prec2
= TYPE_PRECISION (op2
.type ());
3328 wide_int mask
= op2
.lower_bound ();
3329 if (wi::eq_p (mask
, wi::mask (8, false, prec2
)))
3331 else if (wi::eq_p (mask
, wi::mask (16, false, prec2
)))
3333 else if (wi::eq_p (mask
, wi::mask (32, false, prec2
)))
3335 else if (wi::eq_p (mask
, wi::mask (64, false, prec2
)))
3337 return bits_to_pe (MIN (prec1
, mask_prec
));
3340 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3341 // possible. Basically, see if we can optimize:
3345 // [LB op Z, UB op Z]
3347 // If the optimization was successful, accumulate the range in R and
3351 wi_optimize_and_or (irange
&r
,
3352 enum tree_code code
,
3354 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3355 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
3357 // Calculate the singleton mask among the ranges, if any.
3358 wide_int lower_bound
, upper_bound
, mask
;
3359 if (wi::eq_p (rh_lb
, rh_ub
))
3362 lower_bound
= lh_lb
;
3363 upper_bound
= lh_ub
;
3365 else if (wi::eq_p (lh_lb
, lh_ub
))
3368 lower_bound
= rh_lb
;
3369 upper_bound
= rh_ub
;
3374 // If Z is a constant which (for op | its bitwise not) has n
3375 // consecutive least significant bits cleared followed by m 1
3376 // consecutive bits set immediately above it and either
3377 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
3379 // The least significant n bits of all the values in the range are
3380 // cleared or set, the m bits above it are preserved and any bits
3381 // above these are required to be the same for all values in the
3385 if (code
== BIT_IOR_EXPR
)
3387 if (wi::eq_p (w
, 0))
3388 n
= w
.get_precision ();
3392 w
= ~(w
| wi::mask (n
, false, w
.get_precision ()));
3393 if (wi::eq_p (w
, 0))
3394 m
= w
.get_precision () - n
;
3396 m
= wi::ctz (w
) - n
;
3398 wide_int new_mask
= wi::mask (m
+ n
, true, w
.get_precision ());
3399 if ((new_mask
& lower_bound
) != (new_mask
& upper_bound
))
3402 wide_int res_lb
, res_ub
;
3403 if (code
== BIT_AND_EXPR
)
3405 res_lb
= wi::bit_and (lower_bound
, mask
);
3406 res_ub
= wi::bit_and (upper_bound
, mask
);
3408 else if (code
== BIT_IOR_EXPR
)
3410 res_lb
= wi::bit_or (lower_bound
, mask
);
3411 res_ub
= wi::bit_or (upper_bound
, mask
);
3415 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
3417 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3418 if (code
== BIT_IOR_EXPR
&& wi::ne_p (mask
, 0))
3421 tmp
.set_nonzero (type
);
3427 // For range [LB, UB] compute two wide_int bit masks.
3429 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3430 // for all numbers in the range the bit is 0, otherwise it might be 0
3433 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3434 // for all numbers in the range the bit is 1, otherwise it might be 0
3438 wi_set_zero_nonzero_bits (tree type
,
3439 const wide_int
&lb
, const wide_int
&ub
,
3440 wide_int
&maybe_nonzero
,
3441 wide_int
&mustbe_nonzero
)
3443 signop sign
= TYPE_SIGN (type
);
3445 if (wi::eq_p (lb
, ub
))
3446 maybe_nonzero
= mustbe_nonzero
= lb
;
3447 else if (wi::ge_p (lb
, 0, sign
) || wi::lt_p (ub
, 0, sign
))
3449 wide_int xor_mask
= lb
^ ub
;
3450 maybe_nonzero
= lb
| ub
;
3451 mustbe_nonzero
= lb
& ub
;
3454 wide_int mask
= wi::mask (wi::floor_log2 (xor_mask
), false,
3455 maybe_nonzero
.get_precision ());
3456 maybe_nonzero
= maybe_nonzero
| mask
;
3457 mustbe_nonzero
= wi::bit_and_not (mustbe_nonzero
, mask
);
3462 maybe_nonzero
= wi::minus_one (lb
.get_precision ());
3463 mustbe_nonzero
= wi::zero (lb
.get_precision ());
3468 operator_bitwise_and::wi_fold (irange
&r
, tree type
,
3469 const wide_int
&lh_lb
,
3470 const wide_int
&lh_ub
,
3471 const wide_int
&rh_lb
,
3472 const wide_int
&rh_ub
) const
3474 if (wi_optimize_and_or (r
, BIT_AND_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3477 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3478 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3479 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3480 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3481 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3482 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3484 wide_int new_lb
= mustbe_nonzero_lh
& mustbe_nonzero_rh
;
3485 wide_int new_ub
= maybe_nonzero_lh
& maybe_nonzero_rh
;
3486 signop sign
= TYPE_SIGN (type
);
3487 unsigned prec
= TYPE_PRECISION (type
);
3488 // If both input ranges contain only negative values, we can
3489 // truncate the result range maximum to the minimum of the
3490 // input range maxima.
3491 if (wi::lt_p (lh_ub
, 0, sign
) && wi::lt_p (rh_ub
, 0, sign
))
3493 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3494 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3496 // If either input range contains only non-negative values
3497 // we can truncate the result range maximum to the respective
3498 // maximum of the input range.
3499 if (wi::ge_p (lh_lb
, 0, sign
))
3500 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
3501 if (wi::ge_p (rh_lb
, 0, sign
))
3502 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
3503 // PR68217: In case of signed & sign-bit-CST should
3504 // result in [-INF, 0] instead of [-INF, INF].
3505 if (wi::gt_p (new_lb
, new_ub
, sign
))
3507 wide_int sign_bit
= wi::set_bit_in_zero (prec
- 1, prec
);
3509 && ((wi::eq_p (lh_lb
, lh_ub
)
3510 && !wi::cmps (lh_lb
, sign_bit
))
3511 || (wi::eq_p (rh_lb
, rh_ub
)
3512 && !wi::cmps (rh_lb
, sign_bit
))))
3514 new_lb
= wi::min_value (prec
, sign
);
3515 new_ub
= wi::zero (prec
);
3518 // If the limits got swapped around, return varying.
3519 if (wi::gt_p (new_lb
, new_ub
,sign
))
3522 && wi_optimize_signed_bitwise_op (r
, type
,
3526 r
.set_varying (type
);
3529 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
// If LHS (the result of an AND with a mask) cannot be zero, the operand
// being solved for must be nonzero; otherwise nothing is known.
// NOTE(review): lossy extraction — the return-type/`static` line, braces,
// and the `else` keyword between the two assignments (numbering jumps
// 3536->3538) were dropped; as written the two statements would both
// execute. TODO: restore from upstream.
3533 set_nonzero_range_from_mask (irange
&r
, tree type
, const irange
&lhs
)
3535 if (lhs
.undefined_p () || contains_zero_p (lhs
))
3536 r
.set_varying (type
);
// NOTE(review): an `else` presumably belongs here — confirm upstream.
3538 r
.set_nonzero (type
);
3541 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3542 (otherwise return VAL). VAL and MASK must be zero-extended for
3543 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3544 (to transform signed values into unsigned) and at the end xor
3548 masked_increment (const wide_int
&val_in
, const wide_int
&mask
,
3549 const wide_int
&sgnbit
, unsigned int prec
)
3551 wide_int bit
= wi::one (prec
), res
;
3554 wide_int val
= val_in
^ sgnbit
;
3555 for (i
= 0; i
< prec
; i
++, bit
+= bit
)
3558 if ((res
& bit
) == 0)
3561 res
= wi::bit_and_not (val
+ bit
, res
);
3563 if (wi::gtu_p (res
, val
))
3564 return res
^ sgnbit
;
3566 return val
^ sgnbit
;
3569 // This was shamelessly stolen from register_edge_assert_for_2 and
3570 // adjusted to work with iranges.
3573 operator_bitwise_and::simple_op1_range_solver (irange
&r
, tree type
,
3575 const irange
&op2
) const
3577 if (!op2
.singleton_p ())
3579 set_nonzero_range_from_mask (r
, type
, lhs
);
3582 unsigned int nprec
= TYPE_PRECISION (type
);
3583 wide_int cst2v
= op2
.lower_bound ();
3584 bool cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (type
));
3587 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
3589 sgnbit
= wi::zero (nprec
);
3591 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3593 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3594 // maximum unsigned value is ~0. For signed comparison, if CST2
3595 // doesn't have the most significant bit set, handle it similarly. If
3596 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3597 wide_int valv
= lhs
.lower_bound ();
3598 wide_int minv
= valv
& cst2v
, maxv
;
3599 bool we_know_nothing
= false;
3602 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3603 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3606 // If we can't determine anything on this bound, fall
3607 // through and conservatively solve for the other end point.
3608 we_know_nothing
= true;
3611 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
3612 if (we_know_nothing
)
3613 r
.set_varying (type
);
3615 create_possibly_reversed_range (r
, type
, minv
, maxv
);
3617 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3619 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3620 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3622 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3624 // For signed comparison, if CST2 doesn't have most significant bit
3625 // set, handle it similarly. If CST2 has MSB set, the maximum is
3626 // the same and minimum is INT_MIN.
3627 valv
= lhs
.upper_bound ();
3628 minv
= valv
& cst2v
;
3633 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
3636 // If we couldn't determine anything on either bound, return
3638 if (we_know_nothing
)
3646 int_range
<2> upper_bits
;
3647 create_possibly_reversed_range (upper_bits
, type
, minv
, maxv
);
3648 r
.intersect (upper_bits
);
3652 operator_bitwise_and::op1_range (irange
&r
, tree type
,
3655 relation_trio
) const
3657 if (lhs
.undefined_p ())
3659 if (types_compatible_p (type
, boolean_type_node
))
3660 return op_logical_and
.op1_range (r
, type
, lhs
, op2
);
3663 for (unsigned i
= 0; i
< lhs
.num_pairs (); ++i
)
3665 int_range_max
chunk (lhs
.type (),
3666 lhs
.lower_bound (i
),
3667 lhs
.upper_bound (i
));
3669 simple_op1_range_solver (res
, type
, chunk
, op2
);
3672 if (r
.undefined_p ())
3673 set_nonzero_range_from_mask (r
, type
, lhs
);
3675 // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
3677 if (lhs
== op2
&& lhs
.singleton_p (mask
))
3679 r
.update_bitmask (irange_bitmask (mask
, ~mask
));
3683 // For 0 = op1 & MASK, op1 is ~MASK.
3684 if (lhs
.zero_p () && op2
.singleton_p ())
3686 wide_int nz
= wi::bit_not (op2
.get_nonzero_bits ());
3687 int_range
<2> tmp (type
);
3688 tmp
.set_nonzero_bits (nz
);
// Bitwise AND is symmetric, so solving for op2 reuses the op1 solver.
// NOTE(review): lossy extraction — the `bool` return-type line, the
// `const irange &lhs/&op1` parameter lines, and the braces were dropped.
// TODO: restore from upstream.
3695 operator_bitwise_and::op2_range (irange
&r
, tree type
,
3698 relation_trio
) const
3700 return operator_bitwise_and::op1_range (r
, type
, lhs
, op1
);
3704 class operator_logical_or
: public range_operator
3706 using range_operator::fold_range
;
3707 using range_operator::op1_range
;
3708 using range_operator::op2_range
;
3710 virtual bool fold_range (irange
&r
, tree type
,
3713 relation_trio rel
= TRIO_VARYING
) const;
3714 virtual bool op1_range (irange
&r
, tree type
,
3717 relation_trio rel
= TRIO_VARYING
) const;
3718 virtual bool op2_range (irange
&r
, tree type
,
3721 relation_trio rel
= TRIO_VARYING
) const;
3722 // Check compatibility of all operands.
3723 bool operand_check_p (tree t1
, tree t2
, tree t3
) const final override
3724 { return range_compatible_p (t1
, t2
) && range_compatible_p (t1
, t3
); }
3728 operator_logical_or::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
3731 relation_trio
) const
3733 if (empty_range_varying (r
, type
, lh
, rh
))
3742 operator_logical_or::op1_range (irange
&r
, tree type
,
3744 const irange
&op2 ATTRIBUTE_UNUSED
,
3745 relation_trio
) const
3747 switch (get_bool_state (r
, lhs
, type
))
3750 // A false result means both sides of the OR must be false.
3751 r
= range_false (type
);
3754 // Any other result means only one side has to be true, the
3755 // other side can be anything. so we can't be sure of any result
3757 r
= range_true_and_false (type
);
// Logical OR is symmetric, so solving for op2 reuses the op1 solver.
// NOTE(review): lossy extraction — the `bool` return-type line, the
// `const irange &lhs/&op1` parameter lines, and the braces were dropped.
// TODO: restore from upstream.
3764 operator_logical_or::op2_range (irange
&r
, tree type
,
3767 relation_trio
) const
3769 return operator_logical_or::op1_range (r
, type
, lhs
, op1
);
// Refine R's known-bits mask for a bitwise OR via the generic helper.
// NOTE(review): lossy extraction — the `void` return-type line and the
// surrounding braces were dropped. TODO: restore from upstream.
3774 operator_bitwise_or::update_bitmask (irange
&r
, const irange
&lh
,
3775 const irange
&rh
) const
3777 update_known_bitmask (r
, BIT_IOR_EXPR
, lh
, rh
);
3781 operator_bitwise_or::wi_fold (irange
&r
, tree type
,
3782 const wide_int
&lh_lb
,
3783 const wide_int
&lh_ub
,
3784 const wide_int
&rh_lb
,
3785 const wide_int
&rh_ub
) const
3787 if (wi_optimize_and_or (r
, BIT_IOR_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
3790 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3791 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3792 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3793 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3794 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3795 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3796 wide_int new_lb
= mustbe_nonzero_lh
| mustbe_nonzero_rh
;
3797 wide_int new_ub
= maybe_nonzero_lh
| maybe_nonzero_rh
;
3798 signop sign
= TYPE_SIGN (type
);
3799 // If the input ranges contain only positive values we can
3800 // truncate the minimum of the result range to the maximum
3801 // of the input range minima.
3802 if (wi::ge_p (lh_lb
, 0, sign
)
3803 && wi::ge_p (rh_lb
, 0, sign
))
3805 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3806 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3808 // If either input range contains only negative values
3809 // we can truncate the minimum of the result range to the
3810 // respective minimum range.
3811 if (wi::lt_p (lh_ub
, 0, sign
))
3812 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
3813 if (wi::lt_p (rh_ub
, 0, sign
))
3814 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
3815 // If the limits got swapped around, return a conservative range.
3816 if (wi::gt_p (new_lb
, new_ub
, sign
))
3818 // Make sure that nonzero|X is nonzero.
3819 if (wi::gt_p (lh_lb
, 0, sign
)
3820 || wi::gt_p (rh_lb
, 0, sign
)
3821 || wi::lt_p (lh_ub
, 0, sign
)
3822 || wi::lt_p (rh_ub
, 0, sign
))
3823 r
.set_nonzero (type
);
3824 else if (sign
== SIGNED
3825 && wi_optimize_signed_bitwise_op (r
, type
,
3830 r
.set_varying (type
);
3833 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3837 operator_bitwise_or::op1_range (irange
&r
, tree type
,
3840 relation_trio
) const
3842 if (lhs
.undefined_p ())
3844 // If this is really a logical wi_fold, call that.
3845 if (types_compatible_p (type
, boolean_type_node
))
3846 return op_logical_or
.op1_range (r
, type
, lhs
, op2
);
3853 r
.set_varying (type
);
3858 operator_bitwise_or::op2_range (irange
&r
, tree type
,
3861 relation_trio
) const
3863 return operator_bitwise_or::op1_range (r
, type
, lhs
, op1
);
3867 operator_bitwise_xor::update_bitmask (irange
&r
, const irange
&lh
,
3868 const irange
&rh
) const
3870 update_known_bitmask (r
, BIT_XOR_EXPR
, lh
, rh
);
3874 operator_bitwise_xor::wi_fold (irange
&r
, tree type
,
3875 const wide_int
&lh_lb
,
3876 const wide_int
&lh_ub
,
3877 const wide_int
&rh_lb
,
3878 const wide_int
&rh_ub
) const
3880 signop sign
= TYPE_SIGN (type
);
3881 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
3882 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
3883 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
3884 maybe_nonzero_lh
, mustbe_nonzero_lh
);
3885 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
3886 maybe_nonzero_rh
, mustbe_nonzero_rh
);
3888 wide_int result_zero_bits
= ((mustbe_nonzero_lh
& mustbe_nonzero_rh
)
3889 | ~(maybe_nonzero_lh
| maybe_nonzero_rh
));
3890 wide_int result_one_bits
3891 = (wi::bit_and_not (mustbe_nonzero_lh
, maybe_nonzero_rh
)
3892 | wi::bit_and_not (mustbe_nonzero_rh
, maybe_nonzero_lh
));
3893 wide_int new_ub
= ~result_zero_bits
;
3894 wide_int new_lb
= result_one_bits
;
3896 // If the range has all positive or all negative values, the result
3897 // is better than VARYING.
3898 if (wi::lt_p (new_lb
, 0, sign
) || wi::ge_p (new_ub
, 0, sign
))
3899 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
3900 else if (sign
== SIGNED
3901 && wi_optimize_signed_bitwise_op (r
, type
,
3906 r
.set_varying (type
);
3908 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3909 if (wi::lt_p (lh_ub
, rh_lb
, sign
)
3910 || wi::lt_p (rh_ub
, lh_lb
, sign
)
3911 || wi::ne_p (result_one_bits
, 0))
3914 tmp
.set_nonzero (type
);
3920 operator_bitwise_xor::op1_op2_relation_effect (irange
&lhs_range
,
3924 relation_kind rel
) const
3926 if (rel
== VREL_VARYING
)
3929 int_range
<2> rel_range
;
3934 rel_range
.set_zero (type
);
3937 rel_range
.set_nonzero (type
);
3943 lhs_range
.intersect (rel_range
);
3948 operator_bitwise_xor::op1_range (irange
&r
, tree type
,
3951 relation_trio
) const
3953 if (lhs
.undefined_p () || lhs
.varying_p ())
3958 if (types_compatible_p (type
, boolean_type_node
))
3960 switch (get_bool_state (r
, lhs
, type
))
3963 if (op2
.varying_p ())
3964 r
.set_varying (type
);
3965 else if (op2
.zero_p ())
3966 r
= range_true (type
);
3967 // See get_bool_state for the rationale
3968 else if (op2
.undefined_p () || contains_zero_p (op2
))
3969 r
= range_true_and_false (type
);
3971 r
= range_false (type
);
3981 r
.set_varying (type
);
3986 operator_bitwise_xor::op2_range (irange
&r
, tree type
,
3989 relation_trio
) const
3991 return operator_bitwise_xor::op1_range (r
, type
, lhs
, op1
);
3994 class operator_trunc_mod
: public range_operator
3996 using range_operator::op1_range
;
3997 using range_operator::op2_range
;
3998 using range_operator::update_bitmask
;
4000 virtual void wi_fold (irange
&r
, tree type
,
4001 const wide_int
&lh_lb
,
4002 const wide_int
&lh_ub
,
4003 const wide_int
&rh_lb
,
4004 const wide_int
&rh_ub
) const;
4005 virtual bool op1_range (irange
&r
, tree type
,
4008 relation_trio
) const;
4009 virtual bool op2_range (irange
&r
, tree type
,
4012 relation_trio
) const;
4013 void update_bitmask (irange
&r
, const irange
&lh
, const irange
&rh
) const
4014 { update_known_bitmask (r
, TRUNC_MOD_EXPR
, lh
, rh
); }
4018 operator_trunc_mod::wi_fold (irange
&r
, tree type
,
4019 const wide_int
&lh_lb
,
4020 const wide_int
&lh_ub
,
4021 const wide_int
&rh_lb
,
4022 const wide_int
&rh_ub
) const
4024 wide_int new_lb
, new_ub
, tmp
;
4025 signop sign
= TYPE_SIGN (type
);
4026 unsigned prec
= TYPE_PRECISION (type
);
4028 // Mod 0 is undefined.
4029 if (wi_zero_p (type
, rh_lb
, rh_ub
))
4035 // Check for constant and try to fold.
4036 if (lh_lb
== lh_ub
&& rh_lb
== rh_ub
)
4038 wi::overflow_type ov
= wi::OVF_NONE
;
4039 tmp
= wi::mod_trunc (lh_lb
, rh_lb
, sign
, &ov
);
4040 if (ov
== wi::OVF_NONE
)
4042 r
= int_range
<2> (type
, tmp
, tmp
);
4047 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
4052 new_ub
= wi::smax (new_ub
, tmp
);
4055 if (sign
== UNSIGNED
)
4056 new_lb
= wi::zero (prec
);
4061 if (wi::gts_p (tmp
, 0))
4062 tmp
= wi::zero (prec
);
4063 new_lb
= wi::smax (new_lb
, tmp
);
4066 if (sign
== SIGNED
&& wi::neg_p (tmp
))
4067 tmp
= wi::zero (prec
);
4068 new_ub
= wi::min (new_ub
, tmp
, sign
);
4070 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
4074 operator_trunc_mod::op1_range (irange
&r
, tree type
,
4077 relation_trio
) const
4079 if (lhs
.undefined_p ())
4082 signop sign
= TYPE_SIGN (type
);
4083 unsigned prec
= TYPE_PRECISION (type
);
4084 // (a % b) >= x && x > 0 , then a >= x.
4085 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
4087 r
= value_range (type
, lhs
.lower_bound (), wi::max_value (prec
, sign
));
4090 // (a % b) <= x && x < 0 , then a <= x.
4091 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
4093 r
= value_range (type
, wi::min_value (prec
, sign
), lhs
.upper_bound ());
4100 operator_trunc_mod::op2_range (irange
&r
, tree type
,
4103 relation_trio
) const
4105 if (lhs
.undefined_p ())
4108 signop sign
= TYPE_SIGN (type
);
4109 unsigned prec
= TYPE_PRECISION (type
);
4110 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
4111 // or b > x for unsigned.
4112 if (wi::gt_p (lhs
.lower_bound (), 0, sign
))
4115 r
= value_range (type
, wi::neg (lhs
.lower_bound ()),
4116 lhs
.lower_bound (), VR_ANTI_RANGE
);
4117 else if (wi::lt_p (lhs
.lower_bound (), wi::max_value (prec
, sign
),
4119 r
= value_range (type
, lhs
.lower_bound () + 1,
4120 wi::max_value (prec
, sign
));
4125 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
4126 if (wi::lt_p (lhs
.upper_bound (), 0, sign
))
4128 if (wi::gt_p (lhs
.upper_bound (), wi::min_value (prec
, sign
), sign
))
4129 r
= value_range (type
, lhs
.upper_bound (),
4130 wi::neg (lhs
.upper_bound ()), VR_ANTI_RANGE
);
4139 class operator_logical_not
: public range_operator
4141 using range_operator::fold_range
;
4142 using range_operator::op1_range
;
4144 virtual bool fold_range (irange
&r
, tree type
,
4147 relation_trio rel
= TRIO_VARYING
) const;
4148 virtual bool op1_range (irange
&r
, tree type
,
4151 relation_trio rel
= TRIO_VARYING
) const;
4152 // Check compatibility of LHS and op1.
4153 bool operand_check_p (tree t1
, tree t2
, tree
) const final override
4154 { return range_compatible_p (t1
, t2
); }
4157 // Folding a logical NOT, oddly enough, involves doing nothing on the
4158 // forward pass through. During the initial walk backwards, the
4159 // logical NOT reversed the desired outcome on the way back, so on the
4160 // way forward all we do is pass the range forward.
4165 // to determine the TRUE branch, walking backward
4166 // if (b_3) if ([1,1])
4167 // b_3 = !b_2 [1,1] = ![0,0]
4168 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
4169 // which is the result we are looking for.. so.. pass it through.
4172 operator_logical_not::fold_range (irange
&r
, tree type
,
4174 const irange
&rh ATTRIBUTE_UNUSED
,
4175 relation_trio
) const
4177 if (empty_range_varying (r
, type
, lh
, rh
))
4181 if (!lh
.varying_p () && !lh
.undefined_p ())
4188 operator_logical_not::op1_range (irange
&r
,
4192 relation_trio
) const
4194 // Logical NOT is involutary...do it again.
4195 return fold_range (r
, type
, lhs
, op2
);
4199 operator_bitwise_not::fold_range (irange
&r
, tree type
,
4202 relation_trio
) const
4204 if (empty_range_varying (r
, type
, lh
, rh
))
4207 if (types_compatible_p (type
, boolean_type_node
))
4208 return op_logical_not
.fold_range (r
, type
, lh
, rh
);
4210 // ~X is simply -1 - X.
4211 int_range
<1> minusone (type
, wi::minus_one (TYPE_PRECISION (type
)),
4212 wi::minus_one (TYPE_PRECISION (type
)));
4213 return range_op_handler (MINUS_EXPR
).fold_range (r
, type
, minusone
, lh
);
4217 operator_bitwise_not::op1_range (irange
&r
, tree type
,
4220 relation_trio
) const
4222 if (lhs
.undefined_p ())
4224 if (types_compatible_p (type
, boolean_type_node
))
4225 return op_logical_not
.op1_range (r
, type
, lhs
, op2
);
4227 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
4228 return fold_range (r
, type
, lhs
, op2
);
4232 operator_bitwise_not::update_bitmask (irange
&r
, const irange
&lh
,
4233 const irange
&rh
) const
4235 update_known_bitmask (r
, BIT_NOT_EXPR
, lh
, rh
);
4240 operator_cst::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4242 const irange
&rh ATTRIBUTE_UNUSED
,
4243 relation_trio
) const
4250 // Determine if there is a relationship between LHS and OP1.
4253 operator_identity::lhs_op1_relation (const irange
&lhs
,
4254 const irange
&op1 ATTRIBUTE_UNUSED
,
4255 const irange
&op2 ATTRIBUTE_UNUSED
,
4256 relation_kind
) const
4258 if (lhs
.undefined_p ())
4259 return VREL_VARYING
;
4260 // Simply a copy, so they are equivalent.
4265 operator_identity::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4267 const irange
&rh ATTRIBUTE_UNUSED
,
4268 relation_trio
) const
4275 operator_identity::op1_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
4277 const irange
&op2 ATTRIBUTE_UNUSED
,
4278 relation_trio
) const
4285 class operator_unknown
: public range_operator
4287 using range_operator::fold_range
;
4289 virtual bool fold_range (irange
&r
, tree type
,
4292 relation_trio rel
= TRIO_VARYING
) const;
4296 operator_unknown::fold_range (irange
&r
, tree type
,
4297 const irange
&lh ATTRIBUTE_UNUSED
,
4298 const irange
&rh ATTRIBUTE_UNUSED
,
4299 relation_trio
) const
4301 r
.set_varying (type
);
4307 operator_abs::wi_fold (irange
&r
, tree type
,
4308 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4309 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4310 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4313 signop sign
= TYPE_SIGN (type
);
4314 unsigned prec
= TYPE_PRECISION (type
);
4316 // Pass through LH for the easy cases.
4317 if (sign
== UNSIGNED
|| wi::ge_p (lh_lb
, 0, sign
))
4319 r
= int_range
<1> (type
, lh_lb
, lh_ub
);
4323 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4325 wide_int min_value
= wi::min_value (prec
, sign
);
4326 wide_int max_value
= wi::max_value (prec
, sign
);
4327 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lh_lb
, min_value
))
4329 r
.set_varying (type
);
4333 // ABS_EXPR may flip the range around, if the original range
4334 // included negative values.
4335 if (wi::eq_p (lh_lb
, min_value
))
4337 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4338 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4339 if (wi::eq_p (lh_ub
, min_value
))
4341 r
= int_range
<1> (type
, min_value
, min_value
);
4347 min
= wi::abs (lh_lb
);
4349 if (wi::eq_p (lh_ub
, min_value
))
4352 max
= wi::abs (lh_ub
);
4354 // If the range contains zero then we know that the minimum value in the
4355 // range will be zero.
4356 if (wi::le_p (lh_lb
, 0, sign
) && wi::ge_p (lh_ub
, 0, sign
))
4358 if (wi::gt_p (min
, max
, sign
))
4360 min
= wi::zero (prec
);
4364 // If the range was reversed, swap MIN and MAX.
4365 if (wi::gt_p (min
, max
, sign
))
4366 std::swap (min
, max
);
4369 // If the new range has its limits swapped around (MIN > MAX), then
4370 // the operation caused one of them to wrap around. The only thing
4371 // we know is that the result is positive.
4372 if (wi::gt_p (min
, max
, sign
))
4374 min
= wi::zero (prec
);
4377 r
= int_range
<1> (type
, min
, max
);
4381 operator_abs::op1_range (irange
&r
, tree type
,
4384 relation_trio
) const
4386 if (empty_range_varying (r
, type
, lhs
, op2
))
4388 if (TYPE_UNSIGNED (type
))
4393 // Start with the positives because negatives are an impossible result.
4394 int_range_max positives
= range_positives (type
);
4395 positives
.intersect (lhs
);
4397 // Then add the negative of each pair:
4398 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4399 for (unsigned i
= 0; i
< positives
.num_pairs (); ++i
)
4400 r
.union_ (int_range
<1> (type
,
4401 -positives
.upper_bound (i
),
4402 -positives
.lower_bound (i
)));
4403 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4404 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4405 wide_int min_value
= wi::min_value (TYPE_PRECISION (type
), TYPE_SIGN (type
));
4406 wide_int lb
= lhs
.lower_bound ();
4407 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lb
, min_value
))
4408 r
.union_ (int_range
<2> (type
, lb
, lb
));
4413 operator_abs::update_bitmask (irange
&r
, const irange
&lh
,
4414 const irange
&rh
) const
4416 update_known_bitmask (r
, ABS_EXPR
, lh
, rh
);
4419 class operator_absu
: public range_operator
4421 using range_operator::update_bitmask
;
4423 virtual void wi_fold (irange
&r
, tree type
,
4424 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4425 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
4426 virtual void update_bitmask (irange
&r
, const irange
&lh
,
4427 const irange
&rh
) const final override
;
4431 operator_absu::wi_fold (irange
&r
, tree type
,
4432 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
4433 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
4434 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
4436 wide_int new_lb
, new_ub
;
4438 // Pass through VR0 the easy cases.
4439 if (wi::ges_p (lh_lb
, 0))
4446 new_lb
= wi::abs (lh_lb
);
4447 new_ub
= wi::abs (lh_ub
);
4449 // If the range contains zero then we know that the minimum
4450 // value in the range will be zero.
4451 if (wi::ges_p (lh_ub
, 0))
4453 if (wi::gtu_p (new_lb
, new_ub
))
4455 new_lb
= wi::zero (TYPE_PRECISION (type
));
4458 std::swap (new_lb
, new_ub
);
4461 gcc_checking_assert (TYPE_UNSIGNED (type
));
4462 r
= int_range
<1> (type
, new_lb
, new_ub
);
4466 operator_absu::update_bitmask (irange
&r
, const irange
&lh
,
4467 const irange
&rh
) const
4469 update_known_bitmask (r
, ABSU_EXPR
, lh
, rh
);
4474 operator_negate::fold_range (irange
&r
, tree type
,
4477 relation_trio
) const
4479 if (empty_range_varying (r
, type
, lh
, rh
))
4482 // -X is simply 0 - X.
4484 zero
.set_zero (type
);
4485 return range_op_handler (MINUS_EXPR
).fold_range (r
, type
, zero
, lh
);
4489 operator_negate::op1_range (irange
&r
, tree type
,
4492 relation_trio
) const
4494 // NEGATE is involutory.
4495 return fold_range (r
, type
, lhs
, op2
);
4500 operator_addr_expr::fold_range (irange
&r
, tree type
,
4503 relation_trio
) const
4505 if (empty_range_varying (r
, type
, lh
, rh
))
4508 // Return a non-null pointer of the LHS type (passed in op2).
4511 else if (lh
.undefined_p () || contains_zero_p (lh
))
4512 r
.set_varying (type
);
4514 r
.set_nonzero (type
);
4519 operator_addr_expr::op1_range (irange
&r
, tree type
,
4522 relation_trio
) const
4524 if (empty_range_varying (r
, type
, lhs
, op2
))
4527 // Return a non-null pointer of the LHS type (passed in op2), but only
4528 // if we cant overflow, eitherwise a no-zero offset could wrap to zero.
4530 if (!lhs
.undefined_p () && !contains_zero_p (lhs
) && TYPE_OVERFLOW_UNDEFINED (type
))
4531 r
.set_nonzero (type
);
4533 r
.set_varying (type
);
4537 // Initialize any integral operators to the primary table
4540 range_op_table::initialize_integral_ops ()
4542 set (TRUNC_DIV_EXPR
, op_trunc_div
);
4543 set (FLOOR_DIV_EXPR
, op_floor_div
);
4544 set (ROUND_DIV_EXPR
, op_round_div
);
4545 set (CEIL_DIV_EXPR
, op_ceil_div
);
4546 set (EXACT_DIV_EXPR
, op_exact_div
);
4547 set (LSHIFT_EXPR
, op_lshift
);
4548 set (RSHIFT_EXPR
, op_rshift
);
4549 set (TRUTH_AND_EXPR
, op_logical_and
);
4550 set (TRUTH_OR_EXPR
, op_logical_or
);
4551 set (TRUNC_MOD_EXPR
, op_trunc_mod
);
4552 set (TRUTH_NOT_EXPR
, op_logical_not
);
4553 set (IMAGPART_EXPR
, op_unknown
);
4554 set (REALPART_EXPR
, op_unknown
);
4555 set (ABSU_EXPR
, op_absu
);
4556 set (OP_WIDEN_MULT_SIGNED
, op_widen_mult_signed
);
4557 set (OP_WIDEN_MULT_UNSIGNED
, op_widen_mult_unsigned
);
4558 set (OP_WIDEN_PLUS_SIGNED
, op_widen_plus_signed
);
4559 set (OP_WIDEN_PLUS_UNSIGNED
, op_widen_plus_unsigned
);
4564 operator_plus::overflow_free_p (const irange
&lh
, const irange
&rh
,
4565 relation_trio
) const
4567 if (lh
.undefined_p () || rh
.undefined_p ())
4570 tree type
= lh
.type ();
4571 if (TYPE_OVERFLOW_UNDEFINED (type
))
4574 wi::overflow_type ovf
;
4575 signop sgn
= TYPE_SIGN (type
);
4576 wide_int wmax0
= lh
.upper_bound ();
4577 wide_int wmax1
= rh
.upper_bound ();
4578 wi::add (wmax0
, wmax1
, sgn
, &ovf
);
4579 if (ovf
!= wi::OVF_NONE
)
4582 if (TYPE_UNSIGNED (type
))
4585 wide_int wmin0
= lh
.lower_bound ();
4586 wide_int wmin1
= rh
.lower_bound ();
4587 wi::add (wmin0
, wmin1
, sgn
, &ovf
);
4588 if (ovf
!= wi::OVF_NONE
)
4595 operator_minus::overflow_free_p (const irange
&lh
, const irange
&rh
,
4596 relation_trio
) const
4598 if (lh
.undefined_p () || rh
.undefined_p ())
4601 tree type
= lh
.type ();
4602 if (TYPE_OVERFLOW_UNDEFINED (type
))
4605 wi::overflow_type ovf
;
4606 signop sgn
= TYPE_SIGN (type
);
4607 wide_int wmin0
= lh
.lower_bound ();
4608 wide_int wmax1
= rh
.upper_bound ();
4609 wi::sub (wmin0
, wmax1
, sgn
, &ovf
);
4610 if (ovf
!= wi::OVF_NONE
)
4613 if (TYPE_UNSIGNED (type
))
4616 wide_int wmax0
= lh
.upper_bound ();
4617 wide_int wmin1
= rh
.lower_bound ();
4618 wi::sub (wmax0
, wmin1
, sgn
, &ovf
);
4619 if (ovf
!= wi::OVF_NONE
)
4626 operator_mult::overflow_free_p (const irange
&lh
, const irange
&rh
,
4627 relation_trio
) const
4629 if (lh
.undefined_p () || rh
.undefined_p ())
4632 tree type
= lh
.type ();
4633 if (TYPE_OVERFLOW_UNDEFINED (type
))
4636 wi::overflow_type ovf
;
4637 signop sgn
= TYPE_SIGN (type
);
4638 wide_int wmax0
= lh
.upper_bound ();
4639 wide_int wmax1
= rh
.upper_bound ();
4640 wi::mul (wmax0
, wmax1
, sgn
, &ovf
);
4641 if (ovf
!= wi::OVF_NONE
)
4644 if (TYPE_UNSIGNED (type
))
4647 wide_int wmin0
= lh
.lower_bound ();
4648 wide_int wmin1
= rh
.lower_bound ();
4649 wi::mul (wmin0
, wmin1
, sgn
, &ovf
);
4650 if (ovf
!= wi::OVF_NONE
)
4653 wi::mul (wmin0
, wmax1
, sgn
, &ovf
);
4654 if (ovf
!= wi::OVF_NONE
)
4657 wi::mul (wmax0
, wmin1
, sgn
, &ovf
);
4658 if (ovf
!= wi::OVF_NONE
)
4665 #include "selftest.h"
4669 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4670 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4671 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4672 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4673 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4674 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4677 range_op_cast_tests ()
4679 int_range
<2> r0
, r1
, r2
, rold
;
4680 r0
.set_varying (integer_type_node
);
4681 wide_int maxint
= r0
.upper_bound ();
4683 // If a range is in any way outside of the range for the converted
4684 // to range, default to the range for the new type.
4685 r0
.set_varying (short_integer_type_node
);
4686 wide_int minshort
= r0
.lower_bound ();
4687 wide_int maxshort
= r0
.upper_bound ();
4688 if (TYPE_PRECISION (integer_type_node
)
4689 > TYPE_PRECISION (short_integer_type_node
))
4691 r1
= int_range
<1> (integer_type_node
,
4692 wi::zero (TYPE_PRECISION (integer_type_node
)),
4694 range_cast (r1
, short_integer_type_node
);
4695 ASSERT_TRUE (r1
.lower_bound () == minshort
4696 && r1
.upper_bound() == maxshort
);
4699 // (unsigned char)[-5,-1] => [251,255].
4700 r0
= rold
= int_range
<1> (signed_char_type_node
, SCHAR (-5), SCHAR (-1));
4701 range_cast (r0
, unsigned_char_type_node
);
4702 ASSERT_TRUE (r0
== int_range
<1> (unsigned_char_type_node
,
4703 UCHAR (251), UCHAR (255)));
4704 range_cast (r0
, signed_char_type_node
);
4705 ASSERT_TRUE (r0
== rold
);
4707 // (signed char)[15, 150] => [-128,-106][15,127].
4708 r0
= rold
= int_range
<1> (unsigned_char_type_node
, UCHAR (15), UCHAR (150));
4709 range_cast (r0
, signed_char_type_node
);
4710 r1
= int_range
<1> (signed_char_type_node
, SCHAR (15), SCHAR (127));
4711 r2
= int_range
<1> (signed_char_type_node
, SCHAR (-128), SCHAR (-106));
4713 ASSERT_TRUE (r1
== r0
);
4714 range_cast (r0
, unsigned_char_type_node
);
4715 ASSERT_TRUE (r0
== rold
);
4717 // (unsigned char)[-5, 5] => [0,5][251,255].
4718 r0
= rold
= int_range
<1> (signed_char_type_node
, SCHAR (-5), SCHAR (5));
4719 range_cast (r0
, unsigned_char_type_node
);
4720 r1
= int_range
<1> (unsigned_char_type_node
, UCHAR (251), UCHAR (255));
4721 r2
= int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (5));
4723 ASSERT_TRUE (r0
== r1
);
4724 range_cast (r0
, signed_char_type_node
);
4725 ASSERT_TRUE (r0
== rold
);
4727 // (unsigned char)[-5,5] => [0,5][251,255].
4728 r0
= int_range
<1> (integer_type_node
, INT (-5), INT (5));
4729 range_cast (r0
, unsigned_char_type_node
);
4730 r1
= int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (5));
4731 r1
.union_ (int_range
<1> (unsigned_char_type_node
, UCHAR (251), UCHAR (255)));
4732 ASSERT_TRUE (r0
== r1
);
4734 // (unsigned char)[5U,1974U] => [0,255].
4735 r0
= int_range
<1> (unsigned_type_node
, UINT (5), UINT (1974));
4736 range_cast (r0
, unsigned_char_type_node
);
4737 ASSERT_TRUE (r0
== int_range
<1> (unsigned_char_type_node
, UCHAR (0), UCHAR (255)));
4738 range_cast (r0
, integer_type_node
);
4739 // Going to a wider range should not sign extend.
4740 ASSERT_TRUE (r0
== int_range
<1> (integer_type_node
, INT (0), INT (255)));
4742 // (unsigned char)[-350,15] => [0,255].
4743 r0
= int_range
<1> (integer_type_node
, INT (-350), INT (15));
4744 range_cast (r0
, unsigned_char_type_node
);
4745 ASSERT_TRUE (r0
== (int_range
<1>
4746 (unsigned_char_type_node
,
4747 min_limit (unsigned_char_type_node
),
4748 max_limit (unsigned_char_type_node
))));
4750 // Casting [-120,20] from signed char to unsigned short.
4751 // => [0, 20][0xff88, 0xffff].
4752 r0
= int_range
<1> (signed_char_type_node
, SCHAR (-120), SCHAR (20));
4753 range_cast (r0
, short_unsigned_type_node
);
4754 r1
= int_range
<1> (short_unsigned_type_node
, UINT16 (0), UINT16 (20));
4755 r2
= int_range
<1> (short_unsigned_type_node
,
4756 UINT16 (0xff88), UINT16 (0xffff));
4758 ASSERT_TRUE (r0
== r1
);
4759 // A truncating cast back to signed char will work because [-120, 20]
4760 // is representable in signed char.
4761 range_cast (r0
, signed_char_type_node
);
4762 ASSERT_TRUE (r0
== int_range
<1> (signed_char_type_node
,
4763 SCHAR (-120), SCHAR (20)));
4765 // unsigned char -> signed short
4766 // (signed short)[(unsigned char)25, (unsigned char)250]
4767 // => [(signed short)25, (signed short)250]
4768 r0
= rold
= int_range
<1> (unsigned_char_type_node
, UCHAR (25), UCHAR (250));
4769 range_cast (r0
, short_integer_type_node
);
4770 r1
= int_range
<1> (short_integer_type_node
, INT16 (25), INT16 (250));
4771 ASSERT_TRUE (r0
== r1
);
4772 range_cast (r0
, unsigned_char_type_node
);
4773 ASSERT_TRUE (r0
== rold
);
4775 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4776 r0
= int_range
<1> (long_long_integer_type_node
,
4777 min_limit (long_long_integer_type_node
),
4778 max_limit (long_long_integer_type_node
));
4779 range_cast (r0
, short_unsigned_type_node
);
4780 r1
= int_range
<1> (short_unsigned_type_node
,
4781 min_limit (short_unsigned_type_node
),
4782 max_limit (short_unsigned_type_node
));
4783 ASSERT_TRUE (r0
== r1
);
4785 // Casting NONZERO to a narrower type will wrap/overflow so
4786 // it's just the entire range for the narrower type.
4788 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4789 // is outside of the range of a smaller range, return the full
4791 if (TYPE_PRECISION (integer_type_node
)
4792 > TYPE_PRECISION (short_integer_type_node
))
4794 r0
.set_nonzero (integer_type_node
);
4795 range_cast (r0
, short_integer_type_node
);
4796 r1
= int_range
<1> (short_integer_type_node
,
4797 min_limit (short_integer_type_node
),
4798 max_limit (short_integer_type_node
));
4799 ASSERT_TRUE (r0
== r1
);
4802 // Casting NONZERO from a narrower signed to a wider signed.
4804 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4805 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4806 r0
.set_nonzero (short_integer_type_node
);
4807 range_cast (r0
, integer_type_node
);
4808 r1
= int_range
<1> (integer_type_node
, INT (-32768), INT (-1));
4809 r2
= int_range
<1> (integer_type_node
, INT (1), INT (32767));
4811 ASSERT_TRUE (r0
== r1
);
4815 range_op_lshift_tests ()
4817 // Test that 0x808.... & 0x8.... still contains 0x8....
4818 // for a large set of numbers.
4821 tree big_type
= long_long_unsigned_type_node
;
4822 unsigned big_prec
= TYPE_PRECISION (big_type
);
4823 // big_num = 0x808,0000,0000,0000
4824 wide_int big_num
= wi::lshift (wi::uhwi (0x808, big_prec
),
4825 wi::uhwi (48, big_prec
));
4826 op_bitwise_and
.fold_range (res
, big_type
,
4827 int_range
<1> (big_type
),
4828 int_range
<1> (big_type
, big_num
, big_num
));
4829 // val = 0x8,0000,0000,0000
4830 wide_int val
= wi::lshift (wi::uhwi (8, big_prec
),
4831 wi::uhwi (48, big_prec
));
4832 ASSERT_TRUE (res
.contains_p (val
));
4835 if (TYPE_PRECISION (unsigned_type_node
) > 31)
4837 // unsigned VARYING = op1 << 1 should be VARYING.
4838 int_range
<2> lhs (unsigned_type_node
);
4839 int_range
<2> shift (unsigned_type_node
, INT (1), INT (1));
4841 op_lshift
.op1_range (op1
, unsigned_type_node
, lhs
, shift
);
4842 ASSERT_TRUE (op1
.varying_p ());
4844 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4845 int_range
<2> zero (unsigned_type_node
, UINT (0), UINT (0));
4846 op_lshift
.op1_range (op1
, unsigned_type_node
, zero
, shift
);
4847 ASSERT_TRUE (op1
.num_pairs () == 2);
4848 // Remove the [0,0] range.
4849 op1
.intersect (zero
);
4850 ASSERT_TRUE (op1
.num_pairs () == 1);
4851 // op1 << 1 should be [0x8000,0x8000] << 1,
4852 // which should result in [0,0].
4853 int_range_max result
;
4854 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4855 ASSERT_TRUE (result
== zero
);
4857 // signed VARYING = op1 << 1 should be VARYING.
4858 if (TYPE_PRECISION (integer_type_node
) > 31)
4860 // unsigned VARYING = op1 << 1 should be VARYING.
4861 int_range
<2> lhs (integer_type_node
);
4862 int_range
<2> shift (integer_type_node
, INT (1), INT (1));
4864 op_lshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4865 ASSERT_TRUE (op1
.varying_p ());
4867 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4868 int_range
<2> zero (integer_type_node
, INT (0), INT (0));
4869 op_lshift
.op1_range (op1
, integer_type_node
, zero
, shift
);
4870 ASSERT_TRUE (op1
.num_pairs () == 2);
4871 // Remove the [0,0] range.
4872 op1
.intersect (zero
);
4873 ASSERT_TRUE (op1
.num_pairs () == 1);
4874 // op1 << 1 should be [0x8000,0x8000] << 1,
4875 // which should result in [0,0].
4876 int_range_max result
;
4877 op_lshift
.fold_range (result
, unsigned_type_node
, op1
, shift
);
4878 ASSERT_TRUE (result
== zero
);
4883 range_op_rshift_tests ()
4885 // unsigned: [3, MAX] = OP1 >> 1
4887 int_range_max
lhs (unsigned_type_node
,
4888 UINT (3), max_limit (unsigned_type_node
));
4889 int_range_max
one (unsigned_type_node
,
4890 wi::one (TYPE_PRECISION (unsigned_type_node
)),
4891 wi::one (TYPE_PRECISION (unsigned_type_node
)));
4893 op_rshift
.op1_range (op1
, unsigned_type_node
, lhs
, one
);
4894 ASSERT_FALSE (op1
.contains_p (UINT (3)));
4897 // signed: [3, MAX] = OP1 >> 1
4899 int_range_max
lhs (integer_type_node
,
4900 INT (3), max_limit (integer_type_node
));
4901 int_range_max
one (integer_type_node
, INT (1), INT (1));
4903 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4904 ASSERT_FALSE (op1
.contains_p (INT (-2)));
4907 // This is impossible, so OP1 should be [].
4908 // signed: [MIN, MIN] = OP1 >> 1
4910 int_range_max
lhs (integer_type_node
,
4911 min_limit (integer_type_node
),
4912 min_limit (integer_type_node
));
4913 int_range_max
one (integer_type_node
, INT (1), INT (1));
4915 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
4916 ASSERT_TRUE (op1
.undefined_p ());
4919 // signed: ~[-1] = OP1 >> 31
4920 if (TYPE_PRECISION (integer_type_node
) > 31)
4922 int_range_max
lhs (integer_type_node
, INT (-1), INT (-1), VR_ANTI_RANGE
);
4923 int_range_max
shift (integer_type_node
, INT (31), INT (31));
4925 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
4926 int_range_max negatives
= range_negatives (integer_type_node
);
4927 negatives
.intersect (op1
);
4928 ASSERT_TRUE (negatives
.undefined_p ());
4933 range_op_bitwise_and_tests ()
4936 wide_int min
= min_limit (integer_type_node
);
4937 wide_int max
= max_limit (integer_type_node
);
4938 wide_int tiny
= wi::add (min
, wi::one (TYPE_PRECISION (integer_type_node
)));
4939 int_range_max
i1 (integer_type_node
, tiny
, max
);
4940 int_range_max
i2 (integer_type_node
, INT (255), INT (255));
4942 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4943 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4944 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4946 // VARYING = OP1 & 255: OP1 is VARYING
4947 i1
= int_range
<1> (integer_type_node
);
4948 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
4949 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
4951 // For 0 = x & MASK, x is ~MASK.
4953 int_range
<2> zero (integer_type_node
, INT (0), INT (0));
4954 int_range
<2> mask
= int_range
<2> (integer_type_node
, INT (7), INT (7));
4955 op_bitwise_and
.op1_range (res
, integer_type_node
, zero
, mask
);
4956 wide_int inv
= wi::shwi (~7U, TYPE_PRECISION (integer_type_node
));
4957 ASSERT_TRUE (res
.get_nonzero_bits () == inv
);
4960 // (NONZERO | X) is nonzero.
4961 i1
.set_nonzero (integer_type_node
);
4962 i2
.set_varying (integer_type_node
);
4963 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4964 ASSERT_TRUE (res
.nonzero_p ());
4966 // (NEGATIVE | X) is nonzero.
4967 i1
= int_range
<1> (integer_type_node
, INT (-5), INT (-3));
4968 i2
.set_varying (integer_type_node
);
4969 op_bitwise_or
.fold_range (res
, integer_type_node
, i1
, i2
);
4970 ASSERT_FALSE (res
.contains_p (INT (0)));
4974 range_relational_tests ()
4976 int_range
<2> lhs (unsigned_char_type_node
);
4977 int_range
<2> op1 (unsigned_char_type_node
, UCHAR (8), UCHAR (10));
4978 int_range
<2> op2 (unsigned_char_type_node
, UCHAR (20), UCHAR (20));
4980 // Never wrapping additions mean LHS > OP1.
4981 relation_kind code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4982 ASSERT_TRUE (code
== VREL_GT
);
4984 // Most wrapping additions mean nothing...
4985 op1
= int_range
<2> (unsigned_char_type_node
, UCHAR (8), UCHAR (10));
4986 op2
= int_range
<2> (unsigned_char_type_node
, UCHAR (0), UCHAR (255));
4987 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4988 ASSERT_TRUE (code
== VREL_VARYING
);
4990 // However, always wrapping additions mean LHS < OP1.
4991 op1
= int_range
<2> (unsigned_char_type_node
, UCHAR (1), UCHAR (255));
4992 op2
= int_range
<2> (unsigned_char_type_node
, UCHAR (255), UCHAR (255));
4993 code
= op_plus
.lhs_op1_relation (lhs
, op1
, op2
, VREL_VARYING
);
4994 ASSERT_TRUE (code
== VREL_LT
);
5000 range_op_rshift_tests ();
5001 range_op_lshift_tests ();
5002 range_op_bitwise_and_tests ();
5003 range_op_cast_tests ();
5004 range_relational_tests ();
5006 extern void range_op_float_tests ();
5007 range_op_float_tests ();
5010 } // namespace selftest
5012 #endif // CHECKING_P