]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/range-op.cc
[PATCH v1 1/1] RISC-V: Nan-box the result of movbf on soft-bf16
[thirdparty/gcc.git] / gcc / range-op.cc
1 /* Code for range operators.
2 Copyright (C) 2017-2024 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "insn-codes.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "cfganal.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
43 #include "tree-eh.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "wide-int.h"
47 #include "value-relation.h"
48 #include "range-op.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
51
52 // Instantiate the operators which apply to multiple types here.
53
54 operator_equal op_equal;
55 operator_not_equal op_not_equal;
56 operator_lt op_lt;
57 operator_le op_le;
58 operator_gt op_gt;
59 operator_ge op_ge;
60 operator_identity op_ident;
61 operator_cst op_cst;
62 operator_cast op_cast;
63 operator_plus op_plus;
64 operator_abs op_abs;
65 operator_minus op_minus;
66 operator_negate op_negate;
67 operator_mult op_mult;
68 operator_addr_expr op_addr;
69 operator_bitwise_not op_bitwise_not;
70 operator_bitwise_xor op_bitwise_xor;
71 operator_bitwise_and op_bitwise_and;
72 operator_bitwise_or op_bitwise_or;
73 operator_min op_min;
74 operator_max op_max;
75
76 // Instantaite a range operator table.
77 range_op_table operator_table;
78
79 // Invoke the initialization routines for each class of range.
80
81 range_op_table::range_op_table ()
82 {
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
86
87 set (EQ_EXPR, op_equal);
88 set (NE_EXPR, op_not_equal);
89 set (LT_EXPR, op_lt);
90 set (LE_EXPR, op_le);
91 set (GT_EXPR, op_gt);
92 set (GE_EXPR, op_ge);
93 set (SSA_NAME, op_ident);
94 set (PAREN_EXPR, op_ident);
95 set (OBJ_TYPE_REF, op_ident);
96 set (REAL_CST, op_cst);
97 set (INTEGER_CST, op_cst);
98 set (NOP_EXPR, op_cast);
99 set (CONVERT_EXPR, op_cast);
100 set (PLUS_EXPR, op_plus);
101 set (ABS_EXPR, op_abs);
102 set (MINUS_EXPR, op_minus);
103 set (NEGATE_EXPR, op_negate);
104 set (MULT_EXPR, op_mult);
105 set (ADDR_EXPR, op_addr);
106 set (BIT_NOT_EXPR, op_bitwise_not);
107 set (BIT_XOR_EXPR, op_bitwise_xor);
108 set (BIT_AND_EXPR, op_bitwise_and);
109 set (BIT_IOR_EXPR, op_bitwise_or);
110 set (MIN_EXPR, op_min);
111 set (MAX_EXPR, op_max);
112 }
113
114 // Instantiate a default range operator for opcodes with no entry.
115
116 range_operator default_operator;
117
118 // Create a default range_op_handler.
119
120 range_op_handler::range_op_handler ()
121 {
122 m_operator = &default_operator;
123 }
124
125 // Create a range_op_handler for CODE. Use a default operatoer if CODE
126 // does not have an entry.
127
128 range_op_handler::range_op_handler (unsigned code)
129 {
130 m_operator = operator_table[code];
131 if (!m_operator)
132 m_operator = &default_operator;
133 }
134
135 // Return TRUE if this handler has a non-default operator.
136
137 range_op_handler::operator bool () const
138 {
139 return m_operator != &default_operator;
140 }
141
142 // Return a pointer to the range operator assocaited with this handler.
143 // If it is a default operator, return NULL.
144 // This is the equivalent of indexing the range table.
145
146 range_operator *
147 range_op_handler::range_op () const
148 {
149 if (m_operator != &default_operator)
150 return m_operator;
151 return NULL;
152 }
153
154 // Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
155 // This is used to produce a unique value for each dispatch pattern. Shift
156 // values are based on the size of the m_discriminator field in value_range.h.
157
158 constexpr unsigned
159 dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
160 {
161 return ((lhs << 8) + (op1 << 4) + (op2));
162 }
163
164 // These are the supported dispatch patterns. These map to the parameter list
165 // of the routines in range_operator. Note the last 3 characters are
166 // shorthand for the LHS, OP1, and OP2 range discriminator class.
167
168 const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
169 const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
170 const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
171 const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
172 const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
173 const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
174 const unsigned RO_PPP = dispatch_trio (VR_PRANGE, VR_PRANGE, VR_PRANGE);
175 const unsigned RO_PPI = dispatch_trio (VR_PRANGE, VR_PRANGE, VR_IRANGE);
176 const unsigned RO_IPP = dispatch_trio (VR_IRANGE, VR_PRANGE, VR_PRANGE);
177 const unsigned RO_IPI = dispatch_trio (VR_IRANGE, VR_PRANGE, VR_IRANGE);
178 const unsigned RO_PIP = dispatch_trio (VR_PRANGE, VR_IRANGE, VR_PRANGE);
179 const unsigned RO_PII = dispatch_trio (VR_PRANGE, VR_IRANGE, VR_IRANGE);
180
181 // Return a dispatch value for parameter types LHS, OP1 and OP2.
182
183 unsigned
184 range_op_handler::dispatch_kind (const vrange &lhs, const vrange &op1,
185 const vrange& op2) const
186 {
187 return dispatch_trio (lhs.m_discriminator, op1.m_discriminator,
188 op2.m_discriminator);
189 }
190
191 void
192 range_op_handler::discriminator_fail (const vrange &r1,
193 const vrange &r2,
194 const vrange &r3) const
195 {
196 const char name[] = "IPF";
197 gcc_checking_assert (r1.m_discriminator < sizeof (name) - 1);
198 gcc_checking_assert (r2.m_discriminator < sizeof (name) - 1);
199 gcc_checking_assert (r3.m_discriminator < sizeof (name) - 1);
200 fprintf (stderr, "DISCRIMINATOR FAIL. Dispatch ====> RO_%c%c%c <====\n",
201 name[r1.m_discriminator],
202 name[r2.m_discriminator],
203 name[r3.m_discriminator]);
204 gcc_unreachable ();
205 }
206
207 static inline bool
208 has_pointer_operand_p (const vrange &r1, const vrange &r2, const vrange &r3)
209 {
210 return is_a <prange> (r1) || is_a <prange> (r2) || is_a <prange> (r3);
211 }
212
213 // Dispatch a call to fold_range based on the types of R, LH and RH.
214
215 bool
216 range_op_handler::fold_range (vrange &r, tree type,
217 const vrange &lh,
218 const vrange &rh,
219 relation_trio rel) const
220 {
221 gcc_checking_assert (m_operator);
222 #if CHECKING_P
223 if (!lh.undefined_p () && !rh.undefined_p ())
224 gcc_assert (m_operator->operand_check_p (type, lh.type (), rh.type ()));
225 if (has_pointer_operand_p (r, lh, rh)
226 && !m_operator->pointers_handled_p (DISPATCH_FOLD_RANGE,
227 dispatch_kind (r, lh, rh)))
228 discriminator_fail (r, lh, rh);
229 #endif
230 switch (dispatch_kind (r, lh, rh))
231 {
232 case RO_III:
233 return m_operator->fold_range (as_a <irange> (r), type,
234 as_a <irange> (lh),
235 as_a <irange> (rh), rel);
236 case RO_IFI:
237 return m_operator->fold_range (as_a <irange> (r), type,
238 as_a <frange> (lh),
239 as_a <irange> (rh), rel);
240 case RO_IFF:
241 return m_operator->fold_range (as_a <irange> (r), type,
242 as_a <frange> (lh),
243 as_a <frange> (rh), rel);
244 case RO_FFF:
245 return m_operator->fold_range (as_a <frange> (r), type,
246 as_a <frange> (lh),
247 as_a <frange> (rh), rel);
248 case RO_FII:
249 return m_operator->fold_range (as_a <frange> (r), type,
250 as_a <irange> (lh),
251 as_a <irange> (rh), rel);
252 case RO_PPP:
253 return m_operator->fold_range (as_a <prange> (r), type,
254 as_a <prange> (lh),
255 as_a <prange> (rh), rel);
256 case RO_PPI:
257 return m_operator->fold_range (as_a <prange> (r), type,
258 as_a <prange> (lh),
259 as_a <irange> (rh), rel);
260 case RO_IPP:
261 return m_operator->fold_range (as_a <irange> (r), type,
262 as_a <prange> (lh),
263 as_a <prange> (rh), rel);
264 case RO_PIP:
265 return m_operator->fold_range (as_a <prange> (r), type,
266 as_a <irange> (lh),
267 as_a <prange> (rh), rel);
268 case RO_IPI:
269 return m_operator->fold_range (as_a <irange> (r), type,
270 as_a <prange> (lh),
271 as_a <irange> (rh), rel);
272 default:
273 return false;
274 }
275 }
276
277 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
278
279 bool
280 range_op_handler::op1_range (vrange &r, tree type,
281 const vrange &lhs,
282 const vrange &op2,
283 relation_trio rel) const
284 {
285 gcc_checking_assert (m_operator);
286 if (lhs.undefined_p ())
287 return false;
288 #if CHECKING_P
289 if (!op2.undefined_p ())
290 gcc_assert (m_operator->operand_check_p (lhs.type (), type, op2.type ()));
291 if (has_pointer_operand_p (r, lhs, op2)
292 && !m_operator->pointers_handled_p (DISPATCH_OP1_RANGE,
293 dispatch_kind (r, lhs, op2)))
294 discriminator_fail (r, lhs, op2);
295 #endif
296 switch (dispatch_kind (r, lhs, op2))
297 {
298 case RO_III:
299 return m_operator->op1_range (as_a <irange> (r), type,
300 as_a <irange> (lhs),
301 as_a <irange> (op2), rel);
302 case RO_PPP:
303 return m_operator->op1_range (as_a <prange> (r), type,
304 as_a <prange> (lhs),
305 as_a <prange> (op2), rel);
306 case RO_PIP:
307 return m_operator->op1_range (as_a <prange> (r), type,
308 as_a <irange> (lhs),
309 as_a <prange> (op2), rel);
310 case RO_PPI:
311 return m_operator->op1_range (as_a <prange> (r), type,
312 as_a <prange> (lhs),
313 as_a <irange> (op2), rel);
314 case RO_IPI:
315 return m_operator->op1_range (as_a <irange> (r), type,
316 as_a <prange> (lhs),
317 as_a <irange> (op2), rel);
318 case RO_FIF:
319 return m_operator->op1_range (as_a <frange> (r), type,
320 as_a <irange> (lhs),
321 as_a <frange> (op2), rel);
322 case RO_FFF:
323 return m_operator->op1_range (as_a <frange> (r), type,
324 as_a <frange> (lhs),
325 as_a <frange> (op2), rel);
326 default:
327 return false;
328 }
329 }
330
331 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
332
333 bool
334 range_op_handler::op2_range (vrange &r, tree type,
335 const vrange &lhs,
336 const vrange &op1,
337 relation_trio rel) const
338 {
339 gcc_checking_assert (m_operator);
340 if (lhs.undefined_p ())
341 return false;
342 #if CHECKING_P
343 if (!op1.undefined_p ())
344 gcc_assert (m_operator->operand_check_p (lhs.type (), op1.type (), type));
345 if (has_pointer_operand_p (r, lhs, op1)
346 && !m_operator->pointers_handled_p (DISPATCH_OP2_RANGE,
347 dispatch_kind (r, lhs, op1)))
348 discriminator_fail (r, lhs, op1);
349 #endif
350 switch (dispatch_kind (r, lhs, op1))
351 {
352 case RO_III:
353 return m_operator->op2_range (as_a <irange> (r), type,
354 as_a <irange> (lhs),
355 as_a <irange> (op1), rel);
356 case RO_PIP:
357 return m_operator->op2_range (as_a <prange> (r), type,
358 as_a <irange> (lhs),
359 as_a <prange> (op1), rel);
360 case RO_IPP:
361 return m_operator->op2_range (as_a <irange> (r), type,
362 as_a <prange> (lhs),
363 as_a <prange> (op1), rel);
364 case RO_FIF:
365 return m_operator->op2_range (as_a <frange> (r), type,
366 as_a <irange> (lhs),
367 as_a <frange> (op1), rel);
368 case RO_FFF:
369 return m_operator->op2_range (as_a <frange> (r), type,
370 as_a <frange> (lhs),
371 as_a <frange> (op1), rel);
372 default:
373 return false;
374 }
375 }
376
377 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
378
379 relation_kind
380 range_op_handler::lhs_op1_relation (const vrange &lhs,
381 const vrange &op1,
382 const vrange &op2,
383 relation_kind rel) const
384 {
385 gcc_checking_assert (m_operator);
386 #if CHECKING_P
387 if (has_pointer_operand_p (lhs, op1, op2)
388 && !m_operator->pointers_handled_p (DISPATCH_LHS_OP1_RELATION,
389 dispatch_kind (lhs, op1, op2)))
390 discriminator_fail (lhs, op1, op2);
391 #endif
392
393 switch (dispatch_kind (lhs, op1, op2))
394 {
395 case RO_III:
396 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
397 as_a <irange> (op1),
398 as_a <irange> (op2), rel);
399 case RO_PPP:
400 return m_operator->lhs_op1_relation (as_a <prange> (lhs),
401 as_a <prange> (op1),
402 as_a <prange> (op2), rel);
403 case RO_IPP:
404 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
405 as_a <prange> (op1),
406 as_a <prange> (op2), rel);
407 case RO_PII:
408 return m_operator->lhs_op1_relation (as_a <prange> (lhs),
409 as_a <irange> (op1),
410 as_a <irange> (op2), rel);
411 case RO_IFF:
412 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
413 as_a <frange> (op1),
414 as_a <frange> (op2), rel);
415 case RO_FFF:
416 return m_operator->lhs_op1_relation (as_a <frange> (lhs),
417 as_a <frange> (op1),
418 as_a <frange> (op2), rel);
419 default:
420 return VREL_VARYING;
421 }
422 }
423
424 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
425
426 relation_kind
427 range_op_handler::lhs_op2_relation (const vrange &lhs,
428 const vrange &op1,
429 const vrange &op2,
430 relation_kind rel) const
431 {
432 gcc_checking_assert (m_operator);
433 #if CHECKING_P
434 if (has_pointer_operand_p (lhs, op1, op2)
435 && !m_operator->pointers_handled_p (DISPATCH_LHS_OP2_RELATION,
436 dispatch_kind (lhs, op1, op2)))
437 discriminator_fail (lhs, op1, op2);
438 #endif
439 switch (dispatch_kind (lhs, op1, op2))
440 {
441 case RO_III:
442 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
443 as_a <irange> (op1),
444 as_a <irange> (op2), rel);
445 case RO_IFF:
446 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
447 as_a <frange> (op1),
448 as_a <frange> (op2), rel);
449 case RO_FFF:
450 return m_operator->lhs_op2_relation (as_a <frange> (lhs),
451 as_a <frange> (op1),
452 as_a <frange> (op2), rel);
453 default:
454 return VREL_VARYING;
455 }
456 }
457
458 // Dispatch a call to op1_op2_relation based on the type of LHS.
459
460 relation_kind
461 range_op_handler::op1_op2_relation (const vrange &lhs,
462 const vrange &op1,
463 const vrange &op2) const
464 {
465 gcc_checking_assert (m_operator);
466 #if CHECKING_P
467 if (has_pointer_operand_p (lhs, op1, op2)
468 && !m_operator->pointers_handled_p (DISPATCH_OP1_OP2_RELATION,
469 dispatch_kind (lhs, op1, op2)))
470 discriminator_fail (lhs, op1, op2);
471 #endif
472 switch (dispatch_kind (lhs, op1, op2))
473 {
474 case RO_III:
475 return m_operator->op1_op2_relation (as_a <irange> (lhs),
476 as_a <irange> (op1),
477 as_a <irange> (op2));
478
479 case RO_IPP:
480 return m_operator->op1_op2_relation (as_a <irange> (lhs),
481 as_a <prange> (op1),
482 as_a <prange> (op2));
483
484 case RO_IFF:
485 return m_operator->op1_op2_relation (as_a <irange> (lhs),
486 as_a <frange> (op1),
487 as_a <frange> (op2));
488
489 case RO_FFF:
490 return m_operator->op1_op2_relation (as_a <frange> (lhs),
491 as_a <frange> (op1),
492 as_a <frange> (op2));
493
494 default:
495 return VREL_VARYING;
496 }
497 }
498
499 bool
500 range_op_handler::overflow_free_p (const vrange &lh,
501 const vrange &rh,
502 relation_trio rel) const
503 {
504 gcc_checking_assert (m_operator);
505 switch (dispatch_kind (lh, lh, rh))
506 {
507 case RO_III:
508 return m_operator->overflow_free_p(as_a <irange> (lh),
509 as_a <irange> (rh),
510 rel);
511 default:
512 return false;
513 }
514 }
515
516 bool
517 range_op_handler::operand_check_p (tree t1, tree t2, tree t3) const
518 {
519 gcc_checking_assert (m_operator);
520 return m_operator->operand_check_p (t1, t2, t3);
521 }
522
523 // Update the known bitmasks in R when applying the operation CODE to
524 // LH and RH.
525
526 void
527 update_known_bitmask (vrange &r, tree_code code,
528 const vrange &lh, const vrange &rh)
529 {
530 if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ()
531 || r.singleton_p ())
532 return;
533
534 widest_int widest_value, widest_mask;
535 tree type = r.type ();
536 signop sign = TYPE_SIGN (type);
537 int prec = TYPE_PRECISION (type);
538 irange_bitmask lh_bits = lh.get_bitmask ();
539 irange_bitmask rh_bits = rh.get_bitmask ();
540
541 switch (get_gimple_rhs_class (code))
542 {
543 case GIMPLE_UNARY_RHS:
544 bit_value_unop (code, sign, prec, &widest_value, &widest_mask,
545 TYPE_SIGN (lh.type ()),
546 TYPE_PRECISION (lh.type ()),
547 widest_int::from (lh_bits.value (),
548 TYPE_SIGN (lh.type ())),
549 widest_int::from (lh_bits.mask (),
550 TYPE_SIGN (lh.type ())));
551 break;
552 case GIMPLE_BINARY_RHS:
553 bit_value_binop (code, sign, prec, &widest_value, &widest_mask,
554 TYPE_SIGN (lh.type ()),
555 TYPE_PRECISION (lh.type ()),
556 widest_int::from (lh_bits.value (), sign),
557 widest_int::from (lh_bits.mask (), sign),
558 TYPE_SIGN (rh.type ()),
559 TYPE_PRECISION (rh.type ()),
560 widest_int::from (rh_bits.value (), sign),
561 widest_int::from (rh_bits.mask (), sign));
562 break;
563 default:
564 gcc_unreachable ();
565 }
566
567 wide_int mask = wide_int::from (widest_mask, prec, sign);
568 wide_int value = wide_int::from (widest_value, prec, sign);
569 // Bitmasks must have the unknown value bits cleared.
570 value &= ~mask;
571 irange_bitmask bm (value, mask);
572 r.update_bitmask (bm);
573 }
574
575 // Return the upper limit for a type.
576
577 static inline wide_int
578 max_limit (const_tree type)
579 {
580 return irange_val_max (type);
581 }
582
583 // Return the lower limit for a type.
584
585 static inline wide_int
586 min_limit (const_tree type)
587 {
588 return irange_val_min (type);
589 }
590
591 // Return false if shifting by OP is undefined behavior. Otherwise, return
592 // true and the range it is to be shifted by. This allows trimming out of
593 // undefined ranges, leaving only valid ranges if there are any.
594
595 static inline bool
596 get_shift_range (irange &r, tree type, const irange &op)
597 {
598 if (op.undefined_p ())
599 return false;
600
601 // Build valid range and intersect it with the shift range.
602 r = value_range (op.type (),
603 wi::shwi (0, TYPE_PRECISION (op.type ())),
604 wi::shwi (TYPE_PRECISION (type) - 1, TYPE_PRECISION (op.type ())));
605 r.intersect (op);
606
607 // If there are no valid ranges in the shift range, returned false.
608 if (r.undefined_p ())
609 return false;
610 return true;
611 }
612
613 // Default wide_int fold operation returns [MIN, MAX].
614
615 void
616 range_operator::wi_fold (irange &r, tree type,
617 const wide_int &lh_lb ATTRIBUTE_UNUSED,
618 const wide_int &lh_ub ATTRIBUTE_UNUSED,
619 const wide_int &rh_lb ATTRIBUTE_UNUSED,
620 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
621 {
622 gcc_checking_assert (r.supports_type_p (type));
623 r.set_varying (type);
624 }
625
626 // Call wi_fold when both op1 and op2 are equivalent. Further split small
627 // subranges into constants. This can provide better precision.
628 // For x + y, when x == y with a range of [0,4] instead of [0, 8] produce
629 // [0,0][2, 2][4,4][6, 6][8, 8]
630 // LIMIT is the maximum number of elements in range allowed before we
631 // do not process them individually.
632
633 void
634 range_operator::wi_fold_in_parts_equiv (irange &r, tree type,
635 const wide_int &lh_lb,
636 const wide_int &lh_ub,
637 unsigned limit) const
638 {
639 int_range_max tmp;
640 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
641 widest_int::from (lh_lb, TYPE_SIGN (type)));
642 // if there are 1 to 8 values in the LH range, split them up.
643 r.set_undefined ();
644 if (lh_range >= 0 && lh_range < limit)
645 {
646 for (unsigned x = 0; x <= lh_range; x++)
647 {
648 wide_int val = lh_lb + x;
649 wi_fold (tmp, type, val, val, val, val);
650 r.union_ (tmp);
651 }
652 }
653 // Otherwise just call wi_fold.
654 else
655 wi_fold (r, type, lh_lb, lh_ub, lh_lb, lh_ub);
656 }
657
658 // Call wi_fold, except further split small subranges into constants.
659 // This can provide better precision. For something 8 >> [0,1]
660 // Instead of [8, 16], we will produce [8,8][16,16]
661
662 void
663 range_operator::wi_fold_in_parts (irange &r, tree type,
664 const wide_int &lh_lb,
665 const wide_int &lh_ub,
666 const wide_int &rh_lb,
667 const wide_int &rh_ub) const
668 {
669 int_range_max tmp;
670 widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
671 widest_int::from (rh_lb, TYPE_SIGN (type)));
672 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
673 widest_int::from (lh_lb, TYPE_SIGN (type)));
674 // If there are 2, 3, or 4 values in the RH range, do them separately.
675 // Call wi_fold_in_parts to check the RH side.
676 if (rh_range > 0 && rh_range < 4)
677 {
678 wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
679 if (rh_range > 1)
680 {
681 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
682 r.union_ (tmp);
683 if (rh_range == 3)
684 {
685 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
686 r.union_ (tmp);
687 }
688 }
689 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
690 r.union_ (tmp);
691 }
692 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
693 // The RH side has been checked, so no recursion needed.
694 else if (lh_range > 0 && lh_range < 4)
695 {
696 wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
697 if (lh_range > 1)
698 {
699 wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
700 r.union_ (tmp);
701 if (lh_range == 3)
702 {
703 wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
704 r.union_ (tmp);
705 }
706 }
707 wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
708 r.union_ (tmp);
709 }
710 // Otherwise just call wi_fold.
711 else
712 wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
713 }
714
715 // The default for fold is to break all ranges into sub-ranges and
716 // invoke the wi_fold method on each sub-range pair.
717
718 bool
719 range_operator::fold_range (irange &r, tree type,
720 const irange &lh,
721 const irange &rh,
722 relation_trio trio) const
723 {
724 gcc_checking_assert (r.supports_type_p (type));
725 if (empty_range_varying (r, type, lh, rh))
726 return true;
727
728 relation_kind rel = trio.op1_op2 ();
729 unsigned num_lh = lh.num_pairs ();
730 unsigned num_rh = rh.num_pairs ();
731
732 // If op1 and op2 are equivalences, then we don't need a complete cross
733 // product, just pairs of matching elements.
734 if (relation_equiv_p (rel) && lh == rh)
735 {
736 int_range_max tmp;
737 r.set_undefined ();
738 for (unsigned x = 0; x < num_lh; ++x)
739 {
740 // If the number of subranges is too high, limit subrange creation.
741 unsigned limit = (r.num_pairs () > 32) ? 0 : 8;
742 wide_int lh_lb = lh.lower_bound (x);
743 wide_int lh_ub = lh.upper_bound (x);
744 wi_fold_in_parts_equiv (tmp, type, lh_lb, lh_ub, limit);
745 r.union_ (tmp);
746 if (r.varying_p ())
747 break;
748 }
749 op1_op2_relation_effect (r, type, lh, rh, rel);
750 update_bitmask (r, lh, rh);
751 return true;
752 }
753
754 // If both ranges are single pairs, fold directly into the result range.
755 // If the number of subranges grows too high, produce a summary result as the
756 // loop becomes exponential with little benefit. See PR 103821.
757 if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
758 {
759 wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
760 rh.lower_bound (), rh.upper_bound ());
761 op1_op2_relation_effect (r, type, lh, rh, rel);
762 update_bitmask (r, lh, rh);
763 return true;
764 }
765
766 int_range_max tmp;
767 r.set_undefined ();
768 for (unsigned x = 0; x < num_lh; ++x)
769 for (unsigned y = 0; y < num_rh; ++y)
770 {
771 wide_int lh_lb = lh.lower_bound (x);
772 wide_int lh_ub = lh.upper_bound (x);
773 wide_int rh_lb = rh.lower_bound (y);
774 wide_int rh_ub = rh.upper_bound (y);
775 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
776 r.union_ (tmp);
777 if (r.varying_p ())
778 {
779 op1_op2_relation_effect (r, type, lh, rh, rel);
780 update_bitmask (r, lh, rh);
781 return true;
782 }
783 }
784 op1_op2_relation_effect (r, type, lh, rh, rel);
785 update_bitmask (r, lh, rh);
786 return true;
787 }
788
789 // The default for op1_range is to return false.
790
791 bool
792 range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
793 tree type ATTRIBUTE_UNUSED,
794 const irange &lhs ATTRIBUTE_UNUSED,
795 const irange &op2 ATTRIBUTE_UNUSED,
796 relation_trio) const
797 {
798 return false;
799 }
800
801 // The default for op2_range is to return false.
802
803 bool
804 range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
805 tree type ATTRIBUTE_UNUSED,
806 const irange &lhs ATTRIBUTE_UNUSED,
807 const irange &op1 ATTRIBUTE_UNUSED,
808 relation_trio) const
809 {
810 return false;
811 }
812
813 // The default relation routines return VREL_VARYING.
814
815 relation_kind
816 range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
817 const irange &op1 ATTRIBUTE_UNUSED,
818 const irange &op2 ATTRIBUTE_UNUSED,
819 relation_kind rel ATTRIBUTE_UNUSED) const
820 {
821 return VREL_VARYING;
822 }
823
824 relation_kind
825 range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
826 const irange &op1 ATTRIBUTE_UNUSED,
827 const irange &op2 ATTRIBUTE_UNUSED,
828 relation_kind rel ATTRIBUTE_UNUSED) const
829 {
830 return VREL_VARYING;
831 }
832
833 relation_kind
834 range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
835 const irange &op1 ATTRIBUTE_UNUSED,
836 const irange &op2 ATTRIBUTE_UNUSED) const
837 {
838 return VREL_VARYING;
839 }
840
841 // Default is no relation affects the LHS.
842
843 bool
844 range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
845 tree type ATTRIBUTE_UNUSED,
846 const irange &op1_range ATTRIBUTE_UNUSED,
847 const irange &op2_range ATTRIBUTE_UNUSED,
848 relation_kind rel ATTRIBUTE_UNUSED) const
849 {
850 return false;
851 }
852
853 bool
854 range_operator::overflow_free_p (const irange &, const irange &,
855 relation_trio) const
856 {
857 return false;
858 }
859
860 // Apply any known bitmask updates based on this operator.
861
862 void
863 range_operator::update_bitmask (irange &, const irange &,
864 const irange &) const
865 {
866 }
867
868 // Check that operand types are OK. Default to always OK.
869
870 bool
871 range_operator::operand_check_p (tree, tree, tree) const
872 {
873 return true;
874 }
875
876 // Create and return a range from a pair of wide-ints that are known
877 // to have overflowed (or underflowed).
878
879 static void
880 value_range_from_overflowed_bounds (irange &r, tree type,
881 const wide_int &wmin,
882 const wide_int &wmax)
883 {
884 const signop sgn = TYPE_SIGN (type);
885 const unsigned int prec = TYPE_PRECISION (type);
886
887 wide_int tmin = wide_int::from (wmin, prec, sgn);
888 wide_int tmax = wide_int::from (wmax, prec, sgn);
889
890 bool covers = false;
891 wide_int tem = tmin;
892 tmin = tmax + 1;
893 if (wi::cmp (tmin, tmax, sgn) < 0)
894 covers = true;
895 tmax = tem - 1;
896 if (wi::cmp (tmax, tem, sgn) > 0)
897 covers = true;
898
899 // If the anti-range would cover nothing, drop to varying.
900 // Likewise if the anti-range bounds are outside of the types
901 // values.
902 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
903 r.set_varying (type);
904 else
905 r.set (type, tmin, tmax, VR_ANTI_RANGE);
906 }
907
908 // Create and return a range from a pair of wide-ints. MIN_OVF and
909 // MAX_OVF describe any overflow that might have occurred while
910 // calculating WMIN and WMAX respectively.
911
912 static void
913 value_range_with_overflow (irange &r, tree type,
914 const wide_int &wmin, const wide_int &wmax,
915 wi::overflow_type min_ovf = wi::OVF_NONE,
916 wi::overflow_type max_ovf = wi::OVF_NONE)
917 {
918 const signop sgn = TYPE_SIGN (type);
919 const unsigned int prec = TYPE_PRECISION (type);
920 const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
921
922 // For one bit precision if max != min, then the range covers all
923 // values.
924 if (prec == 1 && wi::ne_p (wmax, wmin))
925 {
926 r.set_varying (type);
927 return;
928 }
929
930 if (overflow_wraps)
931 {
932 // If overflow wraps, truncate the values and adjust the range,
933 // kind, and bounds appropriately.
934 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
935 {
936 wide_int tmin = wide_int::from (wmin, prec, sgn);
937 wide_int tmax = wide_int::from (wmax, prec, sgn);
938 // If the limits are swapped, we wrapped around and cover
939 // the entire range.
940 if (wi::gt_p (tmin, tmax, sgn))
941 r.set_varying (type);
942 else
943 // No overflow or both overflow or underflow. The range
944 // kind stays normal.
945 r.set (type, tmin, tmax);
946 return;
947 }
948
949 if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
950 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
951 value_range_from_overflowed_bounds (r, type, wmin, wmax);
952 else
953 // Other underflow and/or overflow, drop to VR_VARYING.
954 r.set_varying (type);
955 }
956 else
957 {
958 // If both bounds either underflowed or overflowed, then the result
959 // is undefined.
960 if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
961 || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
962 {
963 r.set_undefined ();
964 return;
965 }
966
967 // If overflow does not wrap, saturate to [MIN, MAX].
968 wide_int new_lb, new_ub;
969 if (min_ovf == wi::OVF_UNDERFLOW)
970 new_lb = wi::min_value (prec, sgn);
971 else if (min_ovf == wi::OVF_OVERFLOW)
972 new_lb = wi::max_value (prec, sgn);
973 else
974 new_lb = wmin;
975
976 if (max_ovf == wi::OVF_UNDERFLOW)
977 new_ub = wi::min_value (prec, sgn);
978 else if (max_ovf == wi::OVF_OVERFLOW)
979 new_ub = wi::max_value (prec, sgn);
980 else
981 new_ub = wmax;
982
983 r.set (type, new_lb, new_ub);
984 }
985 }
986
987 // Create and return a range from a pair of wide-ints. Canonicalize
988 // the case where the bounds are swapped. In which case, we transform
989 // [10,5] into [MIN,5][10,MAX].
990
991 static inline void
992 create_possibly_reversed_range (irange &r, tree type,
993 const wide_int &new_lb, const wide_int &new_ub)
994 {
995 signop s = TYPE_SIGN (type);
996 // If the bounds are swapped, treat the result as if an overflow occurred.
997 if (wi::gt_p (new_lb, new_ub, s))
998 value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
999 else
1000 // Otherwise it's just a normal range.
1001 r.set (type, new_lb, new_ub);
1002 }
1003
1004 // Return the summary information about boolean range LHS. If EMPTY/FULL,
1005 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
1006
1007 bool_range_state
1008 get_bool_state (vrange &r, const vrange &lhs, tree val_type)
1009 {
1010 // If there is no result, then this is unexecutable.
1011 if (lhs.undefined_p ())
1012 {
1013 r.set_undefined ();
1014 return BRS_EMPTY;
1015 }
1016
1017 if (lhs.zero_p ())
1018 return BRS_FALSE;
1019
1020 // For TRUE, we can't just test for [1,1] because Ada can have
1021 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
1022 if (lhs.contains_p (build_zero_cst (lhs.type ())))
1023 {
1024 r.set_varying (val_type);
1025 return BRS_FULL;
1026 }
1027
1028 return BRS_TRUE;
1029 }
1030
1031 // ------------------------------------------------------------------------
1032
1033 void
1034 operator_equal::update_bitmask (irange &r, const irange &lh,
1035 const irange &rh) const
1036 {
1037 update_known_bitmask (r, EQ_EXPR, lh, rh);
1038 }
1039
1040 // Check if the LHS range indicates a relation between OP1 and OP2.
1041
1042 relation_kind
1043 operator_equal::op1_op2_relation (const irange &lhs, const irange &,
1044 const irange &) const
1045 {
1046 if (lhs.undefined_p ())
1047 return VREL_UNDEFINED;
1048
1049 // FALSE = op1 == op2 indicates NE_EXPR.
1050 if (lhs.zero_p ())
1051 return VREL_NE;
1052
1053 // TRUE = op1 == op2 indicates EQ_EXPR.
1054 if (!contains_zero_p (lhs))
1055 return VREL_EQ;
1056 return VREL_VARYING;
1057 }
1058
1059 bool
1060 operator_equal::fold_range (irange &r, tree type,
1061 const irange &op1,
1062 const irange &op2,
1063 relation_trio rel) const
1064 {
1065 if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
1066 return true;
1067
1068 // We can be sure the values are always equal or not if both ranges
1069 // consist of a single value, and then compare them.
1070 bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
1071 bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
1072 if (op1_const && op2_const)
1073 {
1074 if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
1075 r = range_true (type);
1076 else
1077 r = range_false (type);
1078 }
1079 else
1080 {
1081 // If ranges do not intersect, we know the range is not equal,
1082 // otherwise we don't know anything for sure.
1083 int_range_max tmp = op1;
1084 tmp.intersect (op2);
1085 if (tmp.undefined_p ())
1086 r = range_false (type);
1087 // Check if a constant cannot satisfy the bitmask requirements.
1088 else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
1089 r = range_false (type);
1090 else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
1091 r = range_false (type);
1092 else
1093 r = range_true_and_false (type);
1094 }
1095 return true;
1096 }
1097
1098 bool
1099 operator_equal::op1_range (irange &r, tree type,
1100 const irange &lhs,
1101 const irange &op2,
1102 relation_trio) const
1103 {
1104 switch (get_bool_state (r, lhs, type))
1105 {
1106 case BRS_TRUE:
1107 // If it's true, the result is the same as OP2.
1108 r = op2;
1109 break;
1110
1111 case BRS_FALSE:
1112 // If the result is false, the only time we know anything is
1113 // if OP2 is a constant.
1114 if (!op2.undefined_p ()
1115 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1116 {
1117 r = op2;
1118 r.invert ();
1119 }
1120 else
1121 r.set_varying (type);
1122 break;
1123
1124 default:
1125 break;
1126 }
1127 return true;
1128 }
1129
1130 bool
1131 operator_equal::op2_range (irange &r, tree type,
1132 const irange &lhs,
1133 const irange &op1,
1134 relation_trio rel) const
1135 {
1136 return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1137 }
1138
1139 // -------------------------------------------------------------------------
1140
1141 void
1142 operator_not_equal::update_bitmask (irange &r, const irange &lh,
1143 const irange &rh) const
1144 {
1145 update_known_bitmask (r, NE_EXPR, lh, rh);
1146 }
1147
1148 // Check if the LHS range indicates a relation between OP1 and OP2.
1149
1150 relation_kind
1151 operator_not_equal::op1_op2_relation (const irange &lhs, const irange &,
1152 const irange &) const
1153 {
1154 if (lhs.undefined_p ())
1155 return VREL_UNDEFINED;
1156
1157 // FALSE = op1 != op2 indicates EQ_EXPR.
1158 if (lhs.zero_p ())
1159 return VREL_EQ;
1160
1161 // TRUE = op1 != op2 indicates NE_EXPR.
1162 if (!contains_zero_p (lhs))
1163 return VREL_NE;
1164 return VREL_VARYING;
1165 }
1166
1167 bool
1168 operator_not_equal::fold_range (irange &r, tree type,
1169 const irange &op1,
1170 const irange &op2,
1171 relation_trio rel) const
1172 {
1173 if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
1174 return true;
1175
1176 // We can be sure the values are always equal or not if both ranges
1177 // consist of a single value, and then compare them.
1178 bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
1179 bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
1180 if (op1_const && op2_const)
1181 {
1182 if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
1183 r = range_true (type);
1184 else
1185 r = range_false (type);
1186 }
1187 else
1188 {
1189 // If ranges do not intersect, we know the range is not equal,
1190 // otherwise we don't know anything for sure.
1191 int_range_max tmp = op1;
1192 tmp.intersect (op2);
1193 if (tmp.undefined_p ())
1194 r = range_true (type);
1195 // Check if a constant cannot satisfy the bitmask requirements.
1196 else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
1197 r = range_true (type);
1198 else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
1199 r = range_true (type);
1200 else
1201 r = range_true_and_false (type);
1202 }
1203 return true;
1204 }
1205
1206 bool
1207 operator_not_equal::op1_range (irange &r, tree type,
1208 const irange &lhs,
1209 const irange &op2,
1210 relation_trio) const
1211 {
1212 switch (get_bool_state (r, lhs, type))
1213 {
1214 case BRS_TRUE:
1215 // If the result is true, the only time we know anything is if
1216 // OP2 is a constant.
1217 if (!op2.undefined_p ()
1218 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1219 {
1220 r = op2;
1221 r.invert ();
1222 }
1223 else
1224 r.set_varying (type);
1225 break;
1226
1227 case BRS_FALSE:
1228 // If it's false, the result is the same as OP2.
1229 r = op2;
1230 break;
1231
1232 default:
1233 break;
1234 }
1235 return true;
1236 }
1237
1238
1239 bool
1240 operator_not_equal::op2_range (irange &r, tree type,
1241 const irange &lhs,
1242 const irange &op1,
1243 relation_trio rel) const
1244 {
1245 return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1246 }
1247
1248 // (X < VAL) produces the range of [MIN, VAL - 1].
1249
1250 static void
1251 build_lt (irange &r, tree type, const wide_int &val)
1252 {
1253 wi::overflow_type ov;
1254 wide_int lim;
1255 signop sgn = TYPE_SIGN (type);
1256
1257 // Signed 1 bit cannot represent 1 for subtraction.
1258 if (sgn == SIGNED)
1259 lim = wi::add (val, -1, sgn, &ov);
1260 else
1261 lim = wi::sub (val, 1, sgn, &ov);
1262
1263 // If val - 1 underflows, check if X < MIN, which is an empty range.
1264 if (ov)
1265 r.set_undefined ();
1266 else
1267 r = int_range<1> (type, min_limit (type), lim);
1268 }
1269
1270 // (X <= VAL) produces the range of [MIN, VAL].
1271
1272 static void
1273 build_le (irange &r, tree type, const wide_int &val)
1274 {
1275 r = int_range<1> (type, min_limit (type), val);
1276 }
1277
1278 // (X > VAL) produces the range of [VAL + 1, MAX].
1279
1280 static void
1281 build_gt (irange &r, tree type, const wide_int &val)
1282 {
1283 wi::overflow_type ov;
1284 wide_int lim;
1285 signop sgn = TYPE_SIGN (type);
1286
1287 // Signed 1 bit cannot represent 1 for addition.
1288 if (sgn == SIGNED)
1289 lim = wi::sub (val, -1, sgn, &ov);
1290 else
1291 lim = wi::add (val, 1, sgn, &ov);
1292 // If val + 1 overflows, check is for X > MAX, which is an empty range.
1293 if (ov)
1294 r.set_undefined ();
1295 else
1296 r = int_range<1> (type, lim, max_limit (type));
1297 }
1298
1299 // (X >= val) produces the range of [VAL, MAX].
1300
1301 static void
1302 build_ge (irange &r, tree type, const wide_int &val)
1303 {
1304 r = int_range<1> (type, val, max_limit (type));
1305 }
1306
1307
1308 void
1309 operator_lt::update_bitmask (irange &r, const irange &lh,
1310 const irange &rh) const
1311 {
1312 update_known_bitmask (r, LT_EXPR, lh, rh);
1313 }
1314
1315 // Check if the LHS range indicates a relation between OP1 and OP2.
1316
1317 relation_kind
1318 operator_lt::op1_op2_relation (const irange &lhs, const irange &,
1319 const irange &) const
1320 {
1321 if (lhs.undefined_p ())
1322 return VREL_UNDEFINED;
1323
1324 // FALSE = op1 < op2 indicates GE_EXPR.
1325 if (lhs.zero_p ())
1326 return VREL_GE;
1327
1328 // TRUE = op1 < op2 indicates LT_EXPR.
1329 if (!contains_zero_p (lhs))
1330 return VREL_LT;
1331 return VREL_VARYING;
1332 }
1333
1334 bool
1335 operator_lt::fold_range (irange &r, tree type,
1336 const irange &op1,
1337 const irange &op2,
1338 relation_trio rel) const
1339 {
1340 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
1341 return true;
1342
1343 signop sign = TYPE_SIGN (op1.type ());
1344 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1345
1346 if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
1347 r = range_true (type);
1348 else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
1349 r = range_false (type);
1350 // Use nonzero bits to determine if < 0 is false.
1351 else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
1352 r = range_false (type);
1353 else
1354 r = range_true_and_false (type);
1355 return true;
1356 }
1357
1358 bool
1359 operator_lt::op1_range (irange &r, tree type,
1360 const irange &lhs,
1361 const irange &op2,
1362 relation_trio) const
1363 {
1364 if (op2.undefined_p ())
1365 return false;
1366
1367 switch (get_bool_state (r, lhs, type))
1368 {
1369 case BRS_TRUE:
1370 build_lt (r, type, op2.upper_bound ());
1371 break;
1372
1373 case BRS_FALSE:
1374 build_ge (r, type, op2.lower_bound ());
1375 break;
1376
1377 default:
1378 break;
1379 }
1380 return true;
1381 }
1382
1383 bool
1384 operator_lt::op2_range (irange &r, tree type,
1385 const irange &lhs,
1386 const irange &op1,
1387 relation_trio) const
1388 {
1389 if (op1.undefined_p ())
1390 return false;
1391
1392 switch (get_bool_state (r, lhs, type))
1393 {
1394 case BRS_TRUE:
1395 build_gt (r, type, op1.lower_bound ());
1396 break;
1397
1398 case BRS_FALSE:
1399 build_le (r, type, op1.upper_bound ());
1400 break;
1401
1402 default:
1403 break;
1404 }
1405 return true;
1406 }
1407
1408
1409 void
1410 operator_le::update_bitmask (irange &r, const irange &lh,
1411 const irange &rh) const
1412 {
1413 update_known_bitmask (r, LE_EXPR, lh, rh);
1414 }
1415
1416 // Check if the LHS range indicates a relation between OP1 and OP2.
1417
1418 relation_kind
1419 operator_le::op1_op2_relation (const irange &lhs, const irange &,
1420 const irange &) const
1421 {
1422 if (lhs.undefined_p ())
1423 return VREL_UNDEFINED;
1424
1425 // FALSE = op1 <= op2 indicates GT_EXPR.
1426 if (lhs.zero_p ())
1427 return VREL_GT;
1428
1429 // TRUE = op1 <= op2 indicates LE_EXPR.
1430 if (!contains_zero_p (lhs))
1431 return VREL_LE;
1432 return VREL_VARYING;
1433 }
1434
1435 bool
1436 operator_le::fold_range (irange &r, tree type,
1437 const irange &op1,
1438 const irange &op2,
1439 relation_trio rel) const
1440 {
1441 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
1442 return true;
1443
1444 signop sign = TYPE_SIGN (op1.type ());
1445 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1446
1447 if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
1448 r = range_true (type);
1449 else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
1450 r = range_false (type);
1451 else
1452 r = range_true_and_false (type);
1453 return true;
1454 }
1455
1456 bool
1457 operator_le::op1_range (irange &r, tree type,
1458 const irange &lhs,
1459 const irange &op2,
1460 relation_trio) const
1461 {
1462 if (op2.undefined_p ())
1463 return false;
1464
1465 switch (get_bool_state (r, lhs, type))
1466 {
1467 case BRS_TRUE:
1468 build_le (r, type, op2.upper_bound ());
1469 break;
1470
1471 case BRS_FALSE:
1472 build_gt (r, type, op2.lower_bound ());
1473 break;
1474
1475 default:
1476 break;
1477 }
1478 return true;
1479 }
1480
1481 bool
1482 operator_le::op2_range (irange &r, tree type,
1483 const irange &lhs,
1484 const irange &op1,
1485 relation_trio) const
1486 {
1487 if (op1.undefined_p ())
1488 return false;
1489
1490 switch (get_bool_state (r, lhs, type))
1491 {
1492 case BRS_TRUE:
1493 build_ge (r, type, op1.lower_bound ());
1494 break;
1495
1496 case BRS_FALSE:
1497 build_lt (r, type, op1.upper_bound ());
1498 break;
1499
1500 default:
1501 break;
1502 }
1503 return true;
1504 }
1505
1506
1507 void
1508 operator_gt::update_bitmask (irange &r, const irange &lh,
1509 const irange &rh) const
1510 {
1511 update_known_bitmask (r, GT_EXPR, lh, rh);
1512 }
1513
1514 // Check if the LHS range indicates a relation between OP1 and OP2.
1515
1516 relation_kind
1517 operator_gt::op1_op2_relation (const irange &lhs, const irange &,
1518 const irange &) const
1519 {
1520 if (lhs.undefined_p ())
1521 return VREL_UNDEFINED;
1522
1523 // FALSE = op1 > op2 indicates LE_EXPR.
1524 if (lhs.zero_p ())
1525 return VREL_LE;
1526
1527 // TRUE = op1 > op2 indicates GT_EXPR.
1528 if (!contains_zero_p (lhs))
1529 return VREL_GT;
1530 return VREL_VARYING;
1531 }
1532
1533 bool
1534 operator_gt::fold_range (irange &r, tree type,
1535 const irange &op1, const irange &op2,
1536 relation_trio rel) const
1537 {
1538 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
1539 return true;
1540
1541 signop sign = TYPE_SIGN (op1.type ());
1542 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1543
1544 if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
1545 r = range_true (type);
1546 else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
1547 r = range_false (type);
1548 else
1549 r = range_true_and_false (type);
1550 return true;
1551 }
1552
1553 bool
1554 operator_gt::op1_range (irange &r, tree type,
1555 const irange &lhs, const irange &op2,
1556 relation_trio) const
1557 {
1558 if (op2.undefined_p ())
1559 return false;
1560
1561 switch (get_bool_state (r, lhs, type))
1562 {
1563 case BRS_TRUE:
1564 build_gt (r, type, op2.lower_bound ());
1565 break;
1566
1567 case BRS_FALSE:
1568 build_le (r, type, op2.upper_bound ());
1569 break;
1570
1571 default:
1572 break;
1573 }
1574 return true;
1575 }
1576
1577 bool
1578 operator_gt::op2_range (irange &r, tree type,
1579 const irange &lhs,
1580 const irange &op1,
1581 relation_trio) const
1582 {
1583 if (op1.undefined_p ())
1584 return false;
1585
1586 switch (get_bool_state (r, lhs, type))
1587 {
1588 case BRS_TRUE:
1589 build_lt (r, type, op1.upper_bound ());
1590 break;
1591
1592 case BRS_FALSE:
1593 build_ge (r, type, op1.lower_bound ());
1594 break;
1595
1596 default:
1597 break;
1598 }
1599 return true;
1600 }
1601
1602
1603 void
1604 operator_ge::update_bitmask (irange &r, const irange &lh,
1605 const irange &rh) const
1606 {
1607 update_known_bitmask (r, GE_EXPR, lh, rh);
1608 }
1609
1610 // Check if the LHS range indicates a relation between OP1 and OP2.
1611
1612 relation_kind
1613 operator_ge::op1_op2_relation (const irange &lhs, const irange &,
1614 const irange &) const
1615 {
1616 if (lhs.undefined_p ())
1617 return VREL_UNDEFINED;
1618
1619 // FALSE = op1 >= op2 indicates LT_EXPR.
1620 if (lhs.zero_p ())
1621 return VREL_LT;
1622
1623 // TRUE = op1 >= op2 indicates GE_EXPR.
1624 if (!contains_zero_p (lhs))
1625 return VREL_GE;
1626 return VREL_VARYING;
1627 }
1628
1629 bool
1630 operator_ge::fold_range (irange &r, tree type,
1631 const irange &op1,
1632 const irange &op2,
1633 relation_trio rel) const
1634 {
1635 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
1636 return true;
1637
1638 signop sign = TYPE_SIGN (op1.type ());
1639 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1640
1641 if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
1642 r = range_true (type);
1643 else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
1644 r = range_false (type);
1645 else
1646 r = range_true_and_false (type);
1647 return true;
1648 }
1649
1650 bool
1651 operator_ge::op1_range (irange &r, tree type,
1652 const irange &lhs,
1653 const irange &op2,
1654 relation_trio) const
1655 {
1656 if (op2.undefined_p ())
1657 return false;
1658
1659 switch (get_bool_state (r, lhs, type))
1660 {
1661 case BRS_TRUE:
1662 build_ge (r, type, op2.lower_bound ());
1663 break;
1664
1665 case BRS_FALSE:
1666 build_lt (r, type, op2.upper_bound ());
1667 break;
1668
1669 default:
1670 break;
1671 }
1672 return true;
1673 }
1674
1675 bool
1676 operator_ge::op2_range (irange &r, tree type,
1677 const irange &lhs,
1678 const irange &op1,
1679 relation_trio) const
1680 {
1681 if (op1.undefined_p ())
1682 return false;
1683
1684 switch (get_bool_state (r, lhs, type))
1685 {
1686 case BRS_TRUE:
1687 build_le (r, type, op1.upper_bound ());
1688 break;
1689
1690 case BRS_FALSE:
1691 build_gt (r, type, op1.lower_bound ());
1692 break;
1693
1694 default:
1695 break;
1696 }
1697 return true;
1698 }
1699
1700
1701 void
1702 operator_plus::update_bitmask (irange &r, const irange &lh,
1703 const irange &rh) const
1704 {
1705 update_known_bitmask (r, PLUS_EXPR, lh, rh);
1706 }
1707
1708 // Check to see if the range of OP2 indicates anything about the relation
1709 // between LHS and OP1.
1710
1711 relation_kind
1712 operator_plus::lhs_op1_relation (const irange &lhs,
1713 const irange &op1,
1714 const irange &op2,
1715 relation_kind) const
1716 {
1717 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
1718 return VREL_VARYING;
1719
1720 tree type = lhs.type ();
1721 unsigned prec = TYPE_PRECISION (type);
1722 wi::overflow_type ovf1, ovf2;
1723 signop sign = TYPE_SIGN (type);
1724
1725 // LHS = OP1 + 0 indicates LHS == OP1.
1726 if (op2.zero_p ())
1727 return VREL_EQ;
1728
1729 if (TYPE_OVERFLOW_WRAPS (type))
1730 {
1731 wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
1732 wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
1733 }
1734 else
1735 ovf1 = ovf2 = wi::OVF_NONE;
1736
1737 // Never wrapping additions.
1738 if (!ovf1 && !ovf2)
1739 {
1740 // Positive op2 means lhs > op1.
1741 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1742 return VREL_GT;
1743 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1744 return VREL_GE;
1745
1746 // Negative op2 means lhs < op1.
1747 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1748 return VREL_LT;
1749 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1750 return VREL_LE;
1751 }
1752 // Always wrapping additions.
1753 else if (ovf1 && ovf1 == ovf2)
1754 {
1755 // Positive op2 means lhs < op1.
1756 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1757 return VREL_LT;
1758 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1759 return VREL_LE;
1760
1761 // Negative op2 means lhs > op1.
1762 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1763 return VREL_GT;
1764 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1765 return VREL_GE;
1766 }
1767
1768 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1769 if (!range_includes_zero_p (op2))
1770 return VREL_NE;
1771
1772 return VREL_VARYING;
1773 }
1774
1775 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1776 // operands.
1777
1778 relation_kind
1779 operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
1780 const irange &op2, relation_kind rel) const
1781 {
1782 return lhs_op1_relation (lhs, op2, op1, rel);
1783 }
1784
1785 void
1786 operator_plus::wi_fold (irange &r, tree type,
1787 const wide_int &lh_lb, const wide_int &lh_ub,
1788 const wide_int &rh_lb, const wide_int &rh_ub) const
1789 {
1790 wi::overflow_type ov_lb, ov_ub;
1791 signop s = TYPE_SIGN (type);
1792 wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
1793 wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
1794 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1795 }
1796
1797 // Given addition or subtraction, determine the possible NORMAL ranges and
1798 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1799 // Return the relation that exists between the LHS and OP1 in order for the
1800 // NORMAL range to apply.
1801 // a return value of VREL_VARYING means no ranges were applicable.
1802
1803 static relation_kind
1804 plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
1805 bool add_p)
1806 {
1807 relation_kind kind = VREL_VARYING;
1808 // For now, only deal with constant adds. This could be extended to ranges
1809 // when someone is so motivated.
1810 if (!offset.singleton_p () || offset.zero_p ())
1811 return kind;
1812
1813 // Always work with a positive offset. ie a+ -2 -> a-2 and a- -2 > a+2
1814 wide_int off = offset.lower_bound ();
1815 if (wi::neg_p (off, SIGNED))
1816 {
1817 add_p = !add_p;
1818 off = wi::neg (off);
1819 }
1820
1821 wi::overflow_type ov;
1822 tree type = offset.type ();
1823 unsigned prec = TYPE_PRECISION (type);
1824 wide_int ub;
1825 wide_int lb;
1826 // calculate the normal range and relation for the operation.
1827 if (add_p)
1828 {
1829 // [ 0 , INF - OFF]
1830 lb = wi::zero (prec);
1831 ub = wi::sub (irange_val_max (type), off, UNSIGNED, &ov);
1832 kind = VREL_GT;
1833 }
1834 else
1835 {
1836 // [ OFF, INF ]
1837 lb = off;
1838 ub = irange_val_max (type);
1839 kind = VREL_LT;
1840 }
1841 int_range<2> normal_range (type, lb, ub);
1842 int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);
1843
1844 r_ov = ov_range;
1845 r_normal = normal_range;
1846 return kind;
1847 }
1848
1849 // Once op1 has been calculated by operator_plus or operator_minus, check
1850 // to see if the relation passed causes any part of the calculation to
1851 // be not possible. ie
1852 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1853 // and that further refines a_2 to [0, 0].
1854 // R is the value of op1, OP2 is the offset being added/subtracted, REL is the
1855 // relation between LHS relation OP1 and ADD_P is true for PLUS, false for
1856 // MINUS. IF any adjustment can be made, R will reflect it.
1857
1858 static void
1859 adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
1860 bool add_p)
1861 {
1862 if (r.undefined_p ())
1863 return;
1864 tree type = r.type ();
1865 // Check for unsigned overflow and calculate the overflow part.
1866 signop s = TYPE_SIGN (type);
1867 if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
1868 return;
1869
1870 // Only work with <, <=, >, >= relations.
1871 if (!relation_lt_le_gt_ge_p (rel))
1872 return;
1873
1874 // Get the ranges for this offset.
1875 int_range_max normal, overflow;
1876 relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);
1877
1878 // VREL_VARYING means there are no adjustments.
1879 if (k == VREL_VARYING)
1880 return;
1881
1882 // If the relations match use the normal range, otherwise use overflow range.
1883 if (relation_intersect (k, rel) == k)
1884 r.intersect (normal);
1885 else
1886 r.intersect (overflow);
1887 return;
1888 }
1889
1890 bool
1891 operator_plus::op1_range (irange &r, tree type,
1892 const irange &lhs,
1893 const irange &op2,
1894 relation_trio trio) const
1895 {
1896 if (lhs.undefined_p ())
1897 return false;
1898 // Start with the default operation.
1899 range_op_handler minus (MINUS_EXPR);
1900 if (!minus)
1901 return false;
1902 bool res = minus.fold_range (r, type, lhs, op2);
1903 relation_kind rel = trio.lhs_op1 ();
1904 // Check for a relation refinement.
1905 if (res)
1906 adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
1907 return res;
1908 }
1909
1910 bool
1911 operator_plus::op2_range (irange &r, tree type,
1912 const irange &lhs,
1913 const irange &op1,
1914 relation_trio rel) const
1915 {
1916 return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1917 }
1918
1919 class operator_widen_plus_signed : public range_operator
1920 {
1921 public:
1922 virtual void wi_fold (irange &r, tree type,
1923 const wide_int &lh_lb,
1924 const wide_int &lh_ub,
1925 const wide_int &rh_lb,
1926 const wide_int &rh_ub) const;
1927 } op_widen_plus_signed;
1928
1929 void
1930 operator_widen_plus_signed::wi_fold (irange &r, tree type,
1931 const wide_int &lh_lb,
1932 const wide_int &lh_ub,
1933 const wide_int &rh_lb,
1934 const wide_int &rh_ub) const
1935 {
1936 wi::overflow_type ov_lb, ov_ub;
1937 signop s = TYPE_SIGN (type);
1938
1939 wide_int lh_wlb
1940 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
1941 wide_int lh_wub
1942 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
1943 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1944 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1945
1946 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1947 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1948
1949 r = int_range<2> (type, new_lb, new_ub);
1950 }
1951
1952 class operator_widen_plus_unsigned : public range_operator
1953 {
1954 public:
1955 virtual void wi_fold (irange &r, tree type,
1956 const wide_int &lh_lb,
1957 const wide_int &lh_ub,
1958 const wide_int &rh_lb,
1959 const wide_int &rh_ub) const;
1960 } op_widen_plus_unsigned;
1961
1962 void
1963 operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
1964 const wide_int &lh_lb,
1965 const wide_int &lh_ub,
1966 const wide_int &rh_lb,
1967 const wide_int &rh_ub) const
1968 {
1969 wi::overflow_type ov_lb, ov_ub;
1970 signop s = TYPE_SIGN (type);
1971
1972 wide_int lh_wlb
1973 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
1974 wide_int lh_wub
1975 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
1976 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1977 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1978
1979 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1980 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1981
1982 r = int_range<2> (type, new_lb, new_ub);
1983 }
1984
1985 void
1986 operator_minus::update_bitmask (irange &r, const irange &lh,
1987 const irange &rh) const
1988 {
1989 update_known_bitmask (r, MINUS_EXPR, lh, rh);
1990 }
1991
1992 void
1993 operator_minus::wi_fold (irange &r, tree type,
1994 const wide_int &lh_lb, const wide_int &lh_ub,
1995 const wide_int &rh_lb, const wide_int &rh_ub) const
1996 {
1997 wi::overflow_type ov_lb, ov_ub;
1998 signop s = TYPE_SIGN (type);
1999 wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
2000 wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
2001 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
2002 }
2003
2004
2005 // Return the relation between LHS and OP1 based on the relation between
2006 // OP1 and OP2.
2007
2008 relation_kind
2009 operator_minus::lhs_op1_relation (const irange &, const irange &op1,
2010 const irange &, relation_kind rel) const
2011 {
2012 if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
2013 switch (rel)
2014 {
2015 case VREL_GT:
2016 case VREL_GE:
2017 return VREL_LE;
2018 default:
2019 break;
2020 }
2021 return VREL_VARYING;
2022 }
2023
2024 // Check to see if the relation REL between OP1 and OP2 has any effect on the
2025 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
2026 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
2027
2028 bool
2029 minus_op1_op2_relation_effect (irange &lhs_range, tree type,
2030 const irange &op1_range ATTRIBUTE_UNUSED,
2031 const irange &op2_range ATTRIBUTE_UNUSED,
2032 relation_kind rel)
2033 {
2034 if (rel == VREL_VARYING)
2035 return false;
2036
2037 int_range<2> rel_range;
2038 unsigned prec = TYPE_PRECISION (type);
2039 signop sgn = TYPE_SIGN (type);
2040
2041 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
2042 if (rel == VREL_EQ)
2043 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
2044 else if (rel == VREL_NE)
2045 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
2046 VR_ANTI_RANGE);
2047 else if (TYPE_OVERFLOW_WRAPS (type))
2048 {
2049 switch (rel)
2050 {
2051 // For wrapping signed values and unsigned, if op1 > op2 or
2052 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
2053 case VREL_GT:
2054 case VREL_LT:
2055 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
2056 VR_ANTI_RANGE);
2057 break;
2058 default:
2059 return false;
2060 }
2061 }
2062 else
2063 {
2064 switch (rel)
2065 {
2066 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
2067 case VREL_GT:
2068 rel_range = int_range<2> (type, wi::one (prec),
2069 wi::max_value (prec, sgn));
2070 break;
2071 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
2072 case VREL_GE:
2073 rel_range = int_range<2> (type, wi::zero (prec),
2074 wi::max_value (prec, sgn));
2075 break;
2076 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
2077 case VREL_LT:
2078 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
2079 wi::minus_one (prec));
2080 break;
2081 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
2082 case VREL_LE:
2083 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
2084 wi::zero (prec));
2085 break;
2086 default:
2087 return false;
2088 }
2089 }
2090 lhs_range.intersect (rel_range);
2091 return true;
2092 }
2093
2094 bool
2095 operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
2096 const irange &op1_range,
2097 const irange &op2_range,
2098 relation_kind rel) const
2099 {
2100 return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
2101 rel);
2102 }
2103
2104 bool
2105 operator_minus::op1_range (irange &r, tree type,
2106 const irange &lhs,
2107 const irange &op2,
2108 relation_trio trio) const
2109 {
2110 if (lhs.undefined_p ())
2111 return false;
2112 // Start with the default operation.
2113 range_op_handler minus (PLUS_EXPR);
2114 if (!minus)
2115 return false;
2116 bool res = minus.fold_range (r, type, lhs, op2);
2117 relation_kind rel = trio.lhs_op1 ();
2118 if (res)
2119 adjust_op1_for_overflow (r, op2, rel, false /* PLUS_EXPR */);
2120 return res;
2121
2122 }
2123
2124 bool
2125 operator_minus::op2_range (irange &r, tree type,
2126 const irange &lhs,
2127 const irange &op1,
2128 relation_trio) const
2129 {
2130 if (lhs.undefined_p ())
2131 return false;
2132 return fold_range (r, type, op1, lhs);
2133 }
2134
2135 void
2136 operator_min::update_bitmask (irange &r, const irange &lh,
2137 const irange &rh) const
2138 {
2139 update_known_bitmask (r, MIN_EXPR, lh, rh);
2140 }
2141
2142 void
2143 operator_min::wi_fold (irange &r, tree type,
2144 const wide_int &lh_lb, const wide_int &lh_ub,
2145 const wide_int &rh_lb, const wide_int &rh_ub) const
2146 {
2147 signop s = TYPE_SIGN (type);
2148 wide_int new_lb = wi::min (lh_lb, rh_lb, s);
2149 wide_int new_ub = wi::min (lh_ub, rh_ub, s);
2150 value_range_with_overflow (r, type, new_lb, new_ub);
2151 }
2152
2153
2154 void
2155 operator_max::update_bitmask (irange &r, const irange &lh,
2156 const irange &rh) const
2157 {
2158 update_known_bitmask (r, MAX_EXPR, lh, rh);
2159 }
2160
2161 void
2162 operator_max::wi_fold (irange &r, tree type,
2163 const wide_int &lh_lb, const wide_int &lh_ub,
2164 const wide_int &rh_lb, const wide_int &rh_ub) const
2165 {
2166 signop s = TYPE_SIGN (type);
2167 wide_int new_lb = wi::max (lh_lb, rh_lb, s);
2168 wide_int new_ub = wi::max (lh_ub, rh_ub, s);
2169 value_range_with_overflow (r, type, new_lb, new_ub);
2170 }
2171
2172
2173 // Calculate the cross product of two sets of ranges and return it.
2174 //
2175 // Multiplications, divisions and shifts are a bit tricky to handle,
2176 // depending on the mix of signs we have in the two ranges, we need to
2177 // operate on different values to get the minimum and maximum values
2178 // for the new range. One approach is to figure out all the
2179 // variations of range combinations and do the operations.
2180 //
2181 // However, this involves several calls to compare_values and it is
2182 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
2183 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
2184 // figure the smallest and largest values to form the new range.
2185
2186 void
2187 cross_product_operator::wi_cross_product (irange &r, tree type,
2188 const wide_int &lh_lb,
2189 const wide_int &lh_ub,
2190 const wide_int &rh_lb,
2191 const wide_int &rh_ub) const
2192 {
2193 wide_int cp1, cp2, cp3, cp4;
2194 // Default to varying.
2195 r.set_varying (type);
2196
2197 // Compute the 4 cross operations, bailing if we get an overflow we
2198 // can't handle.
2199 if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
2200 return;
2201 if (wi::eq_p (lh_lb, lh_ub))
2202 cp3 = cp1;
2203 else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
2204 return;
2205 if (wi::eq_p (rh_lb, rh_ub))
2206 cp2 = cp1;
2207 else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
2208 return;
2209 if (wi::eq_p (lh_lb, lh_ub))
2210 cp4 = cp2;
2211 else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
2212 return;
2213
2214 // Order pairs.
2215 signop sign = TYPE_SIGN (type);
2216 if (wi::gt_p (cp1, cp2, sign))
2217 std::swap (cp1, cp2);
2218 if (wi::gt_p (cp3, cp4, sign))
2219 std::swap (cp3, cp4);
2220
2221 // Choose min and max from the ordered pairs.
2222 wide_int res_lb = wi::min (cp1, cp3, sign);
2223 wide_int res_ub = wi::max (cp2, cp4, sign);
2224 value_range_with_overflow (r, type, res_lb, res_ub);
2225 }
2226
2227
2228 void
2229 operator_mult::update_bitmask (irange &r, const irange &lh,
2230 const irange &rh) const
2231 {
2232 update_known_bitmask (r, MULT_EXPR, lh, rh);
2233 }
2234
2235 bool
2236 operator_mult::op1_range (irange &r, tree type,
2237 const irange &lhs, const irange &op2,
2238 relation_trio) const
2239 {
2240 if (lhs.undefined_p ())
2241 return false;
2242
2243 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2244 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
2245 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
2246 if (TYPE_OVERFLOW_WRAPS (type))
2247 return false;
2248
2249 wide_int offset;
2250 if (op2.singleton_p (offset) && offset != 0)
2251 return range_op_handler (TRUNC_DIV_EXPR).fold_range (r, type, lhs, op2);
2252 return false;
2253 }
2254
2255 bool
2256 operator_mult::op2_range (irange &r, tree type,
2257 const irange &lhs, const irange &op1,
2258 relation_trio rel) const
2259 {
2260 return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
2261 }
2262
2263 bool
2264 operator_mult::wi_op_overflows (wide_int &res, tree type,
2265 const wide_int &w0, const wide_int &w1) const
2266 {
2267 wi::overflow_type overflow = wi::OVF_NONE;
2268 signop sign = TYPE_SIGN (type);
2269 res = wi::mul (w0, w1, sign, &overflow);
2270 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2271 {
2272 // For multiplication, the sign of the overflow is given
2273 // by the comparison of the signs of the operands.
2274 if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
2275 res = wi::max_value (w0.get_precision (), sign);
2276 else
2277 res = wi::min_value (w0.get_precision (), sign);
2278 return false;
2279 }
2280 return overflow;
2281 }
2282
2283 void
2284 operator_mult::wi_fold (irange &r, tree type,
2285 const wide_int &lh_lb, const wide_int &lh_ub,
2286 const wide_int &rh_lb, const wide_int &rh_ub) const
2287 {
2288 if (TYPE_OVERFLOW_UNDEFINED (type))
2289 {
2290 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2291 return;
2292 }
2293
2294 // Multiply the ranges when overflow wraps. This is basically fancy
2295 // code so we don't drop to varying with an unsigned
2296 // [-3,-1]*[-3,-1].
2297 //
2298 // This test requires 2*prec bits if both operands are signed and
2299 // 2*prec + 2 bits if either is not. Therefore, extend the values
2300 // using the sign of the result to PREC2. From here on out,
2301 // everything is just signed math no matter what the input types
2302 // were.
2303
2304 signop sign = TYPE_SIGN (type);
2305 unsigned prec = TYPE_PRECISION (type);
2306 widest2_int min0 = widest2_int::from (lh_lb, sign);
2307 widest2_int max0 = widest2_int::from (lh_ub, sign);
2308 widest2_int min1 = widest2_int::from (rh_lb, sign);
2309 widest2_int max1 = widest2_int::from (rh_ub, sign);
2310 widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
2311 widest2_int size = sizem1 + 1;
2312
2313 // Canonicalize the intervals.
2314 if (sign == UNSIGNED)
2315 {
2316 if (wi::ltu_p (size, min0 + max0))
2317 {
2318 min0 -= size;
2319 max0 -= size;
2320 }
2321 if (wi::ltu_p (size, min1 + max1))
2322 {
2323 min1 -= size;
2324 max1 -= size;
2325 }
2326 }
2327
2328 // Sort the 4 products so that min is in prod0 and max is in
2329 // prod3.
2330 widest2_int prod0 = min0 * min1;
2331 widest2_int prod1 = min0 * max1;
2332 widest2_int prod2 = max0 * min1;
2333 widest2_int prod3 = max0 * max1;
2334
2335 // min0min1 > max0max1
2336 if (prod0 > prod3)
2337 std::swap (prod0, prod3);
2338
2339 // min0max1 > max0min1
2340 if (prod1 > prod2)
2341 std::swap (prod1, prod2);
2342
2343 if (prod0 > prod1)
2344 std::swap (prod0, prod1);
2345
2346 if (prod2 > prod3)
2347 std::swap (prod2, prod3);
2348
2349 // diff = max - min
2350 prod2 = prod3 - prod0;
2351 if (wi::geu_p (prod2, sizem1))
2352 {
2353 // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
2354 if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
2355 && wi::exact_log2 (rh_lb) != -1 && prec > 1)
2356 {
2357 r.set (type, rh_lb, wi::max_value (prec, sign));
2358 int_range<2> zero;
2359 zero.set_zero (type);
2360 r.union_ (zero);
2361 }
2362 else
2363 // The range covers all values.
2364 r.set_varying (type);
2365 }
2366 else
2367 {
2368 wide_int new_lb = wide_int::from (prod0, prec, sign);
2369 wide_int new_ub = wide_int::from (prod3, prec, sign);
2370 create_possibly_reversed_range (r, type, new_lb, new_ub);
2371 }
2372 }
2373
2374 class operator_widen_mult_signed : public range_operator
2375 {
2376 public:
2377 virtual void wi_fold (irange &r, tree type,
2378 const wide_int &lh_lb,
2379 const wide_int &lh_ub,
2380 const wide_int &rh_lb,
2381 const wide_int &rh_ub)
2382 const;
2383 } op_widen_mult_signed;
2384
2385 void
2386 operator_widen_mult_signed::wi_fold (irange &r, tree type,
2387 const wide_int &lh_lb,
2388 const wide_int &lh_ub,
2389 const wide_int &rh_lb,
2390 const wide_int &rh_ub) const
2391 {
2392 signop s = TYPE_SIGN (type);
2393
2394 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
2395 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
2396 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2397 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2398
2399 /* We don't expect a widening multiplication to be able to overflow but range
2400 calculations for multiplications are complicated. After widening the
2401 operands lets call the base class. */
2402 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2403 }
2404
2405
2406 class operator_widen_mult_unsigned : public range_operator
2407 {
2408 public:
2409 virtual void wi_fold (irange &r, tree type,
2410 const wide_int &lh_lb,
2411 const wide_int &lh_ub,
2412 const wide_int &rh_lb,
2413 const wide_int &rh_ub)
2414 const;
2415 } op_widen_mult_unsigned;
2416
2417 void
2418 operator_widen_mult_unsigned::wi_fold (irange &r, tree type,
2419 const wide_int &lh_lb,
2420 const wide_int &lh_ub,
2421 const wide_int &rh_lb,
2422 const wide_int &rh_ub) const
2423 {
2424 signop s = TYPE_SIGN (type);
2425
2426 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
2427 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
2428 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2429 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2430
2431 /* We don't expect a widening multiplication to be able to overflow but range
2432 calculations for multiplications are complicated. After widening the
2433 operands lets call the base class. */
2434 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2435 }
2436
2437 class operator_div : public cross_product_operator
2438 {
2439 using range_operator::update_bitmask;
2440 public:
2441 operator_div (tree_code div_kind) { m_code = div_kind; }
2442 virtual void wi_fold (irange &r, tree type,
2443 const wide_int &lh_lb,
2444 const wide_int &lh_ub,
2445 const wide_int &rh_lb,
2446 const wide_int &rh_ub) const final override;
2447 virtual bool wi_op_overflows (wide_int &res, tree type,
2448 const wide_int &, const wide_int &)
2449 const final override;
2450 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
2451 { update_known_bitmask (r, m_code, lh, rh); }
2452 protected:
2453 tree_code m_code;
2454 };
2455
2456 static operator_div op_trunc_div (TRUNC_DIV_EXPR);
2457 static operator_div op_floor_div (FLOOR_DIV_EXPR);
2458 static operator_div op_round_div (ROUND_DIV_EXPR);
2459 static operator_div op_ceil_div (CEIL_DIV_EXPR);
2460
2461 bool
2462 operator_div::wi_op_overflows (wide_int &res, tree type,
2463 const wide_int &w0, const wide_int &w1) const
2464 {
2465 if (w1 == 0)
2466 return true;
2467
2468 wi::overflow_type overflow = wi::OVF_NONE;
2469 signop sign = TYPE_SIGN (type);
2470
2471 switch (m_code)
2472 {
2473 case EXACT_DIV_EXPR:
2474 case TRUNC_DIV_EXPR:
2475 res = wi::div_trunc (w0, w1, sign, &overflow);
2476 break;
2477 case FLOOR_DIV_EXPR:
2478 res = wi::div_floor (w0, w1, sign, &overflow);
2479 break;
2480 case ROUND_DIV_EXPR:
2481 res = wi::div_round (w0, w1, sign, &overflow);
2482 break;
2483 case CEIL_DIV_EXPR:
2484 res = wi::div_ceil (w0, w1, sign, &overflow);
2485 break;
2486 default:
2487 gcc_unreachable ();
2488 }
2489
2490 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2491 {
2492 // For division, the only case is -INF / -1 = +INF.
2493 res = wi::max_value (w0.get_precision (), sign);
2494 return false;
2495 }
2496 return overflow;
2497 }
2498
2499 void
2500 operator_div::wi_fold (irange &r, tree type,
2501 const wide_int &lh_lb, const wide_int &lh_ub,
2502 const wide_int &rh_lb, const wide_int &rh_ub) const
2503 {
2504 const wide_int dividend_min = lh_lb;
2505 const wide_int dividend_max = lh_ub;
2506 const wide_int divisor_min = rh_lb;
2507 const wide_int divisor_max = rh_ub;
2508 signop sign = TYPE_SIGN (type);
2509 unsigned prec = TYPE_PRECISION (type);
2510 wide_int extra_min, extra_max;
2511
2512 // If we know we won't divide by zero, just do the division.
2513 if (!wi_includes_zero_p (type, divisor_min, divisor_max))
2514 {
2515 wi_cross_product (r, type, dividend_min, dividend_max,
2516 divisor_min, divisor_max);
2517 return;
2518 }
2519
2520 // If we're definitely dividing by zero, there's nothing to do.
2521 if (wi_zero_p (type, divisor_min, divisor_max))
2522 {
2523 r.set_undefined ();
2524 return;
2525 }
2526
2527 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2528 // skip any division by zero.
2529
2530 // First divide by the negative numbers, if any.
2531 if (wi::neg_p (divisor_min, sign))
2532 wi_cross_product (r, type, dividend_min, dividend_max,
2533 divisor_min, wi::minus_one (prec));
2534 else
2535 r.set_undefined ();
2536
2537 // Then divide by the non-zero positive numbers, if any.
2538 if (wi::gt_p (divisor_max, wi::zero (prec), sign))
2539 {
2540 int_range_max tmp;
2541 wi_cross_product (tmp, type, dividend_min, dividend_max,
2542 wi::one (prec), divisor_max);
2543 r.union_ (tmp);
2544 }
2545 // We shouldn't still have undefined here.
2546 gcc_checking_assert (!r.undefined_p ());
2547 }
2548
2549
2550 class operator_exact_divide : public operator_div
2551 {
2552 using range_operator::op1_range;
2553 public:
2554 operator_exact_divide () : operator_div (EXACT_DIV_EXPR) { }
2555 virtual bool op1_range (irange &r, tree type,
2556 const irange &lhs,
2557 const irange &op2,
2558 relation_trio) const;
2559
2560 } op_exact_div;
2561
2562 bool
2563 operator_exact_divide::op1_range (irange &r, tree type,
2564 const irange &lhs,
2565 const irange &op2,
2566 relation_trio) const
2567 {
2568 if (lhs.undefined_p ())
2569 return false;
2570 wide_int offset;
2571 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
2572 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2573 // We wont bother trying to enumerate all the in between stuff :-P
2574 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2575 // the time however.
2576 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2577 if (op2.singleton_p (offset) && offset != 0)
2578 return range_op_handler (MULT_EXPR).fold_range (r, type, lhs, op2);
2579 return false;
2580 }
2581
2582
2583 class operator_lshift : public cross_product_operator
2584 {
2585 using range_operator::fold_range;
2586 using range_operator::op1_range;
2587 using range_operator::update_bitmask;
2588 public:
2589 virtual bool op1_range (irange &r, tree type, const irange &lhs,
2590 const irange &op2, relation_trio rel = TRIO_VARYING)
2591 const final override;
2592 virtual bool fold_range (irange &r, tree type, const irange &op1,
2593 const irange &op2, relation_trio rel = TRIO_VARYING)
2594 const final override;
2595
2596 virtual void wi_fold (irange &r, tree type,
2597 const wide_int &lh_lb, const wide_int &lh_ub,
2598 const wide_int &rh_lb,
2599 const wide_int &rh_ub) const final override;
2600 virtual bool wi_op_overflows (wide_int &res,
2601 tree type,
2602 const wide_int &,
2603 const wide_int &) const final override;
2604 void update_bitmask (irange &r, const irange &lh,
2605 const irange &rh) const final override
2606 { update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
2607 // Check compatibility of LHS and op1.
2608 bool operand_check_p (tree t1, tree t2, tree) const final override
2609 { return range_compatible_p (t1, t2); }
2610 } op_lshift;
2611
2612 class operator_rshift : public cross_product_operator
2613 {
2614 using range_operator::fold_range;
2615 using range_operator::op1_range;
2616 using range_operator::lhs_op1_relation;
2617 using range_operator::update_bitmask;
2618 public:
2619 virtual bool fold_range (irange &r, tree type, const irange &op1,
2620 const irange &op2, relation_trio rel = TRIO_VARYING)
2621 const final override;
2622 virtual void wi_fold (irange &r, tree type,
2623 const wide_int &lh_lb,
2624 const wide_int &lh_ub,
2625 const wide_int &rh_lb,
2626 const wide_int &rh_ub) const final override;
2627 virtual bool wi_op_overflows (wide_int &res,
2628 tree type,
2629 const wide_int &w0,
2630 const wide_int &w1) const final override;
2631 virtual bool op1_range (irange &, tree type, const irange &lhs,
2632 const irange &op2, relation_trio rel = TRIO_VARYING)
2633 const final override;
2634 virtual relation_kind lhs_op1_relation (const irange &lhs, const irange &op1,
2635 const irange &op2, relation_kind rel)
2636 const final override;
2637 void update_bitmask (irange &r, const irange &lh,
2638 const irange &rh) const final override
2639 { update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
2640 // Check compatibility of LHS and op1.
2641 bool operand_check_p (tree t1, tree t2, tree) const final override
2642 { return range_compatible_p (t1, t2); }
2643 } op_rshift;
2644
2645
2646 relation_kind
2647 operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
2648 const irange &op1,
2649 const irange &op2,
2650 relation_kind) const
2651 {
2652 // If both operands range are >= 0, then the LHS <= op1.
2653 if (!op1.undefined_p () && !op2.undefined_p ()
2654 && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
2655 && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
2656 return VREL_LE;
2657 return VREL_VARYING;
2658 }
2659
2660 bool
2661 operator_lshift::fold_range (irange &r, tree type,
2662 const irange &op1,
2663 const irange &op2,
2664 relation_trio rel) const
2665 {
2666 int_range_max shift_range;
2667 if (!get_shift_range (shift_range, type, op2))
2668 {
2669 if (op2.undefined_p ())
2670 r.set_undefined ();
2671 else
2672 r.set_zero (type);
2673 return true;
2674 }
2675
2676 // Transform left shifts by constants into multiplies.
2677 if (shift_range.singleton_p ())
2678 {
2679 unsigned shift = shift_range.lower_bound ().to_uhwi ();
2680 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
2681 int_range<1> mult (type, tmp, tmp);
2682
2683 // Force wrapping multiplication.
2684 bool saved_flag_wrapv = flag_wrapv;
2685 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
2686 flag_wrapv = 1;
2687 flag_wrapv_pointer = 1;
2688 bool b = op_mult.fold_range (r, type, op1, mult);
2689 flag_wrapv = saved_flag_wrapv;
2690 flag_wrapv_pointer = saved_flag_wrapv_pointer;
2691 return b;
2692 }
2693 else
2694 // Otherwise, invoke the generic fold routine.
2695 return range_operator::fold_range (r, type, op1, shift_range, rel);
2696 }
2697
2698 void
2699 operator_lshift::wi_fold (irange &r, tree type,
2700 const wide_int &lh_lb, const wide_int &lh_ub,
2701 const wide_int &rh_lb, const wide_int &rh_ub) const
2702 {
2703 signop sign = TYPE_SIGN (type);
2704 unsigned prec = TYPE_PRECISION (type);
2705 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
2706 int bound_shift = overflow_pos - rh_ub.to_shwi ();
2707 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2708 // overflow. However, for that to happen, rh.max needs to be zero,
2709 // which means rh is a singleton range of zero, which means we simply return
2710 // [lh_lb, lh_ub] as the range.
2711 if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
2712 {
2713 r = int_range<2> (type, lh_lb, lh_ub);
2714 return;
2715 }
2716
2717 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2718 wide_int complement = ~(bound - 1);
2719 wide_int low_bound, high_bound;
2720 bool in_bounds = false;
2721
2722 if (sign == UNSIGNED)
2723 {
2724 low_bound = bound;
2725 high_bound = complement;
2726 if (wi::ltu_p (lh_ub, low_bound))
2727 {
2728 // [5, 6] << [1, 2] == [10, 24].
2729 // We're shifting out only zeroes, the value increases
2730 // monotonically.
2731 in_bounds = true;
2732 }
2733 else if (wi::ltu_p (high_bound, lh_lb))
2734 {
2735 // [0xffffff00, 0xffffffff] << [1, 2]
2736 // == [0xfffffc00, 0xfffffffe].
2737 // We're shifting out only ones, the value decreases
2738 // monotonically.
2739 in_bounds = true;
2740 }
2741 }
2742 else
2743 {
2744 // [-1, 1] << [1, 2] == [-4, 4]
2745 low_bound = complement;
2746 high_bound = bound;
2747 if (wi::lts_p (lh_ub, high_bound)
2748 && wi::lts_p (low_bound, lh_lb))
2749 {
2750 // For non-negative numbers, we're shifting out only zeroes,
2751 // the value increases monotonically. For negative numbers,
2752 // we're shifting out only ones, the value decreases
2753 // monotonically.
2754 in_bounds = true;
2755 }
2756 }
2757
2758 if (in_bounds)
2759 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2760 else
2761 r.set_varying (type);
2762 }
2763
2764 bool
2765 operator_lshift::wi_op_overflows (wide_int &res, tree type,
2766 const wide_int &w0, const wide_int &w1) const
2767 {
2768 signop sign = TYPE_SIGN (type);
2769 if (wi::neg_p (w1))
2770 {
2771 // It's unclear from the C standard whether shifts can overflow.
2772 // The following code ignores overflow; perhaps a C standard
2773 // interpretation ruling is needed.
2774 res = wi::rshift (w0, -w1, sign);
2775 }
2776 else
2777 res = wi::lshift (w0, w1);
2778 return false;
2779 }
2780
2781 bool
2782 operator_lshift::op1_range (irange &r,
2783 tree type,
2784 const irange &lhs,
2785 const irange &op2,
2786 relation_trio) const
2787 {
2788 if (lhs.undefined_p ())
2789 return false;
2790
2791 if (!contains_zero_p (lhs))
2792 r.set_nonzero (type);
2793 else
2794 r.set_varying (type);
2795
2796 wide_int shift;
2797 if (op2.singleton_p (shift))
2798 {
2799 if (wi::lt_p (shift, 0, SIGNED))
2800 return false;
2801 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
2802 TYPE_PRECISION (op2.type ())),
2803 UNSIGNED))
2804 return false;
2805 if (shift == 0)
2806 {
2807 r.intersect (lhs);
2808 return true;
2809 }
2810
2811 // Work completely in unsigned mode to start.
2812 tree utype = type;
2813 int_range_max tmp_range;
2814 if (TYPE_SIGN (type) == SIGNED)
2815 {
2816 int_range_max tmp = lhs;
2817 utype = unsigned_type_for (type);
2818 range_cast (tmp, utype);
2819 op_rshift.fold_range (tmp_range, utype, tmp, op2);
2820 }
2821 else
2822 op_rshift.fold_range (tmp_range, utype, lhs, op2);
2823
2824 // Start with ranges which can produce the LHS by right shifting the
2825 // result by the shift amount.
2826 // ie [0x08, 0xF0] = op1 << 2 will start with
2827 // [00001000, 11110000] = op1 << 2
2828 // [0x02, 0x4C] aka [00000010, 00111100]
2829
2830 // Then create a range from the LB with the least significant upper bit
2831 // set, to the upper bound with all the bits set.
2832 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2833
2834 // Ideally we do this for each subrange, but just lump them all for now.
2835 unsigned low_bits = TYPE_PRECISION (utype) - shift.to_uhwi ();
2836 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
2837 wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
2838 wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
2839 int_range<2> fill_range (utype, new_lb, new_ub);
2840 tmp_range.union_ (fill_range);
2841
2842 if (utype != type)
2843 range_cast (tmp_range, type);
2844
2845 r.intersect (tmp_range);
2846 return true;
2847 }
2848
2849 return !r.varying_p ();
2850 }
2851
2852 bool
2853 operator_rshift::op1_range (irange &r,
2854 tree type,
2855 const irange &lhs,
2856 const irange &op2,
2857 relation_trio) const
2858 {
2859 if (lhs.undefined_p ())
2860 return false;
2861 wide_int shift;
2862 if (op2.singleton_p (shift))
2863 {
2864 // Ignore nonsensical shifts.
2865 unsigned prec = TYPE_PRECISION (type);
2866 if (wi::ge_p (shift,
2867 wi::uhwi (prec, TYPE_PRECISION (op2.type ())),
2868 UNSIGNED))
2869 return false;
2870 if (shift == 0)
2871 {
2872 r = lhs;
2873 return true;
2874 }
2875
2876 // Folding the original operation may discard some impossible
2877 // ranges from the LHS.
2878 int_range_max lhs_refined;
2879 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
2880 lhs_refined.intersect (lhs);
2881 if (lhs_refined.undefined_p ())
2882 {
2883 r.set_undefined ();
2884 return true;
2885 }
2886 int_range_max shift_range (op2.type (), shift, shift);
2887 int_range_max lb, ub;
2888 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
2889 // LHS
2890 // 0000 0111 = OP1 >> 3
2891 //
2892 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2893 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2894 // right hand side (0x07).
2895 wide_int mask = wi::bit_not (wi::lshift (wi::minus_one (prec), shift));
2896 int_range_max mask_range (type,
2897 wi::zero (TYPE_PRECISION (type)),
2898 mask);
2899 op_plus.fold_range (ub, type, lb, mask_range);
2900 r = lb;
2901 r.union_ (ub);
2902 if (!contains_zero_p (lhs_refined))
2903 {
2904 mask_range.invert ();
2905 r.intersect (mask_range);
2906 }
2907 return true;
2908 }
2909 return false;
2910 }
2911
2912 bool
2913 operator_rshift::wi_op_overflows (wide_int &res,
2914 tree type,
2915 const wide_int &w0,
2916 const wide_int &w1) const
2917 {
2918 signop sign = TYPE_SIGN (type);
2919 if (wi::neg_p (w1))
2920 res = wi::lshift (w0, -w1);
2921 else
2922 {
2923 // It's unclear from the C standard whether shifts can overflow.
2924 // The following code ignores overflow; perhaps a C standard
2925 // interpretation ruling is needed.
2926 res = wi::rshift (w0, w1, sign);
2927 }
2928 return false;
2929 }
2930
2931 bool
2932 operator_rshift::fold_range (irange &r, tree type,
2933 const irange &op1,
2934 const irange &op2,
2935 relation_trio rel) const
2936 {
2937 int_range_max shift;
2938 if (!get_shift_range (shift, type, op2))
2939 {
2940 if (op2.undefined_p ())
2941 r.set_undefined ();
2942 else
2943 r.set_zero (type);
2944 return true;
2945 }
2946
2947 return range_operator::fold_range (r, type, op1, shift, rel);
2948 }
2949
2950 void
2951 operator_rshift::wi_fold (irange &r, tree type,
2952 const wide_int &lh_lb, const wide_int &lh_ub,
2953 const wide_int &rh_lb, const wide_int &rh_ub) const
2954 {
2955 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2956 }
2957
2958
2959 // Add a partial equivalence between the LHS and op1 for casts.
2960
2961 relation_kind
2962 operator_cast::lhs_op1_relation (const irange &lhs,
2963 const irange &op1,
2964 const irange &op2 ATTRIBUTE_UNUSED,
2965 relation_kind) const
2966 {
2967 if (lhs.undefined_p () || op1.undefined_p ())
2968 return VREL_VARYING;
2969 unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
2970 unsigned op1_prec = TYPE_PRECISION (op1.type ());
2971 // If the result gets sign extended into a larger type check first if this
2972 // qualifies as a partial equivalence.
2973 if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
2974 {
2975 // If the result is sign extended, and the LHS is larger than op1,
2976 // check if op1's range can be negative as the sign extension will
2977 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2978 int_range<3> negs = range_negatives (op1.type ());
2979 negs.intersect (op1);
2980 if (!negs.undefined_p ())
2981 return VREL_VARYING;
2982 }
2983
2984 unsigned prec = MIN (lhs_prec, op1_prec);
2985 return bits_to_pe (prec);
2986 }
2987
2988 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2989
2990 inline bool
2991 operator_cast::truncating_cast_p (const irange &inner,
2992 const irange &outer) const
2993 {
2994 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
2995 }
2996
2997 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2998
2999 bool
3000 operator_cast::inside_domain_p (const wide_int &min,
3001 const wide_int &max,
3002 const irange &range) const
3003 {
3004 wide_int domain_min = irange_val_min (range.type ());
3005 wide_int domain_max = irange_val_max (range.type ());
3006 signop domain_sign = TYPE_SIGN (range.type ());
3007 return (wi::le_p (min, domain_max, domain_sign)
3008 && wi::le_p (max, domain_max, domain_sign)
3009 && wi::ge_p (min, domain_min, domain_sign)
3010 && wi::ge_p (max, domain_min, domain_sign));
3011 }
3012
3013
3014 // Helper for fold_range which work on a pair at a time.
3015
3016 void
3017 operator_cast::fold_pair (irange &r, unsigned index,
3018 const irange &inner,
3019 const irange &outer) const
3020 {
3021 tree inner_type = inner.type ();
3022 tree outer_type = outer.type ();
3023 signop inner_sign = TYPE_SIGN (inner_type);
3024 unsigned outer_prec = TYPE_PRECISION (outer_type);
3025
3026 // check to see if casting from INNER to OUTER is a conversion that
3027 // fits in the resulting OUTER type.
3028 wide_int inner_lb = inner.lower_bound (index);
3029 wide_int inner_ub = inner.upper_bound (index);
3030 if (truncating_cast_p (inner, outer))
3031 {
3032 // We may be able to accommodate a truncating cast if the
3033 // resulting range can be represented in the target type...
3034 if (wi::rshift (wi::sub (inner_ub, inner_lb),
3035 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
3036 inner_sign) != 0)
3037 {
3038 r.set_varying (outer_type);
3039 return;
3040 }
3041 }
3042 // ...but we must still verify that the final range fits in the
3043 // domain. This catches -fstrict-enum restrictions where the domain
3044 // range is smaller than what fits in the underlying type.
3045 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
3046 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
3047 if (inside_domain_p (min, max, outer))
3048 create_possibly_reversed_range (r, outer_type, min, max);
3049 else
3050 r.set_varying (outer_type);
3051 }
3052
3053
3054 bool
3055 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3056 const irange &inner,
3057 const irange &outer,
3058 relation_trio) const
3059 {
3060 if (empty_range_varying (r, type, inner, outer))
3061 return true;
3062
3063 gcc_checking_assert (outer.varying_p ());
3064 gcc_checking_assert (inner.num_pairs () > 0);
3065
3066 // Avoid a temporary by folding the first pair directly into the result.
3067 fold_pair (r, 0, inner, outer);
3068
3069 // Then process any additional pairs by unioning with their results.
3070 for (unsigned x = 1; x < inner.num_pairs (); ++x)
3071 {
3072 int_range_max tmp;
3073 fold_pair (tmp, x, inner, outer);
3074 r.union_ (tmp);
3075 if (r.varying_p ())
3076 return true;
3077 }
3078
3079 update_bitmask (r, inner, outer);
3080 return true;
3081 }
3082
3083 void
3084 operator_cast::update_bitmask (irange &r, const irange &lh,
3085 const irange &rh) const
3086 {
3087 update_known_bitmask (r, CONVERT_EXPR, lh, rh);
3088 }
3089
3090 bool
3091 operator_cast::op1_range (irange &r, tree type,
3092 const irange &lhs,
3093 const irange &op2,
3094 relation_trio) const
3095 {
3096 if (lhs.undefined_p ())
3097 return false;
3098 tree lhs_type = lhs.type ();
3099 gcc_checking_assert (types_compatible_p (op2.type(), type));
3100
3101 // If we are calculating a pointer, shortcut to what we really care about.
3102 if (POINTER_TYPE_P (type))
3103 {
3104 // Conversion from other pointers or a constant (including 0/NULL)
3105 // are straightforward.
3106 if (POINTER_TYPE_P (lhs.type ())
3107 || (lhs.singleton_p ()
3108 && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type)))
3109 {
3110 r = lhs;
3111 range_cast (r, type);
3112 }
3113 else
3114 {
3115 // If the LHS is not a pointer nor a singleton, then it is
3116 // either VARYING or non-zero.
3117 if (!lhs.undefined_p () && !contains_zero_p (lhs))
3118 r.set_nonzero (type);
3119 else
3120 r.set_varying (type);
3121 }
3122 r.intersect (op2);
3123 return true;
3124 }
3125
3126 if (truncating_cast_p (op2, lhs))
3127 {
3128 if (lhs.varying_p ())
3129 r.set_varying (type);
3130 else
3131 {
3132 // We want to insert the LHS as an unsigned value since it
3133 // would not trigger the signed bit of the larger type.
3134 int_range_max converted_lhs = lhs;
3135 range_cast (converted_lhs, unsigned_type_for (lhs_type));
3136 range_cast (converted_lhs, type);
3137 // Start by building the positive signed outer range for the type.
3138 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
3139 TYPE_PRECISION (type));
3140 create_possibly_reversed_range (r, type, lim,
3141 wi::max_value (TYPE_PRECISION (type),
3142 SIGNED));
3143 // For the signed part, we need to simply union the 2 ranges now.
3144 r.union_ (converted_lhs);
3145
3146 // Create maximal negative number outside of LHS bits.
3147 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
3148 TYPE_PRECISION (type));
3149 // Add this to the unsigned LHS range(s).
3150 int_range_max lim_range (type, lim, lim);
3151 int_range_max lhs_neg;
3152 range_op_handler (PLUS_EXPR).fold_range (lhs_neg, type,
3153 converted_lhs, lim_range);
3154 // lhs_neg now has all the negative versions of the LHS.
3155 // Now union in all the values from SIGNED MIN (0x80000) to
3156 // lim-1 in order to fill in all the ranges with the upper
3157 // bits set.
3158
3159 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
3160 // we don't need to create a range from min to lim-1
3161 // calculate neg range traps trying to create [lim, lim - 1].
3162 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
3163 if (lim != min_val)
3164 {
3165 int_range_max neg (type,
3166 wi::min_value (TYPE_PRECISION (type),
3167 SIGNED),
3168 lim - 1);
3169 lhs_neg.union_ (neg);
3170 }
3171 // And finally, munge the signed and unsigned portions.
3172 r.union_ (lhs_neg);
3173 }
3174 // And intersect with any known value passed in the extra operand.
3175 r.intersect (op2);
3176 return true;
3177 }
3178
3179 int_range_max tmp;
3180 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
3181 tmp = lhs;
3182 else
3183 {
3184 // The cast is not truncating, and the range is restricted to
3185 // the range of the RHS by this assignment.
3186 //
3187 // Cast the range of the RHS to the type of the LHS.
3188 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
3189 // Intersect this with the LHS range will produce the range,
3190 // which will be cast to the RHS type before returning.
3191 tmp.intersect (lhs);
3192 }
3193
3194 // Cast the calculated range to the type of the RHS.
3195 fold_range (r, type, tmp, int_range<1> (type));
3196 return true;
3197 }
3198
3199
3200 class operator_logical_and : public range_operator
3201 {
3202 using range_operator::fold_range;
3203 using range_operator::op1_range;
3204 using range_operator::op2_range;
3205 public:
3206 virtual bool fold_range (irange &r, tree type,
3207 const irange &lh,
3208 const irange &rh,
3209 relation_trio rel = TRIO_VARYING) const;
3210 virtual bool op1_range (irange &r, tree type,
3211 const irange &lhs,
3212 const irange &op2,
3213 relation_trio rel = TRIO_VARYING) const;
3214 virtual bool op2_range (irange &r, tree type,
3215 const irange &lhs,
3216 const irange &op1,
3217 relation_trio rel = TRIO_VARYING) const;
3218 // Check compatibility of all operands.
3219 bool operand_check_p (tree t1, tree t2, tree t3) const final override
3220 { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
3221 } op_logical_and;
3222
3223 bool
3224 operator_logical_and::fold_range (irange &r, tree type,
3225 const irange &lh,
3226 const irange &rh,
3227 relation_trio) const
3228 {
3229 if (empty_range_varying (r, type, lh, rh))
3230 return true;
3231
3232 // Precision of LHS and both operands must match.
3233 if (TYPE_PRECISION (lh.type ()) != TYPE_PRECISION (type)
3234 || TYPE_PRECISION (type) != TYPE_PRECISION (rh.type ()))
3235 return false;
3236
3237 // 0 && anything is 0.
3238 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
3239 || (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
3240 r = range_false (type);
3241 else if (contains_zero_p (lh) || contains_zero_p (rh))
3242 // To reach this point, there must be a logical 1 on each side, and
3243 // the only remaining question is whether there is a zero or not.
3244 r = range_true_and_false (type);
3245 else
3246 r = range_true (type);
3247 return true;
3248 }
3249
3250 bool
3251 operator_logical_and::op1_range (irange &r, tree type,
3252 const irange &lhs,
3253 const irange &op2 ATTRIBUTE_UNUSED,
3254 relation_trio) const
3255 {
3256 switch (get_bool_state (r, lhs, type))
3257 {
3258 case BRS_TRUE:
3259 // A true result means both sides of the AND must be true.
3260 r = range_true (type);
3261 break;
3262 default:
3263 // Any other result means only one side has to be false, the
3264 // other side can be anything. So we cannot be sure of any
3265 // result here.
3266 r = range_true_and_false (type);
3267 break;
3268 }
3269 return true;
3270 }
3271
3272 bool
3273 operator_logical_and::op2_range (irange &r, tree type,
3274 const irange &lhs,
3275 const irange &op1,
3276 relation_trio) const
3277 {
3278 return operator_logical_and::op1_range (r, type, lhs, op1);
3279 }
3280
3281
3282 void
3283 operator_bitwise_and::update_bitmask (irange &r, const irange &lh,
3284 const irange &rh) const
3285 {
3286 update_known_bitmask (r, BIT_AND_EXPR, lh, rh);
3287 }
3288
3289 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3290 // by considering the number of leading redundant sign bit copies.
3291 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3292 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
3293 static bool
3294 wi_optimize_signed_bitwise_op (irange &r, tree type,
3295 const wide_int &lh_lb, const wide_int &lh_ub,
3296 const wide_int &rh_lb, const wide_int &rh_ub)
3297 {
3298 int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
3299 int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
3300 int new_clrsb = MIN (lh_clrsb, rh_clrsb);
3301 if (new_clrsb == 0)
3302 return false;
3303 int type_prec = TYPE_PRECISION (type);
3304 int rprec = (type_prec - new_clrsb) - 1;
3305 value_range_with_overflow (r, type,
3306 wi::mask (rprec, true, type_prec),
3307 wi::mask (rprec, false, type_prec));
3308 return true;
3309 }
3310
3311 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
3312 // the LHS and op1.
3313
3314 relation_kind
3315 operator_bitwise_and::lhs_op1_relation (const irange &lhs,
3316 const irange &op1,
3317 const irange &op2,
3318 relation_kind) const
3319 {
3320 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
3321 return VREL_VARYING;
3322 if (!op2.singleton_p ())
3323 return VREL_VARYING;
3324 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
3325 int prec1 = TYPE_PRECISION (op1.type ());
3326 int prec2 = TYPE_PRECISION (op2.type ());
3327 int mask_prec = 0;
3328 wide_int mask = op2.lower_bound ();
3329 if (wi::eq_p (mask, wi::mask (8, false, prec2)))
3330 mask_prec = 8;
3331 else if (wi::eq_p (mask, wi::mask (16, false, prec2)))
3332 mask_prec = 16;
3333 else if (wi::eq_p (mask, wi::mask (32, false, prec2)))
3334 mask_prec = 32;
3335 else if (wi::eq_p (mask, wi::mask (64, false, prec2)))
3336 mask_prec = 64;
3337 return bits_to_pe (MIN (prec1, mask_prec));
3338 }
3339
3340 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3341 // possible. Basically, see if we can optimize:
3342 //
3343 // [LB, UB] op Z
3344 // into:
3345 // [LB op Z, UB op Z]
3346 //
3347 // If the optimization was successful, accumulate the range in R and
3348 // return TRUE.
3349
3350 static bool
3351 wi_optimize_and_or (irange &r,
3352 enum tree_code code,
3353 tree type,
3354 const wide_int &lh_lb, const wide_int &lh_ub,
3355 const wide_int &rh_lb, const wide_int &rh_ub)
3356 {
3357 // Calculate the singleton mask among the ranges, if any.
3358 wide_int lower_bound, upper_bound, mask;
3359 if (wi::eq_p (rh_lb, rh_ub))
3360 {
3361 mask = rh_lb;
3362 lower_bound = lh_lb;
3363 upper_bound = lh_ub;
3364 }
3365 else if (wi::eq_p (lh_lb, lh_ub))
3366 {
3367 mask = lh_lb;
3368 lower_bound = rh_lb;
3369 upper_bound = rh_ub;
3370 }
3371 else
3372 return false;
3373
3374 // If Z is a constant which (for op | its bitwise not) has n
3375 // consecutive least significant bits cleared followed by m 1
3376 // consecutive bits set immediately above it and either
3377 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
3378 //
3379 // The least significant n bits of all the values in the range are
3380 // cleared or set, the m bits above it are preserved and any bits
3381 // above these are required to be the same for all values in the
3382 // range.
3383 wide_int w = mask;
3384 int m = 0, n = 0;
3385 if (code == BIT_IOR_EXPR)
3386 w = ~w;
3387 if (wi::eq_p (w, 0))
3388 n = w.get_precision ();
3389 else
3390 {
3391 n = wi::ctz (w);
3392 w = ~(w | wi::mask (n, false, w.get_precision ()));
3393 if (wi::eq_p (w, 0))
3394 m = w.get_precision () - n;
3395 else
3396 m = wi::ctz (w) - n;
3397 }
3398 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
3399 if ((new_mask & lower_bound) != (new_mask & upper_bound))
3400 return false;
3401
3402 wide_int res_lb, res_ub;
3403 if (code == BIT_AND_EXPR)
3404 {
3405 res_lb = wi::bit_and (lower_bound, mask);
3406 res_ub = wi::bit_and (upper_bound, mask);
3407 }
3408 else if (code == BIT_IOR_EXPR)
3409 {
3410 res_lb = wi::bit_or (lower_bound, mask);
3411 res_ub = wi::bit_or (upper_bound, mask);
3412 }
3413 else
3414 gcc_unreachable ();
3415 value_range_with_overflow (r, type, res_lb, res_ub);
3416
3417 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3418 if (code == BIT_IOR_EXPR && wi::ne_p (mask, 0))
3419 {
3420 int_range<2> tmp;
3421 tmp.set_nonzero (type);
3422 r.intersect (tmp);
3423 }
3424 return true;
3425 }
3426
3427 // For range [LB, UB] compute two wide_int bit masks.
3428 //
3429 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3430 // for all numbers in the range the bit is 0, otherwise it might be 0
3431 // or 1.
3432 //
3433 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3434 // for all numbers in the range the bit is 1, otherwise it might be 0
3435 // or 1.
3436
3437 void
3438 wi_set_zero_nonzero_bits (tree type,
3439 const wide_int &lb, const wide_int &ub,
3440 wide_int &maybe_nonzero,
3441 wide_int &mustbe_nonzero)
3442 {
3443 signop sign = TYPE_SIGN (type);
3444
3445 if (wi::eq_p (lb, ub))
3446 maybe_nonzero = mustbe_nonzero = lb;
3447 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
3448 {
3449 wide_int xor_mask = lb ^ ub;
3450 maybe_nonzero = lb | ub;
3451 mustbe_nonzero = lb & ub;
3452 if (xor_mask != 0)
3453 {
3454 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
3455 maybe_nonzero.get_precision ());
3456 maybe_nonzero = maybe_nonzero | mask;
3457 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
3458 }
3459 }
3460 else
3461 {
3462 maybe_nonzero = wi::minus_one (lb.get_precision ());
3463 mustbe_nonzero = wi::zero (lb.get_precision ());
3464 }
3465 }
3466
3467 void
3468 operator_bitwise_and::wi_fold (irange &r, tree type,
3469 const wide_int &lh_lb,
3470 const wide_int &lh_ub,
3471 const wide_int &rh_lb,
3472 const wide_int &rh_ub) const
3473 {
3474 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3475 return;
3476
3477 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3478 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3479 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3480 maybe_nonzero_lh, mustbe_nonzero_lh);
3481 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3482 maybe_nonzero_rh, mustbe_nonzero_rh);
3483
3484 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
3485 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
3486 signop sign = TYPE_SIGN (type);
3487 unsigned prec = TYPE_PRECISION (type);
3488 // If both input ranges contain only negative values, we can
3489 // truncate the result range maximum to the minimum of the
3490 // input range maxima.
3491 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
3492 {
3493 new_ub = wi::min (new_ub, lh_ub, sign);
3494 new_ub = wi::min (new_ub, rh_ub, sign);
3495 }
3496 // If either input range contains only non-negative values
3497 // we can truncate the result range maximum to the respective
3498 // maximum of the input range.
3499 if (wi::ge_p (lh_lb, 0, sign))
3500 new_ub = wi::min (new_ub, lh_ub, sign);
3501 if (wi::ge_p (rh_lb, 0, sign))
3502 new_ub = wi::min (new_ub, rh_ub, sign);
3503 // PR68217: In case of signed & sign-bit-CST should
3504 // result in [-INF, 0] instead of [-INF, INF].
3505 if (wi::gt_p (new_lb, new_ub, sign))
3506 {
3507 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
3508 if (sign == SIGNED
3509 && ((wi::eq_p (lh_lb, lh_ub)
3510 && !wi::cmps (lh_lb, sign_bit))
3511 || (wi::eq_p (rh_lb, rh_ub)
3512 && !wi::cmps (rh_lb, sign_bit))))
3513 {
3514 new_lb = wi::min_value (prec, sign);
3515 new_ub = wi::zero (prec);
3516 }
3517 }
3518 // If the limits got swapped around, return varying.
3519 if (wi::gt_p (new_lb, new_ub,sign))
3520 {
3521 if (sign == SIGNED
3522 && wi_optimize_signed_bitwise_op (r, type,
3523 lh_lb, lh_ub,
3524 rh_lb, rh_ub))
3525 return;
3526 r.set_varying (type);
3527 }
3528 else
3529 value_range_with_overflow (r, type, new_lb, new_ub);
3530 }
3531
3532 static void
3533 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
3534 {
3535 if (lhs.undefined_p () || contains_zero_p (lhs))
3536 r.set_varying (type);
3537 else
3538 r.set_nonzero (type);
3539 }
3540
3541 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3542 (otherwise return VAL). VAL and MASK must be zero-extended for
3543 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3544 (to transform signed values into unsigned) and at the end xor
3545 SGNBIT back. */
3546
3547 wide_int
3548 masked_increment (const wide_int &val_in, const wide_int &mask,
3549 const wide_int &sgnbit, unsigned int prec)
3550 {
3551 wide_int bit = wi::one (prec), res;
3552 unsigned int i;
3553
3554 wide_int val = val_in ^ sgnbit;
3555 for (i = 0; i < prec; i++, bit += bit)
3556 {
3557 res = mask;
3558 if ((res & bit) == 0)
3559 continue;
3560 res = bit - 1;
3561 res = wi::bit_and_not (val + bit, res);
3562 res &= mask;
3563 if (wi::gtu_p (res, val))
3564 return res ^ sgnbit;
3565 }
3566 return val ^ sgnbit;
3567 }
3568
3569 // This was shamelessly stolen from register_edge_assert_for_2 and
3570 // adjusted to work with iranges.
3571
3572 void
3573 operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
3574 const irange &lhs,
3575 const irange &op2) const
3576 {
3577 if (!op2.singleton_p ())
3578 {
3579 set_nonzero_range_from_mask (r, type, lhs);
3580 return;
3581 }
3582 unsigned int nprec = TYPE_PRECISION (type);
3583 wide_int cst2v = op2.lower_bound ();
3584 bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
3585 wide_int sgnbit;
3586 if (cst2n)
3587 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3588 else
3589 sgnbit = wi::zero (nprec);
3590
3591 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3592 //
3593 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3594 // maximum unsigned value is ~0. For signed comparison, if CST2
3595 // doesn't have the most significant bit set, handle it similarly. If
3596 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3597 wide_int valv = lhs.lower_bound ();
3598 wide_int minv = valv & cst2v, maxv;
3599 bool we_know_nothing = false;
3600 if (minv != valv)
3601 {
3602 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3603 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3604 if (minv == valv)
3605 {
3606 // If we can't determine anything on this bound, fall
3607 // through and conservatively solve for the other end point.
3608 we_know_nothing = true;
3609 }
3610 }
3611 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3612 if (we_know_nothing)
3613 r.set_varying (type);
3614 else
3615 create_possibly_reversed_range (r, type, minv, maxv);
3616
3617 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3618 //
3619 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3620 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3621 // VAL2 where
3622 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3623 // as maximum.
3624 // For signed comparison, if CST2 doesn't have most significant bit
3625 // set, handle it similarly. If CST2 has MSB set, the maximum is
3626 // the same and minimum is INT_MIN.
3627 valv = lhs.upper_bound ();
3628 minv = valv & cst2v;
3629 if (minv == valv)
3630 maxv = valv;
3631 else
3632 {
3633 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3634 if (maxv == valv)
3635 {
3636 // If we couldn't determine anything on either bound, return
3637 // undefined.
3638 if (we_know_nothing)
3639 r.set_undefined ();
3640 return;
3641 }
3642 maxv -= 1;
3643 }
3644 maxv |= ~cst2v;
3645 minv = sgnbit;
3646 int_range<2> upper_bits;
3647 create_possibly_reversed_range (upper_bits, type, minv, maxv);
3648 r.intersect (upper_bits);
3649 }
3650
3651 bool
3652 operator_bitwise_and::op1_range (irange &r, tree type,
3653 const irange &lhs,
3654 const irange &op2,
3655 relation_trio) const
3656 {
3657 if (lhs.undefined_p ())
3658 return false;
3659 if (types_compatible_p (type, boolean_type_node))
3660 return op_logical_and.op1_range (r, type, lhs, op2);
3661
3662 r.set_undefined ();
3663 for (unsigned i = 0; i < lhs.num_pairs (); ++i)
3664 {
3665 int_range_max chunk (lhs.type (),
3666 lhs.lower_bound (i),
3667 lhs.upper_bound (i));
3668 int_range_max res;
3669 simple_op1_range_solver (res, type, chunk, op2);
3670 r.union_ (res);
3671 }
3672 if (r.undefined_p ())
3673 set_nonzero_range_from_mask (r, type, lhs);
3674
3675 // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
3676 wide_int mask;
3677 if (lhs == op2 && lhs.singleton_p (mask))
3678 {
3679 r.update_bitmask (irange_bitmask (mask, ~mask));
3680 return true;
3681 }
3682
3683 // For 0 = op1 & MASK, op1 is ~MASK.
3684 if (lhs.zero_p () && op2.singleton_p ())
3685 {
3686 wide_int nz = wi::bit_not (op2.get_nonzero_bits ());
3687 int_range<2> tmp (type);
3688 tmp.set_nonzero_bits (nz);
3689 r.intersect (tmp);
3690 }
3691 return true;
3692 }
3693
3694 bool
3695 operator_bitwise_and::op2_range (irange &r, tree type,
3696 const irange &lhs,
3697 const irange &op1,
3698 relation_trio) const
3699 {
3700 return operator_bitwise_and::op1_range (r, type, lhs, op1);
3701 }
3702
3703
3704 class operator_logical_or : public range_operator
3705 {
3706 using range_operator::fold_range;
3707 using range_operator::op1_range;
3708 using range_operator::op2_range;
3709 public:
3710 virtual bool fold_range (irange &r, tree type,
3711 const irange &lh,
3712 const irange &rh,
3713 relation_trio rel = TRIO_VARYING) const;
3714 virtual bool op1_range (irange &r, tree type,
3715 const irange &lhs,
3716 const irange &op2,
3717 relation_trio rel = TRIO_VARYING) const;
3718 virtual bool op2_range (irange &r, tree type,
3719 const irange &lhs,
3720 const irange &op1,
3721 relation_trio rel = TRIO_VARYING) const;
3722 // Check compatibility of all operands.
3723 bool operand_check_p (tree t1, tree t2, tree t3) const final override
3724 { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
3725 } op_logical_or;
3726
3727 bool
3728 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3729 const irange &lh,
3730 const irange &rh,
3731 relation_trio) const
3732 {
3733 if (empty_range_varying (r, type, lh, rh))
3734 return true;
3735
3736 r = lh;
3737 r.union_ (rh);
3738 return true;
3739 }
3740
3741 bool
3742 operator_logical_or::op1_range (irange &r, tree type,
3743 const irange &lhs,
3744 const irange &op2 ATTRIBUTE_UNUSED,
3745 relation_trio) const
3746 {
3747 switch (get_bool_state (r, lhs, type))
3748 {
3749 case BRS_FALSE:
3750 // A false result means both sides of the OR must be false.
3751 r = range_false (type);
3752 break;
3753 default:
3754 // Any other result means only one side has to be true, the
3755 // other side can be anything. so we can't be sure of any result
3756 // here.
3757 r = range_true_and_false (type);
3758 break;
3759 }
3760 return true;
3761 }
3762
3763 bool
3764 operator_logical_or::op2_range (irange &r, tree type,
3765 const irange &lhs,
3766 const irange &op1,
3767 relation_trio) const
3768 {
3769 return operator_logical_or::op1_range (r, type, lhs, op1);
3770 }
3771
3772
3773 void
3774 operator_bitwise_or::update_bitmask (irange &r, const irange &lh,
3775 const irange &rh) const
3776 {
3777 update_known_bitmask (r, BIT_IOR_EXPR, lh, rh);
3778 }
3779
3780 void
3781 operator_bitwise_or::wi_fold (irange &r, tree type,
3782 const wide_int &lh_lb,
3783 const wide_int &lh_ub,
3784 const wide_int &rh_lb,
3785 const wide_int &rh_ub) const
3786 {
3787 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3788 return;
3789
3790 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3791 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3792 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3793 maybe_nonzero_lh, mustbe_nonzero_lh);
3794 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3795 maybe_nonzero_rh, mustbe_nonzero_rh);
3796 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
3797 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
3798 signop sign = TYPE_SIGN (type);
3799 // If the input ranges contain only positive values we can
3800 // truncate the minimum of the result range to the maximum
3801 // of the input range minima.
3802 if (wi::ge_p (lh_lb, 0, sign)
3803 && wi::ge_p (rh_lb, 0, sign))
3804 {
3805 new_lb = wi::max (new_lb, lh_lb, sign);
3806 new_lb = wi::max (new_lb, rh_lb, sign);
3807 }
3808 // If either input range contains only negative values
3809 // we can truncate the minimum of the result range to the
3810 // respective minimum range.
3811 if (wi::lt_p (lh_ub, 0, sign))
3812 new_lb = wi::max (new_lb, lh_lb, sign);
3813 if (wi::lt_p (rh_ub, 0, sign))
3814 new_lb = wi::max (new_lb, rh_lb, sign);
3815 // If the limits got swapped around, return a conservative range.
3816 if (wi::gt_p (new_lb, new_ub, sign))
3817 {
3818 // Make sure that nonzero|X is nonzero.
3819 if (wi::gt_p (lh_lb, 0, sign)
3820 || wi::gt_p (rh_lb, 0, sign)
3821 || wi::lt_p (lh_ub, 0, sign)
3822 || wi::lt_p (rh_ub, 0, sign))
3823 r.set_nonzero (type);
3824 else if (sign == SIGNED
3825 && wi_optimize_signed_bitwise_op (r, type,
3826 lh_lb, lh_ub,
3827 rh_lb, rh_ub))
3828 return;
3829 else
3830 r.set_varying (type);
3831 return;
3832 }
3833 value_range_with_overflow (r, type, new_lb, new_ub);
3834 }
3835
3836 bool
3837 operator_bitwise_or::op1_range (irange &r, tree type,
3838 const irange &lhs,
3839 const irange &op2,
3840 relation_trio) const
3841 {
3842 if (lhs.undefined_p ())
3843 return false;
3844 // If this is really a logical wi_fold, call that.
3845 if (types_compatible_p (type, boolean_type_node))
3846 return op_logical_or.op1_range (r, type, lhs, op2);
3847
3848 if (lhs.zero_p ())
3849 {
3850 r.set_zero (type);
3851 return true;
3852 }
3853 r.set_varying (type);
3854 return true;
3855 }
3856
3857 bool
3858 operator_bitwise_or::op2_range (irange &r, tree type,
3859 const irange &lhs,
3860 const irange &op1,
3861 relation_trio) const
3862 {
3863 return operator_bitwise_or::op1_range (r, type, lhs, op1);
3864 }
3865
3866 void
3867 operator_bitwise_xor::update_bitmask (irange &r, const irange &lh,
3868 const irange &rh) const
3869 {
3870 update_known_bitmask (r, BIT_XOR_EXPR, lh, rh);
3871 }
3872
3873 void
3874 operator_bitwise_xor::wi_fold (irange &r, tree type,
3875 const wide_int &lh_lb,
3876 const wide_int &lh_ub,
3877 const wide_int &rh_lb,
3878 const wide_int &rh_ub) const
3879 {
3880 signop sign = TYPE_SIGN (type);
3881 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3882 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3883 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3884 maybe_nonzero_lh, mustbe_nonzero_lh);
3885 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3886 maybe_nonzero_rh, mustbe_nonzero_rh);
3887
3888 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
3889 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
3890 wide_int result_one_bits
3891 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
3892 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
3893 wide_int new_ub = ~result_zero_bits;
3894 wide_int new_lb = result_one_bits;
3895
3896 // If the range has all positive or all negative values, the result
3897 // is better than VARYING.
3898 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
3899 value_range_with_overflow (r, type, new_lb, new_ub);
3900 else if (sign == SIGNED
3901 && wi_optimize_signed_bitwise_op (r, type,
3902 lh_lb, lh_ub,
3903 rh_lb, rh_ub))
3904 ; /* Do nothing. */
3905 else
3906 r.set_varying (type);
3907
3908 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3909 if (wi::lt_p (lh_ub, rh_lb, sign)
3910 || wi::lt_p (rh_ub, lh_lb, sign)
3911 || wi::ne_p (result_one_bits, 0))
3912 {
3913 int_range<2> tmp;
3914 tmp.set_nonzero (type);
3915 r.intersect (tmp);
3916 }
3917 }
3918
3919 bool
3920 operator_bitwise_xor::op1_op2_relation_effect (irange &lhs_range,
3921 tree type,
3922 const irange &,
3923 const irange &,
3924 relation_kind rel) const
3925 {
3926 if (rel == VREL_VARYING)
3927 return false;
3928
3929 int_range<2> rel_range;
3930
3931 switch (rel)
3932 {
3933 case VREL_EQ:
3934 rel_range.set_zero (type);
3935 break;
3936 case VREL_NE:
3937 rel_range.set_nonzero (type);
3938 break;
3939 default:
3940 return false;
3941 }
3942
3943 lhs_range.intersect (rel_range);
3944 return true;
3945 }
3946
3947 bool
3948 operator_bitwise_xor::op1_range (irange &r, tree type,
3949 const irange &lhs,
3950 const irange &op2,
3951 relation_trio) const
3952 {
3953 if (lhs.undefined_p () || lhs.varying_p ())
3954 {
3955 r = lhs;
3956 return true;
3957 }
3958 if (types_compatible_p (type, boolean_type_node))
3959 {
3960 switch (get_bool_state (r, lhs, type))
3961 {
3962 case BRS_TRUE:
3963 if (op2.varying_p ())
3964 r.set_varying (type);
3965 else if (op2.zero_p ())
3966 r = range_true (type);
3967 // See get_bool_state for the rationale
3968 else if (op2.undefined_p () || contains_zero_p (op2))
3969 r = range_true_and_false (type);
3970 else
3971 r = range_false (type);
3972 break;
3973 case BRS_FALSE:
3974 r = op2;
3975 break;
3976 default:
3977 break;
3978 }
3979 return true;
3980 }
3981 r.set_varying (type);
3982 return true;
3983 }
3984
3985 bool
3986 operator_bitwise_xor::op2_range (irange &r, tree type,
3987 const irange &lhs,
3988 const irange &op1,
3989 relation_trio) const
3990 {
3991 return operator_bitwise_xor::op1_range (r, type, lhs, op1);
3992 }
3993
3994 class operator_trunc_mod : public range_operator
3995 {
3996 using range_operator::op1_range;
3997 using range_operator::op2_range;
3998 using range_operator::update_bitmask;
3999 public:
4000 virtual void wi_fold (irange &r, tree type,
4001 const wide_int &lh_lb,
4002 const wide_int &lh_ub,
4003 const wide_int &rh_lb,
4004 const wide_int &rh_ub) const;
4005 virtual bool op1_range (irange &r, tree type,
4006 const irange &lhs,
4007 const irange &op2,
4008 relation_trio) const;
4009 virtual bool op2_range (irange &r, tree type,
4010 const irange &lhs,
4011 const irange &op1,
4012 relation_trio) const;
4013 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
4014 { update_known_bitmask (r, TRUNC_MOD_EXPR, lh, rh); }
4015 } op_trunc_mod;
4016
4017 void
4018 operator_trunc_mod::wi_fold (irange &r, tree type,
4019 const wide_int &lh_lb,
4020 const wide_int &lh_ub,
4021 const wide_int &rh_lb,
4022 const wide_int &rh_ub) const
4023 {
4024 wide_int new_lb, new_ub, tmp;
4025 signop sign = TYPE_SIGN (type);
4026 unsigned prec = TYPE_PRECISION (type);
4027
4028 // Mod 0 is undefined.
4029 if (wi_zero_p (type, rh_lb, rh_ub))
4030 {
4031 r.set_undefined ();
4032 return;
4033 }
4034
4035 // Check for constant and try to fold.
4036 if (lh_lb == lh_ub && rh_lb == rh_ub)
4037 {
4038 wi::overflow_type ov = wi::OVF_NONE;
4039 tmp = wi::mod_trunc (lh_lb, rh_lb, sign, &ov);
4040 if (ov == wi::OVF_NONE)
4041 {
4042 r = int_range<2> (type, tmp, tmp);
4043 return;
4044 }
4045 }
4046
4047 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
4048 new_ub = rh_ub - 1;
4049 if (sign == SIGNED)
4050 {
4051 tmp = -1 - rh_lb;
4052 new_ub = wi::smax (new_ub, tmp);
4053 }
4054
4055 if (sign == UNSIGNED)
4056 new_lb = wi::zero (prec);
4057 else
4058 {
4059 new_lb = -new_ub;
4060 tmp = lh_lb;
4061 if (wi::gts_p (tmp, 0))
4062 tmp = wi::zero (prec);
4063 new_lb = wi::smax (new_lb, tmp);
4064 }
4065 tmp = lh_ub;
4066 if (sign == SIGNED && wi::neg_p (tmp))
4067 tmp = wi::zero (prec);
4068 new_ub = wi::min (new_ub, tmp, sign);
4069
4070 value_range_with_overflow (r, type, new_lb, new_ub);
4071 }
4072
4073 bool
4074 operator_trunc_mod::op1_range (irange &r, tree type,
4075 const irange &lhs,
4076 const irange &,
4077 relation_trio) const
4078 {
4079 if (lhs.undefined_p ())
4080 return false;
4081 // PR 91029.
4082 signop sign = TYPE_SIGN (type);
4083 unsigned prec = TYPE_PRECISION (type);
4084 // (a % b) >= x && x > 0 , then a >= x.
4085 if (wi::gt_p (lhs.lower_bound (), 0, sign))
4086 {
4087 r = value_range (type, lhs.lower_bound (), wi::max_value (prec, sign));
4088 return true;
4089 }
4090 // (a % b) <= x && x < 0 , then a <= x.
4091 if (wi::lt_p (lhs.upper_bound (), 0, sign))
4092 {
4093 r = value_range (type, wi::min_value (prec, sign), lhs.upper_bound ());
4094 return true;
4095 }
4096 return false;
4097 }
4098
4099 bool
4100 operator_trunc_mod::op2_range (irange &r, tree type,
4101 const irange &lhs,
4102 const irange &,
4103 relation_trio) const
4104 {
4105 if (lhs.undefined_p ())
4106 return false;
4107 // PR 91029.
4108 signop sign = TYPE_SIGN (type);
4109 unsigned prec = TYPE_PRECISION (type);
4110 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
4111 // or b > x for unsigned.
4112 if (wi::gt_p (lhs.lower_bound (), 0, sign))
4113 {
4114 if (sign == SIGNED)
4115 r = value_range (type, wi::neg (lhs.lower_bound ()),
4116 lhs.lower_bound (), VR_ANTI_RANGE);
4117 else if (wi::lt_p (lhs.lower_bound (), wi::max_value (prec, sign),
4118 sign))
4119 r = value_range (type, lhs.lower_bound () + 1,
4120 wi::max_value (prec, sign));
4121 else
4122 return false;
4123 return true;
4124 }
4125 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
4126 if (wi::lt_p (lhs.upper_bound (), 0, sign))
4127 {
4128 if (wi::gt_p (lhs.upper_bound (), wi::min_value (prec, sign), sign))
4129 r = value_range (type, lhs.upper_bound (),
4130 wi::neg (lhs.upper_bound ()), VR_ANTI_RANGE);
4131 else
4132 return false;
4133 return true;
4134 }
4135 return false;
4136 }
4137
4138
4139 class operator_logical_not : public range_operator
4140 {
4141 using range_operator::fold_range;
4142 using range_operator::op1_range;
4143 public:
4144 virtual bool fold_range (irange &r, tree type,
4145 const irange &lh,
4146 const irange &rh,
4147 relation_trio rel = TRIO_VARYING) const;
4148 virtual bool op1_range (irange &r, tree type,
4149 const irange &lhs,
4150 const irange &op2,
4151 relation_trio rel = TRIO_VARYING) const;
4152 // Check compatibility of LHS and op1.
4153 bool operand_check_p (tree t1, tree t2, tree) const final override
4154 { return range_compatible_p (t1, t2); }
4155 } op_logical_not;
4156
4157 // Folding a logical NOT, oddly enough, involves doing nothing on the
4158 // forward pass through. During the initial walk backwards, the
4159 // logical NOT reversed the desired outcome on the way back, so on the
4160 // way forward all we do is pass the range forward.
4161 //
4162 // b_2 = x_1 < 20
4163 // b_3 = !b_2
4164 // if (b_3)
4165 // to determine the TRUE branch, walking backward
4166 // if (b_3) if ([1,1])
4167 // b_3 = !b_2 [1,1] = ![0,0]
4168 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
4169 // which is the result we are looking for.. so.. pass it through.
4170
4171 bool
4172 operator_logical_not::fold_range (irange &r, tree type,
4173 const irange &lh,
4174 const irange &rh ATTRIBUTE_UNUSED,
4175 relation_trio) const
4176 {
4177 if (empty_range_varying (r, type, lh, rh))
4178 return true;
4179
4180 r = lh;
4181 if (!lh.varying_p () && !lh.undefined_p ())
4182 r.invert ();
4183
4184 return true;
4185 }
4186
4187 bool
4188 operator_logical_not::op1_range (irange &r,
4189 tree type,
4190 const irange &lhs,
4191 const irange &op2,
4192 relation_trio) const
4193 {
4194 // Logical NOT is involutary...do it again.
4195 return fold_range (r, type, lhs, op2);
4196 }
4197
4198 bool
4199 operator_bitwise_not::fold_range (irange &r, tree type,
4200 const irange &lh,
4201 const irange &rh,
4202 relation_trio) const
4203 {
4204 if (empty_range_varying (r, type, lh, rh))
4205 return true;
4206
4207 if (types_compatible_p (type, boolean_type_node))
4208 return op_logical_not.fold_range (r, type, lh, rh);
4209
4210 // ~X is simply -1 - X.
4211 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
4212 wi::minus_one (TYPE_PRECISION (type)));
4213 return range_op_handler (MINUS_EXPR).fold_range (r, type, minusone, lh);
4214 }
4215
4216 bool
4217 operator_bitwise_not::op1_range (irange &r, tree type,
4218 const irange &lhs,
4219 const irange &op2,
4220 relation_trio) const
4221 {
4222 if (lhs.undefined_p ())
4223 return false;
4224 if (types_compatible_p (type, boolean_type_node))
4225 return op_logical_not.op1_range (r, type, lhs, op2);
4226
4227 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
4228 return fold_range (r, type, lhs, op2);
4229 }
4230
4231 void
4232 operator_bitwise_not::update_bitmask (irange &r, const irange &lh,
4233 const irange &rh) const
4234 {
4235 update_known_bitmask (r, BIT_NOT_EXPR, lh, rh);
4236 }
4237
4238
4239 bool
4240 operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4241 const irange &lh,
4242 const irange &rh ATTRIBUTE_UNUSED,
4243 relation_trio) const
4244 {
4245 r = lh;
4246 return true;
4247 }
4248
4249
4250 // Determine if there is a relationship between LHS and OP1.
4251
4252 relation_kind
4253 operator_identity::lhs_op1_relation (const irange &lhs,
4254 const irange &op1 ATTRIBUTE_UNUSED,
4255 const irange &op2 ATTRIBUTE_UNUSED,
4256 relation_kind) const
4257 {
4258 if (lhs.undefined_p ())
4259 return VREL_VARYING;
4260 // Simply a copy, so they are equivalent.
4261 return VREL_EQ;
4262 }
4263
4264 bool
4265 operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4266 const irange &lh,
4267 const irange &rh ATTRIBUTE_UNUSED,
4268 relation_trio) const
4269 {
4270 r = lh;
4271 return true;
4272 }
4273
4274 bool
4275 operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
4276 const irange &lhs,
4277 const irange &op2 ATTRIBUTE_UNUSED,
4278 relation_trio) const
4279 {
4280 r = lhs;
4281 return true;
4282 }
4283
4284
4285 class operator_unknown : public range_operator
4286 {
4287 using range_operator::fold_range;
4288 public:
4289 virtual bool fold_range (irange &r, tree type,
4290 const irange &op1,
4291 const irange &op2,
4292 relation_trio rel = TRIO_VARYING) const;
4293 } op_unknown;
4294
4295 bool
4296 operator_unknown::fold_range (irange &r, tree type,
4297 const irange &lh ATTRIBUTE_UNUSED,
4298 const irange &rh ATTRIBUTE_UNUSED,
4299 relation_trio) const
4300 {
4301 r.set_varying (type);
4302 return true;
4303 }
4304
4305
4306 void
4307 operator_abs::wi_fold (irange &r, tree type,
4308 const wide_int &lh_lb, const wide_int &lh_ub,
4309 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4310 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4311 {
4312 wide_int min, max;
4313 signop sign = TYPE_SIGN (type);
4314 unsigned prec = TYPE_PRECISION (type);
4315
4316 // Pass through LH for the easy cases.
4317 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
4318 {
4319 r = int_range<1> (type, lh_lb, lh_ub);
4320 return;
4321 }
4322
4323 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4324 // a useful range.
4325 wide_int min_value = wi::min_value (prec, sign);
4326 wide_int max_value = wi::max_value (prec, sign);
4327 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
4328 {
4329 r.set_varying (type);
4330 return;
4331 }
4332
4333 // ABS_EXPR may flip the range around, if the original range
4334 // included negative values.
4335 if (wi::eq_p (lh_lb, min_value))
4336 {
4337 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4338 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4339 if (wi::eq_p (lh_ub, min_value))
4340 {
4341 r = int_range<1> (type, min_value, min_value);
4342 return;
4343 }
4344 min = max_value;
4345 }
4346 else
4347 min = wi::abs (lh_lb);
4348
4349 if (wi::eq_p (lh_ub, min_value))
4350 max = max_value;
4351 else
4352 max = wi::abs (lh_ub);
4353
4354 // If the range contains zero then we know that the minimum value in the
4355 // range will be zero.
4356 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
4357 {
4358 if (wi::gt_p (min, max, sign))
4359 max = min;
4360 min = wi::zero (prec);
4361 }
4362 else
4363 {
4364 // If the range was reversed, swap MIN and MAX.
4365 if (wi::gt_p (min, max, sign))
4366 std::swap (min, max);
4367 }
4368
4369 // If the new range has its limits swapped around (MIN > MAX), then
4370 // the operation caused one of them to wrap around. The only thing
4371 // we know is that the result is positive.
4372 if (wi::gt_p (min, max, sign))
4373 {
4374 min = wi::zero (prec);
4375 max = max_value;
4376 }
4377 r = int_range<1> (type, min, max);
4378 }
4379
4380 bool
4381 operator_abs::op1_range (irange &r, tree type,
4382 const irange &lhs,
4383 const irange &op2,
4384 relation_trio) const
4385 {
4386 if (empty_range_varying (r, type, lhs, op2))
4387 return true;
4388 if (TYPE_UNSIGNED (type))
4389 {
4390 r = lhs;
4391 return true;
4392 }
4393 // Start with the positives because negatives are an impossible result.
4394 int_range_max positives = range_positives (type);
4395 positives.intersect (lhs);
4396 r = positives;
4397 // Then add the negative of each pair:
4398 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4399 for (unsigned i = 0; i < positives.num_pairs (); ++i)
4400 r.union_ (int_range<1> (type,
4401 -positives.upper_bound (i),
4402 -positives.lower_bound (i)));
4403 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4404 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4405 wide_int min_value = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4406 wide_int lb = lhs.lower_bound ();
4407 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lb, min_value))
4408 r.union_ (int_range<2> (type, lb, lb));
4409 return true;
4410 }
4411
4412 void
4413 operator_abs::update_bitmask (irange &r, const irange &lh,
4414 const irange &rh) const
4415 {
4416 update_known_bitmask (r, ABS_EXPR, lh, rh);
4417 }
4418
4419 class operator_absu : public range_operator
4420 {
4421 using range_operator::update_bitmask;
4422 public:
4423 virtual void wi_fold (irange &r, tree type,
4424 const wide_int &lh_lb, const wide_int &lh_ub,
4425 const wide_int &rh_lb, const wide_int &rh_ub) const;
4426 virtual void update_bitmask (irange &r, const irange &lh,
4427 const irange &rh) const final override;
4428 } op_absu;
4429
4430 void
4431 operator_absu::wi_fold (irange &r, tree type,
4432 const wide_int &lh_lb, const wide_int &lh_ub,
4433 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4434 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4435 {
4436 wide_int new_lb, new_ub;
4437
4438 // Pass through VR0 the easy cases.
4439 if (wi::ges_p (lh_lb, 0))
4440 {
4441 new_lb = lh_lb;
4442 new_ub = lh_ub;
4443 }
4444 else
4445 {
4446 new_lb = wi::abs (lh_lb);
4447 new_ub = wi::abs (lh_ub);
4448
4449 // If the range contains zero then we know that the minimum
4450 // value in the range will be zero.
4451 if (wi::ges_p (lh_ub, 0))
4452 {
4453 if (wi::gtu_p (new_lb, new_ub))
4454 new_ub = new_lb;
4455 new_lb = wi::zero (TYPE_PRECISION (type));
4456 }
4457 else
4458 std::swap (new_lb, new_ub);
4459 }
4460
4461 gcc_checking_assert (TYPE_UNSIGNED (type));
4462 r = int_range<1> (type, new_lb, new_ub);
4463 }
4464
4465 void
4466 operator_absu::update_bitmask (irange &r, const irange &lh,
4467 const irange &rh) const
4468 {
4469 update_known_bitmask (r, ABSU_EXPR, lh, rh);
4470 }
4471
4472
4473 bool
4474 operator_negate::fold_range (irange &r, tree type,
4475 const irange &lh,
4476 const irange &rh,
4477 relation_trio) const
4478 {
4479 if (empty_range_varying (r, type, lh, rh))
4480 return true;
4481
4482 // -X is simply 0 - X.
4483 int_range<1> zero;
4484 zero.set_zero (type);
4485 return range_op_handler (MINUS_EXPR).fold_range (r, type, zero, lh);
4486 }
4487
4488 bool
4489 operator_negate::op1_range (irange &r, tree type,
4490 const irange &lhs,
4491 const irange &op2,
4492 relation_trio) const
4493 {
4494 // NEGATE is involutory.
4495 return fold_range (r, type, lhs, op2);
4496 }
4497
4498
4499 bool
4500 operator_addr_expr::fold_range (irange &r, tree type,
4501 const irange &lh,
4502 const irange &rh,
4503 relation_trio) const
4504 {
4505 if (empty_range_varying (r, type, lh, rh))
4506 return true;
4507
4508 // Return a non-null pointer of the LHS type (passed in op2).
4509 if (lh.zero_p ())
4510 r.set_zero (type);
4511 else if (lh.undefined_p () || contains_zero_p (lh))
4512 r.set_varying (type);
4513 else
4514 r.set_nonzero (type);
4515 return true;
4516 }
4517
4518 bool
4519 operator_addr_expr::op1_range (irange &r, tree type,
4520 const irange &lhs,
4521 const irange &op2,
4522 relation_trio) const
4523 {
4524 if (empty_range_varying (r, type, lhs, op2))
4525 return true;
4526
4527 // Return a non-null pointer of the LHS type (passed in op2), but only
4528 // if we cant overflow, eitherwise a no-zero offset could wrap to zero.
4529 // See PR 111009.
4530 if (!lhs.undefined_p () && !contains_zero_p (lhs) && TYPE_OVERFLOW_UNDEFINED (type))
4531 r.set_nonzero (type);
4532 else
4533 r.set_varying (type);
4534 return true;
4535 }
4536 \f
4537 // Initialize any integral operators to the primary table
4538
4539 void
4540 range_op_table::initialize_integral_ops ()
4541 {
4542 set (TRUNC_DIV_EXPR, op_trunc_div);
4543 set (FLOOR_DIV_EXPR, op_floor_div);
4544 set (ROUND_DIV_EXPR, op_round_div);
4545 set (CEIL_DIV_EXPR, op_ceil_div);
4546 set (EXACT_DIV_EXPR, op_exact_div);
4547 set (LSHIFT_EXPR, op_lshift);
4548 set (RSHIFT_EXPR, op_rshift);
4549 set (TRUTH_AND_EXPR, op_logical_and);
4550 set (TRUTH_OR_EXPR, op_logical_or);
4551 set (TRUNC_MOD_EXPR, op_trunc_mod);
4552 set (TRUTH_NOT_EXPR, op_logical_not);
4553 set (IMAGPART_EXPR, op_unknown);
4554 set (REALPART_EXPR, op_unknown);
4555 set (ABSU_EXPR, op_absu);
4556 set (OP_WIDEN_MULT_SIGNED, op_widen_mult_signed);
4557 set (OP_WIDEN_MULT_UNSIGNED, op_widen_mult_unsigned);
4558 set (OP_WIDEN_PLUS_SIGNED, op_widen_plus_signed);
4559 set (OP_WIDEN_PLUS_UNSIGNED, op_widen_plus_unsigned);
4560
4561 }
4562
4563 bool
4564 operator_plus::overflow_free_p (const irange &lh, const irange &rh,
4565 relation_trio) const
4566 {
4567 if (lh.undefined_p () || rh.undefined_p ())
4568 return false;
4569
4570 tree type = lh.type ();
4571 if (TYPE_OVERFLOW_UNDEFINED (type))
4572 return true;
4573
4574 wi::overflow_type ovf;
4575 signop sgn = TYPE_SIGN (type);
4576 wide_int wmax0 = lh.upper_bound ();
4577 wide_int wmax1 = rh.upper_bound ();
4578 wi::add (wmax0, wmax1, sgn, &ovf);
4579 if (ovf != wi::OVF_NONE)
4580 return false;
4581
4582 if (TYPE_UNSIGNED (type))
4583 return true;
4584
4585 wide_int wmin0 = lh.lower_bound ();
4586 wide_int wmin1 = rh.lower_bound ();
4587 wi::add (wmin0, wmin1, sgn, &ovf);
4588 if (ovf != wi::OVF_NONE)
4589 return false;
4590
4591 return true;
4592 }
4593
4594 bool
4595 operator_minus::overflow_free_p (const irange &lh, const irange &rh,
4596 relation_trio) const
4597 {
4598 if (lh.undefined_p () || rh.undefined_p ())
4599 return false;
4600
4601 tree type = lh.type ();
4602 if (TYPE_OVERFLOW_UNDEFINED (type))
4603 return true;
4604
4605 wi::overflow_type ovf;
4606 signop sgn = TYPE_SIGN (type);
4607 wide_int wmin0 = lh.lower_bound ();
4608 wide_int wmax1 = rh.upper_bound ();
4609 wi::sub (wmin0, wmax1, sgn, &ovf);
4610 if (ovf != wi::OVF_NONE)
4611 return false;
4612
4613 if (TYPE_UNSIGNED (type))
4614 return true;
4615
4616 wide_int wmax0 = lh.upper_bound ();
4617 wide_int wmin1 = rh.lower_bound ();
4618 wi::sub (wmax0, wmin1, sgn, &ovf);
4619 if (ovf != wi::OVF_NONE)
4620 return false;
4621
4622 return true;
4623 }
4624
4625 bool
4626 operator_mult::overflow_free_p (const irange &lh, const irange &rh,
4627 relation_trio) const
4628 {
4629 if (lh.undefined_p () || rh.undefined_p ())
4630 return false;
4631
4632 tree type = lh.type ();
4633 if (TYPE_OVERFLOW_UNDEFINED (type))
4634 return true;
4635
4636 wi::overflow_type ovf;
4637 signop sgn = TYPE_SIGN (type);
4638 wide_int wmax0 = lh.upper_bound ();
4639 wide_int wmax1 = rh.upper_bound ();
4640 wi::mul (wmax0, wmax1, sgn, &ovf);
4641 if (ovf != wi::OVF_NONE)
4642 return false;
4643
4644 if (TYPE_UNSIGNED (type))
4645 return true;
4646
4647 wide_int wmin0 = lh.lower_bound ();
4648 wide_int wmin1 = rh.lower_bound ();
4649 wi::mul (wmin0, wmin1, sgn, &ovf);
4650 if (ovf != wi::OVF_NONE)
4651 return false;
4652
4653 wi::mul (wmin0, wmax1, sgn, &ovf);
4654 if (ovf != wi::OVF_NONE)
4655 return false;
4656
4657 wi::mul (wmax0, wmin1, sgn, &ovf);
4658 if (ovf != wi::OVF_NONE)
4659 return false;
4660
4661 return true;
4662 }
4663
4664 #if CHECKING_P
4665 #include "selftest.h"
4666
4667 namespace selftest
4668 {
4669 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4670 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4671 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4672 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4673 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4674 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4675
4676 static void
4677 range_op_cast_tests ()
4678 {
4679 int_range<2> r0, r1, r2, rold;
4680 r0.set_varying (integer_type_node);
4681 wide_int maxint = r0.upper_bound ();
4682
4683 // If a range is in any way outside of the range for the converted
4684 // to range, default to the range for the new type.
4685 r0.set_varying (short_integer_type_node);
4686 wide_int minshort = r0.lower_bound ();
4687 wide_int maxshort = r0.upper_bound ();
4688 if (TYPE_PRECISION (integer_type_node)
4689 > TYPE_PRECISION (short_integer_type_node))
4690 {
4691 r1 = int_range<1> (integer_type_node,
4692 wi::zero (TYPE_PRECISION (integer_type_node)),
4693 maxint);
4694 range_cast (r1, short_integer_type_node);
4695 ASSERT_TRUE (r1.lower_bound () == minshort
4696 && r1.upper_bound() == maxshort);
4697 }
4698
4699 // (unsigned char)[-5,-1] => [251,255].
4700 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (-1));
4701 range_cast (r0, unsigned_char_type_node);
4702 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node,
4703 UCHAR (251), UCHAR (255)));
4704 range_cast (r0, signed_char_type_node);
4705 ASSERT_TRUE (r0 == rold);
4706
4707 // (signed char)[15, 150] => [-128,-106][15,127].
4708 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (15), UCHAR (150));
4709 range_cast (r0, signed_char_type_node);
4710 r1 = int_range<1> (signed_char_type_node, SCHAR (15), SCHAR (127));
4711 r2 = int_range<1> (signed_char_type_node, SCHAR (-128), SCHAR (-106));
4712 r1.union_ (r2);
4713 ASSERT_TRUE (r1 == r0);
4714 range_cast (r0, unsigned_char_type_node);
4715 ASSERT_TRUE (r0 == rold);
4716
4717 // (unsigned char)[-5, 5] => [0,5][251,255].
4718 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (5));
4719 range_cast (r0, unsigned_char_type_node);
4720 r1 = int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255));
4721 r2 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4722 r1.union_ (r2);
4723 ASSERT_TRUE (r0 == r1);
4724 range_cast (r0, signed_char_type_node);
4725 ASSERT_TRUE (r0 == rold);
4726
4727 // (unsigned char)[-5,5] => [0,5][251,255].
4728 r0 = int_range<1> (integer_type_node, INT (-5), INT (5));
4729 range_cast (r0, unsigned_char_type_node);
4730 r1 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4731 r1.union_ (int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255)));
4732 ASSERT_TRUE (r0 == r1);
4733
4734 // (unsigned char)[5U,1974U] => [0,255].
4735 r0 = int_range<1> (unsigned_type_node, UINT (5), UINT (1974));
4736 range_cast (r0, unsigned_char_type_node);
4737 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (255)));
4738 range_cast (r0, integer_type_node);
4739 // Going to a wider range should not sign extend.
4740 ASSERT_TRUE (r0 == int_range<1> (integer_type_node, INT (0), INT (255)));
4741
4742 // (unsigned char)[-350,15] => [0,255].
4743 r0 = int_range<1> (integer_type_node, INT (-350), INT (15));
4744 range_cast (r0, unsigned_char_type_node);
4745 ASSERT_TRUE (r0 == (int_range<1>
4746 (unsigned_char_type_node,
4747 min_limit (unsigned_char_type_node),
4748 max_limit (unsigned_char_type_node))));
4749
4750 // Casting [-120,20] from signed char to unsigned short.
4751 // => [0, 20][0xff88, 0xffff].
4752 r0 = int_range<1> (signed_char_type_node, SCHAR (-120), SCHAR (20));
4753 range_cast (r0, short_unsigned_type_node);
4754 r1 = int_range<1> (short_unsigned_type_node, UINT16 (0), UINT16 (20));
4755 r2 = int_range<1> (short_unsigned_type_node,
4756 UINT16 (0xff88), UINT16 (0xffff));
4757 r1.union_ (r2);
4758 ASSERT_TRUE (r0 == r1);
4759 // A truncating cast back to signed char will work because [-120, 20]
4760 // is representable in signed char.
4761 range_cast (r0, signed_char_type_node);
4762 ASSERT_TRUE (r0 == int_range<1> (signed_char_type_node,
4763 SCHAR (-120), SCHAR (20)));
4764
4765 // unsigned char -> signed short
4766 // (signed short)[(unsigned char)25, (unsigned char)250]
4767 // => [(signed short)25, (signed short)250]
4768 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (25), UCHAR (250));
4769 range_cast (r0, short_integer_type_node);
4770 r1 = int_range<1> (short_integer_type_node, INT16 (25), INT16 (250));
4771 ASSERT_TRUE (r0 == r1);
4772 range_cast (r0, unsigned_char_type_node);
4773 ASSERT_TRUE (r0 == rold);
4774
4775 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4776 r0 = int_range<1> (long_long_integer_type_node,
4777 min_limit (long_long_integer_type_node),
4778 max_limit (long_long_integer_type_node));
4779 range_cast (r0, short_unsigned_type_node);
4780 r1 = int_range<1> (short_unsigned_type_node,
4781 min_limit (short_unsigned_type_node),
4782 max_limit (short_unsigned_type_node));
4783 ASSERT_TRUE (r0 == r1);
4784
4785 // Casting NONZERO to a narrower type will wrap/overflow so
4786 // it's just the entire range for the narrower type.
4787 //
4788 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4789 // is outside of the range of a smaller range, return the full
4790 // smaller range.
4791 if (TYPE_PRECISION (integer_type_node)
4792 > TYPE_PRECISION (short_integer_type_node))
4793 {
4794 r0.set_nonzero (integer_type_node);
4795 range_cast (r0, short_integer_type_node);
4796 r1 = int_range<1> (short_integer_type_node,
4797 min_limit (short_integer_type_node),
4798 max_limit (short_integer_type_node));
4799 ASSERT_TRUE (r0 == r1);
4800 }
4801
4802 // Casting NONZERO from a narrower signed to a wider signed.
4803 //
4804 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4805 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4806 r0.set_nonzero (short_integer_type_node);
4807 range_cast (r0, integer_type_node);
4808 r1 = int_range<1> (integer_type_node, INT (-32768), INT (-1));
4809 r2 = int_range<1> (integer_type_node, INT (1), INT (32767));
4810 r1.union_ (r2);
4811 ASSERT_TRUE (r0 == r1);
4812 }
4813
4814 static void
4815 range_op_lshift_tests ()
4816 {
4817 // Test that 0x808.... & 0x8.... still contains 0x8....
4818 // for a large set of numbers.
4819 {
4820 int_range_max res;
4821 tree big_type = long_long_unsigned_type_node;
4822 unsigned big_prec = TYPE_PRECISION (big_type);
4823 // big_num = 0x808,0000,0000,0000
4824 wide_int big_num = wi::lshift (wi::uhwi (0x808, big_prec),
4825 wi::uhwi (48, big_prec));
4826 op_bitwise_and.fold_range (res, big_type,
4827 int_range <1> (big_type),
4828 int_range <1> (big_type, big_num, big_num));
4829 // val = 0x8,0000,0000,0000
4830 wide_int val = wi::lshift (wi::uhwi (8, big_prec),
4831 wi::uhwi (48, big_prec));
4832 ASSERT_TRUE (res.contains_p (val));
4833 }
4834
4835 if (TYPE_PRECISION (unsigned_type_node) > 31)
4836 {
4837 // unsigned VARYING = op1 << 1 should be VARYING.
4838 int_range<2> lhs (unsigned_type_node);
4839 int_range<2> shift (unsigned_type_node, INT (1), INT (1));
4840 int_range_max op1;
4841 op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
4842 ASSERT_TRUE (op1.varying_p ());
4843
4844 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4845 int_range<2> zero (unsigned_type_node, UINT (0), UINT (0));
4846 op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
4847 ASSERT_TRUE (op1.num_pairs () == 2);
4848 // Remove the [0,0] range.
4849 op1.intersect (zero);
4850 ASSERT_TRUE (op1.num_pairs () == 1);
4851 // op1 << 1 should be [0x8000,0x8000] << 1,
4852 // which should result in [0,0].
4853 int_range_max result;
4854 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4855 ASSERT_TRUE (result == zero);
4856 }
4857 // signed VARYING = op1 << 1 should be VARYING.
4858 if (TYPE_PRECISION (integer_type_node) > 31)
4859 {
4860 // unsigned VARYING = op1 << 1 should be VARYING.
4861 int_range<2> lhs (integer_type_node);
4862 int_range<2> shift (integer_type_node, INT (1), INT (1));
4863 int_range_max op1;
4864 op_lshift.op1_range (op1, integer_type_node, lhs, shift);
4865 ASSERT_TRUE (op1.varying_p ());
4866
4867 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4868 int_range<2> zero (integer_type_node, INT (0), INT (0));
4869 op_lshift.op1_range (op1, integer_type_node, zero, shift);
4870 ASSERT_TRUE (op1.num_pairs () == 2);
4871 // Remove the [0,0] range.
4872 op1.intersect (zero);
4873 ASSERT_TRUE (op1.num_pairs () == 1);
4874 // op1 << 1 should be [0x8000,0x8000] << 1,
4875 // which should result in [0,0].
4876 int_range_max result;
4877 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4878 ASSERT_TRUE (result == zero);
4879 }
4880 }
4881
4882 static void
4883 range_op_rshift_tests ()
4884 {
4885 // unsigned: [3, MAX] = OP1 >> 1
4886 {
4887 int_range_max lhs (unsigned_type_node,
4888 UINT (3), max_limit (unsigned_type_node));
4889 int_range_max one (unsigned_type_node,
4890 wi::one (TYPE_PRECISION (unsigned_type_node)),
4891 wi::one (TYPE_PRECISION (unsigned_type_node)));
4892 int_range_max op1;
4893 op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
4894 ASSERT_FALSE (op1.contains_p (UINT (3)));
4895 }
4896
4897 // signed: [3, MAX] = OP1 >> 1
4898 {
4899 int_range_max lhs (integer_type_node,
4900 INT (3), max_limit (integer_type_node));
4901 int_range_max one (integer_type_node, INT (1), INT (1));
4902 int_range_max op1;
4903 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4904 ASSERT_FALSE (op1.contains_p (INT (-2)));
4905 }
4906
4907 // This is impossible, so OP1 should be [].
4908 // signed: [MIN, MIN] = OP1 >> 1
4909 {
4910 int_range_max lhs (integer_type_node,
4911 min_limit (integer_type_node),
4912 min_limit (integer_type_node));
4913 int_range_max one (integer_type_node, INT (1), INT (1));
4914 int_range_max op1;
4915 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4916 ASSERT_TRUE (op1.undefined_p ());
4917 }
4918
4919 // signed: ~[-1] = OP1 >> 31
4920 if (TYPE_PRECISION (integer_type_node) > 31)
4921 {
4922 int_range_max lhs (integer_type_node, INT (-1), INT (-1), VR_ANTI_RANGE);
4923 int_range_max shift (integer_type_node, INT (31), INT (31));
4924 int_range_max op1;
4925 op_rshift.op1_range (op1, integer_type_node, lhs, shift);
4926 int_range_max negatives = range_negatives (integer_type_node);
4927 negatives.intersect (op1);
4928 ASSERT_TRUE (negatives.undefined_p ());
4929 }
4930 }
4931
4932 static void
4933 range_op_bitwise_and_tests ()
4934 {
4935 int_range_max res;
4936 wide_int min = min_limit (integer_type_node);
4937 wide_int max = max_limit (integer_type_node);
4938 wide_int tiny = wi::add (min, wi::one (TYPE_PRECISION (integer_type_node)));
4939 int_range_max i1 (integer_type_node, tiny, max);
4940 int_range_max i2 (integer_type_node, INT (255), INT (255));
4941
4942 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4943 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4944 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4945
4946 // VARYING = OP1 & 255: OP1 is VARYING
4947 i1 = int_range<1> (integer_type_node);
4948 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4949 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4950
4951 // For 0 = x & MASK, x is ~MASK.
4952 {
4953 int_range<2> zero (integer_type_node, INT (0), INT (0));
4954 int_range<2> mask = int_range<2> (integer_type_node, INT (7), INT (7));
4955 op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
4956 wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
4957 ASSERT_TRUE (res.get_nonzero_bits () == inv);
4958 }
4959
4960 // (NONZERO | X) is nonzero.
4961 i1.set_nonzero (integer_type_node);
4962 i2.set_varying (integer_type_node);
4963 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4964 ASSERT_TRUE (res.nonzero_p ());
4965
4966 // (NEGATIVE | X) is nonzero.
4967 i1 = int_range<1> (integer_type_node, INT (-5), INT (-3));
4968 i2.set_varying (integer_type_node);
4969 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4970 ASSERT_FALSE (res.contains_p (INT (0)));
4971 }
4972
4973 static void
4974 range_relational_tests ()
4975 {
4976 int_range<2> lhs (unsigned_char_type_node);
4977 int_range<2> op1 (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4978 int_range<2> op2 (unsigned_char_type_node, UCHAR (20), UCHAR (20));
4979
4980 // Never wrapping additions mean LHS > OP1.
4981 relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4982 ASSERT_TRUE (code == VREL_GT);
4983
4984 // Most wrapping additions mean nothing...
4985 op1 = int_range<2> (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4986 op2 = int_range<2> (unsigned_char_type_node, UCHAR (0), UCHAR (255));
4987 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4988 ASSERT_TRUE (code == VREL_VARYING);
4989
4990 // However, always wrapping additions mean LHS < OP1.
4991 op1 = int_range<2> (unsigned_char_type_node, UCHAR (1), UCHAR (255));
4992 op2 = int_range<2> (unsigned_char_type_node, UCHAR (255), UCHAR (255));
4993 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4994 ASSERT_TRUE (code == VREL_LT);
4995 }
4996
4997 void
4998 range_op_tests ()
4999 {
5000 range_op_rshift_tests ();
5001 range_op_lshift_tests ();
5002 range_op_bitwise_and_tests ();
5003 range_op_cast_tests ();
5004 range_relational_tests ();
5005
5006 extern void range_op_float_tests ();
5007 range_op_float_tests ();
5008 }
5009
5010 } // namespace selftest
5011
5012 #endif // CHECKING_P