1 /* Code for range operators.
2 Copyright (C) 2017-2024 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "insn-codes.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "cfganal.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
43 #include "tree-eh.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "wide-int.h"
47 #include "value-relation.h"
48 #include "range-op.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
51
52 // Instantiate the operators which apply to multiple types here.
53
54 operator_equal op_equal;
55 operator_not_equal op_not_equal;
56 operator_lt op_lt;
57 operator_le op_le;
58 operator_gt op_gt;
59 operator_ge op_ge;
60 operator_identity op_ident;
61 operator_cst op_cst;
62 operator_cast op_cast;
63 operator_plus op_plus;
64 operator_abs op_abs;
65 operator_minus op_minus;
66 operator_negate op_negate;
67 operator_mult op_mult;
68 operator_addr_expr op_addr;
69 operator_bitwise_not op_bitwise_not;
70 operator_bitwise_xor op_bitwise_xor;
71 operator_bitwise_and op_bitwise_and;
72 operator_bitwise_or op_bitwise_or;
73 operator_min op_min;
74 operator_max op_max;
75
76 // Instantiate a range operator table.
77 range_op_table operator_table;
78
79 // Invoke the initialization routines for each class of range.
80
81 range_op_table::range_op_table ()
82 {
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
86
87 set (EQ_EXPR, op_equal);
88 set (NE_EXPR, op_not_equal);
89 set (LT_EXPR, op_lt);
90 set (LE_EXPR, op_le);
91 set (GT_EXPR, op_gt);
92 set (GE_EXPR, op_ge);
93 set (SSA_NAME, op_ident);
94 set (PAREN_EXPR, op_ident);
95 set (OBJ_TYPE_REF, op_ident);
96 set (REAL_CST, op_cst);
97 set (INTEGER_CST, op_cst);
98 set (NOP_EXPR, op_cast);
99 set (CONVERT_EXPR, op_cast);
100 set (PLUS_EXPR, op_plus);
101 set (ABS_EXPR, op_abs);
102 set (MINUS_EXPR, op_minus);
103 set (NEGATE_EXPR, op_negate);
104 set (MULT_EXPR, op_mult);
105
106   // These occur in both the integer and pointer tables, but currently share
107   // the integral implementation.
108 set (ADDR_EXPR, op_addr);
109 set (BIT_NOT_EXPR, op_bitwise_not);
110 set (BIT_XOR_EXPR, op_bitwise_xor);
111
112   // These are in both the integer and pointer tables, but the pointer table
113   // has a different implementation.
114   // While they are commented out here, a hybrid version in range-op-ptr.cc
115   // is used instead.  Once there is a pointer range class, we can simply
116   // uncomment the operators here and use the unified version.
117
118 // set (BIT_AND_EXPR, op_bitwise_and);
119 // set (BIT_IOR_EXPR, op_bitwise_or);
120 // set (MIN_EXPR, op_min);
121 // set (MAX_EXPR, op_max);
122 }
123
124 // Instantiate a default range operator for opcodes with no entry.
125
126 range_operator default_operator;
127
128 // Create a default range_op_handler.
129
130 range_op_handler::range_op_handler ()
131 {
132 m_operator = &default_operator;
133 }
134
135 // Create a range_op_handler for CODE.  Use a default operator if CODE
136 // does not have an entry.
137
138 range_op_handler::range_op_handler (unsigned code)
139 {
140 m_operator = operator_table[code];
141 if (!m_operator)
142 m_operator = &default_operator;
143 }
144
145 // Return TRUE if this handler has a non-default operator.
146
147 range_op_handler::operator bool () const
148 {
149 return m_operator != &default_operator;
150 }
151
152 // Return a pointer to the range operator associated with this handler.
153 // If it is a default operator, return NULL.
154 // This is the equivalent of indexing the range table.
155
156 range_operator *
157 range_op_handler::range_op () const
158 {
159 if (m_operator != &default_operator)
160 return m_operator;
161 return NULL;
162 }
163
164 // Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
165 // This is used to produce a unique value for each dispatch pattern. Shift
166 // values are based on the size of the m_discriminator field in value_range.h.
167
168 constexpr unsigned
169 dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
170 {
171 return ((lhs << 8) + (op1 << 4) + (op2));
172 }
173
174 // These are the supported dispatch patterns, which map to the parameter lists
175 // of the routines in range_operator.  Note the last 3 characters of each name
176 // are shorthand for the LHS, OP1, and OP2 range discriminator classes.
177
178 const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
179 const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
180 const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
181 const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
182 const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
183 const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
184 const unsigned RO_PPP = dispatch_trio (VR_PRANGE, VR_PRANGE, VR_PRANGE);
185 const unsigned RO_PPI = dispatch_trio (VR_PRANGE, VR_PRANGE, VR_IRANGE);
186 const unsigned RO_IPP = dispatch_trio (VR_IRANGE, VR_PRANGE, VR_PRANGE);
187 const unsigned RO_IPI = dispatch_trio (VR_IRANGE, VR_PRANGE, VR_IRANGE);
188 const unsigned RO_PIP = dispatch_trio (VR_PRANGE, VR_IRANGE, VR_PRANGE);
189 const unsigned RO_PII = dispatch_trio (VR_PRANGE, VR_IRANGE, VR_IRANGE);
190
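// Editorial note (not in the original source): each dispatch value is just
// the three discriminators packed into one integer, so a handler can switch
// on the exact combination.  A sketch of the packing for RO_IFF, assuming
// each discriminator fits in 4 bits as the shifts in dispatch_trio imply:
#if 0
static_assert (RO_IFF == ((VR_IRANGE << 8) + (VR_FRANGE << 4) + VR_FRANGE),
	       "dispatch_trio packs three 4-bit discriminators");
#endif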
191 // Return a dispatch value for parameter types LHS, OP1 and OP2.
192
193 unsigned
194 range_op_handler::dispatch_kind (const vrange &lhs, const vrange &op1,
195 const vrange& op2) const
196 {
197 return dispatch_trio (lhs.m_discriminator, op1.m_discriminator,
198 op2.m_discriminator);
199 }
200
201 void
202 range_op_handler::discriminator_fail (const vrange &r1,
203 const vrange &r2,
204 const vrange &r3) const
205 {
206 const char name[] = "IPF";
207 gcc_checking_assert (r1.m_discriminator < sizeof (name) - 1);
208 gcc_checking_assert (r2.m_discriminator < sizeof (name) - 1);
209 gcc_checking_assert (r3.m_discriminator < sizeof (name) - 1);
210 fprintf (stderr,
211 "Unsupported operand combination in dispatch: RO_%c%c%c\n",
212 name[r1.m_discriminator],
213 name[r2.m_discriminator],
214 name[r3.m_discriminator]);
215 gcc_unreachable ();
216 }
217
218 static inline bool
219 has_pointer_operand_p (const vrange &r1, const vrange &r2, const vrange &r3)
220 {
221 return is_a <prange> (r1) || is_a <prange> (r2) || is_a <prange> (r3);
222 }
223
224 // Dispatch a call to fold_range based on the types of R, LH and RH.
225
226 bool
227 range_op_handler::fold_range (vrange &r, tree type,
228 const vrange &lh,
229 const vrange &rh,
230 relation_trio rel) const
231 {
232 gcc_checking_assert (m_operator);
233 #if CHECKING_P
234 if (!lh.undefined_p () && !rh.undefined_p ())
235 gcc_assert (m_operator->operand_check_p (type, lh.type (), rh.type ()));
236 if (has_pointer_operand_p (r, lh, rh)
237 && !m_operator->pointers_handled_p (DISPATCH_FOLD_RANGE,
238 dispatch_kind (r, lh, rh)))
239 discriminator_fail (r, lh, rh);
240 #endif
241 switch (dispatch_kind (r, lh, rh))
242 {
243 case RO_III:
244 return m_operator->fold_range (as_a <irange> (r), type,
245 as_a <irange> (lh),
246 as_a <irange> (rh), rel);
247 case RO_IFI:
248 return m_operator->fold_range (as_a <irange> (r), type,
249 as_a <frange> (lh),
250 as_a <irange> (rh), rel);
251 case RO_IFF:
252 return m_operator->fold_range (as_a <irange> (r), type,
253 as_a <frange> (lh),
254 as_a <frange> (rh), rel);
255 case RO_FFF:
256 return m_operator->fold_range (as_a <frange> (r), type,
257 as_a <frange> (lh),
258 as_a <frange> (rh), rel);
259 case RO_FII:
260 return m_operator->fold_range (as_a <frange> (r), type,
261 as_a <irange> (lh),
262 as_a <irange> (rh), rel);
263 case RO_PPP:
264 return m_operator->fold_range (as_a <prange> (r), type,
265 as_a <prange> (lh),
266 as_a <prange> (rh), rel);
267 case RO_PPI:
268 return m_operator->fold_range (as_a <prange> (r), type,
269 as_a <prange> (lh),
270 as_a <irange> (rh), rel);
271 case RO_IPP:
272 return m_operator->fold_range (as_a <irange> (r), type,
273 as_a <prange> (lh),
274 as_a <prange> (rh), rel);
275 case RO_PIP:
276 return m_operator->fold_range (as_a <prange> (r), type,
277 as_a <irange> (lh),
278 as_a <prange> (rh), rel);
279 case RO_IPI:
280 return m_operator->fold_range (as_a <irange> (r), type,
281 as_a <prange> (lh),
282 as_a <irange> (rh), rel);
283 default:
284 return false;
285 }
286 }
287
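// Editorial sketch (not part of the original file): how a client typically
// reaches the dispatch above.  Given two integer ranges, look up the handler
// for an opcode and fold; TYPE is assumed to be a 32-bit signed integer type
// such as integer_type_node.
#if 0
static void
example_fold_plus (tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  int_range<1> op1 (type, wi::shwi (1, prec), wi::shwi (5, prec));
  int_range<1> op2 (type, wi::shwi (10, prec), wi::shwi (20, prec));
  int_range_max res;
  range_op_handler handler (PLUS_EXPR);
  // Both operands are iranges, so this dispatches through case RO_III.
  if (handler && handler.fold_range (res, type, op1, op2))
    {
      // res is now [11, 25].
    }
}
#endif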
288 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
289
290 bool
291 range_op_handler::op1_range (vrange &r, tree type,
292 const vrange &lhs,
293 const vrange &op2,
294 relation_trio rel) const
295 {
296 gcc_checking_assert (m_operator);
297 if (lhs.undefined_p ())
298 return false;
299 #if CHECKING_P
300 if (!op2.undefined_p ())
301 gcc_assert (m_operator->operand_check_p (lhs.type (), type, op2.type ()));
302 if (has_pointer_operand_p (r, lhs, op2)
303 && !m_operator->pointers_handled_p (DISPATCH_OP1_RANGE,
304 dispatch_kind (r, lhs, op2)))
305 discriminator_fail (r, lhs, op2);
306 #endif
307 switch (dispatch_kind (r, lhs, op2))
308 {
309 case RO_III:
310 return m_operator->op1_range (as_a <irange> (r), type,
311 as_a <irange> (lhs),
312 as_a <irange> (op2), rel);
313 case RO_PPP:
314 return m_operator->op1_range (as_a <prange> (r), type,
315 as_a <prange> (lhs),
316 as_a <prange> (op2), rel);
317 case RO_PIP:
318 return m_operator->op1_range (as_a <prange> (r), type,
319 as_a <irange> (lhs),
320 as_a <prange> (op2), rel);
321 case RO_PPI:
322 return m_operator->op1_range (as_a <prange> (r), type,
323 as_a <prange> (lhs),
324 as_a <irange> (op2), rel);
325 case RO_IPI:
326 return m_operator->op1_range (as_a <irange> (r), type,
327 as_a <prange> (lhs),
328 as_a <irange> (op2), rel);
329 case RO_FIF:
330 return m_operator->op1_range (as_a <frange> (r), type,
331 as_a <irange> (lhs),
332 as_a <frange> (op2), rel);
333 case RO_FFF:
334 return m_operator->op1_range (as_a <frange> (r), type,
335 as_a <frange> (lhs),
336 as_a <frange> (op2), rel);
337 default:
338 return false;
339 }
340 }
341
342 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
343
344 bool
345 range_op_handler::op2_range (vrange &r, tree type,
346 const vrange &lhs,
347 const vrange &op1,
348 relation_trio rel) const
349 {
350 gcc_checking_assert (m_operator);
351 if (lhs.undefined_p ())
352 return false;
353 #if CHECKING_P
354 if (!op1.undefined_p ())
355 gcc_assert (m_operator->operand_check_p (lhs.type (), op1.type (), type));
356 if (has_pointer_operand_p (r, lhs, op1)
357 && !m_operator->pointers_handled_p (DISPATCH_OP2_RANGE,
358 dispatch_kind (r, lhs, op1)))
359 discriminator_fail (r, lhs, op1);
360 #endif
361 switch (dispatch_kind (r, lhs, op1))
362 {
363 case RO_III:
364 return m_operator->op2_range (as_a <irange> (r), type,
365 as_a <irange> (lhs),
366 as_a <irange> (op1), rel);
367 case RO_PIP:
368 return m_operator->op2_range (as_a <prange> (r), type,
369 as_a <irange> (lhs),
370 as_a <prange> (op1), rel);
371 case RO_IPP:
372 return m_operator->op2_range (as_a <irange> (r), type,
373 as_a <prange> (lhs),
374 as_a <prange> (op1), rel);
375 case RO_FIF:
376 return m_operator->op2_range (as_a <frange> (r), type,
377 as_a <irange> (lhs),
378 as_a <frange> (op1), rel);
379 case RO_FFF:
380 return m_operator->op2_range (as_a <frange> (r), type,
381 as_a <frange> (lhs),
382 as_a <frange> (op1), rel);
383 default:
384 return false;
385 }
386 }
387
388 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
389
390 relation_kind
391 range_op_handler::lhs_op1_relation (const vrange &lhs,
392 const vrange &op1,
393 const vrange &op2,
394 relation_kind rel) const
395 {
396 gcc_checking_assert (m_operator);
397 #if CHECKING_P
398 if (has_pointer_operand_p (lhs, op1, op2)
399 && !m_operator->pointers_handled_p (DISPATCH_LHS_OP1_RELATION,
400 dispatch_kind (lhs, op1, op2)))
401 discriminator_fail (lhs, op1, op2);
402 #endif
403
404 switch (dispatch_kind (lhs, op1, op2))
405 {
406 case RO_III:
407 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
408 as_a <irange> (op1),
409 as_a <irange> (op2), rel);
410 case RO_PPP:
411 return m_operator->lhs_op1_relation (as_a <prange> (lhs),
412 as_a <prange> (op1),
413 as_a <prange> (op2), rel);
414 case RO_IPP:
415 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
416 as_a <prange> (op1),
417 as_a <prange> (op2), rel);
418 case RO_PII:
419 return m_operator->lhs_op1_relation (as_a <prange> (lhs),
420 as_a <irange> (op1),
421 as_a <irange> (op2), rel);
422 case RO_IFF:
423 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
424 as_a <frange> (op1),
425 as_a <frange> (op2), rel);
426 case RO_FFF:
427 return m_operator->lhs_op1_relation (as_a <frange> (lhs),
428 as_a <frange> (op1),
429 as_a <frange> (op2), rel);
430 default:
431 return VREL_VARYING;
432 }
433 }
434
435 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
436
437 relation_kind
438 range_op_handler::lhs_op2_relation (const vrange &lhs,
439 const vrange &op1,
440 const vrange &op2,
441 relation_kind rel) const
442 {
443 gcc_checking_assert (m_operator);
444 #if CHECKING_P
445 if (has_pointer_operand_p (lhs, op1, op2)
446 && !m_operator->pointers_handled_p (DISPATCH_LHS_OP2_RELATION,
447 dispatch_kind (lhs, op1, op2)))
448 discriminator_fail (lhs, op1, op2);
449 #endif
450 switch (dispatch_kind (lhs, op1, op2))
451 {
452 case RO_III:
453 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
454 as_a <irange> (op1),
455 as_a <irange> (op2), rel);
456 case RO_IFF:
457 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
458 as_a <frange> (op1),
459 as_a <frange> (op2), rel);
460 case RO_FFF:
461 return m_operator->lhs_op2_relation (as_a <frange> (lhs),
462 as_a <frange> (op1),
463 as_a <frange> (op2), rel);
464 default:
465 return VREL_VARYING;
466 }
467 }
468
469 // Dispatch a call to op1_op2_relation based on the type of LHS.
470
471 relation_kind
472 range_op_handler::op1_op2_relation (const vrange &lhs,
473 const vrange &op1,
474 const vrange &op2) const
475 {
476 gcc_checking_assert (m_operator);
477 #if CHECKING_P
478 if (has_pointer_operand_p (lhs, op1, op2)
479 && !m_operator->pointers_handled_p (DISPATCH_OP1_OP2_RELATION,
480 dispatch_kind (lhs, op1, op2)))
481 discriminator_fail (lhs, op1, op2);
482 #endif
483 switch (dispatch_kind (lhs, op1, op2))
484 {
485 case RO_III:
486 return m_operator->op1_op2_relation (as_a <irange> (lhs),
487 as_a <irange> (op1),
488 as_a <irange> (op2));
489
490 case RO_IPP:
491 return m_operator->op1_op2_relation (as_a <irange> (lhs),
492 as_a <prange> (op1),
493 as_a <prange> (op2));
494
495 case RO_IFF:
496 return m_operator->op1_op2_relation (as_a <irange> (lhs),
497 as_a <frange> (op1),
498 as_a <frange> (op2));
499
500 case RO_FFF:
501 return m_operator->op1_op2_relation (as_a <frange> (lhs),
502 as_a <frange> (op1),
503 as_a <frange> (op2));
504
505 default:
506 return VREL_VARYING;
507 }
508 }
509
510 bool
511 range_op_handler::overflow_free_p (const vrange &lh,
512 const vrange &rh,
513 relation_trio rel) const
514 {
515 gcc_checking_assert (m_operator);
516 switch (dispatch_kind (lh, lh, rh))
517 {
518 case RO_III:
519 return m_operator->overflow_free_p(as_a <irange> (lh),
520 as_a <irange> (rh),
521 rel);
522 default:
523 return false;
524 }
525 }
526
527 bool
528 range_op_handler::operand_check_p (tree t1, tree t2, tree t3) const
529 {
530 gcc_checking_assert (m_operator);
531 return m_operator->operand_check_p (t1, t2, t3);
532 }
533
534 // Update the known bitmasks in R when applying the operation CODE to
535 // LH and RH.
536
537 void
538 update_known_bitmask (vrange &r, tree_code code,
539 const vrange &lh, const vrange &rh)
540 {
541 if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ()
542 || r.singleton_p ())
543 return;
544
545 widest_int widest_value, widest_mask;
546 tree type = r.type ();
547 signop sign = TYPE_SIGN (type);
548 int prec = TYPE_PRECISION (type);
549 irange_bitmask lh_bits = lh.get_bitmask ();
550 irange_bitmask rh_bits = rh.get_bitmask ();
551
552 switch (get_gimple_rhs_class (code))
553 {
554 case GIMPLE_UNARY_RHS:
555 bit_value_unop (code, sign, prec, &widest_value, &widest_mask,
556 TYPE_SIGN (lh.type ()),
557 TYPE_PRECISION (lh.type ()),
558 widest_int::from (lh_bits.value (),
559 TYPE_SIGN (lh.type ())),
560 widest_int::from (lh_bits.mask (),
561 TYPE_SIGN (lh.type ())));
562 break;
563 case GIMPLE_BINARY_RHS:
564 bit_value_binop (code, sign, prec, &widest_value, &widest_mask,
565 TYPE_SIGN (lh.type ()),
566 TYPE_PRECISION (lh.type ()),
567 widest_int::from (lh_bits.value (), sign),
568 widest_int::from (lh_bits.mask (), sign),
569 TYPE_SIGN (rh.type ()),
570 TYPE_PRECISION (rh.type ()),
571 widest_int::from (rh_bits.value (), sign),
572 widest_int::from (rh_bits.mask (), sign));
573 break;
574 default:
575 gcc_unreachable ();
576 }
577
578 wide_int mask = wide_int::from (widest_mask, prec, sign);
579 wide_int value = wide_int::from (widest_value, prec, sign);
580 // Bitmasks must have the unknown value bits cleared.
581 value &= ~mask;
582 irange_bitmask bm (value, mask);
583 r.update_bitmask (bm);
584 }
585
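// Editorial sketch (not from the original source): what the bitmask update
// above encodes.  Mask bits that are set are unknown; clear mask bits are
// known and take their value from VALUE.  For r = x * 4 on a 32-bit type,
// the two low bits of the result are known to be zero.
#if 0
static void
example_bitmask (irange &r, tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  wide_int mask = wi::shwi (-4, prec);	// ~3: only the low two bits are known.
  wide_int value = wi::zero (prec);	// ...and those known bits are zero.
  irange_bitmask bm (value, mask);
  r.update_bitmask (bm);
}
#endif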
586 // Return the upper limit for a type.
587
588 static inline wide_int
589 max_limit (const_tree type)
590 {
591 return irange_val_max (type);
592 }
593
594 // Return the lower limit for a type.
595
596 static inline wide_int
597 min_limit (const_tree type)
598 {
599 return irange_val_min (type);
600 }
601
602 // Return false if shifting by OP is always undefined behavior.  Otherwise,
603 // return true and set R to the valid range of shift amounts.  This allows
604 // trimming out undefined shift values, leaving only valid ranges if any.
605
606 static inline bool
607 get_shift_range (irange &r, tree type, const irange &op)
608 {
609 if (op.undefined_p ())
610 return false;
611
612 // Build valid range and intersect it with the shift range.
613 r = value_range (op.type (),
614 wi::shwi (0, TYPE_PRECISION (op.type ())),
615 wi::shwi (TYPE_PRECISION (type) - 1, TYPE_PRECISION (op.type ())));
616 r.intersect (op);
617
618   // If there are no valid ranges in the shift range, return false.
619 if (r.undefined_p ())
620 return false;
621 return true;
622 }
623
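// Editorial sketch (not part of the original file): trimming a shift range.
// For a 32-bit shifted type, the valid shift amounts are [0, 31]; an operand
// range of [1, 40] is trimmed to [1, 31], while [35, 40] leaves nothing
// valid and the helper returns false.
#if 0
static void
example_shift_trim (tree type, const irange &op)
{
  int_range_max valid;
  if (get_shift_range (valid, type, op))
    {
      // valid now holds only the defined shift amounts from OP.
    }
}
#endif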
624 // Default wide_int fold operation returns [MIN, MAX].
625
626 void
627 range_operator::wi_fold (irange &r, tree type,
628 const wide_int &lh_lb ATTRIBUTE_UNUSED,
629 const wide_int &lh_ub ATTRIBUTE_UNUSED,
630 const wide_int &rh_lb ATTRIBUTE_UNUSED,
631 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
632 {
633 gcc_checking_assert (r.supports_type_p (type));
634 r.set_varying (type);
635 }
636
637 // Call wi_fold when op1 and op2 are equivalent, further splitting small
638 // ranges into individual constants.  This can provide better precision.
639 // For x + y when x == y and x has the range [0,4], produce
640 // [0,0][2,2][4,4][6,6][8,8] instead of [0,8].
641 // LIMIT is the maximum number of elements allowed in the range before we
642 // stop processing them individually.
643
644 void
645 range_operator::wi_fold_in_parts_equiv (irange &r, tree type,
646 const wide_int &lh_lb,
647 const wide_int &lh_ub,
648 unsigned limit) const
649 {
650 int_range_max tmp;
651 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
652 widest_int::from (lh_lb, TYPE_SIGN (type)));
653   // If there are at most LIMIT values in the LH range, split them up.
654 r.set_undefined ();
655 if (lh_range >= 0 && lh_range < limit)
656 {
657 for (unsigned x = 0; x <= lh_range; x++)
658 {
659 wide_int val = lh_lb + x;
660 wi_fold (tmp, type, val, val, val, val);
661 r.union_ (tmp);
662 }
663 }
664 // Otherwise just call wi_fold.
665 else
666 wi_fold (r, type, lh_lb, lh_ub, lh_lb, lh_ub);
667 }
668
669 // Call wi_fold, except further split small subranges into constants.
670 // This can provide better precision.  For something like 8 << [0,1],
671 // instead of [8, 16] we will produce [8,8][16,16].
672
673 void
674 range_operator::wi_fold_in_parts (irange &r, tree type,
675 const wide_int &lh_lb,
676 const wide_int &lh_ub,
677 const wide_int &rh_lb,
678 const wide_int &rh_ub) const
679 {
680 int_range_max tmp;
681 widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
682 widest_int::from (rh_lb, TYPE_SIGN (type)));
683 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
684 widest_int::from (lh_lb, TYPE_SIGN (type)));
685   // If there are 2, 3, or 4 values in the RH range, do them separately.
686   // Recurse through wi_fold_in_parts so the LH side is also checked.
687 if (rh_range > 0 && rh_range < 4)
688 {
689 wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
690 if (rh_range > 1)
691 {
692 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
693 r.union_ (tmp);
694 if (rh_range == 3)
695 {
696 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
697 r.union_ (tmp);
698 }
699 }
700 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
701 r.union_ (tmp);
702 }
703 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
704 // The RH side has been checked, so no recursion needed.
705 else if (lh_range > 0 && lh_range < 4)
706 {
707 wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
708 if (lh_range > 1)
709 {
710 wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
711 r.union_ (tmp);
712 if (lh_range == 3)
713 {
714 wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
715 r.union_ (tmp);
716 }
717 }
718 wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
719 r.union_ (tmp);
720 }
721 // Otherwise just call wi_fold.
722 else
723 wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
724 }
725
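// Editorial sketch (not in the original source): the effect of the splitting
// above, using the left-shift example from the comment.  TYPE is assumed to
// be a 32-bit signed integer type.
#if 0
static void
example_split_shift (tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  int_range<1> val (type, wi::shwi (8, prec), wi::shwi (8, prec));
  int_range<1> amount (type, wi::shwi (0, prec), wi::shwi (1, prec));
  int_range_max res;
  // 8 << [0,1]: the RH range is split into 0 and 1, giving [8,8][16,16]
  // rather than the single span [8,16].
  range_op_handler (LSHIFT_EXPR).fold_range (res, type, val, amount);
}
#endif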
726 // The default for fold is to break all ranges into sub-ranges and
727 // invoke the wi_fold method on each sub-range pair.
728
729 bool
730 range_operator::fold_range (irange &r, tree type,
731 const irange &lh,
732 const irange &rh,
733 relation_trio trio) const
734 {
735 gcc_checking_assert (r.supports_type_p (type));
736 if (empty_range_varying (r, type, lh, rh))
737 return true;
738
739 relation_kind rel = trio.op1_op2 ();
740 unsigned num_lh = lh.num_pairs ();
741 unsigned num_rh = rh.num_pairs ();
742
743 // If op1 and op2 are equivalences, then we don't need a complete cross
744 // product, just pairs of matching elements.
745 if (relation_equiv_p (rel) && lh == rh)
746 {
747 int_range_max tmp;
748 r.set_undefined ();
749 for (unsigned x = 0; x < num_lh; ++x)
750 {
751 // If the number of subranges is too high, limit subrange creation.
752 unsigned limit = (r.num_pairs () > 32) ? 0 : 8;
753 wide_int lh_lb = lh.lower_bound (x);
754 wide_int lh_ub = lh.upper_bound (x);
755 wi_fold_in_parts_equiv (tmp, type, lh_lb, lh_ub, limit);
756 r.union_ (tmp);
757 if (r.varying_p ())
758 break;
759 }
760 op1_op2_relation_effect (r, type, lh, rh, rel);
761 update_bitmask (r, lh, rh);
762 return true;
763 }
764
765 // If both ranges are single pairs, fold directly into the result range.
766 // If the number of subranges grows too high, produce a summary result as the
767 // loop becomes exponential with little benefit. See PR 103821.
768 if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
769 {
770 wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
771 rh.lower_bound (), rh.upper_bound ());
772 op1_op2_relation_effect (r, type, lh, rh, rel);
773 update_bitmask (r, lh, rh);
774 return true;
775 }
776
777 int_range_max tmp;
778 r.set_undefined ();
779 for (unsigned x = 0; x < num_lh; ++x)
780 for (unsigned y = 0; y < num_rh; ++y)
781 {
782 wide_int lh_lb = lh.lower_bound (x);
783 wide_int lh_ub = lh.upper_bound (x);
784 wide_int rh_lb = rh.lower_bound (y);
785 wide_int rh_ub = rh.upper_bound (y);
786 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
787 r.union_ (tmp);
788 if (r.varying_p ())
789 {
790 op1_op2_relation_effect (r, type, lh, rh, rel);
791 update_bitmask (r, lh, rh);
792 return true;
793 }
794 }
795 op1_op2_relation_effect (r, type, lh, rh, rel);
796 update_bitmask (r, lh, rh);
797 return true;
798 }
799
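// Editorial sketch (not from the original file): the cross product above in
// action.  With a two-subrange LH operand, each pair is folded separately
// and the results are unioned.
#if 0
static void
example_cross_product (tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  int_range<2> lh (type, wi::shwi (1, prec), wi::shwi (2, prec));
  int_range<2> upper (type, wi::shwi (10, prec), wi::shwi (11, prec));
  lh.union_ (upper);			// lh is [1,2][10,11].
  int_range<1> rh (type, wi::shwi (100, prec), wi::shwi (100, prec));
  int_range_max res;
  // [1,2]+[100,100] and [10,11]+[100,100] give [101,102][110,111].
  range_op_handler (PLUS_EXPR).fold_range (res, type, lh, rh);
}
#endif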
800 // The default for op1_range is to return false.
801
802 bool
803 range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
804 tree type ATTRIBUTE_UNUSED,
805 const irange &lhs ATTRIBUTE_UNUSED,
806 const irange &op2 ATTRIBUTE_UNUSED,
807 relation_trio) const
808 {
809 return false;
810 }
811
812 // The default for op2_range is to return false.
813
814 bool
815 range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
816 tree type ATTRIBUTE_UNUSED,
817 const irange &lhs ATTRIBUTE_UNUSED,
818 const irange &op1 ATTRIBUTE_UNUSED,
819 relation_trio) const
820 {
821 return false;
822 }
823
824 // The default relation routines return VREL_VARYING.
825
826 relation_kind
827 range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
828 const irange &op1 ATTRIBUTE_UNUSED,
829 const irange &op2 ATTRIBUTE_UNUSED,
830 relation_kind rel ATTRIBUTE_UNUSED) const
831 {
832 return VREL_VARYING;
833 }
834
835 relation_kind
836 range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
837 const irange &op1 ATTRIBUTE_UNUSED,
838 const irange &op2 ATTRIBUTE_UNUSED,
839 relation_kind rel ATTRIBUTE_UNUSED) const
840 {
841 return VREL_VARYING;
842 }
843
844 relation_kind
845 range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
846 const irange &op1 ATTRIBUTE_UNUSED,
847 const irange &op2 ATTRIBUTE_UNUSED) const
848 {
849 return VREL_VARYING;
850 }
851
852 // Default is no relation affects the LHS.
853
854 bool
855 range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
856 tree type ATTRIBUTE_UNUSED,
857 const irange &op1_range ATTRIBUTE_UNUSED,
858 const irange &op2_range ATTRIBUTE_UNUSED,
859 relation_kind rel ATTRIBUTE_UNUSED) const
860 {
861 return false;
862 }
863
864 bool
865 range_operator::overflow_free_p (const irange &, const irange &,
866 relation_trio) const
867 {
868 return false;
869 }
870
871 // Apply any known bitmask updates based on this operator.
872
873 void
874 range_operator::update_bitmask (irange &, const irange &,
875 const irange &) const
876 {
877 }
878
879 // Check that operand types are OK. Default to always OK.
880
881 bool
882 range_operator::operand_check_p (tree, tree, tree) const
883 {
884 return true;
885 }
886
887 // Create and return a range from a pair of wide-ints that are known
888 // to have overflowed (or underflowed).
889
890 static void
891 value_range_from_overflowed_bounds (irange &r, tree type,
892 const wide_int &wmin,
893 const wide_int &wmax)
894 {
895 const signop sgn = TYPE_SIGN (type);
896 const unsigned int prec = TYPE_PRECISION (type);
897
898 wide_int tmin = wide_int::from (wmin, prec, sgn);
899 wide_int tmax = wide_int::from (wmax, prec, sgn);
900
901 bool covers = false;
902 wide_int tem = tmin;
903 tmin = tmax + 1;
904 if (wi::cmp (tmin, tmax, sgn) < 0)
905 covers = true;
906 tmax = tem - 1;
907 if (wi::cmp (tmax, tem, sgn) > 0)
908 covers = true;
909
910 // If the anti-range would cover nothing, drop to varying.
911   // Likewise if the anti-range bounds are outside of the type's
912   // values.
913 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
914 r.set_varying (type);
915 else
916 r.set (type, tmin, tmax, VR_ANTI_RANGE);
917 }
918
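// Editorial sketch (not part of the original source): a worked wrap-around.
// For an unsigned 8-bit type, adding [250,250] + [0,9] produces the bounds
// 250 and 3-with-overflow; the helper above turns that into the anti-range
// ~[4, 249], i.e. [0,3][250,255], which is exactly [250,259] mod 256.
#if 0
static void
example_wrapped_bounds (tree uchar_type)
{
  int_range_max r;
  // Assumes UCHAR_TYPE has 8-bit precision.
  value_range_from_overflowed_bounds (r, uchar_type,
				      wi::uhwi (250, 8), wi::uhwi (3, 8));
}
#endif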
919 // Create and return a range from a pair of wide-ints. MIN_OVF and
920 // MAX_OVF describe any overflow that might have occurred while
921 // calculating WMIN and WMAX respectively.
922
923 static void
924 value_range_with_overflow (irange &r, tree type,
925 const wide_int &wmin, const wide_int &wmax,
926 wi::overflow_type min_ovf = wi::OVF_NONE,
927 wi::overflow_type max_ovf = wi::OVF_NONE)
928 {
929 const signop sgn = TYPE_SIGN (type);
930 const unsigned int prec = TYPE_PRECISION (type);
931 const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
932
933   // For one-bit precision, if max != min, then the range covers all
934   // values.
935 if (prec == 1 && wi::ne_p (wmax, wmin))
936 {
937 r.set_varying (type);
938 return;
939 }
940
941 if (overflow_wraps)
942 {
943 // If overflow wraps, truncate the values and adjust the range,
944 // kind, and bounds appropriately.
945 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
946 {
947 wide_int tmin = wide_int::from (wmin, prec, sgn);
948 wide_int tmax = wide_int::from (wmax, prec, sgn);
949 // If the limits are swapped, we wrapped around and cover
950 // the entire range.
951 if (wi::gt_p (tmin, tmax, sgn))
952 r.set_varying (type);
953 else
954         // Either no overflow, or both bounds overflowed or underflowed.
955         // The range kind stays normal.
956 r.set (type, tmin, tmax);
957 return;
958 }
959
960 if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
961 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
962 value_range_from_overflowed_bounds (r, type, wmin, wmax);
963 else
964 // Other underflow and/or overflow, drop to VR_VARYING.
965 r.set_varying (type);
966 }
967 else
968 {
969 // If both bounds either underflowed or overflowed, then the result
970 // is undefined.
971 if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
972 || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
973 {
974 r.set_undefined ();
975 return;
976 }
977
978 // If overflow does not wrap, saturate to [MIN, MAX].
979 wide_int new_lb, new_ub;
980 if (min_ovf == wi::OVF_UNDERFLOW)
981 new_lb = wi::min_value (prec, sgn);
982 else if (min_ovf == wi::OVF_OVERFLOW)
983 new_lb = wi::max_value (prec, sgn);
984 else
985 new_lb = wmin;
986
987 if (max_ovf == wi::OVF_UNDERFLOW)
988 new_ub = wi::min_value (prec, sgn);
989 else if (max_ovf == wi::OVF_OVERFLOW)
990 new_ub = wi::max_value (prec, sgn);
991 else
992 new_ub = wmax;
993
994 r.set (type, new_lb, new_ub);
995 }
996 }
997
998 // Create and return a range from a pair of wide-ints. Canonicalize
999 // the case where the bounds are swapped, in which case we transform
1000 // [10,5] into [MIN,5][10,MAX].
1001
1002 static inline void
1003 create_possibly_reversed_range (irange &r, tree type,
1004 const wide_int &new_lb, const wide_int &new_ub)
1005 {
1006 signop s = TYPE_SIGN (type);
1007 // If the bounds are swapped, treat the result as if an overflow occurred.
1008 if (wi::gt_p (new_lb, new_ub, s))
1009 value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
1010 else
1011 // Otherwise it's just a normal range.
1012 r.set (type, new_lb, new_ub);
1013 }
1014
1015 // Return the summary information about boolean range LHS. If EMPTY/FULL,
1016 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
1017
1018 bool_range_state
1019 get_bool_state (vrange &r, const vrange &lhs, tree val_type)
1020 {
1021 // If there is no result, then this is unexecutable.
1022 if (lhs.undefined_p ())
1023 {
1024 r.set_undefined ();
1025 return BRS_EMPTY;
1026 }
1027
1028 if (lhs.zero_p ())
1029 return BRS_FALSE;
1030
1031 // For TRUE, we can't just test for [1,1] because Ada can have
1032 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
1033 if (lhs.contains_p (build_zero_cst (lhs.type ())))
1034 {
1035 r.set_varying (val_type);
1036 return BRS_FULL;
1037 }
1038
1039 return BRS_TRUE;
1040 }
1041
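// Editorial sketch (not in the original file): get_bool_state in use.  A
// [0,0] LHS yields BRS_FALSE, a range excluding zero yields BRS_TRUE, and a
// [0,1] LHS yields BRS_FULL with R set to varying for the operand type.
#if 0
static void
example_bool_state (const irange &lhs, tree operand_type)
{
  int_range_max r;
  switch (get_bool_state (r, lhs, operand_type))
    {
    case BRS_TRUE:
      // The comparison is known to be true; derive operand ranges from it.
      break;
    case BRS_FALSE:
      // The comparison is known to be false.
      break;
    default:
      // BRS_FULL or BRS_EMPTY: R has already been set appropriately.
      break;
    }
}
#endif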
1042 // ------------------------------------------------------------------------
1043
1044 void
1045 operator_equal::update_bitmask (irange &r, const irange &lh,
1046 const irange &rh) const
1047 {
1048 update_known_bitmask (r, EQ_EXPR, lh, rh);
1049 }
1050
1051 // Check if the LHS range indicates a relation between OP1 and OP2.
1052
1053 relation_kind
1054 operator_equal::op1_op2_relation (const irange &lhs, const irange &,
1055 const irange &) const
1056 {
1057 if (lhs.undefined_p ())
1058 return VREL_UNDEFINED;
1059
1060 // FALSE = op1 == op2 indicates NE_EXPR.
1061 if (lhs.zero_p ())
1062 return VREL_NE;
1063
1064 // TRUE = op1 == op2 indicates EQ_EXPR.
1065 if (!contains_zero_p (lhs))
1066 return VREL_EQ;
1067 return VREL_VARYING;
1068 }
1069
1070 bool
1071 operator_equal::fold_range (irange &r, tree type,
1072 const irange &op1,
1073 const irange &op2,
1074 relation_trio rel) const
1075 {
1076 if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
1077 return true;
1078
1079   // If both ranges consist of a single value, we can compare them directly
1080   // and be sure whether the values are always equal or not.
1081 bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
1082 bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
1083 if (op1_const && op2_const)
1084 {
1085 if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
1086 r = range_true (type);
1087 else
1088 r = range_false (type);
1089 }
1090 else
1091 {
1092       // If the ranges do not intersect, we know the operands are never equal;
1093       // otherwise we don't know anything for sure.
1094 int_range_max tmp = op1;
1095 tmp.intersect (op2);
1096 if (tmp.undefined_p ())
1097 r = range_false (type);
1098 // Check if a constant cannot satisfy the bitmask requirements.
1099 else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
1100 r = range_false (type);
1101 else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
1102 r = range_false (type);
1103 else
1104 r = range_true_and_false (type);
1105 }
1106 return true;
1107 }
1108
1109 bool
1110 operator_equal::op1_range (irange &r, tree type,
1111 const irange &lhs,
1112 const irange &op2,
1113 relation_trio) const
1114 {
1115 switch (get_bool_state (r, lhs, type))
1116 {
1117 case BRS_TRUE:
1118 // If it's true, the result is the same as OP2.
1119 r = op2;
1120 break;
1121
1122 case BRS_FALSE:
1123 // If the result is false, the only time we know anything is
1124 // if OP2 is a constant.
1125 if (!op2.undefined_p ()
1126 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1127 {
1128 r = op2;
1129 r.invert ();
1130 }
1131 else
1132 r.set_varying (type);
1133 break;
1134
1135 default:
1136 break;
1137 }
1138 return true;
1139 }
1140
1141 bool
1142 operator_equal::op2_range (irange &r, tree type,
1143 const irange &lhs,
1144 const irange &op1,
1145 relation_trio rel) const
1146 {
1147 return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1148 }
1149
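// Editorial sketch (not part of the original source): op1_range for EQ in
// action.  If x == 5 is known false and op2 is the singleton [5,5], op1 can
// be narrowed to ~[5,5].
#if 0
static void
example_equal_op1 (tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  unsigned bool_prec = TYPE_PRECISION (boolean_type_node);
  // LHS is the boolean FALSE result of (x == 5).
  int_range<1> lhs (boolean_type_node, wi::zero (bool_prec),
		    wi::zero (bool_prec));
  int_range<1> five (type, wi::shwi (5, prec), wi::shwi (5, prec));
  int_range_max op1;
  // op1 becomes ~[5,5], i.e. [MIN,4][6,MAX].
  op_equal.op1_range (op1, type, lhs, five, TRIO_VARYING);
}
#endif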
1150 // -------------------------------------------------------------------------
1151
1152 void
1153 operator_not_equal::update_bitmask (irange &r, const irange &lh,
1154 const irange &rh) const
1155 {
1156 update_known_bitmask (r, NE_EXPR, lh, rh);
1157 }
1158
1159 // Check if the LHS range indicates a relation between OP1 and OP2.
1160
1161 relation_kind
1162 operator_not_equal::op1_op2_relation (const irange &lhs, const irange &,
1163 const irange &) const
1164 {
1165 if (lhs.undefined_p ())
1166 return VREL_UNDEFINED;
1167
1168 // FALSE = op1 != op2 indicates EQ_EXPR.
1169 if (lhs.zero_p ())
1170 return VREL_EQ;
1171
1172 // TRUE = op1 != op2 indicates NE_EXPR.
1173 if (!contains_zero_p (lhs))
1174 return VREL_NE;
1175 return VREL_VARYING;
1176 }
1177
1178 bool
1179 operator_not_equal::fold_range (irange &r, tree type,
1180 const irange &op1,
1181 const irange &op2,
1182 relation_trio rel) const
1183 {
1184 if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
1185 return true;
1186
1187   // If both ranges consist of a single value, we can compare them directly
1188   // and be sure whether the values are always equal or not.
1189 bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
1190 bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
1191 if (op1_const && op2_const)
1192 {
1193 if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
1194 r = range_true (type);
1195 else
1196 r = range_false (type);
1197 }
1198 else
1199 {
1200       // If the ranges do not intersect, we know the operands are never equal;
1201       // otherwise we don't know anything for sure.
1202 int_range_max tmp = op1;
1203 tmp.intersect (op2);
1204 if (tmp.undefined_p ())
1205 r = range_true (type);
1206 // Check if a constant cannot satisfy the bitmask requirements.
1207 else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
1208 r = range_true (type);
1209 else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
1210 r = range_true (type);
1211 else
1212 r = range_true_and_false (type);
1213 }
1214 return true;
1215 }
1216
1217 bool
1218 operator_not_equal::op1_range (irange &r, tree type,
1219 const irange &lhs,
1220 const irange &op2,
1221 relation_trio) const
1222 {
1223 switch (get_bool_state (r, lhs, type))
1224 {
1225 case BRS_TRUE:
1226 // If the result is true, the only time we know anything is if
1227 // OP2 is a constant.
1228 if (!op2.undefined_p ()
1229 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1230 {
1231 r = op2;
1232 r.invert ();
1233 }
1234 else
1235 r.set_varying (type);
1236 break;
1237
1238 case BRS_FALSE:
1239 // If it's false, the result is the same as OP2.
1240 r = op2;
1241 break;
1242
1243 default:
1244 break;
1245 }
1246 return true;
1247 }
1248
1249
1250 bool
1251 operator_not_equal::op2_range (irange &r, tree type,
1252 const irange &lhs,
1253 const irange &op1,
1254 relation_trio rel) const
1255 {
1256 return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1257 }
1258
1259 // (X < VAL) produces the range of [MIN, VAL - 1].
1260
1261 static void
1262 build_lt (irange &r, tree type, const wide_int &val)
1263 {
1264 wi::overflow_type ov;
1265 wide_int lim;
1266 signop sgn = TYPE_SIGN (type);
1267
1268 // Signed 1 bit cannot represent 1 for subtraction.
1269 if (sgn == SIGNED)
1270 lim = wi::add (val, -1, sgn, &ov);
1271 else
1272 lim = wi::sub (val, 1, sgn, &ov);
1273
1274   // If val - 1 underflows, the check is for X < MIN, which is an empty range.
1275 if (ov)
1276 r.set_undefined ();
1277 else
1278 r = int_range<1> (type, min_limit (type), lim);
1279 }
1280
1281 // (X <= VAL) produces the range of [MIN, VAL].
1282
1283 static void
1284 build_le (irange &r, tree type, const wide_int &val)
1285 {
1286 r = int_range<1> (type, min_limit (type), val);
1287 }
1288
1289 // (X > VAL) produces the range of [VAL + 1, MAX].
1290
1291 static void
1292 build_gt (irange &r, tree type, const wide_int &val)
1293 {
1294 wi::overflow_type ov;
1295 wide_int lim;
1296 signop sgn = TYPE_SIGN (type);
1297
1298 // Signed 1 bit cannot represent 1 for addition.
1299 if (sgn == SIGNED)
1300 lim = wi::sub (val, -1, sgn, &ov);
1301 else
1302 lim = wi::add (val, 1, sgn, &ov);
1303   // If val + 1 overflows, the check is for X > MAX, which is an empty range.
1304 if (ov)
1305 r.set_undefined ();
1306 else
1307 r = int_range<1> (type, lim, max_limit (type));
1308 }
1309
1310 // (X >= val) produces the range of [VAL, MAX].
1311
1312 static void
1313 build_ge (irange &r, tree type, const wide_int &val)
1314 {
1315 r = int_range<1> (type, val, max_limit (type));
1316 }
1317
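// Editorial sketch (not in the original source): the four helpers above in
// terms of a concrete bound.  For VAL == 5 on a 32-bit signed type:
//   build_lt  ->  [MIN, 4]      build_le  ->  [MIN, 5]
//   build_gt  ->  [6, MAX]      build_ge  ->  [5, MAX]
#if 0
static void
example_build_bounds (tree type)
{
  int_range_max r;
  wide_int five = wi::shwi (5, TYPE_PRECISION (type));
  build_lt (r, type, five);	// r is [MIN, 4].
  build_ge (r, type, five);	// r is [5, MAX].
}
#endif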
1318
1319 void
1320 operator_lt::update_bitmask (irange &r, const irange &lh,
1321 const irange &rh) const
1322 {
1323 update_known_bitmask (r, LT_EXPR, lh, rh);
1324 }
1325
1326 // Check if the LHS range indicates a relation between OP1 and OP2.
1327
1328 relation_kind
1329 operator_lt::op1_op2_relation (const irange &lhs, const irange &,
1330 const irange &) const
1331 {
1332 if (lhs.undefined_p ())
1333 return VREL_UNDEFINED;
1334
1335 // FALSE = op1 < op2 indicates GE_EXPR.
1336 if (lhs.zero_p ())
1337 return VREL_GE;
1338
1339 // TRUE = op1 < op2 indicates LT_EXPR.
1340 if (!contains_zero_p (lhs))
1341 return VREL_LT;
1342 return VREL_VARYING;
1343 }
1344
1345 bool
1346 operator_lt::fold_range (irange &r, tree type,
1347 const irange &op1,
1348 const irange &op2,
1349 relation_trio rel) const
1350 {
1351 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
1352 return true;
1353
1354 signop sign = TYPE_SIGN (op1.type ());
1355 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1356
1357 if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
1358 r = range_true (type);
1359 else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
1360 r = range_false (type);
1361 // Use nonzero bits to determine if < 0 is false.
1362 else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
1363 r = range_false (type);
1364 else
1365 r = range_true_and_false (type);
1366 return true;
1367 }
1368
1369 bool
1370 operator_lt::op1_range (irange &r, tree type,
1371 const irange &lhs,
1372 const irange &op2,
1373 relation_trio) const
1374 {
1375 if (op2.undefined_p ())
1376 return false;
1377
1378 switch (get_bool_state (r, lhs, type))
1379 {
1380 case BRS_TRUE:
1381 build_lt (r, type, op2.upper_bound ());
1382 break;
1383
1384 case BRS_FALSE:
1385 build_ge (r, type, op2.lower_bound ());
1386 break;
1387
1388 default:
1389 break;
1390 }
1391 return true;
1392 }
1393
1394 bool
1395 operator_lt::op2_range (irange &r, tree type,
1396 const irange &lhs,
1397 const irange &op1,
1398 relation_trio) const
1399 {
1400 if (op1.undefined_p ())
1401 return false;
1402
1403 switch (get_bool_state (r, lhs, type))
1404 {
1405 case BRS_TRUE:
1406 build_gt (r, type, op1.lower_bound ());
1407 break;
1408
1409 case BRS_FALSE:
1410 build_le (r, type, op1.upper_bound ());
1411 break;
1412
1413 default:
1414 break;
1415 }
1416 return true;
1417 }
1418
1419
1420 void
1421 operator_le::update_bitmask (irange &r, const irange &lh,
1422 const irange &rh) const
1423 {
1424 update_known_bitmask (r, LE_EXPR, lh, rh);
1425 }
1426
1427 // Check if the LHS range indicates a relation between OP1 and OP2.
1428
1429 relation_kind
1430 operator_le::op1_op2_relation (const irange &lhs, const irange &,
1431 const irange &) const
1432 {
1433 if (lhs.undefined_p ())
1434 return VREL_UNDEFINED;
1435
1436 // FALSE = op1 <= op2 indicates GT_EXPR.
1437 if (lhs.zero_p ())
1438 return VREL_GT;
1439
1440 // TRUE = op1 <= op2 indicates LE_EXPR.
1441 if (!contains_zero_p (lhs))
1442 return VREL_LE;
1443 return VREL_VARYING;
1444 }
1445
1446 bool
1447 operator_le::fold_range (irange &r, tree type,
1448 const irange &op1,
1449 const irange &op2,
1450 relation_trio rel) const
1451 {
1452 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
1453 return true;
1454
1455 signop sign = TYPE_SIGN (op1.type ());
1456 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1457
1458 if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
1459 r = range_true (type);
1460 else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
1461 r = range_false (type);
1462 else
1463 r = range_true_and_false (type);
1464 return true;
1465 }
1466
1467 bool
1468 operator_le::op1_range (irange &r, tree type,
1469 const irange &lhs,
1470 const irange &op2,
1471 relation_trio) const
1472 {
1473 if (op2.undefined_p ())
1474 return false;
1475
1476 switch (get_bool_state (r, lhs, type))
1477 {
1478 case BRS_TRUE:
1479 build_le (r, type, op2.upper_bound ());
1480 break;
1481
1482 case BRS_FALSE:
1483 build_gt (r, type, op2.lower_bound ());
1484 break;
1485
1486 default:
1487 break;
1488 }
1489 return true;
1490 }
1491
1492 bool
1493 operator_le::op2_range (irange &r, tree type,
1494 const irange &lhs,
1495 const irange &op1,
1496 relation_trio) const
1497 {
1498 if (op1.undefined_p ())
1499 return false;
1500
1501 switch (get_bool_state (r, lhs, type))
1502 {
1503 case BRS_TRUE:
1504 build_ge (r, type, op1.lower_bound ());
1505 break;
1506
1507 case BRS_FALSE:
1508 build_lt (r, type, op1.upper_bound ());
1509 break;
1510
1511 default:
1512 break;
1513 }
1514 return true;
1515 }
1516
1517
1518 void
1519 operator_gt::update_bitmask (irange &r, const irange &lh,
1520 const irange &rh) const
1521 {
1522 update_known_bitmask (r, GT_EXPR, lh, rh);
1523 }
1524
1525 // Check if the LHS range indicates a relation between OP1 and OP2.
1526
1527 relation_kind
1528 operator_gt::op1_op2_relation (const irange &lhs, const irange &,
1529 const irange &) const
1530 {
1531 if (lhs.undefined_p ())
1532 return VREL_UNDEFINED;
1533
1534 // FALSE = op1 > op2 indicates LE_EXPR.
1535 if (lhs.zero_p ())
1536 return VREL_LE;
1537
1538 // TRUE = op1 > op2 indicates GT_EXPR.
1539 if (!contains_zero_p (lhs))
1540 return VREL_GT;
1541 return VREL_VARYING;
1542 }
1543
1544 bool
1545 operator_gt::fold_range (irange &r, tree type,
1546 const irange &op1, const irange &op2,
1547 relation_trio rel) const
1548 {
1549 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
1550 return true;
1551
1552 signop sign = TYPE_SIGN (op1.type ());
1553 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1554
1555 if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
1556 r = range_true (type);
1557 else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
1558 r = range_false (type);
1559 else
1560 r = range_true_and_false (type);
1561 return true;
1562 }
1563
1564 bool
1565 operator_gt::op1_range (irange &r, tree type,
1566 const irange &lhs, const irange &op2,
1567 relation_trio) const
1568 {
1569 if (op2.undefined_p ())
1570 return false;
1571
1572 switch (get_bool_state (r, lhs, type))
1573 {
1574 case BRS_TRUE:
1575 build_gt (r, type, op2.lower_bound ());
1576 break;
1577
1578 case BRS_FALSE:
1579 build_le (r, type, op2.upper_bound ());
1580 break;
1581
1582 default:
1583 break;
1584 }
1585 return true;
1586 }
1587
1588 bool
1589 operator_gt::op2_range (irange &r, tree type,
1590 const irange &lhs,
1591 const irange &op1,
1592 relation_trio) const
1593 {
1594 if (op1.undefined_p ())
1595 return false;
1596
1597 switch (get_bool_state (r, lhs, type))
1598 {
1599 case BRS_TRUE:
1600 build_lt (r, type, op1.upper_bound ());
1601 break;
1602
1603 case BRS_FALSE:
1604 build_ge (r, type, op1.lower_bound ());
1605 break;
1606
1607 default:
1608 break;
1609 }
1610 return true;
1611 }
1612
1613
1614 void
1615 operator_ge::update_bitmask (irange &r, const irange &lh,
1616 const irange &rh) const
1617 {
1618 update_known_bitmask (r, GE_EXPR, lh, rh);
1619 }
1620
1621 // Check if the LHS range indicates a relation between OP1 and OP2.
1622
1623 relation_kind
1624 operator_ge::op1_op2_relation (const irange &lhs, const irange &,
1625 const irange &) const
1626 {
1627 if (lhs.undefined_p ())
1628 return VREL_UNDEFINED;
1629
1630 // FALSE = op1 >= op2 indicates LT_EXPR.
1631 if (lhs.zero_p ())
1632 return VREL_LT;
1633
1634 // TRUE = op1 >= op2 indicates GE_EXPR.
1635 if (!contains_zero_p (lhs))
1636 return VREL_GE;
1637 return VREL_VARYING;
1638 }
1639
1640 bool
1641 operator_ge::fold_range (irange &r, tree type,
1642 const irange &op1,
1643 const irange &op2,
1644 relation_trio rel) const
1645 {
1646 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
1647 return true;
1648
1649 signop sign = TYPE_SIGN (op1.type ());
1650 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1651
1652 if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
1653 r = range_true (type);
1654 else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
1655 r = range_false (type);
1656 else
1657 r = range_true_and_false (type);
1658 return true;
1659 }
1660
1661 bool
1662 operator_ge::op1_range (irange &r, tree type,
1663 const irange &lhs,
1664 const irange &op2,
1665 relation_trio) const
1666 {
1667 if (op2.undefined_p ())
1668 return false;
1669
1670 switch (get_bool_state (r, lhs, type))
1671 {
1672 case BRS_TRUE:
1673 build_ge (r, type, op2.lower_bound ());
1674 break;
1675
1676 case BRS_FALSE:
1677 build_lt (r, type, op2.upper_bound ());
1678 break;
1679
1680 default:
1681 break;
1682 }
1683 return true;
1684 }
1685
1686 bool
1687 operator_ge::op2_range (irange &r, tree type,
1688 const irange &lhs,
1689 const irange &op1,
1690 relation_trio) const
1691 {
1692 if (op1.undefined_p ())
1693 return false;
1694
1695 switch (get_bool_state (r, lhs, type))
1696 {
1697 case BRS_TRUE:
1698 build_le (r, type, op1.upper_bound ());
1699 break;
1700
1701 case BRS_FALSE:
1702 build_gt (r, type, op1.lower_bound ());
1703 break;
1704
1705 default:
1706 break;
1707 }
1708 return true;
1709 }
1710
1711
1712 void
1713 operator_plus::update_bitmask (irange &r, const irange &lh,
1714 const irange &rh) const
1715 {
1716 update_known_bitmask (r, PLUS_EXPR, lh, rh);
1717 }
1718
1719 // Check to see if the range of OP2 indicates anything about the relation
1720 // between LHS and OP1.
1721
1722 relation_kind
1723 operator_plus::lhs_op1_relation (const irange &lhs,
1724 const irange &op1,
1725 const irange &op2,
1726 relation_kind) const
1727 {
1728 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
1729 return VREL_VARYING;
1730
1731 tree type = lhs.type ();
1732 unsigned prec = TYPE_PRECISION (type);
1733 wi::overflow_type ovf1, ovf2;
1734 signop sign = TYPE_SIGN (type);
1735
1736 // LHS = OP1 + 0 indicates LHS == OP1.
1737 if (op2.zero_p ())
1738 return VREL_EQ;
1739
1740 if (TYPE_OVERFLOW_WRAPS (type))
1741 {
1742 wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
1743 wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
1744 }
1745 else
1746 ovf1 = ovf2 = wi::OVF_NONE;
1747
1748 // Never wrapping additions.
1749 if (!ovf1 && !ovf2)
1750 {
1751 // Positive op2 means lhs > op1.
1752 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1753 return VREL_GT;
1754 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1755 return VREL_GE;
1756
1757 // Negative op2 means lhs < op1.
1758 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1759 return VREL_LT;
1760 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1761 return VREL_LE;
1762 }
1763 // Always wrapping additions.
1764 else if (ovf1 && ovf1 == ovf2)
1765 {
1766 // Positive op2 means lhs < op1.
1767 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1768 return VREL_LT;
1769 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1770 return VREL_LE;
1771
1772 // Negative op2 means lhs > op1.
1773 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1774 return VREL_GT;
1775 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1776 return VREL_GE;
1777 }
1778
1779 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1780 if (!range_includes_zero_p (op2))
1781 return VREL_NE;
1782
1783 return VREL_VARYING;
1784 }
1785
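// Editorial sketch (not part of the original file): with a strictly
// positive, non-wrapping offset, the relation above resolves to VREL_GT,
// i.e. for y = x + [1,5] we can record y > x.
#if 0
static void
example_plus_relation (const irange &y, const irange &x, tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  int_range<1> offset (type, wi::shwi (1, prec), wi::shwi (5, prec));
  relation_kind k = op_plus.lhs_op1_relation (y, x, offset, VREL_VARYING);
  // k is VREL_GT as long as the addition cannot wrap.
}
#endif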
1786 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1787 // operands.
1788
1789 relation_kind
1790 operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
1791 const irange &op2, relation_kind rel) const
1792 {
1793 return lhs_op1_relation (lhs, op2, op1, rel);
1794 }
1795
1796 void
1797 operator_plus::wi_fold (irange &r, tree type,
1798 const wide_int &lh_lb, const wide_int &lh_ub,
1799 const wide_int &rh_lb, const wide_int &rh_ub) const
1800 {
1801 wi::overflow_type ov_lb, ov_ub;
1802 signop s = TYPE_SIGN (type);
1803 wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
1804 wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
1805 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1806 }
1807
1808 // Given addition or subtraction, determine the possible NORMAL ranges and
1809 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1810 // Return the relation that exists between the LHS and OP1 in order for the
1811 // NORMAL range to apply.
1812 // A return value of VREL_VARYING means no ranges were applicable.
1813
1814 static relation_kind
1815 plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
1816 bool add_p)
1817 {
1818 relation_kind kind = VREL_VARYING;
1819 // For now, only deal with constant adds. This could be extended to ranges
1820 // when someone is so motivated.
1821 if (!offset.singleton_p () || offset.zero_p ())
1822 return kind;
1823
1824   // Always work with a positive offset, i.e. a + -2 -> a - 2 and a - -2 -> a + 2.
1825 wide_int off = offset.lower_bound ();
1826 if (wi::neg_p (off, SIGNED))
1827 {
1828 add_p = !add_p;
1829 off = wi::neg (off);
1830 }
1831
1832 wi::overflow_type ov;
1833 tree type = offset.type ();
1834 unsigned prec = TYPE_PRECISION (type);
1835 wide_int ub;
1836 wide_int lb;
1837   // Calculate the normal range and relation for the operation.
1838 if (add_p)
1839 {
1840 // [ 0 , INF - OFF]
1841 lb = wi::zero (prec);
1842 ub = wi::sub (irange_val_max (type), off, UNSIGNED, &ov);
1843 kind = VREL_GT;
1844 }
1845 else
1846 {
1847 // [ OFF, INF ]
1848 lb = off;
1849 ub = irange_val_max (type);
1850 kind = VREL_LT;
1851 }
1852 int_range<2> normal_range (type, lb, ub);
1853 int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);
1854
1855 r_ov = ov_range;
1856 r_normal = normal_range;
1857 return kind;
1858 }
1859
1860 // Once op1 has been calculated by operator_plus or operator_minus, check
1861 // to see if the relation passed causes any part of the calculation to
1862 // be impossible.  For example,
1863 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF],
1864 // which further refines a_2 to [0, 0].
1865 // R is the value of op1, OP2 is the offset being added/subtracted, REL is
1866 // the relation between the LHS and OP1, and ADD_P is true for PLUS, false
1867 // for MINUS.  If any adjustment can be made, R will reflect it.
1868
1869 static void
1870 adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
1871 bool add_p)
1872 {
1873 if (r.undefined_p ())
1874 return;
1875 tree type = r.type ();
1876 // Check for unsigned overflow and calculate the overflow part.
1877 signop s = TYPE_SIGN (type);
1878 if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
1879 return;
1880
1881 // Only work with <, <=, >, >= relations.
1882 if (!relation_lt_le_gt_ge_p (rel))
1883 return;
1884
1885 // Get the ranges for this offset.
1886 int_range_max normal, overflow;
1887 relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);
1888
1889 // VREL_VARYING means there are no adjustments.
1890 if (k == VREL_VARYING)
1891 return;
1892
1893 // If the relations match use the normal range, otherwise use overflow range.
1894 if (relation_intersect (k, rel) == k)
1895 r.intersect (normal);
1896 else
1897 r.intersect (overflow);
1898 return;
1899 }
1900
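// Editorial sketch (not from the original source): the refinement described
// above.  For unsigned x, if a = x + 1 and we additionally know a < x, the
// addition must have wrapped, so x is refined to [MAX, MAX] and, in turn,
// a to [0, 0].
#if 0
static void
example_overflow_refinement (irange &x, tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  int_range<1> one (type, wi::shwi (1, prec), wi::shwi (1, prec));
  // X already holds the candidate range for op1; VREL_LT is the known
  // relation between the LHS and op1 (a < x), and true selects PLUS.
  adjust_op1_for_overflow (x, one, VREL_LT, true);
}
#endif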
1901 bool
1902 operator_plus::op1_range (irange &r, tree type,
1903 const irange &lhs,
1904 const irange &op2,
1905 relation_trio trio) const
1906 {
1907 if (lhs.undefined_p ())
1908 return false;
1909 // Start with the default operation.
1910 range_op_handler minus (MINUS_EXPR);
1911 if (!minus)
1912 return false;
1913 bool res = minus.fold_range (r, type, lhs, op2);
1914 relation_kind rel = trio.lhs_op1 ();
1915 // Check for a relation refinement.
1916 if (res)
1917 adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
1918 return res;
1919 }
1920
1921 bool
1922 operator_plus::op2_range (irange &r, tree type,
1923 const irange &lhs,
1924 const irange &op1,
1925 relation_trio rel) const
1926 {
1927 return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1928 }
1929
1930 class operator_widen_plus_signed : public range_operator
1931 {
1932 public:
1933 virtual void wi_fold (irange &r, tree type,
1934 const wide_int &lh_lb,
1935 const wide_int &lh_ub,
1936 const wide_int &rh_lb,
1937 const wide_int &rh_ub) const;
1938 } op_widen_plus_signed;
1939
1940 void
1941 operator_widen_plus_signed::wi_fold (irange &r, tree type,
1942 const wide_int &lh_lb,
1943 const wide_int &lh_ub,
1944 const wide_int &rh_lb,
1945 const wide_int &rh_ub) const
1946 {
1947 wi::overflow_type ov_lb, ov_ub;
1948 signop s = TYPE_SIGN (type);
1949
1950 wide_int lh_wlb
1951 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
1952 wide_int lh_wub
1953 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
1954 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1955 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1956
1957 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1958 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1959
1960 r = int_range<2> (type, new_lb, new_ub);
1961 }
1962
1963 class operator_widen_plus_unsigned : public range_operator
1964 {
1965 public:
1966 virtual void wi_fold (irange &r, tree type,
1967 const wide_int &lh_lb,
1968 const wide_int &lh_ub,
1969 const wide_int &rh_lb,
1970 const wide_int &rh_ub) const;
1971 } op_widen_plus_unsigned;
1972
1973 void
1974 operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
1975 const wide_int &lh_lb,
1976 const wide_int &lh_ub,
1977 const wide_int &rh_lb,
1978 const wide_int &rh_ub) const
1979 {
1980 wi::overflow_type ov_lb, ov_ub;
1981 signop s = TYPE_SIGN (type);
1982
1983 wide_int lh_wlb
1984 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
1985 wide_int lh_wub
1986 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
1987 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1988 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1989
1990 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1991 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1992
1993 r = int_range<2> (type, new_lb, new_ub);
1994 }
1995
1996 void
1997 operator_minus::update_bitmask (irange &r, const irange &lh,
1998 const irange &rh) const
1999 {
2000 update_known_bitmask (r, MINUS_EXPR, lh, rh);
2001 }
2002
2003 void
2004 operator_minus::wi_fold (irange &r, tree type,
2005 const wide_int &lh_lb, const wide_int &lh_ub,
2006 const wide_int &rh_lb, const wide_int &rh_ub) const
2007 {
2008 wi::overflow_type ov_lb, ov_ub;
2009 signop s = TYPE_SIGN (type);
2010 wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
2011 wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
2012 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
2013 }
2014
2015
2016 // Return the relation between LHS and OP1 based on the relation between
2017 // OP1 and OP2.
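// For example (editorial sketch, unsigned math): with a_2 = b_3 - c_4 and
// b_3 >= c_4, the subtraction cannot wrap, so a_2 <= b_3 and VREL_LE is
// returned below.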
2018
2019 relation_kind
2020 operator_minus::lhs_op1_relation (const irange &, const irange &op1,
2021 const irange &, relation_kind rel) const
2022 {
2023 if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
2024 switch (rel)
2025 {
2026 case VREL_GT:
2027 case VREL_GE:
2028 return VREL_LE;
2029 default:
2030 break;
2031 }
2032 return VREL_VARYING;
2033 }
2034
2035 // Check to see if the relation REL between OP1 and OP2 has any effect on the
2036 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
2037 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
2038
2039 bool
2040 minus_op1_op2_relation_effect (irange &lhs_range, tree type,
2041 const irange &op1_range ATTRIBUTE_UNUSED,
2042 const irange &op2_range ATTRIBUTE_UNUSED,
2043 relation_kind rel)
2044 {
2045 if (rel == VREL_VARYING)
2046 return false;
2047
2048 int_range<2> rel_range;
2049 unsigned prec = TYPE_PRECISION (type);
2050 signop sgn = TYPE_SIGN (type);
2051
2052 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
2053 if (rel == VREL_EQ)
2054 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
2055 else if (rel == VREL_NE)
2056 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
2057 VR_ANTI_RANGE);
2058 else if (TYPE_OVERFLOW_WRAPS (type))
2059 {
2060 switch (rel)
2061 {
2062 // For wrapping signed values and unsigned, if op1 > op2 or
2063 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
2064 case VREL_GT:
2065 case VREL_LT:
2066 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
2067 VR_ANTI_RANGE);
2068 break;
2069 default:
2070 return false;
2071 }
2072 }
2073 else
2074 {
2075 switch (rel)
2076 {
2077 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
2078 case VREL_GT:
2079 rel_range = int_range<2> (type, wi::one (prec),
2080 wi::max_value (prec, sgn));
2081 break;
2082 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
2083 case VREL_GE:
2084 rel_range = int_range<2> (type, wi::zero (prec),
2085 wi::max_value (prec, sgn));
2086 break;
2087 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
2088 case VREL_LT:
2089 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
2090 wi::minus_one (prec));
2091 break;
2092 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
2093 case VREL_LE:
2094 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
2095 wi::zero (prec));
2096 break;
2097 default:
2098 return false;
2099 }
2100 }
2101 lhs_range.intersect (rel_range);
2102 return true;
2103 }
2104
2105 bool
2106 operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
2107 const irange &op1_range,
2108 const irange &op2_range,
2109 relation_kind rel) const
2110 {
2111 return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
2112 rel);
2113 }
2114
2115 bool
2116 operator_minus::op1_range (irange &r, tree type,
2117 const irange &lhs,
2118 const irange &op2,
2119 relation_trio trio) const
2120 {
2121 if (lhs.undefined_p ())
2122 return false;
2123 // Start with the default operation.
2124 range_op_handler plus (PLUS_EXPR);
2125 if (!plus)
2126 return false;
2127 bool res = plus.fold_range (r, type, lhs, op2);
2128 relation_kind rel = trio.lhs_op1 ();
2129 if (res)
2130 adjust_op1_for_overflow (r, op2, rel, false /* MINUS_EXPR */);
2131 return res;
2132
2133 }
2134
2135 bool
2136 operator_minus::op2_range (irange &r, tree type,
2137 const irange &lhs,
2138 const irange &op1,
2139 relation_trio) const
2140 {
2141 if (lhs.undefined_p ())
2142 return false;
2143 return fold_range (r, type, op1, lhs);
2144 }
2145
2146 void
2147 operator_min::update_bitmask (irange &r, const irange &lh,
2148 const irange &rh) const
2149 {
2150 update_known_bitmask (r, MIN_EXPR, lh, rh);
2151 }
2152
2153 void
2154 operator_min::wi_fold (irange &r, tree type,
2155 const wide_int &lh_lb, const wide_int &lh_ub,
2156 const wide_int &rh_lb, const wide_int &rh_ub) const
2157 {
2158 signop s = TYPE_SIGN (type);
2159 wide_int new_lb = wi::min (lh_lb, rh_lb, s);
2160 wide_int new_ub = wi::min (lh_ub, rh_ub, s);
2161 value_range_with_overflow (r, type, new_lb, new_ub);
2162 }
2163
2164
2165 void
2166 operator_max::update_bitmask (irange &r, const irange &lh,
2167 const irange &rh) const
2168 {
2169 update_known_bitmask (r, MAX_EXPR, lh, rh);
2170 }
2171
2172 void
2173 operator_max::wi_fold (irange &r, tree type,
2174 const wide_int &lh_lb, const wide_int &lh_ub,
2175 const wide_int &rh_lb, const wide_int &rh_ub) const
2176 {
2177 signop s = TYPE_SIGN (type);
2178 wide_int new_lb = wi::max (lh_lb, rh_lb, s);
2179 wide_int new_ub = wi::max (lh_ub, rh_ub, s);
2180 value_range_with_overflow (r, type, new_lb, new_ub);
2181 }
2182
2183
2184 // Calculate the cross product of two sets of ranges and return it.
2185 //
2186 // Multiplications, divisions and shifts are a bit tricky to handle,
2187 // depending on the mix of signs we have in the two ranges, we need to
2188 // operate on different values to get the minimum and maximum values
2189 // for the new range. One approach is to figure out all the
2190 // variations of range combinations and do the operations.
2191 //
2192 // However, this involves several calls to compare_values and it is
2193 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
2194 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
2195 // figure the smallest and largest values to form the new range.
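// Worked example (editorial, signed with no overflow): [-3, 2] * [5, 10]
// computes the products -15, -30, 10 and 20; the smallest is -30 and the
// largest is 20, so the resulting range is [-30, 20].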
2196
2197 void
2198 cross_product_operator::wi_cross_product (irange &r, tree type,
2199 const wide_int &lh_lb,
2200 const wide_int &lh_ub,
2201 const wide_int &rh_lb,
2202 const wide_int &rh_ub) const
2203 {
2204 wide_int cp1, cp2, cp3, cp4;
2205 // Default to varying.
2206 r.set_varying (type);
2207
2208 // Compute the 4 cross operations, bailing if we get an overflow we
2209 // can't handle.
2210 if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
2211 return;
2212 if (wi::eq_p (lh_lb, lh_ub))
2213 cp3 = cp1;
2214 else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
2215 return;
2216 if (wi::eq_p (rh_lb, rh_ub))
2217 cp2 = cp1;
2218 else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
2219 return;
2220 if (wi::eq_p (lh_lb, lh_ub))
2221 cp4 = cp2;
2222 else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
2223 return;
2224
2225 // Order pairs.
2226 signop sign = TYPE_SIGN (type);
2227 if (wi::gt_p (cp1, cp2, sign))
2228 std::swap (cp1, cp2);
2229 if (wi::gt_p (cp3, cp4, sign))
2230 std::swap (cp3, cp4);
2231
2232 // Choose min and max from the ordered pairs.
2233 wide_int res_lb = wi::min (cp1, cp3, sign);
2234 wide_int res_ub = wi::max (cp2, cp4, sign);
2235 value_range_with_overflow (r, type, res_lb, res_ub);
2236 }
2237
2238
2239 void
2240 operator_mult::update_bitmask (irange &r, const irange &lh,
2241 const irange &rh) const
2242 {
2243 update_known_bitmask (r, MULT_EXPR, lh, rh);
2244 }
2245
2246 bool
2247 operator_mult::op1_range (irange &r, tree type,
2248 const irange &lhs, const irange &op2,
2249 relation_trio) const
2250 {
2251 if (lhs.undefined_p ())
2252 return false;
2253
2254 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2255 // For example: for 0 = OP1 * 2, OP1 could be 0 or 0x80, whereas
2256 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit).
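// For example, with undefined overflow (e.g. a plain signed int), solving
// [4, 8] = OP1 * [2, 2] divides through to give OP1 = [2, 4] via the
// TRUNC_DIV_EXPR handler below.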
2257 if (TYPE_OVERFLOW_WRAPS (type))
2258 return false;
2259
2260 wide_int offset;
2261 if (op2.singleton_p (offset) && offset != 0)
2262 return range_op_handler (TRUNC_DIV_EXPR).fold_range (r, type, lhs, op2);
2263 return false;
2264 }
2265
2266 bool
2267 operator_mult::op2_range (irange &r, tree type,
2268 const irange &lhs, const irange &op1,
2269 relation_trio rel) const
2270 {
2271 return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
2272 }
2273
2274 bool
2275 operator_mult::wi_op_overflows (wide_int &res, tree type,
2276 const wide_int &w0, const wide_int &w1) const
2277 {
2278 wi::overflow_type overflow = wi::OVF_NONE;
2279 signop sign = TYPE_SIGN (type);
2280 res = wi::mul (w0, w1, sign, &overflow);
2281 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2282 {
2283 // For multiplication, the sign of the overflow is given
2284 // by the comparison of the signs of the operands.
2285 if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
2286 res = wi::max_value (w0.get_precision (), sign);
2287 else
2288 res = wi::min_value (w0.get_precision (), sign);
2289 return false;
2290 }
2291 return overflow;
2292 }
2293
2294 void
2295 operator_mult::wi_fold (irange &r, tree type,
2296 const wide_int &lh_lb, const wide_int &lh_ub,
2297 const wide_int &rh_lb, const wide_int &rh_ub) const
2298 {
2299 if (TYPE_OVERFLOW_UNDEFINED (type))
2300 {
2301 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2302 return;
2303 }
2304
2305 // Multiply the ranges when overflow wraps. This is basically fancy
2306 // code so we don't drop to varying with an unsigned
2307 // [-3,-1]*[-3,-1].
2308 //
2309 // This test requires 2*prec bits if both operands are signed and
2310 // 2*prec + 2 bits if either is not. Therefore, extend the values
2311 // using the sign of the result to PREC2. From here on out,
2312 // everything is just signed math no matter what the input types
2313 // were.
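// Worked example (editorial, unsigned 8-bit): [253, 255] * [253, 255]
// canonicalizes both operands to [-3, -1]; the products are 9, 3, 3 and 1,
// the span (9 - 1 = 8) is below the type's size, and the result wraps back
// to the 8-bit range [1, 9] (253 * 253 == 9 and 255 * 255 == 1 modulo 256).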
2314
2315 signop sign = TYPE_SIGN (type);
2316 unsigned prec = TYPE_PRECISION (type);
2317 widest2_int min0 = widest2_int::from (lh_lb, sign);
2318 widest2_int max0 = widest2_int::from (lh_ub, sign);
2319 widest2_int min1 = widest2_int::from (rh_lb, sign);
2320 widest2_int max1 = widest2_int::from (rh_ub, sign);
2321 widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
2322 widest2_int size = sizem1 + 1;
2323
2324 // Canonicalize the intervals.
2325 if (sign == UNSIGNED)
2326 {
2327 if (wi::ltu_p (size, min0 + max0))
2328 {
2329 min0 -= size;
2330 max0 -= size;
2331 }
2332 if (wi::ltu_p (size, min1 + max1))
2333 {
2334 min1 -= size;
2335 max1 -= size;
2336 }
2337 }
2338
2339 // Sort the 4 products so that min is in prod0 and max is in
2340 // prod3.
2341 widest2_int prod0 = min0 * min1;
2342 widest2_int prod1 = min0 * max1;
2343 widest2_int prod2 = max0 * min1;
2344 widest2_int prod3 = max0 * max1;
2345
2346 // min0min1 > max0max1
2347 if (prod0 > prod3)
2348 std::swap (prod0, prod3);
2349
2350 // min0max1 > max0min1
2351 if (prod1 > prod2)
2352 std::swap (prod1, prod2);
2353
2354 if (prod0 > prod1)
2355 std::swap (prod0, prod1);
2356
2357 if (prod2 > prod3)
2358 std::swap (prod2, prod3);
2359
2360 // diff = max - min
2361 prod2 = prod3 - prod0;
2362 if (wi::geu_p (prod2, sizem1))
2363 {
2364 // Multiplying by X, where X is a power of 2, yields [0,0][X,+INF].
2365 if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
2366 && wi::exact_log2 (rh_lb) != -1 && prec > 1)
2367 {
2368 r.set (type, rh_lb, wi::max_value (prec, sign));
2369 int_range<2> zero;
2370 zero.set_zero (type);
2371 r.union_ (zero);
2372 }
2373 else
2374 // The range covers all values.
2375 r.set_varying (type);
2376 }
2377 else
2378 {
2379 wide_int new_lb = wide_int::from (prod0, prec, sign);
2380 wide_int new_ub = wide_int::from (prod3, prec, sign);
2381 create_possibly_reversed_range (r, type, new_lb, new_ub);
2382 }
2383 }
2384
2385 class operator_widen_mult_signed : public range_operator
2386 {
2387 public:
2388 virtual void wi_fold (irange &r, tree type,
2389 const wide_int &lh_lb,
2390 const wide_int &lh_ub,
2391 const wide_int &rh_lb,
2392 const wide_int &rh_ub)
2393 const;
2394 } op_widen_mult_signed;
2395
2396 void
2397 operator_widen_mult_signed::wi_fold (irange &r, tree type,
2398 const wide_int &lh_lb,
2399 const wide_int &lh_ub,
2400 const wide_int &rh_lb,
2401 const wide_int &rh_ub) const
2402 {
2403 signop s = TYPE_SIGN (type);
2404
2405 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
2406 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
2407 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2408 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2409
2410 /* We don't expect a widening multiplication to be able to overflow but range
2411 calculations for multiplications are complicated.  After widening the
2412 operands, let's call the regular multiplication folder (op_mult). */
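// As an editorial illustration: widening [-128, 127] * [-128, 127] from
// 8-bit signed operands gives 16-bit products ranging from -16256
// (-128 * 127) up to 16384 (-128 * -128), so the widened result
// [-16256, 16384] fits without wrapping in 16 bits.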
2413 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2414 }
2415
2416
2417 class operator_widen_mult_unsigned : public range_operator
2418 {
2419 public:
2420 virtual void wi_fold (irange &r, tree type,
2421 const wide_int &lh_lb,
2422 const wide_int &lh_ub,
2423 const wide_int &rh_lb,
2424 const wide_int &rh_ub)
2425 const;
2426 } op_widen_mult_unsigned;
2427
2428 void
2429 operator_widen_mult_unsigned::wi_fold (irange &r, tree type,
2430 const wide_int &lh_lb,
2431 const wide_int &lh_ub,
2432 const wide_int &rh_lb,
2433 const wide_int &rh_ub) const
2434 {
2435 signop s = TYPE_SIGN (type);
2436
2437 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
2438 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
2439 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2440 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2441
2442 /* We don't expect a widening multiplication to be able to overflow but range
2443 calculations for multiplications are complicated.  After widening the
2444 operands, let's call the regular multiplication folder (op_mult). */
2445 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2446 }
2447
2448 class operator_div : public cross_product_operator
2449 {
2450 using range_operator::update_bitmask;
2451 public:
2452 operator_div (tree_code div_kind) { m_code = div_kind; }
2453 virtual void wi_fold (irange &r, tree type,
2454 const wide_int &lh_lb,
2455 const wide_int &lh_ub,
2456 const wide_int &rh_lb,
2457 const wide_int &rh_ub) const final override;
2458 virtual bool wi_op_overflows (wide_int &res, tree type,
2459 const wide_int &, const wide_int &)
2460 const final override;
2461 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
2462 { update_known_bitmask (r, m_code, lh, rh); }
2463 protected:
2464 tree_code m_code;
2465 };
2466
2467 static operator_div op_trunc_div (TRUNC_DIV_EXPR);
2468 static operator_div op_floor_div (FLOOR_DIV_EXPR);
2469 static operator_div op_round_div (ROUND_DIV_EXPR);
2470 static operator_div op_ceil_div (CEIL_DIV_EXPR);
2471
2472 bool
2473 operator_div::wi_op_overflows (wide_int &res, tree type,
2474 const wide_int &w0, const wide_int &w1) const
2475 {
2476 if (w1 == 0)
2477 return true;
2478
2479 wi::overflow_type overflow = wi::OVF_NONE;
2480 signop sign = TYPE_SIGN (type);
2481
2482 switch (m_code)
2483 {
2484 case EXACT_DIV_EXPR:
2485 case TRUNC_DIV_EXPR:
2486 res = wi::div_trunc (w0, w1, sign, &overflow);
2487 break;
2488 case FLOOR_DIV_EXPR:
2489 res = wi::div_floor (w0, w1, sign, &overflow);
2490 break;
2491 case ROUND_DIV_EXPR:
2492 res = wi::div_round (w0, w1, sign, &overflow);
2493 break;
2494 case CEIL_DIV_EXPR:
2495 res = wi::div_ceil (w0, w1, sign, &overflow);
2496 break;
2497 default:
2498 gcc_unreachable ();
2499 }
2500
2501 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2502 {
2503 // For division, the only case is -INF / -1 = +INF.
2504 res = wi::max_value (w0.get_precision (), sign);
2505 return false;
2506 }
2507 return overflow;
2508 }
2509
2510 void
2511 operator_div::wi_fold (irange &r, tree type,
2512 const wide_int &lh_lb, const wide_int &lh_ub,
2513 const wide_int &rh_lb, const wide_int &rh_ub) const
2514 {
2515 const wide_int dividend_min = lh_lb;
2516 const wide_int dividend_max = lh_ub;
2517 const wide_int divisor_min = rh_lb;
2518 const wide_int divisor_max = rh_ub;
2519 signop sign = TYPE_SIGN (type);
2520 unsigned prec = TYPE_PRECISION (type);
2521 wide_int extra_min, extra_max;
2522
2523 // If we know we won't divide by zero, just do the division.
2524 if (!wi_includes_zero_p (type, divisor_min, divisor_max))
2525 {
2526 wi_cross_product (r, type, dividend_min, dividend_max,
2527 divisor_min, divisor_max);
2528 return;
2529 }
2530
2531 // If we're definitely dividing by zero, there's nothing to do.
2532 if (wi_zero_p (type, divisor_min, divisor_max))
2533 {
2534 r.set_undefined ();
2535 return;
2536 }
2537
2538 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2539 // skip any division by zero.
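// For example (editorial sketch, signed): [20, 40] / [-2, 2] folds the
// negative piece [20, 40] / [-2, -1] to [-40, -10] and the positive piece
// [20, 40] / [1, 2] to [10, 40], giving the union [-40, -10] U [10, 40].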
2540
2541 // First divide by the negative numbers, if any.
2542 if (wi::neg_p (divisor_min, sign))
2543 wi_cross_product (r, type, dividend_min, dividend_max,
2544 divisor_min, wi::minus_one (prec));
2545 else
2546 r.set_undefined ();
2547
2548 // Then divide by the non-zero positive numbers, if any.
2549 if (wi::gt_p (divisor_max, wi::zero (prec), sign))
2550 {
2551 int_range_max tmp;
2552 wi_cross_product (tmp, type, dividend_min, dividend_max,
2553 wi::one (prec), divisor_max);
2554 r.union_ (tmp);
2555 }
2556 // We shouldn't still have undefined here.
2557 gcc_checking_assert (!r.undefined_p ());
2558 }
2559
2560
2561 class operator_exact_divide : public operator_div
2562 {
2563 using range_operator::op1_range;
2564 public:
2565 operator_exact_divide () : operator_div (EXACT_DIV_EXPR) { }
2566 virtual bool op1_range (irange &r, tree type,
2567 const irange &lhs,
2568 const irange &op2,
2569 relation_trio) const;
2570
2571 } op_exact_div;
2572
2573 bool
2574 operator_exact_divide::op1_range (irange &r, tree type,
2575 const irange &lhs,
2576 const irange &op2,
2577 relation_trio) const
2578 {
2579 if (lhs.undefined_p ())
2580 return false;
2581 wide_int offset;
2582 // [2, 4] = op1 / [3,3]: since it's an exact divide, there is no need to
2583 // worry about remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2584 // We won't bother trying to enumerate all the in-between values.
2585 // The true result is [6,6][9,9][12,12], but this is unlikely to matter
2586 // most of the time.
2587 // If op2 is a multiple of 2, we would be able to set some nonzero bits.
2588 if (op2.singleton_p (offset) && offset != 0)
2589 return range_op_handler (MULT_EXPR).fold_range (r, type, lhs, op2);
2590 return false;
2591 }
2592
2593
2594 class operator_lshift : public cross_product_operator
2595 {
2596 using range_operator::fold_range;
2597 using range_operator::op1_range;
2598 using range_operator::update_bitmask;
2599 public:
2600 virtual bool op1_range (irange &r, tree type, const irange &lhs,
2601 const irange &op2, relation_trio rel = TRIO_VARYING)
2602 const final override;
2603 virtual bool fold_range (irange &r, tree type, const irange &op1,
2604 const irange &op2, relation_trio rel = TRIO_VARYING)
2605 const final override;
2606
2607 virtual void wi_fold (irange &r, tree type,
2608 const wide_int &lh_lb, const wide_int &lh_ub,
2609 const wide_int &rh_lb,
2610 const wide_int &rh_ub) const final override;
2611 virtual bool wi_op_overflows (wide_int &res,
2612 tree type,
2613 const wide_int &,
2614 const wide_int &) const final override;
2615 void update_bitmask (irange &r, const irange &lh,
2616 const irange &rh) const final override
2617 { update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
2618 // Check compatibility of LHS and op1.
2619 bool operand_check_p (tree t1, tree t2, tree) const final override
2620 { return range_compatible_p (t1, t2); }
2621 } op_lshift;
2622
2623 class operator_rshift : public cross_product_operator
2624 {
2625 using range_operator::fold_range;
2626 using range_operator::op1_range;
2627 using range_operator::lhs_op1_relation;
2628 using range_operator::update_bitmask;
2629 public:
2630 virtual bool fold_range (irange &r, tree type, const irange &op1,
2631 const irange &op2, relation_trio rel = TRIO_VARYING)
2632 const final override;
2633 virtual void wi_fold (irange &r, tree type,
2634 const wide_int &lh_lb,
2635 const wide_int &lh_ub,
2636 const wide_int &rh_lb,
2637 const wide_int &rh_ub) const final override;
2638 virtual bool wi_op_overflows (wide_int &res,
2639 tree type,
2640 const wide_int &w0,
2641 const wide_int &w1) const final override;
2642 virtual bool op1_range (irange &, tree type, const irange &lhs,
2643 const irange &op2, relation_trio rel = TRIO_VARYING)
2644 const final override;
2645 virtual relation_kind lhs_op1_relation (const irange &lhs, const irange &op1,
2646 const irange &op2, relation_kind rel)
2647 const final override;
2648 void update_bitmask (irange &r, const irange &lh,
2649 const irange &rh) const final override
2650 { update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
2651 // Check compatibility of LHS and op1.
2652 bool operand_check_p (tree t1, tree t2, tree) const final override
2653 { return range_compatible_p (t1, t2); }
2654 } op_rshift;
2655
2656
2657 relation_kind
2658 operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
2659 const irange &op1,
2660 const irange &op2,
2661 relation_kind) const
2662 {
2663 // If both operand ranges are >= 0, then LHS <= op1.
2664 if (!op1.undefined_p () && !op2.undefined_p ()
2665 && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
2666 && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
2667 return VREL_LE;
2668 return VREL_VARYING;
2669 }
2670
2671 bool
2672 operator_lshift::fold_range (irange &r, tree type,
2673 const irange &op1,
2674 const irange &op2,
2675 relation_trio rel) const
2676 {
2677 int_range_max shift_range;
2678 if (!get_shift_range (shift_range, type, op2))
2679 {
2680 if (op2.undefined_p ())
2681 r.set_undefined ();
2682 else
2683 r.set_zero (type);
2684 return true;
2685 }
2686
2687 // Transform left shifts by constants into multiplies.
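// For instance, [3, 5] << 2 is folded as [3, 5] * [4, 4], giving [12, 20];
// wrapping multiplication is forced below so an overflowing shift still
// folds to a wrapped, rather than saturated, range.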
2688 if (shift_range.singleton_p ())
2689 {
2690 unsigned shift = shift_range.lower_bound ().to_uhwi ();
2691 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
2692 int_range<1> mult (type, tmp, tmp);
2693
2694 // Force wrapping multiplication.
2695 bool saved_flag_wrapv = flag_wrapv;
2696 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
2697 flag_wrapv = 1;
2698 flag_wrapv_pointer = 1;
2699 bool b = op_mult.fold_range (r, type, op1, mult);
2700 flag_wrapv = saved_flag_wrapv;
2701 flag_wrapv_pointer = saved_flag_wrapv_pointer;
2702 return b;
2703 }
2704 else
2705 // Otherwise, invoke the generic fold routine.
2706 return range_operator::fold_range (r, type, op1, shift_range, rel);
2707 }
2708
2709 void
2710 operator_lshift::wi_fold (irange &r, tree type,
2711 const wide_int &lh_lb, const wide_int &lh_ub,
2712 const wide_int &rh_lb, const wide_int &rh_ub) const
2713 {
2714 signop sign = TYPE_SIGN (type);
2715 unsigned prec = TYPE_PRECISION (type);
2716 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
2717 int bound_shift = overflow_pos - rh_ub.to_shwi ();
2718 // If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
2719 // overflow.  However, for that to happen, rh_ub needs to be zero,
2720 // which means rh is a singleton range of zero, in which case we simply
2721 // return [lh_lb, lh_ub] as the result.
2722 if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
2723 {
2724 r = int_range<2> (type, lh_lb, lh_ub);
2725 return;
2726 }
2727
2728 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2729 wide_int complement = ~(bound - 1);
2730 wide_int low_bound, high_bound;
2731 bool in_bounds = false;
2732
2733 if (sign == UNSIGNED)
2734 {
2735 low_bound = bound;
2736 high_bound = complement;
2737 if (wi::ltu_p (lh_ub, low_bound))
2738 {
2739 // [5, 6] << [1, 2] == [10, 24].
2740 // We're shifting out only zeroes, the value increases
2741 // monotonically.
2742 in_bounds = true;
2743 }
2744 else if (wi::ltu_p (high_bound, lh_lb))
2745 {
2746 // [0xffffff00, 0xffffffff] << [1, 2]
2747 // == [0xfffffc00, 0xfffffffe].
2748 // We're shifting out only ones, the value decreases
2749 // monotonically.
2750 in_bounds = true;
2751 }
2752 }
2753 else
2754 {
2755 // [-1, 1] << [1, 2] == [-4, 4]
2756 low_bound = complement;
2757 high_bound = bound;
2758 if (wi::lts_p (lh_ub, high_bound)
2759 && wi::lts_p (low_bound, lh_lb))
2760 {
2761 // For non-negative numbers, we're shifting out only zeroes,
2762 // the value increases monotonically. For negative numbers,
2763 // we're shifting out only ones, the value decreases
2764 // monotonically.
2765 in_bounds = true;
2766 }
2767 }
2768
2769 if (in_bounds)
2770 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2771 else
2772 r.set_varying (type);
2773 }
2774
2775 bool
2776 operator_lshift::wi_op_overflows (wide_int &res, tree type,
2777 const wide_int &w0, const wide_int &w1) const
2778 {
2779 signop sign = TYPE_SIGN (type);
2780 if (wi::neg_p (w1))
2781 {
2782 // It's unclear from the C standard whether shifts can overflow.
2783 // The following code ignores overflow; perhaps a C standard
2784 // interpretation ruling is needed.
2785 res = wi::rshift (w0, -w1, sign);
2786 }
2787 else
2788 res = wi::lshift (w0, w1);
2789 return false;
2790 }
2791
2792 bool
2793 operator_lshift::op1_range (irange &r,
2794 tree type,
2795 const irange &lhs,
2796 const irange &op2,
2797 relation_trio) const
2798 {
2799 if (lhs.undefined_p ())
2800 return false;
2801
2802 if (!contains_zero_p (lhs))
2803 r.set_nonzero (type);
2804 else
2805 r.set_varying (type);
2806
2807 wide_int shift;
2808 if (op2.singleton_p (shift))
2809 {
2810 if (wi::lt_p (shift, 0, SIGNED))
2811 return false;
2812 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
2813 TYPE_PRECISION (op2.type ())),
2814 UNSIGNED))
2815 return false;
2816 if (shift == 0)
2817 {
2818 r.intersect (lhs);
2819 return true;
2820 }
2821
2822 // Work completely in unsigned mode to start.
2823 tree utype = type;
2824 int_range_max tmp_range;
2825 if (TYPE_SIGN (type) == SIGNED)
2826 {
2827 int_range_max tmp = lhs;
2828 utype = unsigned_type_for (type);
2829 range_cast (tmp, utype);
2830 op_rshift.fold_range (tmp_range, utype, tmp, op2);
2831 }
2832 else
2833 op_rshift.fold_range (tmp_range, utype, lhs, op2);
2834
2835 // Start with ranges which can produce the LHS by right shifting the
2836 // result by the shift amount.
2837 // i.e. [0x08, 0xF0] = op1 << 2 will start with
2838 // [00001000, 11110000] = op1 << 2
2839 // [0x02, 0x3C] aka [00000010, 00111100]
2840
2841 // Then create a range from the LB with the least significant upper bit
2842 // set, to the upper bound with all the upper bits set.
2843 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2844
2845 // Ideally we do this for each subrange, but just lump them all for now.
2846 unsigned low_bits = TYPE_PRECISION (utype) - shift.to_uhwi ();
2847 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
2848 wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
2849 wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
2850 int_range<2> fill_range (utype, new_lb, new_ub);
2851 tmp_range.union_ (fill_range);
2852
2853 if (utype != type)
2854 range_cast (tmp_range, type);
2855
2856 r.intersect (tmp_range);
2857 return true;
2858 }
2859
2860 return !r.varying_p ();
2861 }
2862
2863 bool
2864 operator_rshift::op1_range (irange &r,
2865 tree type,
2866 const irange &lhs,
2867 const irange &op2,
2868 relation_trio) const
2869 {
2870 if (lhs.undefined_p ())
2871 return false;
2872 wide_int shift;
2873 if (op2.singleton_p (shift))
2874 {
2875 // Ignore nonsensical shifts.
2876 unsigned prec = TYPE_PRECISION (type);
2877 if (wi::ge_p (shift,
2878 wi::uhwi (prec, TYPE_PRECISION (op2.type ())),
2879 UNSIGNED))
2880 return false;
2881 if (shift == 0)
2882 {
2883 r = lhs;
2884 return true;
2885 }
2886
2887 // Folding the original operation may discard some impossible
2888 // ranges from the LHS.
2889 int_range_max lhs_refined;
2890 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
2891 lhs_refined.intersect (lhs);
2892 if (lhs_refined.undefined_p ())
2893 {
2894 r.set_undefined ();
2895 return true;
2896 }
2897 int_range_max shift_range (op2.type (), shift, shift);
2898 int_range_max lb, ub;
2899 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
2900 // LHS
2901 // 0000 0111 = OP1 >> 3
2902 //
2903 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2904 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2905 // right hand side (0x07).
2906 wide_int mask = wi::bit_not (wi::lshift (wi::minus_one (prec), shift));
2907 int_range_max mask_range (type,
2908 wi::zero (TYPE_PRECISION (type)),
2909 mask);
2910 op_plus.fold_range (ub, type, lb, mask_range);
2911 r = lb;
2912 r.union_ (ub);
2913 if (!contains_zero_p (lhs_refined))
2914 {
2915 mask_range.invert ();
2916 r.intersect (mask_range);
2917 }
2918 return true;
2919 }
2920 return false;
2921 }
2922
2923 bool
2924 operator_rshift::wi_op_overflows (wide_int &res,
2925 tree type,
2926 const wide_int &w0,
2927 const wide_int &w1) const
2928 {
2929 signop sign = TYPE_SIGN (type);
2930 if (wi::neg_p (w1))
2931 res = wi::lshift (w0, -w1);
2932 else
2933 {
2934 // It's unclear from the C standard whether shifts can overflow.
2935 // The following code ignores overflow; perhaps a C standard
2936 // interpretation ruling is needed.
2937 res = wi::rshift (w0, w1, sign);
2938 }
2939 return false;
2940 }
2941
2942 bool
2943 operator_rshift::fold_range (irange &r, tree type,
2944 const irange &op1,
2945 const irange &op2,
2946 relation_trio rel) const
2947 {
2948 int_range_max shift;
2949 if (!get_shift_range (shift, type, op2))
2950 {
2951 if (op2.undefined_p ())
2952 r.set_undefined ();
2953 else
2954 r.set_zero (type);
2955 return true;
2956 }
2957
2958 return range_operator::fold_range (r, type, op1, shift, rel);
2959 }
2960
2961 void
2962 operator_rshift::wi_fold (irange &r, tree type,
2963 const wide_int &lh_lb, const wide_int &lh_ub,
2964 const wide_int &rh_lb, const wide_int &rh_ub) const
2965 {
2966 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2967 }
2968
2969
2970 // Add a partial equivalence between the LHS and op1 for casts.
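// For example (editorial sketch): for y = (int32_t) x with an 8-bit signed x
// known to be in [0, 127], the low 8 bits of x and y always match, so an
// 8-bit partial equivalence is returned; if x may be negative, VREL_VARYING
// is returned instead.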
2971
2972 relation_kind
2973 operator_cast::lhs_op1_relation (const irange &lhs,
2974 const irange &op1,
2975 const irange &op2 ATTRIBUTE_UNUSED,
2976 relation_kind) const
2977 {
2978 if (lhs.undefined_p () || op1.undefined_p ())
2979 return VREL_VARYING;
2980 unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
2981 unsigned op1_prec = TYPE_PRECISION (op1.type ());
2982 // If the result gets sign extended into a larger type check first if this
2983 // qualifies as a partial equivalence.
2984 if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
2985 {
2986 // If the result is sign extended, and the LHS is larger than op1,
2987 // check if op1's range can be negative as the sign extension will
2988 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2989 int_range<3> negs = range_negatives (op1.type ());
2990 negs.intersect (op1);
2991 if (!negs.undefined_p ())
2992 return VREL_VARYING;
2993 }
2994
2995 unsigned prec = MIN (lhs_prec, op1_prec);
2996 return bits_to_pe (prec);
2997 }
2998
2999 // Return TRUE if casting from INNER to OUTER is a truncating cast.
3000
3001 inline bool
3002 operator_cast::truncating_cast_p (const irange &inner,
3003 const irange &outer) const
3004 {
3005 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
3006 }
3007
3008 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
3009
3010 bool
3011 operator_cast::inside_domain_p (const wide_int &min,
3012 const wide_int &max,
3013 const irange &range) const
3014 {
3015 wide_int domain_min = irange_val_min (range.type ());
3016 wide_int domain_max = irange_val_max (range.type ());
3017 signop domain_sign = TYPE_SIGN (range.type ());
3018 return (wi::le_p (min, domain_max, domain_sign)
3019 && wi::le_p (max, domain_max, domain_sign)
3020 && wi::ge_p (min, domain_min, domain_sign)
3021 && wi::ge_p (max, domain_min, domain_sign));
3022 }
3023
3024
3025 // Helper for fold_range which works on one subrange pair at a time.
3026
3027 void
3028 operator_cast::fold_pair (irange &r, unsigned index,
3029 const irange &inner,
3030 const irange &outer) const
3031 {
3032 tree inner_type = inner.type ();
3033 tree outer_type = outer.type ();
3034 signop inner_sign = TYPE_SIGN (inner_type);
3035 unsigned outer_prec = TYPE_PRECISION (outer_type);
3036
3037 // Check to see if casting from INNER to OUTER is a conversion that
3038 // fits in the resulting OUTER type.
3039 wide_int inner_lb = inner.lower_bound (index);
3040 wide_int inner_ub = inner.upper_bound (index);
3041 if (truncating_cast_p (inner, outer))
3042 {
3043 // We may be able to accommodate a truncating cast if the
3044 // resulting range can be represented in the target type...
3045 if (wi::rshift (wi::sub (inner_ub, inner_lb),
3046 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
3047 inner_sign) != 0)
3048 {
3049 r.set_varying (outer_type);
3050 return;
3051 }
3052 }
3053 // ...but we must still verify that the final range fits in the
3054 // domain. This catches -fstrict-enum restrictions where the domain
3055 // range is smaller than what fits in the underlying type.
3056 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
3057 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
3058 if (inside_domain_p (min, max, outer))
3059 create_possibly_reversed_range (r, outer_type, min, max);
3060 else
3061 r.set_varying (outer_type);
3062 }
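// As an illustration (editorial example): casting the 16-bit pair [100, 300]
// to an 8-bit unsigned type has a span of 200, which is representable in
// 8 bits; truncating the bounds gives 100 and 44, and since they come back
// reversed, create_possibly_reversed_range builds the wrapped range covering
// [100, 255] and [0, 44] instead of dropping to varying.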
3063
3064
3065 bool
3066 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3067 const irange &inner,
3068 const irange &outer,
3069 relation_trio) const
3070 {
3071 if (empty_range_varying (r, type, inner, outer))
3072 return true;
3073
3074 gcc_checking_assert (outer.varying_p ());
3075 gcc_checking_assert (inner.num_pairs () > 0);
3076
3077 // Avoid a temporary by folding the first pair directly into the result.
3078 fold_pair (r, 0, inner, outer);
3079
3080 // Then process any additional pairs by unioning with their results.
3081 for (unsigned x = 1; x < inner.num_pairs (); ++x)
3082 {
3083 int_range_max tmp;
3084 fold_pair (tmp, x, inner, outer);
3085 r.union_ (tmp);
3086 if (r.varying_p ())
3087 return true;
3088 }
3089
3090 update_bitmask (r, inner, outer);
3091 return true;
3092 }
3093
3094 void
3095 operator_cast::update_bitmask (irange &r, const irange &lh,
3096 const irange &rh) const
3097 {
3098 update_known_bitmask (r, CONVERT_EXPR, lh, rh);
3099 }
3100
3101 bool
3102 operator_cast::op1_range (irange &r, tree type,
3103 const irange &lhs,
3104 const irange &op2,
3105 relation_trio) const
3106 {
3107 if (lhs.undefined_p ())
3108 return false;
3109 tree lhs_type = lhs.type ();
3110 gcc_checking_assert (types_compatible_p (op2.type(), type));
3111
3112 // If we are calculating a pointer, shortcut to what we really care about.
3113 if (POINTER_TYPE_P (type))
3114 {
3115 // Conversion from other pointers or a constant (including 0/NULL)
3116 // are straightforward.
3117 if (POINTER_TYPE_P (lhs.type ())
3118 || (lhs.singleton_p ()
3119 && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type)))
3120 {
3121 r = lhs;
3122 range_cast (r, type);
3123 }
3124 else
3125 {
3126 // If the LHS is neither a pointer nor a singleton, then it is
3127 // either VARYING or non-zero.
3128 if (!lhs.undefined_p () && !contains_zero_p (lhs))
3129 r.set_nonzero (type);
3130 else
3131 r.set_varying (type);
3132 }
3133 r.intersect (op2);
3134 return true;
3135 }
3136
3137 if (truncating_cast_p (op2, lhs))
3138 {
3139 if (lhs.varying_p ())
3140 r.set_varying (type);
3141 else
3142 {
3143 // We want to insert the LHS as an unsigned value since it
3144 // would not trigger the signed bit of the larger type.
3145 int_range_max converted_lhs = lhs;
3146 range_cast (converted_lhs, unsigned_type_for (lhs_type));
3147 range_cast (converted_lhs, type);
3148 // Start by building the positive signed outer range for the type.
3149 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
3150 TYPE_PRECISION (type));
3151 create_possibly_reversed_range (r, type, lim,
3152 wi::max_value (TYPE_PRECISION (type),
3153 SIGNED));
3154 // For the signed part, we need to simply union the 2 ranges now.
3155 r.union_ (converted_lhs);
3156
3157 // Create maximal negative number outside of LHS bits.
3158 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
3159 TYPE_PRECISION (type));
3160 // Add this to the unsigned LHS range(s).
3161 int_range_max lim_range (type, lim, lim);
3162 int_range_max lhs_neg;
3163 range_op_handler (PLUS_EXPR).fold_range (lhs_neg, type,
3164 converted_lhs, lim_range);
3165 // lhs_neg now has all the negative versions of the LHS.
3166 // Now union in all the values from SIGNED MIN (0x80000) to
3167 // lim-1 in order to fill in all the ranges with the upper
3168 // bits set.
3169
3170 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
3171 // we don't need to create a range from min to lim-1;
3172 // calculating that negative range would trap trying to create [lim, lim - 1].
3173 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
3174 if (lim != min_val)
3175 {
3176 int_range_max neg (type,
3177 wi::min_value (TYPE_PRECISION (type),
3178 SIGNED),
3179 lim - 1);
3180 lhs_neg.union_ (neg);
3181 }
3182 // And finally, munge the signed and unsigned portions.
3183 r.union_ (lhs_neg);
3184 }
3185 // And intersect with any known value passed in the extra operand.
3186 r.intersect (op2);
3187 return true;
3188 }
3189
3190 int_range_max tmp;
3191 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
3192 tmp = lhs;
3193 else
3194 {
3195 // The cast is not truncating, and the range is restricted to
3196 // the range of the RHS by this assignment.
3197 //
3198 // Cast the range of the RHS to the type of the LHS.
3199 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
3200 // Intersecting this with the LHS range produces the result,
3201 // which will be cast to the RHS type before returning.
3202 tmp.intersect (lhs);
3203 }
3204
3205 // Cast the calculated range to the type of the RHS.
3206 fold_range (r, type, tmp, int_range<1> (type));
3207 return true;
3208 }
3209
3210
3211 class operator_logical_and : public range_operator
3212 {
3213 using range_operator::fold_range;
3214 using range_operator::op1_range;
3215 using range_operator::op2_range;
3216 public:
3217 virtual bool fold_range (irange &r, tree type,
3218 const irange &lh,
3219 const irange &rh,
3220 relation_trio rel = TRIO_VARYING) const;
3221 virtual bool op1_range (irange &r, tree type,
3222 const irange &lhs,
3223 const irange &op2,
3224 relation_trio rel = TRIO_VARYING) const;
3225 virtual bool op2_range (irange &r, tree type,
3226 const irange &lhs,
3227 const irange &op1,
3228 relation_trio rel = TRIO_VARYING) const;
3229 // Check compatibility of all operands.
3230 bool operand_check_p (tree t1, tree t2, tree t3) const final override
3231 { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
3232 } op_logical_and;
3233
3234 bool
3235 operator_logical_and::fold_range (irange &r, tree type,
3236 const irange &lh,
3237 const irange &rh,
3238 relation_trio) const
3239 {
3240 if (empty_range_varying (r, type, lh, rh))
3241 return true;
3242
3243 // Precision of LHS and both operands must match.
3244 if (TYPE_PRECISION (lh.type ()) != TYPE_PRECISION (type)
3245 || TYPE_PRECISION (type) != TYPE_PRECISION (rh.type ()))
3246 return false;
3247
3248 // 0 && anything is 0.
3249 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
3250 || (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
3251 r = range_false (type);
3252 else if (contains_zero_p (lh) || contains_zero_p (rh))
3253 // To reach this point, there must be a logical 1 on each side, and
3254 // the only remaining question is whether there is a zero or not.
3255 r = range_true_and_false (type);
3256 else
3257 r = range_true (type);
3258 return true;
3259 }
3260
3261 bool
3262 operator_logical_and::op1_range (irange &r, tree type,
3263 const irange &lhs,
3264 const irange &op2 ATTRIBUTE_UNUSED,
3265 relation_trio) const
3266 {
3267 switch (get_bool_state (r, lhs, type))
3268 {
3269 case BRS_TRUE:
3270 // A true result means both sides of the AND must be true.
3271 r = range_true (type);
3272 break;
3273 default:
3274 // Any other result means only one side has to be false, the
3275 // other side can be anything. So we cannot be sure of any
3276 // result here.
3277 r = range_true_and_false (type);
3278 break;
3279 }
3280 return true;
3281 }
3282
3283 bool
3284 operator_logical_and::op2_range (irange &r, tree type,
3285 const irange &lhs,
3286 const irange &op1,
3287 relation_trio) const
3288 {
3289 return operator_logical_and::op1_range (r, type, lhs, op1);
3290 }
3291
3292
3293 void
3294 operator_bitwise_and::update_bitmask (irange &r, const irange &lh,
3295 const irange &rh) const
3296 {
3297 update_known_bitmask (r, BIT_AND_EXPR, lh, rh);
3298 }
3299
3300 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3301 // by considering the number of leading redundant sign bit copies.
3302 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3303 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
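// For that 8-bit example (editorial arithmetic): clrsb (-1) == clrsb (0) == 7,
// so new_clrsb is 7, rprec is (8 - 7) - 1 == 0, and the bounds become
// wi::mask (0, true, 8) == -1 and wi::mask (0, false, 8) == 0, i.e. [-1, 0].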
3304 static bool
3305 wi_optimize_signed_bitwise_op (irange &r, tree type,
3306 const wide_int &lh_lb, const wide_int &lh_ub,
3307 const wide_int &rh_lb, const wide_int &rh_ub)
3308 {
3309 int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
3310 int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
3311 int new_clrsb = MIN (lh_clrsb, rh_clrsb);
3312 if (new_clrsb == 0)
3313 return false;
3314 int type_prec = TYPE_PRECISION (type);
3315 int rprec = (type_prec - new_clrsb) - 1;
3316 value_range_with_overflow (r, type,
3317 wi::mask (rprec, true, type_prec),
3318 wi::mask (rprec, false, type_prec));
3319 return true;
3320 }
3321
3322 // An AND of 8, 16, 32 or 64 bits can produce a partial equivalence between
3323 // the LHS and op1.
3324
3325 relation_kind
3326 operator_bitwise_and::lhs_op1_relation (const irange &lhs,
3327 const irange &op1,
3328 const irange &op2,
3329 relation_kind) const
3330 {
3331 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
3332 return VREL_VARYING;
3333 if (!op2.singleton_p ())
3334 return VREL_VARYING;
3335 // If val is 0xff, 0xffff, 0xffffffff or 0xffffffffffffffff, return the matching partial equivalence.
3336 int prec1 = TYPE_PRECISION (op1.type ());
3337 int prec2 = TYPE_PRECISION (op2.type ());
3338 int mask_prec = 0;
3339 wide_int mask = op2.lower_bound ();
3340 if (wi::eq_p (mask, wi::mask (8, false, prec2)))
3341 mask_prec = 8;
3342 else if (wi::eq_p (mask, wi::mask (16, false, prec2)))
3343 mask_prec = 16;
3344 else if (wi::eq_p (mask, wi::mask (32, false, prec2)))
3345 mask_prec = 32;
3346 else if (wi::eq_p (mask, wi::mask (64, false, prec2)))
3347 mask_prec = 64;
3348 return bits_to_pe (MIN (prec1, mask_prec));
3349 }
3350
3351 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3352 // possible. Basically, see if we can optimize:
3353 //
3354 // [LB, UB] op Z
3355 // into:
3356 // [LB op Z, UB op Z]
3357 //
3358 // If the optimization was successful, accumulate the range in R and
3359 // return TRUE.
3360
3361 static bool
3362 wi_optimize_and_or (irange &r,
3363 enum tree_code code,
3364 tree type,
3365 const wide_int &lh_lb, const wide_int &lh_ub,
3366 const wide_int &rh_lb, const wide_int &rh_ub)
3367 {
3368 // Calculate the singleton mask among the ranges, if any.
3369 wide_int lower_bound, upper_bound, mask;
3370 if (wi::eq_p (rh_lb, rh_ub))
3371 {
3372 mask = rh_lb;
3373 lower_bound = lh_lb;
3374 upper_bound = lh_ub;
3375 }
3376 else if (wi::eq_p (lh_lb, lh_ub))
3377 {
3378 mask = lh_lb;
3379 lower_bound = rh_lb;
3380 upper_bound = rh_ub;
3381 }
3382 else
3383 return false;
3384
3385 // If Z is a constant which (for op | its bitwise not) has n
3386 // consecutive least significant bits cleared followed by m
3387 // consecutive 1 bits set immediately above it and either
3388 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)), then:
3389 //
3390 // The least significant n bits of all the values in the range are
3391 // cleared or set, the m bits above it are preserved and any bits
3392 // above these are required to be the same for all values in the
3393 // range.
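// Worked example (editorial, unsigned 8-bit AND): [0x10, 0x1F] & 0xF0 has
// n == 4 and m == 4, so m + n == precision and the check below passes;
// the result is [0x10 & 0xF0, 0x1F & 0xF0] == [0x10, 0x10].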
3394 wide_int w = mask;
3395 int m = 0, n = 0;
3396 if (code == BIT_IOR_EXPR)
3397 w = ~w;
3398 if (wi::eq_p (w, 0))
3399 n = w.get_precision ();
3400 else
3401 {
3402 n = wi::ctz (w);
3403 w = ~(w | wi::mask (n, false, w.get_precision ()));
3404 if (wi::eq_p (w, 0))
3405 m = w.get_precision () - n;
3406 else
3407 m = wi::ctz (w) - n;
3408 }
3409 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
3410 if ((new_mask & lower_bound) != (new_mask & upper_bound))
3411 return false;
3412
3413 wide_int res_lb, res_ub;
3414 if (code == BIT_AND_EXPR)
3415 {
3416 res_lb = wi::bit_and (lower_bound, mask);
3417 res_ub = wi::bit_and (upper_bound, mask);
3418 }
3419 else if (code == BIT_IOR_EXPR)
3420 {
3421 res_lb = wi::bit_or (lower_bound, mask);
3422 res_ub = wi::bit_or (upper_bound, mask);
3423 }
3424 else
3425 gcc_unreachable ();
3426 value_range_with_overflow (r, type, res_lb, res_ub);
3427
3428 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3429 if (code == BIT_IOR_EXPR && wi::ne_p (mask, 0))
3430 {
3431 int_range<2> tmp;
3432 tmp.set_nonzero (type);
3433 r.intersect (tmp);
3434 }
3435 return true;
3436 }
3437
3438 // For range [LB, UB] compute two wide_int bit masks.
3439 //
3440 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3441 // for all numbers in the range the bit is 0, otherwise it might be 0
3442 // or 1.
3443 //
3444 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3445 // for all numbers in the range the bit is 1, otherwise it might be 0
3446 // or 1.
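// For example (editorial, 8-bit): for [0x30, 0x34], lb ^ ub == 0x04, so the
// bits below the highest differing bit are also treated as unknown;
// MAYBE_NONZERO becomes (0x30 | 0x34) | 0x03 == 0x37 and MUSTBE_NONZERO
// becomes (0x30 & 0x34) & ~0x03 == 0x30.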
3447
3448 void
3449 wi_set_zero_nonzero_bits (tree type,
3450 const wide_int &lb, const wide_int &ub,
3451 wide_int &maybe_nonzero,
3452 wide_int &mustbe_nonzero)
3453 {
3454 signop sign = TYPE_SIGN (type);
3455
3456 if (wi::eq_p (lb, ub))
3457 maybe_nonzero = mustbe_nonzero = lb;
3458 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
3459 {
3460 wide_int xor_mask = lb ^ ub;
3461 maybe_nonzero = lb | ub;
3462 mustbe_nonzero = lb & ub;
3463 if (xor_mask != 0)
3464 {
3465 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
3466 maybe_nonzero.get_precision ());
3467 maybe_nonzero = maybe_nonzero | mask;
3468 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
3469 }
3470 }
3471 else
3472 {
3473 maybe_nonzero = wi::minus_one (lb.get_precision ());
3474 mustbe_nonzero = wi::zero (lb.get_precision ());
3475 }
3476 }
3477
3478 void
3479 operator_bitwise_and::wi_fold (irange &r, tree type,
3480 const wide_int &lh_lb,
3481 const wide_int &lh_ub,
3482 const wide_int &rh_lb,
3483 const wide_int &rh_ub) const
3484 {
3485 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3486 return;
3487
3488 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3489 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3490 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3491 maybe_nonzero_lh, mustbe_nonzero_lh);
3492 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3493 maybe_nonzero_rh, mustbe_nonzero_rh);
3494
3495 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
3496 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
3497 signop sign = TYPE_SIGN (type);
3498 unsigned prec = TYPE_PRECISION (type);
3499 // If both input ranges contain only negative values, we can
3500 // truncate the result range maximum to the minimum of the
3501 // input range maxima.
3502 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
3503 {
3504 new_ub = wi::min (new_ub, lh_ub, sign);
3505 new_ub = wi::min (new_ub, rh_ub, sign);
3506 }
3507 // If either input range contains only non-negative values
3508 // we can truncate the result range maximum to the respective
3509 // maximum of the input range.
3510 if (wi::ge_p (lh_lb, 0, sign))
3511 new_ub = wi::min (new_ub, lh_ub, sign);
3512 if (wi::ge_p (rh_lb, 0, sign))
3513 new_ub = wi::min (new_ub, rh_ub, sign);
3514 // PR68217: A signed value ANDed with a constant equal to the sign bit
3515 // should result in [-INF, 0] instead of [-INF, +INF].
3516 if (wi::gt_p (new_lb, new_ub, sign))
3517 {
3518 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
3519 if (sign == SIGNED
3520 && ((wi::eq_p (lh_lb, lh_ub)
3521 && !wi::cmps (lh_lb, sign_bit))
3522 || (wi::eq_p (rh_lb, rh_ub)
3523 && !wi::cmps (rh_lb, sign_bit))))
3524 {
3525 new_lb = wi::min_value (prec, sign);
3526 new_ub = wi::zero (prec);
3527 }
3528 }
3529 // If the limits got swapped around, return varying.
3530 if (wi::gt_p (new_lb, new_ub, sign))
3531 {
3532 if (sign == SIGNED
3533 && wi_optimize_signed_bitwise_op (r, type,
3534 lh_lb, lh_ub,
3535 rh_lb, rh_ub))
3536 return;
3537 r.set_varying (type);
3538 }
3539 else
3540 value_range_with_overflow (r, type, new_lb, new_ub);
3541 }
3542
3543 static void
3544 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
3545 {
3546 if (lhs.undefined_p () || contains_zero_p (lhs))
3547 r.set_varying (type);
3548 else
3549 r.set_nonzero (type);
3550 }
3551
3552 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
3553 (otherwise return VAL). VAL and MASK must be zero-extended for
3554 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3555 (to transform signed values into unsigned) and at the end xor
3556 SGNBIT back. */
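/* For instance (editorial example, PREC == 8): masked_increment (0x13, 0xF0,
   0, 8) returns 0x20, the smallest value above 0x13 whose set bits all lie
   within the mask 0xF0. */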
3557
3558 wide_int
3559 masked_increment (const wide_int &val_in, const wide_int &mask,
3560 const wide_int &sgnbit, unsigned int prec)
3561 {
3562 wide_int bit = wi::one (prec), res;
3563 unsigned int i;
3564
3565 wide_int val = val_in ^ sgnbit;
3566 for (i = 0; i < prec; i++, bit += bit)
3567 {
3568 res = mask;
3569 if ((res & bit) == 0)
3570 continue;
3571 res = bit - 1;
3572 res = wi::bit_and_not (val + bit, res);
3573 res &= mask;
3574 if (wi::gtu_p (res, val))
3575 return res ^ sgnbit;
3576 }
3577 return val ^ sgnbit;
3578 }
3579
3580 // This was shamelessly stolen from register_edge_assert_for_2 and
3581 // adjusted to work with iranges.
3582
3583 void
3584 operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
3585 const irange &lhs,
3586 const irange &op2) const
3587 {
3588 if (!op2.singleton_p ())
3589 {
3590 set_nonzero_range_from_mask (r, type, lhs);
3591 return;
3592 }
3593 unsigned int nprec = TYPE_PRECISION (type);
3594 wide_int cst2v = op2.lower_bound ();
3595 bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
3596 wide_int sgnbit;
3597 if (cst2n)
3598 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3599 else
3600 sgnbit = wi::zero (nprec);
3601
3602 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3603 //
3604 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3605 // maximum unsigned value is ~0. For signed comparison, if CST2
3606 // doesn't have the most significant bit set, handle it similarly. If
3607 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
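// For instance (editorial example, unsigned 8-bit): solving
// [0x21, +INF] = x & 0xF0 requires x & 0xF0 >= 0x30, so MINV becomes 0x30
// via masked_increment and MAXV is 0xFF, giving x in [0x30, 0xFF].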
3608 wide_int valv = lhs.lower_bound ();
3609 wide_int minv = valv & cst2v, maxv;
3610 bool we_know_nothing = false;
3611 if (minv != valv)
3612 {
3613 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3614 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3615 if (minv == valv)
3616 {
3617 // If we can't determine anything on this bound, fall
3618 // through and conservatively solve for the other end point.
3619 we_know_nothing = true;
3620 }
3621 }
3622 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3623 if (we_know_nothing)
3624 r.set_varying (type);
3625 else
3626 create_possibly_reversed_range (r, type, minv, maxv);
3627
3628 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3629 //
3630 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3631 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3632 // VAL2 where
3633 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3634 // as maximum.
3635 // For signed comparison, if CST2 doesn't have most significant bit
3636 // set, handle it similarly. If CST2 has MSB set, the maximum is
3637 // the same and minimum is INT_MIN.
3638 valv = lhs.upper_bound ();
3639 minv = valv & cst2v;
3640 if (minv == valv)
3641 maxv = valv;
3642 else
3643 {
3644 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3645 if (maxv == valv)
3646 {
3647 // If we couldn't determine anything on either bound, return
3648 // undefined.
3649 if (we_know_nothing)
3650 r.set_undefined ();
3651 return;
3652 }
3653 maxv -= 1;
3654 }
3655 maxv |= ~cst2v;
3656 minv = sgnbit;
3657 int_range<2> upper_bits;
3658 create_possibly_reversed_range (upper_bits, type, minv, maxv);
3659 r.intersect (upper_bits);
3660 }
3661
3662 bool
3663 operator_bitwise_and::op1_range (irange &r, tree type,
3664 const irange &lhs,
3665 const irange &op2,
3666 relation_trio) const
3667 {
3668 if (lhs.undefined_p ())
3669 return false;
3670 if (types_compatible_p (type, boolean_type_node))
3671 return op_logical_and.op1_range (r, type, lhs, op2);
3672
3673 r.set_undefined ();
3674 for (unsigned i = 0; i < lhs.num_pairs (); ++i)
3675 {
3676 int_range_max chunk (lhs.type (),
3677 lhs.lower_bound (i),
3678 lhs.upper_bound (i));
3679 int_range_max res;
3680 simple_op1_range_solver (res, type, chunk, op2);
3681 r.union_ (res);
3682 }
3683 if (r.undefined_p ())
3684 set_nonzero_range_from_mask (r, type, lhs);
3685
3686 // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
3687 wide_int mask;
3688 if (lhs == op2 && lhs.singleton_p (mask))
3689 {
3690 r.update_bitmask (irange_bitmask (mask, ~mask));
3691 return true;
3692 }
3693
3694 // For 0 = op1 & MASK, op1 can only have bits set within ~MASK.
3695 if (lhs.zero_p () && op2.singleton_p ())
3696 {
3697 wide_int nz = wi::bit_not (op2.get_nonzero_bits ());
3698 int_range<2> tmp (type);
3699 tmp.set_nonzero_bits (nz);
3700 r.intersect (tmp);
3701 }
3702 return true;
3703 }
3704
3705 bool
3706 operator_bitwise_and::op2_range (irange &r, tree type,
3707 const irange &lhs,
3708 const irange &op1,
3709 relation_trio) const
3710 {
3711 return operator_bitwise_and::op1_range (r, type, lhs, op1);
3712 }
3713
3714
3715 class operator_logical_or : public range_operator
3716 {
3717 using range_operator::fold_range;
3718 using range_operator::op1_range;
3719 using range_operator::op2_range;
3720 public:
3721 virtual bool fold_range (irange &r, tree type,
3722 const irange &lh,
3723 const irange &rh,
3724 relation_trio rel = TRIO_VARYING) const;
3725 virtual bool op1_range (irange &r, tree type,
3726 const irange &lhs,
3727 const irange &op2,
3728 relation_trio rel = TRIO_VARYING) const;
3729 virtual bool op2_range (irange &r, tree type,
3730 const irange &lhs,
3731 const irange &op1,
3732 relation_trio rel = TRIO_VARYING) const;
3733 // Check compatibility of all operands.
3734 bool operand_check_p (tree t1, tree t2, tree t3) const final override
3735 { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
3736 } op_logical_or;
3737
3738 bool
3739 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3740 const irange &lh,
3741 const irange &rh,
3742 relation_trio) const
3743 {
3744 if (empty_range_varying (r, type, lh, rh))
3745 return true;
3746
3747 r = lh;
3748 r.union_ (rh);
3749 return true;
3750 }
3751
3752 bool
3753 operator_logical_or::op1_range (irange &r, tree type,
3754 const irange &lhs,
3755 const irange &op2 ATTRIBUTE_UNUSED,
3756 relation_trio) const
3757 {
3758 switch (get_bool_state (r, lhs, type))
3759 {
3760 case BRS_FALSE:
3761 // A false result means both sides of the OR must be false.
3762 r = range_false (type);
3763 break;
3764 default:
3765 // Any other result means only one side has to be true, and the
3766 // other side can be anything, so we can't be sure of any result
3767 // here.
3768 r = range_true_and_false (type);
3769 break;
3770 }
3771 return true;
3772 }
3773
3774 bool
3775 operator_logical_or::op2_range (irange &r, tree type,
3776 const irange &lhs,
3777 const irange &op1,
3778 relation_trio) const
3779 {
3780 return operator_logical_or::op1_range (r, type, lhs, op1);
3781 }
3782
3783
3784 void
3785 operator_bitwise_or::update_bitmask (irange &r, const irange &lh,
3786 const irange &rh) const
3787 {
3788 update_known_bitmask (r, BIT_IOR_EXPR, lh, rh);
3789 }
3790
3791 void
3792 operator_bitwise_or::wi_fold (irange &r, tree type,
3793 const wide_int &lh_lb,
3794 const wide_int &lh_ub,
3795 const wide_int &rh_lb,
3796 const wide_int &rh_ub) const
3797 {
3798 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3799 return;
3800
3801 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3802 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3803 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3804 maybe_nonzero_lh, mustbe_nonzero_lh);
3805 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3806 maybe_nonzero_rh, mustbe_nonzero_rh);
3807 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
3808 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
3809 signop sign = TYPE_SIGN (type);
3810 // If the input ranges contain only positive values we can
3811 // truncate the minimum of the result range to the maximum
3812 // of the input range minima.
3813 if (wi::ge_p (lh_lb, 0, sign)
3814 && wi::ge_p (rh_lb, 0, sign))
3815 {
3816 new_lb = wi::max (new_lb, lh_lb, sign);
3817 new_lb = wi::max (new_lb, rh_lb, sign);
3818 }
3819 // If either input range contains only negative values,
3820 // we can truncate the minimum of the result range to that
3821 // range's minimum (see the standalone sketch after this function).
3822 if (wi::lt_p (lh_ub, 0, sign))
3823 new_lb = wi::max (new_lb, lh_lb, sign);
3824 if (wi::lt_p (rh_ub, 0, sign))
3825 new_lb = wi::max (new_lb, rh_lb, sign);
3826 // If the limits got swapped around, return a conservative range.
3827 if (wi::gt_p (new_lb, new_ub, sign))
3828 {
3829 // Make sure that nonzero|X is nonzero.
3830 if (wi::gt_p (lh_lb, 0, sign)
3831 || wi::gt_p (rh_lb, 0, sign)
3832 || wi::lt_p (lh_ub, 0, sign)
3833 || wi::lt_p (rh_ub, 0, sign))
3834 r.set_nonzero (type);
3835 else if (sign == SIGNED
3836 && wi_optimize_signed_bitwise_op (r, type,
3837 lh_lb, lh_ub,
3838 rh_lb, rh_ub))
3839 return;
3840 else
3841 r.set_varying (type);
3842 return;
3843 }
3844 value_range_with_overflow (r, type, new_lb, new_ub);
3845 }
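
// A brute-force sketch of the two lower-bound clamps above, over a 6-bit
// two's-complement domain with plain ints (not wide_int): when both
// operands are non-negative, x | y >= x and x | y >= y; when y contains
// only negative values, x | y >= y.

static constexpr bool
sketch_bitor_lower_bounds ()
{
  for (int x = -32; x <= 31; ++x)
    for (int y = -32; y <= 31; ++y)
      {
        int o = (x & 0x3f) | (y & 0x3f);   // OR of the 6-bit patterns.
        o = (o ^ 0x20) - 0x20;             // Sign-extend back to int.
        if (x >= 0 && y >= 0 && (o < x || o < y))
          return false;
        if (y < 0 && o < y)
          return false;
      }
  return true;
}

static_assert (sketch_bitor_lower_bounds (), "");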
3846
3847 bool
3848 operator_bitwise_or::op1_range (irange &r, tree type,
3849 const irange &lhs,
3850 const irange &op2,
3851 relation_trio) const
3852 {
3853 if (lhs.undefined_p ())
3854 return false;
3855 // If this is really a logical OR, defer to the logical op1_range handler.
3856 if (types_compatible_p (type, boolean_type_node))
3857 return op_logical_or.op1_range (r, type, lhs, op2);
3858
3859 if (lhs.zero_p ())
3860 {
3861 r.set_zero (type);
3862 return true;
3863 }
3864 r.set_varying (type);
3865 return true;
3866 }
3867
3868 bool
3869 operator_bitwise_or::op2_range (irange &r, tree type,
3870 const irange &lhs,
3871 const irange &op1,
3872 relation_trio) const
3873 {
3874 return operator_bitwise_or::op1_range (r, type, lhs, op1);
3875 }
3876
3877 void
3878 operator_bitwise_xor::update_bitmask (irange &r, const irange &lh,
3879 const irange &rh) const
3880 {
3881 update_known_bitmask (r, BIT_XOR_EXPR, lh, rh);
3882 }
3883
3884 void
3885 operator_bitwise_xor::wi_fold (irange &r, tree type,
3886 const wide_int &lh_lb,
3887 const wide_int &lh_ub,
3888 const wide_int &rh_lb,
3889 const wide_int &rh_ub) const
3890 {
3891 signop sign = TYPE_SIGN (type);
3892 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3893 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3894 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3895 maybe_nonzero_lh, mustbe_nonzero_lh);
3896 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3897 maybe_nonzero_rh, mustbe_nonzero_rh);
3898
3899 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
3900 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
3901 wide_int result_one_bits
3902 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
3903 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
3904 wide_int new_ub = ~result_zero_bits;
3905 wide_int new_lb = result_one_bits;
3906
3907 // If the range has all positive or all negative values, the result
3908 // is better than VARYING.
3909 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
3910 value_range_with_overflow (r, type, new_lb, new_ub);
3911 else if (sign == SIGNED
3912 && wi_optimize_signed_bitwise_op (r, type,
3913 lh_lb, lh_ub,
3914 rh_lb, rh_ub))
3915 ; /* Do nothing. */
3916 else
3917 r.set_varying (type);
3918
3919 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3920 if (wi::lt_p (lh_ub, rh_lb, sign)
3921 || wi::lt_p (rh_ub, lh_lb, sign)
3922 || wi::ne_p (result_one_bits, 0))
3923 {
3924 int_range<2> tmp;
3925 tmp.set_nonzero (type);
3926 r.intersect (tmp);
3927 }
3928 }
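
// A minimal standalone sketch of the final refinement above: if the two
// operands can never be equal, x ^ y can never be zero.  The checks below
// use plain unsigned values over a small domain.

static constexpr bool
sketch_xor_nonzero_when_unequal ()
{
  for (unsigned x = 0; x <= 0x3f; ++x)
    for (unsigned y = 0; y <= 0x3f; ++y)
      if (x != y && (x ^ y) == 0)
        return false;
  return true;
}

static_assert (sketch_xor_nonzero_when_unequal (), "");
// Concrete "result one bit" example: x in [8,11] always has bit 3 set and
// y in [0,7] never does, so bit 3 of x ^ y is set and the result is nonzero.
static_assert (((8u ^ 0u) & 8u) != 0 && ((11u ^ 7u) & 8u) != 0, "");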
3929
3930 bool
3931 operator_bitwise_xor::op1_op2_relation_effect (irange &lhs_range,
3932 tree type,
3933 const irange &,
3934 const irange &,
3935 relation_kind rel) const
3936 {
3937 if (rel == VREL_VARYING)
3938 return false;
3939
3940 int_range<2> rel_range;
3941
3942 switch (rel)
3943 {
3944 case VREL_EQ:
3945 rel_range.set_zero (type);
3946 break;
3947 case VREL_NE:
3948 rel_range.set_nonzero (type);
3949 break;
3950 default:
3951 return false;
3952 }
3953
3954 lhs_range.intersect (rel_range);
3955 return true;
3956 }
3957
3958 bool
3959 operator_bitwise_xor::op1_range (irange &r, tree type,
3960 const irange &lhs,
3961 const irange &op2,
3962 relation_trio) const
3963 {
3964 if (lhs.undefined_p () || lhs.varying_p ())
3965 {
3966 r = lhs;
3967 return true;
3968 }
3969 if (types_compatible_p (type, boolean_type_node))
3970 {
3971 switch (get_bool_state (r, lhs, type))
3972 {
3973 case BRS_TRUE:
3974 if (op2.varying_p ())
3975 r.set_varying (type);
3976 else if (op2.zero_p ())
3977 r = range_true (type);
3978 // See get_bool_state for the rationale
3979 else if (op2.undefined_p () || contains_zero_p (op2))
3980 r = range_true_and_false (type);
3981 else
3982 r = range_false (type);
3983 break;
3984 case BRS_FALSE:
3985 r = op2;
3986 break;
3987 default:
3988 break;
3989 }
3990 return true;
3991 }
3992 r.set_varying (type);
3993 return true;
3994 }
3995
3996 bool
3997 operator_bitwise_xor::op2_range (irange &r, tree type,
3998 const irange &lhs,
3999 const irange &op1,
4000 relation_trio) const
4001 {
4002 return operator_bitwise_xor::op1_range (r, type, lhs, op1);
4003 }
4004
4005 class operator_trunc_mod : public range_operator
4006 {
4007 using range_operator::op1_range;
4008 using range_operator::op2_range;
4009 using range_operator::update_bitmask;
4010 public:
4011 virtual void wi_fold (irange &r, tree type,
4012 const wide_int &lh_lb,
4013 const wide_int &lh_ub,
4014 const wide_int &rh_lb,
4015 const wide_int &rh_ub) const;
4016 virtual bool op1_range (irange &r, tree type,
4017 const irange &lhs,
4018 const irange &op2,
4019 relation_trio) const;
4020 virtual bool op2_range (irange &r, tree type,
4021 const irange &lhs,
4022 const irange &op1,
4023 relation_trio) const;
4024 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
4025 { update_known_bitmask (r, TRUNC_MOD_EXPR, lh, rh); }
4026 } op_trunc_mod;
4027
4028 void
4029 operator_trunc_mod::wi_fold (irange &r, tree type,
4030 const wide_int &lh_lb,
4031 const wide_int &lh_ub,
4032 const wide_int &rh_lb,
4033 const wide_int &rh_ub) const
4034 {
4035 wide_int new_lb, new_ub, tmp;
4036 signop sign = TYPE_SIGN (type);
4037 unsigned prec = TYPE_PRECISION (type);
4038
4039 // Mod 0 is undefined.
4040 if (wi_zero_p (type, rh_lb, rh_ub))
4041 {
4042 r.set_undefined ();
4043 return;
4044 }
4045
4046 // Check for constant and try to fold.
4047 if (lh_lb == lh_ub && rh_lb == rh_ub)
4048 {
4049 wi::overflow_type ov = wi::OVF_NONE;
4050 tmp = wi::mod_trunc (lh_lb, rh_lb, sign, &ov);
4051 if (ov == wi::OVF_NONE)
4052 {
4053 r = int_range<2> (type, tmp, tmp);
4054 return;
4055 }
4056 }
4057
4058 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
4059 new_ub = rh_ub - 1;
4060 if (sign == SIGNED)
4061 {
4062 tmp = -1 - rh_lb;
4063 new_ub = wi::smax (new_ub, tmp);
4064 }
4065
4066 if (sign == UNSIGNED)
4067 new_lb = wi::zero (prec);
4068 else
4069 {
4070 new_lb = -new_ub;
4071 tmp = lh_lb;
4072 if (wi::gts_p (tmp, 0))
4073 tmp = wi::zero (prec);
4074 new_lb = wi::smax (new_lb, tmp);
4075 }
4076 tmp = lh_ub;
4077 if (sign == SIGNED && wi::neg_p (tmp))
4078 tmp = wi::zero (prec);
4079 new_ub = wi::min (new_ub, tmp, sign);
4080
4081 value_range_with_overflow (r, type, new_lb, new_ub);
4082 }
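
// A brute-force sketch of the bound reasoning above using plain ints;
// C++'s % operator truncates toward zero just like TRUNC_MOD_EXPR, so
// ABS (A % B) < ABS (B) and the result lies between 0 and A.

static constexpr bool
sketch_trunc_mod_bounds ()
{
  for (int a = -40; a <= 40; ++a)
    for (int b = -12; b <= 12; ++b)
      {
        if (b == 0)
          continue;                     // Mod 0 is undefined.
        int m = a % b;
        int absb = b < 0 ? -b : b;
        if (!(m > -absb && m < absb))
          return false;                 // ABS (A % B) < ABS (B).
        if (a >= 0 && !(m >= 0 && m <= a))
          return false;                 // 0 <= A % B <= A.
        if (a < 0 && !(m <= 0 && m >= a))
          return false;                 // A <= A % B <= 0.
      }
  return true;
}

static_assert (sketch_trunc_mod_bounds (), "");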
4083
4084 bool
4085 operator_trunc_mod::op1_range (irange &r, tree type,
4086 const irange &lhs,
4087 const irange &,
4088 relation_trio) const
4089 {
4090 if (lhs.undefined_p ())
4091 return false;
4092 // PR 91029.
4093 signop sign = TYPE_SIGN (type);
4094 unsigned prec = TYPE_PRECISION (type);
4095 // If (a % b) >= x && x > 0, then a >= x.
4096 if (wi::gt_p (lhs.lower_bound (), 0, sign))
4097 {
4098 r = value_range (type, lhs.lower_bound (), wi::max_value (prec, sign));
4099 return true;
4100 }
4101 // If (a % b) <= x && x < 0, then a <= x.
4102 if (wi::lt_p (lhs.upper_bound (), 0, sign))
4103 {
4104 r = value_range (type, wi::min_value (prec, sign), lhs.upper_bound ());
4105 return true;
4106 }
4107 return false;
4108 }
4109
4110 bool
4111 operator_trunc_mod::op2_range (irange &r, tree type,
4112 const irange &lhs,
4113 const irange &,
4114 relation_trio) const
4115 {
4116 if (lhs.undefined_p ())
4117 return false;
4118 // PR 91029.
4119 signop sign = TYPE_SIGN (type);
4120 unsigned prec = TYPE_PRECISION (type);
4121 // If (a % b) >= x && x > 0, then b is in ~[-x, x] for signed
4122 // or b > x for unsigned (see the standalone sketch after this function).
4123 if (wi::gt_p (lhs.lower_bound (), 0, sign))
4124 {
4125 if (sign == SIGNED)
4126 r = value_range (type, wi::neg (lhs.lower_bound ()),
4127 lhs.lower_bound (), VR_ANTI_RANGE);
4128 else if (wi::lt_p (lhs.lower_bound (), wi::max_value (prec, sign),
4129 sign))
4130 r = value_range (type, lhs.lower_bound () + 1,
4131 wi::max_value (prec, sign));
4132 else
4133 return false;
4134 return true;
4135 }
4136 // If (a % b) <= x && x < 0, then b is in ~[x, -x].
4137 if (wi::lt_p (lhs.upper_bound (), 0, sign))
4138 {
4139 if (wi::gt_p (lhs.upper_bound (), wi::min_value (prec, sign), sign))
4140 r = value_range (type, lhs.upper_bound (),
4141 wi::neg (lhs.upper_bound ()), VR_ANTI_RANGE);
4142 else
4143 return false;
4144 return true;
4145 }
4146 return false;
4147 }
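
// A standalone sketch of the two inferences above for signed values over a
// small domain: whenever a % b >= x with x > 0, the dividend satisfies
// a >= x and the divisor satisfies |b| > x (so b is outside [-x, x]).

static constexpr bool
sketch_mod_inferences (int x)
{
  for (int a = -40; a <= 40; ++a)
    for (int b = -12; b <= 12; ++b)
      {
        if (b == 0)
          continue;
        if (a % b >= x)
          {
            if (a < x)
              return false;             // a must be at least x.
            int absb = b < 0 ? -b : b;
            if (absb <= x)
              return false;             // b must lie outside [-x, x].
          }
      }
  return true;
}

static_assert (sketch_mod_inferences (3), "");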
4148
4149
4150 class operator_logical_not : public range_operator
4151 {
4152 using range_operator::fold_range;
4153 using range_operator::op1_range;
4154 public:
4155 virtual bool fold_range (irange &r, tree type,
4156 const irange &lh,
4157 const irange &rh,
4158 relation_trio rel = TRIO_VARYING) const;
4159 virtual bool op1_range (irange &r, tree type,
4160 const irange &lhs,
4161 const irange &op2,
4162 relation_trio rel = TRIO_VARYING) const;
4163 // Check compatibility of LHS and op1.
4164 bool operand_check_p (tree t1, tree t2, tree) const final override
4165 { return range_compatible_p (t1, t2); }
4166 } op_logical_not;
4167
4168 // Folding a logical NOT, oddly enough, involves doing very little on
4169 // the forward pass. During the initial walk backwards, the logical
4170 // NOT already reversed the desired outcome, so on the way forward all
4171 // we do is invert the incoming range and pass it along.
4172 //
4173 // b_2 = x_1 < 20
4174 // b_3 = !b_2
4175 // if (b_3)
4176 // to determine the TRUE branch, walking backward
4177 // if (b_3) if ([1,1])
4178 // b_3 = !b_2 [1,1] = ![0,0]
4179 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
4180 // which is the result we are looking for, so simply pass it through.
4181
4182 bool
4183 operator_logical_not::fold_range (irange &r, tree type,
4184 const irange &lh,
4185 const irange &rh ATTRIBUTE_UNUSED,
4186 relation_trio) const
4187 {
4188 if (empty_range_varying (r, type, lh, rh))
4189 return true;
4190
4191 r = lh;
4192 if (!lh.varying_p () && !lh.undefined_p ())
4193 r.invert ();
4194
4195 return true;
4196 }
4197
4198 bool
4199 operator_logical_not::op1_range (irange &r,
4200 tree type,
4201 const irange &lhs,
4202 const irange &op2,
4203 relation_trio) const
4204 {
4205 // Logical NOT is involutory, so do it again.
4206 return fold_range (r, type, lhs, op2);
4207 }
4208
4209 bool
4210 operator_bitwise_not::fold_range (irange &r, tree type,
4211 const irange &lh,
4212 const irange &rh,
4213 relation_trio) const
4214 {
4215 if (empty_range_varying (r, type, lh, rh))
4216 return true;
4217
4218 if (types_compatible_p (type, boolean_type_node))
4219 return op_logical_not.fold_range (r, type, lh, rh);
4220
4221 // ~X is simply -1 - X.
4222 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
4223 wi::minus_one (TYPE_PRECISION (type)));
4224 return range_op_handler (MINUS_EXPR).fold_range (r, type, minusone, lh);
4225 }
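
// A quick standalone confirmation of the identity used above: on
// two's-complement integers, ~X == -1 - X for every value, which is why
// the range of ~X can be computed with the MINUS_EXPR handler.

static constexpr bool
sketch_bitwise_not_is_minus_one_minus_x ()
{
  for (int x = -40; x <= 40; ++x)
    if (~x != -1 - x)
      return false;
  return true;
}

static_assert (sketch_bitwise_not_is_minus_one_minus_x (), "");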
4226
4227 bool
4228 operator_bitwise_not::op1_range (irange &r, tree type,
4229 const irange &lhs,
4230 const irange &op2,
4231 relation_trio) const
4232 {
4233 if (lhs.undefined_p ())
4234 return false;
4235 if (types_compatible_p (type, boolean_type_node))
4236 return op_logical_not.op1_range (r, type, lhs, op2);
4237
4238 // ~X is -1 - X, and since bitwise NOT is involutory, do it again.
4239 return fold_range (r, type, lhs, op2);
4240 }
4241
4242 void
4243 operator_bitwise_not::update_bitmask (irange &r, const irange &lh,
4244 const irange &rh) const
4245 {
4246 update_known_bitmask (r, BIT_NOT_EXPR, lh, rh);
4247 }
4248
4249
4250 bool
4251 operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4252 const irange &lh,
4253 const irange &rh ATTRIBUTE_UNUSED,
4254 relation_trio) const
4255 {
4256 r = lh;
4257 return true;
4258 }
4259
4260
4261 // Determine if there is a relationship between LHS and OP1.
4262
4263 relation_kind
4264 operator_identity::lhs_op1_relation (const irange &lhs,
4265 const irange &op1 ATTRIBUTE_UNUSED,
4266 const irange &op2 ATTRIBUTE_UNUSED,
4267 relation_kind) const
4268 {
4269 if (lhs.undefined_p ())
4270 return VREL_VARYING;
4271 // Simply a copy, so they are equivalent.
4272 return VREL_EQ;
4273 }
4274
4275 bool
4276 operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4277 const irange &lh,
4278 const irange &rh ATTRIBUTE_UNUSED,
4279 relation_trio) const
4280 {
4281 r = lh;
4282 return true;
4283 }
4284
4285 bool
4286 operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
4287 const irange &lhs,
4288 const irange &op2 ATTRIBUTE_UNUSED,
4289 relation_trio) const
4290 {
4291 r = lhs;
4292 return true;
4293 }
4294
4295
4296 class operator_unknown : public range_operator
4297 {
4298 using range_operator::fold_range;
4299 public:
4300 virtual bool fold_range (irange &r, tree type,
4301 const irange &op1,
4302 const irange &op2,
4303 relation_trio rel = TRIO_VARYING) const;
4304 } op_unknown;
4305
4306 bool
4307 operator_unknown::fold_range (irange &r, tree type,
4308 const irange &lh ATTRIBUTE_UNUSED,
4309 const irange &rh ATTRIBUTE_UNUSED,
4310 relation_trio) const
4311 {
4312 r.set_varying (type);
4313 return true;
4314 }
4315
4316
4317 void
4318 operator_abs::wi_fold (irange &r, tree type,
4319 const wide_int &lh_lb, const wide_int &lh_ub,
4320 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4321 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4322 {
4323 wide_int min, max;
4324 signop sign = TYPE_SIGN (type);
4325 unsigned prec = TYPE_PRECISION (type);
4326
4327 // Pass through LH for the easy cases.
4328 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
4329 {
4330 r = int_range<1> (type, lh_lb, lh_ub);
4331 return;
4332 }
4333
4334 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4335 // a useful range.
4336 wide_int min_value = wi::min_value (prec, sign);
4337 wide_int max_value = wi::max_value (prec, sign);
4338 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
4339 {
4340 r.set_varying (type);
4341 return;
4342 }
4343
4344 // ABS_EXPR may flip the range around, if the original range
4345 // included negative values.
4346 if (wi::eq_p (lh_lb, min_value))
4347 {
4348 // ABS ([MIN, MIN]) = [-MIN, -MIN] isn't representable, but we have
4349 // traditionally returned [MIN, MIN], so preserve that behavior. PR37078
4350 if (wi::eq_p (lh_ub, min_value))
4351 {
4352 r = int_range<1> (type, min_value, min_value);
4353 return;
4354 }
4355 min = max_value;
4356 }
4357 else
4358 min = wi::abs (lh_lb);
4359
4360 if (wi::eq_p (lh_ub, min_value))
4361 max = max_value;
4362 else
4363 max = wi::abs (lh_ub);
4364
4365 // If the range contains zero then we know that the minimum value in the
4366 // range will be zero.
4367 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
4368 {
4369 if (wi::gt_p (min, max, sign))
4370 max = min;
4371 min = wi::zero (prec);
4372 }
4373 else
4374 {
4375 // If the range was reversed, swap MIN and MAX.
4376 if (wi::gt_p (min, max, sign))
4377 std::swap (min, max);
4378 }
4379
4380 // If the new range has its limits swapped around (MIN > MAX), then
4381 // the operation caused one of them to wrap around. The only thing
4382 // we know is that the result is positive.
4383 if (wi::gt_p (min, max, sign))
4384 {
4385 min = wi::zero (prec);
4386 max = max_value;
4387 }
4388 r = int_range<1> (type, min, max);
4389 }
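
// A plain-int sketch of the shape computed above, over a small domain that
// never hits the TYPE_MIN_VALUE corner case: if the input range straddles
// zero the minimum is 0 and the maximum is max (|lb|, |ub|); otherwise the
// bounds are |lb| and |ub|, possibly swapped.

static constexpr bool
sketch_abs_range (int lb, int ub, int expect_lb, int expect_ub)
{
  int min = lb < 0 ? -lb : lb;
  int max = min;
  for (int x = lb; x <= ub; ++x)
    {
      int a = x < 0 ? -x : x;
      if (a < min) min = a;
      if (a > max) max = a;
    }
  return min == expect_lb && max == expect_ub;
}

static_assert (sketch_abs_range (-20, -5, 5, 20), "");
static_assert (sketch_abs_range (-7, 10, 0, 10), "");
static_assert (sketch_abs_range (3, 9, 3, 9), "");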
4390
4391 bool
4392 operator_abs::op1_range (irange &r, tree type,
4393 const irange &lhs,
4394 const irange &op2,
4395 relation_trio) const
4396 {
4397 if (empty_range_varying (r, type, lhs, op2))
4398 return true;
4399 if (TYPE_UNSIGNED (type))
4400 {
4401 r = lhs;
4402 return true;
4403 }
4404 // Start with the positives because negatives are an impossible result.
4405 int_range_max positives = range_positives (type);
4406 positives.intersect (lhs);
4407 r = positives;
4408 // Then add the negative of each pair:
4409 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4410 for (unsigned i = 0; i < positives.num_pairs (); ++i)
4411 r.union_ (int_range<1> (type,
4412 -positives.upper_bound (i),
4413 -positives.lower_bound (i)));
4414 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4415 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4416 wide_int min_value = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4417 wide_int lb = lhs.lower_bound ();
4418 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lb, min_value))
4419 r.union_ (int_range<2> (type, lb, lb));
4420 return true;
4421 }
4422
4423 void
4424 operator_abs::update_bitmask (irange &r, const irange &lh,
4425 const irange &rh) const
4426 {
4427 update_known_bitmask (r, ABS_EXPR, lh, rh);
4428 }
4429
4430 class operator_absu : public range_operator
4431 {
4432 using range_operator::update_bitmask;
4433 public:
4434 virtual void wi_fold (irange &r, tree type,
4435 const wide_int &lh_lb, const wide_int &lh_ub,
4436 const wide_int &rh_lb, const wide_int &rh_ub) const;
4437 virtual void update_bitmask (irange &r, const irange &lh,
4438 const irange &rh) const final override;
4439 } op_absu;
4440
4441 void
4442 operator_absu::wi_fold (irange &r, tree type,
4443 const wide_int &lh_lb, const wide_int &lh_ub,
4444 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4445 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4446 {
4447 wide_int new_lb, new_ub;
4448
4449 // Pass through VR0 for the easy cases.
4450 if (wi::ges_p (lh_lb, 0))
4451 {
4452 new_lb = lh_lb;
4453 new_ub = lh_ub;
4454 }
4455 else
4456 {
4457 new_lb = wi::abs (lh_lb);
4458 new_ub = wi::abs (lh_ub);
4459
4460 // If the range contains zero then we know that the minimum
4461 // value in the range will be zero.
4462 if (wi::ges_p (lh_ub, 0))
4463 {
4464 if (wi::gtu_p (new_lb, new_ub))
4465 new_ub = new_lb;
4466 new_lb = wi::zero (TYPE_PRECISION (type));
4467 }
4468 else
4469 std::swap (new_lb, new_ub);
4470 }
4471
4472 gcc_checking_assert (TYPE_UNSIGNED (type));
4473 r = int_range<1> (type, new_lb, new_ub);
4474 }
4475
4476 void
4477 operator_absu::update_bitmask (irange &r, const irange &lh,
4478 const irange &rh) const
4479 {
4480 update_known_bitmask (r, ABSU_EXPR, lh, rh);
4481 }
4482
4483
4484 bool
4485 operator_negate::fold_range (irange &r, tree type,
4486 const irange &lh,
4487 const irange &rh,
4488 relation_trio) const
4489 {
4490 if (empty_range_varying (r, type, lh, rh))
4491 return true;
4492
4493 // -X is simply 0 - X.
4494 int_range<1> zero;
4495 zero.set_zero (type);
4496 return range_op_handler (MINUS_EXPR).fold_range (r, type, zero, lh);
4497 }
4498
4499 bool
4500 operator_negate::op1_range (irange &r, tree type,
4501 const irange &lhs,
4502 const irange &op2,
4503 relation_trio) const
4504 {
4505 // NEGATE is involutory.
4506 return fold_range (r, type, lhs, op2);
4507 }
4508
4509
4510 bool
4511 operator_addr_expr::fold_range (irange &r, tree type,
4512 const irange &lh,
4513 const irange &rh,
4514 relation_trio) const
4515 {
4516 if (empty_range_varying (r, type, lh, rh))
4517 return true;
4518
4519 // Return a non-null pointer of the LHS type (passed in op2).
4520 if (lh.zero_p ())
4521 r.set_zero (type);
4522 else if (lh.undefined_p () || contains_zero_p (lh))
4523 r.set_varying (type);
4524 else
4525 r.set_nonzero (type);
4526 return true;
4527 }
4528
4529 bool
4530 operator_addr_expr::op1_range (irange &r, tree type,
4531 const irange &lhs,
4532 const irange &op2,
4533 relation_trio) const
4534 {
4535 if (empty_range_varying (r, type, lhs, op2))
4536 return true;
4537
4538 // Return a non-null pointer of the LHS type (passed in op2), but only
4539 // if we can't overflow; otherwise a non-zero offset could wrap around
4540 // to zero. See PR 111009.
4541 if (!lhs.undefined_p () && !contains_zero_p (lhs) && TYPE_OVERFLOW_UNDEFINED (type))
4542 r.set_nonzero (type);
4543 else
4544 r.set_varying (type);
4545 return true;
4546 }
4547 \f
4548 // Initialize the integral-only operators in the primary table.
4549
4550 void
4551 range_op_table::initialize_integral_ops ()
4552 {
4553 set (TRUNC_DIV_EXPR, op_trunc_div);
4554 set (FLOOR_DIV_EXPR, op_floor_div);
4555 set (ROUND_DIV_EXPR, op_round_div);
4556 set (CEIL_DIV_EXPR, op_ceil_div);
4557 set (EXACT_DIV_EXPR, op_exact_div);
4558 set (LSHIFT_EXPR, op_lshift);
4559 set (RSHIFT_EXPR, op_rshift);
4560 set (TRUTH_AND_EXPR, op_logical_and);
4561 set (TRUTH_OR_EXPR, op_logical_or);
4562 set (TRUNC_MOD_EXPR, op_trunc_mod);
4563 set (TRUTH_NOT_EXPR, op_logical_not);
4564 set (IMAGPART_EXPR, op_unknown);
4565 set (REALPART_EXPR, op_unknown);
4566 set (ABSU_EXPR, op_absu);
4567 set (OP_WIDEN_MULT_SIGNED, op_widen_mult_signed);
4568 set (OP_WIDEN_MULT_UNSIGNED, op_widen_mult_unsigned);
4569 set (OP_WIDEN_PLUS_SIGNED, op_widen_plus_signed);
4570 set (OP_WIDEN_PLUS_UNSIGNED, op_widen_plus_unsigned);
4571
4572 }
4573
4574 bool
4575 operator_plus::overflow_free_p (const irange &lh, const irange &rh,
4576 relation_trio) const
4577 {
4578 if (lh.undefined_p () || rh.undefined_p ())
4579 return false;
4580
4581 tree type = lh.type ();
4582 if (TYPE_OVERFLOW_UNDEFINED (type))
4583 return true;
4584
4585 wi::overflow_type ovf;
4586 signop sgn = TYPE_SIGN (type);
4587 wide_int wmax0 = lh.upper_bound ();
4588 wide_int wmax1 = rh.upper_bound ();
4589 wi::add (wmax0, wmax1, sgn, &ovf);
4590 if (ovf != wi::OVF_NONE)
4591 return false;
4592
4593 if (TYPE_UNSIGNED (type))
4594 return true;
4595
4596 wide_int wmin0 = lh.lower_bound ();
4597 wide_int wmin1 = rh.lower_bound ();
4598 wi::add (wmin0, wmin1, sgn, &ovf);
4599 if (ovf != wi::OVF_NONE)
4600 return false;
4601
4602 return true;
4603 }
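
// A standalone sketch of the same check with 32-bit ints, using the
// __builtin_add_overflow primitive (a GCC built-in): an addition of two
// ranges cannot overflow iff the sum of the upper bounds fits and, for
// signed types, the sum of the lower bounds fits as well.  Every other sum
// drawn from the two ranges lies between those two extremes.

static inline bool
sketch_plus_overflow_free (int lb0, int ub0, int lb1, int ub1)
{
  int scratch;
  return !__builtin_add_overflow (ub0, ub1, &scratch)
         && !__builtin_add_overflow (lb0, lb1, &scratch);
}
// E.g. sketch_plus_overflow_free (10, 1000, 0, 1000) is true, while
// sketch_plus_overflow_free (0, 0x7fffffff, 0, 1) is false.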
4604
4605 bool
4606 operator_minus::overflow_free_p (const irange &lh, const irange &rh,
4607 relation_trio) const
4608 {
4609 if (lh.undefined_p () || rh.undefined_p ())
4610 return false;
4611
4612 tree type = lh.type ();
4613 if (TYPE_OVERFLOW_UNDEFINED (type))
4614 return true;
4615
4616 wi::overflow_type ovf;
4617 signop sgn = TYPE_SIGN (type);
4618 wide_int wmin0 = lh.lower_bound ();
4619 wide_int wmax1 = rh.upper_bound ();
4620 wi::sub (wmin0, wmax1, sgn, &ovf);
4621 if (ovf != wi::OVF_NONE)
4622 return false;
4623
4624 if (TYPE_UNSIGNED (type))
4625 return true;
4626
4627 wide_int wmax0 = lh.upper_bound ();
4628 wide_int wmin1 = rh.lower_bound ();
4629 wi::sub (wmax0, wmin1, sgn, &ovf);
4630 if (ovf != wi::OVF_NONE)
4631 return false;
4632
4633 return true;
4634 }
4635
4636 bool
4637 operator_mult::overflow_free_p (const irange &lh, const irange &rh,
4638 relation_trio) const
4639 {
4640 if (lh.undefined_p () || rh.undefined_p ())
4641 return false;
4642
4643 tree type = lh.type ();
4644 if (TYPE_OVERFLOW_UNDEFINED (type))
4645 return true;
4646
4647 wi::overflow_type ovf;
4648 signop sgn = TYPE_SIGN (type);
4649 wide_int wmax0 = lh.upper_bound ();
4650 wide_int wmax1 = rh.upper_bound ();
4651 wi::mul (wmax0, wmax1, sgn, &ovf);
4652 if (ovf != wi::OVF_NONE)
4653 return false;
4654
4655 if (TYPE_UNSIGNED (type))
4656 return true;
4657
4658 wide_int wmin0 = lh.lower_bound ();
4659 wide_int wmin1 = rh.lower_bound ();
4660 wi::mul (wmin0, wmin1, sgn, &ovf);
4661 if (ovf != wi::OVF_NONE)
4662 return false;
4663
4664 wi::mul (wmin0, wmax1, sgn, &ovf);
4665 if (ovf != wi::OVF_NONE)
4666 return false;
4667
4668 wi::mul (wmax0, wmin1, sgn, &ovf);
4669 if (ovf != wi::OVF_NONE)
4670 return false;
4671
4672 return true;
4673 }
4674
4675 #if CHECKING_P
4676 #include "selftest.h"
4677
4678 namespace selftest
4679 {
4680 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4681 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4682 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4683 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4684 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4685 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4686
4687 static void
4688 range_op_cast_tests ()
4689 {
4690 int_range<2> r0, r1, r2, rold;
4691 r0.set_varying (integer_type_node);
4692 wide_int maxint = r0.upper_bound ();
4693
4694 // If a range is in any way outside of the range of the type being
4695 // converted to, default to the range for the new type.
4696 r0.set_varying (short_integer_type_node);
4697 wide_int minshort = r0.lower_bound ();
4698 wide_int maxshort = r0.upper_bound ();
4699 if (TYPE_PRECISION (integer_type_node)
4700 > TYPE_PRECISION (short_integer_type_node))
4701 {
4702 r1 = int_range<1> (integer_type_node,
4703 wi::zero (TYPE_PRECISION (integer_type_node)),
4704 maxint);
4705 range_cast (r1, short_integer_type_node);
4706 ASSERT_TRUE (r1.lower_bound () == minshort
4707 && r1.upper_bound() == maxshort);
4708 }
4709
4710 // (unsigned char)[-5,-1] => [251,255].
4711 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (-1));
4712 range_cast (r0, unsigned_char_type_node);
4713 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node,
4714 UCHAR (251), UCHAR (255)));
4715 range_cast (r0, signed_char_type_node);
4716 ASSERT_TRUE (r0 == rold);
4717
4718 // (signed char)[15, 150] => [-128,-106][15,127].
4719 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (15), UCHAR (150));
4720 range_cast (r0, signed_char_type_node);
4721 r1 = int_range<1> (signed_char_type_node, SCHAR (15), SCHAR (127));
4722 r2 = int_range<1> (signed_char_type_node, SCHAR (-128), SCHAR (-106));
4723 r1.union_ (r2);
4724 ASSERT_TRUE (r1 == r0);
4725 range_cast (r0, unsigned_char_type_node);
4726 ASSERT_TRUE (r0 == rold);
4727
4728 // (unsigned char)[-5, 5] => [0,5][251,255].
4729 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (5));
4730 range_cast (r0, unsigned_char_type_node);
4731 r1 = int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255));
4732 r2 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4733 r1.union_ (r2);
4734 ASSERT_TRUE (r0 == r1);
4735 range_cast (r0, signed_char_type_node);
4736 ASSERT_TRUE (r0 == rold);
4737
4738 // (unsigned char)[-5,5] => [0,5][251,255].
4739 r0 = int_range<1> (integer_type_node, INT (-5), INT (5));
4740 range_cast (r0, unsigned_char_type_node);
4741 r1 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4742 r1.union_ (int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255)));
4743 ASSERT_TRUE (r0 == r1);
4744
4745 // (unsigned char)[5U,1974U] => [0,255].
4746 r0 = int_range<1> (unsigned_type_node, UINT (5), UINT (1974));
4747 range_cast (r0, unsigned_char_type_node);
4748 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (255)));
4749 range_cast (r0, integer_type_node);
4750 // Going to a wider range should not sign extend.
4751 ASSERT_TRUE (r0 == int_range<1> (integer_type_node, INT (0), INT (255)));
4752
4753 // (unsigned char)[-350,15] => [0,255].
4754 r0 = int_range<1> (integer_type_node, INT (-350), INT (15));
4755 range_cast (r0, unsigned_char_type_node);
4756 ASSERT_TRUE (r0 == (int_range<1>
4757 (unsigned_char_type_node,
4758 min_limit (unsigned_char_type_node),
4759 max_limit (unsigned_char_type_node))));
4760
4761 // Casting [-120,20] from signed char to unsigned short.
4762 // => [0, 20][0xff88, 0xffff].
4763 r0 = int_range<1> (signed_char_type_node, SCHAR (-120), SCHAR (20));
4764 range_cast (r0, short_unsigned_type_node);
4765 r1 = int_range<1> (short_unsigned_type_node, UINT16 (0), UINT16 (20));
4766 r2 = int_range<1> (short_unsigned_type_node,
4767 UINT16 (0xff88), UINT16 (0xffff));
4768 r1.union_ (r2);
4769 ASSERT_TRUE (r0 == r1);
4770 // A truncating cast back to signed char will work because [-120, 20]
4771 // is representable in signed char.
4772 range_cast (r0, signed_char_type_node);
4773 ASSERT_TRUE (r0 == int_range<1> (signed_char_type_node,
4774 SCHAR (-120), SCHAR (20)));
4775
4776 // unsigned char -> signed short
4777 // (signed short)[(unsigned char)25, (unsigned char)250]
4778 // => [(signed short)25, (signed short)250]
4779 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (25), UCHAR (250));
4780 range_cast (r0, short_integer_type_node);
4781 r1 = int_range<1> (short_integer_type_node, INT16 (25), INT16 (250));
4782 ASSERT_TRUE (r0 == r1);
4783 range_cast (r0, unsigned_char_type_node);
4784 ASSERT_TRUE (r0 == rold);
4785
4786 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4787 r0 = int_range<1> (long_long_integer_type_node,
4788 min_limit (long_long_integer_type_node),
4789 max_limit (long_long_integer_type_node));
4790 range_cast (r0, short_unsigned_type_node);
4791 r1 = int_range<1> (short_unsigned_type_node,
4792 min_limit (short_unsigned_type_node),
4793 max_limit (short_unsigned_type_node));
4794 ASSERT_TRUE (r0 == r1);
4795
4796 // Casting NONZERO to a narrower type will wrap/overflow so
4797 // it's just the entire range for the narrower type.
4798 //
4799 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4800 // is outside of the range of a smaller range, return the full
4801 // smaller range.
4802 if (TYPE_PRECISION (integer_type_node)
4803 > TYPE_PRECISION (short_integer_type_node))
4804 {
4805 r0.set_nonzero (integer_type_node);
4806 range_cast (r0, short_integer_type_node);
4807 r1 = int_range<1> (short_integer_type_node,
4808 min_limit (short_integer_type_node),
4809 max_limit (short_integer_type_node));
4810 ASSERT_TRUE (r0 == r1);
4811 }
4812
4813 // Casting NONZERO from a narrower signed to a wider signed.
4814 //
4815 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4816 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4817 r0.set_nonzero (short_integer_type_node);
4818 range_cast (r0, integer_type_node);
4819 r1 = int_range<1> (integer_type_node, INT (-32768), INT (-1));
4820 r2 = int_range<1> (integer_type_node, INT (1), INT (32767));
4821 r1.union_ (r2);
4822 ASSERT_TRUE (r0 == r1);
4823 }
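
// The conversions exercised above follow ordinary modular narrowing and
// sign-extension; a few standalone checks with plain C++ integer casts,
// reusing the values from the comments above (assuming the usual 8-bit
// chars and 16-bit shorts).

static_assert (static_cast<unsigned char> (-5) == 251
               && static_cast<unsigned char> (-1) == 255,
               "(unsigned char)[-5,-1] => [251,255]");
static_assert (static_cast<signed char> (150) == -106,
               "(signed char)150 => -106");
static_assert (static_cast<unsigned short> (static_cast<signed char> (-120))
               == 0xff88,
               "sign-extending -120 into unsigned short gives 0xff88");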
4824
4825 static void
4826 range_op_lshift_tests ()
4827 {
4828 // Test that 0x808.... & 0x8.... still contains 0x8....
4829 // for a large set of numbers.
4830 {
4831 int_range_max res;
4832 tree big_type = long_long_unsigned_type_node;
4833 unsigned big_prec = TYPE_PRECISION (big_type);
4834 // big_num = 0x808,0000,0000,0000
4835 wide_int big_num = wi::lshift (wi::uhwi (0x808, big_prec),
4836 wi::uhwi (48, big_prec));
4837 op_bitwise_and.fold_range (res, big_type,
4838 int_range <1> (big_type),
4839 int_range <1> (big_type, big_num, big_num));
4840 // val = 0x8,0000,0000,0000
4841 wide_int val = wi::lshift (wi::uhwi (8, big_prec),
4842 wi::uhwi (48, big_prec));
4843 ASSERT_TRUE (res.contains_p (val));
4844 }
4845
4846 if (TYPE_PRECISION (unsigned_type_node) > 31)
4847 {
4848 // unsigned VARYING = op1 << 1 should be VARYING.
4849 int_range<2> lhs (unsigned_type_node);
4850 int_range<2> shift (unsigned_type_node, INT (1), INT (1));
4851 int_range_max op1;
4852 op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
4853 ASSERT_TRUE (op1.varying_p ());
4854
4855 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4856 int_range<2> zero (unsigned_type_node, UINT (0), UINT (0));
4857 op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
4858 ASSERT_TRUE (op1.num_pairs () == 2);
4859 // Remove the [0,0] range.
4860 op1.intersect (zero);
4861 ASSERT_TRUE (op1.num_pairs () == 1);
4862 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4863 // which should result in [0,0].
4864 int_range_max result;
4865 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4866 ASSERT_TRUE (result == zero);
4867 }
4868 // signed VARYING = op1 << 1 should be VARYING.
4869 if (TYPE_PRECISION (integer_type_node) > 31)
4870 {
4871 // signed VARYING = op1 << 1 should be VARYING.
4872 int_range<2> lhs (integer_type_node);
4873 int_range<2> shift (integer_type_node, INT (1), INT (1));
4874 int_range_max op1;
4875 op_lshift.op1_range (op1, integer_type_node, lhs, shift);
4876 ASSERT_TRUE (op1.varying_p ());
4877
4878 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4879 int_range<2> zero (integer_type_node, INT (0), INT (0));
4880 op_lshift.op1_range (op1, integer_type_node, zero, shift);
4881 ASSERT_TRUE (op1.num_pairs () == 2);
4882 // Remove the [0,0] range.
4883 op1.intersect (zero);
4884 ASSERT_TRUE (op1.num_pairs () == 1);
4885 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4886 // which should result in [0,0].
4887 int_range_max result;
4888 op_lshift.fold_range (result, integer_type_node, op1, shift);
4889 ASSERT_TRUE (result == zero);
4890 }
4891 }
4892
4893 static void
4894 range_op_rshift_tests ()
4895 {
4896 // unsigned: [3, MAX] = OP1 >> 1
4897 {
4898 int_range_max lhs (unsigned_type_node,
4899 UINT (3), max_limit (unsigned_type_node));
4900 int_range_max one (unsigned_type_node,
4901 wi::one (TYPE_PRECISION (unsigned_type_node)),
4902 wi::one (TYPE_PRECISION (unsigned_type_node)));
4903 int_range_max op1;
4904 op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
4905 ASSERT_FALSE (op1.contains_p (UINT (3)));
4906 }
4907
4908 // signed: [3, MAX] = OP1 >> 1
4909 {
4910 int_range_max lhs (integer_type_node,
4911 INT (3), max_limit (integer_type_node));
4912 int_range_max one (integer_type_node, INT (1), INT (1));
4913 int_range_max op1;
4914 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4915 ASSERT_FALSE (op1.contains_p (INT (-2)));
4916 }
4917
4918 // This is impossible, so OP1 should be [].
4919 // signed: [MIN, MIN] = OP1 >> 1
4920 {
4921 int_range_max lhs (integer_type_node,
4922 min_limit (integer_type_node),
4923 min_limit (integer_type_node));
4924 int_range_max one (integer_type_node, INT (1), INT (1));
4925 int_range_max op1;
4926 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4927 ASSERT_TRUE (op1.undefined_p ());
4928 }
4929
4930 // signed: ~[-1] = OP1 >> 31
4931 if (TYPE_PRECISION (integer_type_node) > 31)
4932 {
4933 int_range_max lhs (integer_type_node, INT (-1), INT (-1), VR_ANTI_RANGE);
4934 int_range_max shift (integer_type_node, INT (31), INT (31));
4935 int_range_max op1;
4936 op_rshift.op1_range (op1, integer_type_node, lhs, shift);
4937 int_range_max negatives = range_negatives (integer_type_node);
4938 negatives.intersect (op1);
4939 ASSERT_TRUE (negatives.undefined_p ());
4940 }
4941 }
4942
4943 static void
4944 range_op_bitwise_and_tests ()
4945 {
4946 int_range_max res;
4947 wide_int min = min_limit (integer_type_node);
4948 wide_int max = max_limit (integer_type_node);
4949 wide_int tiny = wi::add (min, wi::one (TYPE_PRECISION (integer_type_node)));
4950 int_range_max i1 (integer_type_node, tiny, max);
4951 int_range_max i2 (integer_type_node, INT (255), INT (255));
4952
4953 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4954 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4955 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4956
4957 // VARYING = OP1 & 255: OP1 is VARYING
4958 i1 = int_range<1> (integer_type_node);
4959 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4960 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4961
4962 // For 0 = x & MASK, x is ~MASK.
4963 {
4964 int_range<2> zero (integer_type_node, INT (0), INT (0));
4965 int_range<2> mask = int_range<2> (integer_type_node, INT (7), INT (7));
4966 op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
4967 wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
4968 ASSERT_TRUE (res.get_nonzero_bits () == inv);
4969 }
4970
4971 // (NONZERO | X) is nonzero.
4972 i1.set_nonzero (integer_type_node);
4973 i2.set_varying (integer_type_node);
4974 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4975 ASSERT_TRUE (res.nonzero_p ());
4976
4977 // (NEGATIVE | X) is nonzero.
4978 i1 = int_range<1> (integer_type_node, INT (-5), INT (-3));
4979 i2.set_varying (integer_type_node);
4980 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4981 ASSERT_FALSE (res.contains_p (INT (0)));
4982 }
4983
4984 static void
4985 range_relational_tests ()
4986 {
4987 int_range<2> lhs (unsigned_char_type_node);
4988 int_range<2> op1 (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4989 int_range<2> op2 (unsigned_char_type_node, UCHAR (20), UCHAR (20));
4990
4991 // Never wrapping additions mean LHS > OP1.
4992 relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4993 ASSERT_TRUE (code == VREL_GT);
4994
4995 // Additions that only sometimes wrap tell us nothing...
4996 op1 = int_range<2> (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4997 op2 = int_range<2> (unsigned_char_type_node, UCHAR (0), UCHAR (255));
4998 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4999 ASSERT_TRUE (code == VREL_VARYING);
5000
5001 // However, always wrapping additions mean LHS < OP1.
5002 op1 = int_range<2> (unsigned_char_type_node, UCHAR (1), UCHAR (255));
5003 op2 = int_range<2> (unsigned_char_type_node, UCHAR (255), UCHAR (255));
5004 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
5005 ASSERT_TRUE (code == VREL_LT);
5006 }
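
// A plain unsigned char sketch of the relations checked above: an addition
// that can never wrap leaves the sum above op1, while one that always
// wraps (mod 256) leaves the sum below op1.

static constexpr bool
sketch_unsigned_add_relation ()
{
  // op1 in [8,10] plus 20 never wraps, so lhs > op1.
  for (int op1 = 8; op1 <= 10; ++op1)
    if (!(static_cast<unsigned char> (op1 + 20) > op1))
      return false;
  // op1 in [1,255] plus 255 always wraps, so lhs < op1.
  for (int op1 = 1; op1 <= 255; ++op1)
    if (!(static_cast<unsigned char> (op1 + 255) < op1))
      return false;
  return true;
}

static_assert (sketch_unsigned_add_relation (), "");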
5007
5008 void
5009 range_op_tests ()
5010 {
5011 range_op_rshift_tests ();
5012 range_op_lshift_tests ();
5013 range_op_bitwise_and_tests ();
5014 range_op_cast_tests ();
5015 range_relational_tests ();
5016
5017 extern void range_op_float_tests ();
5018 range_op_float_tests ();
5019 }
5020
5021 } // namespace selftest
5022
5023 #endif // CHECKING_P