return true;
}
-// Return the range of a __builtin_ubsan* in CALL and set it in R.
-// CODE is the type of ubsan call (PLUS_EXPR, MINUS_EXPR or
-// MULT_EXPR).
-
-void
-fold_using_range::range_of_builtin_ubsan_call (irange &r, gcall *call,
- tree_code code, fur_source &src)
-{
- gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR
- || code == MULT_EXPR);
- tree type = gimple_range_type (call);
- range_op_handler op (code, type);
- gcc_checking_assert (op);
- int_range_max ir0, ir1;
- tree arg0 = gimple_call_arg (call, 0);
- tree arg1 = gimple_call_arg (call, 1);
- src.get_operand (ir0, arg0);
- src.get_operand (ir1, arg1);
- // Check for any relation between arg0 and arg1.
- relation_kind relation = src.query_relation (arg0, arg1);
-
- bool saved_flag_wrapv = flag_wrapv;
- // Pretend the arithmetic is wrapping. If there is any overflow,
- // we'll complain, but will actually do wrapping operation.
- flag_wrapv = 1;
- op.fold_range (r, type, ir0, ir1, relation);
- flag_wrapv = saved_flag_wrapv;
-
- // If for both arguments vrp_valueize returned non-NULL, this should
- // have been already folded and if not, it wasn't folded because of
- // overflow. Avoid removing the UBSAN_CHECK_* calls in that case.
- if (r.singleton_p ())
- r.set_varying (type);
-}
-
// For a builtin in CALL, return a range in R if known and return
// TRUE. Otherwise return FALSE.
bool
fold_using_range::range_of_builtin_int_call (irange &r, gcall *call,
- fur_source &src)
+ fur_source &)
{
combined_fn func = gimple_call_combined_fn (call);
if (func == CFN_LAST)
r.set (build_zero_cst (type), build_one_cst (type));
return true;
- case CFN_UBSAN_CHECK_ADD:
- range_of_builtin_ubsan_call (r, call, PLUS_EXPR, src);
- return true;
- case CFN_UBSAN_CHECK_SUB:
- range_of_builtin_ubsan_call (r, call, MINUS_EXPR, src);
- return true;
- case CFN_UBSAN_CHECK_MUL:
- range_of_builtin_ubsan_call (r, call, MULT_EXPR, src);
- return true;
-
case CFN_GOACC_DIM_SIZE:
case CFN_GOACC_DIM_POS:
// Optimizing these two internal functions helps the loop
}
} op_cfn_clrsb;
+
+// Implement range operator for the CFN_UBSAN_CHECK_{ADD,SUB,MUL} builtins.
+class cfn_ubsan : public range_operator
+{
+public:
+ cfn_ubsan (enum tree_code code) { m_code = code; }
+ using range_operator::fold_range;
+ virtual bool fold_range (irange &r, tree type, const irange &lh,
+ const irange &rh, relation_kind rel) const
+ {
+ range_op_handler handler (m_code, type);
+ gcc_checking_assert (handler);
+
+ bool saved_flag_wrapv = flag_wrapv;
+    // Pretend the arithmetic is wrapping.  If there is any overflow,
+    // we'll complain, but will actually perform a wrapping operation.
+ flag_wrapv = 1;
+ bool result = handler.fold_range (r, type, lh, rh, rel);
+ flag_wrapv = saved_flag_wrapv;
+
+ // If for both arguments vrp_valueize returned non-NULL, this should
+ // have been already folded and if not, it wasn't folded because of
+ // overflow. Avoid removing the UBSAN_CHECK_* calls in that case.
+ if (result && r.singleton_p ())
+ r.set_varying (type);
+ return result;
+ }
+private:
+ enum tree_code m_code;
+};
+
+cfn_ubsan op_cfn_ubsan_add (PLUS_EXPR);
+cfn_ubsan op_cfn_ubsan_sub (MINUS_EXPR);
+cfn_ubsan op_cfn_ubsan_mul (MULT_EXPR);
+
// Set up a gimple_range_op_handler for any built in function which can be
// supported via range-ops.
m_int = &op_cfn_clrsb;
break;
+ case CFN_UBSAN_CHECK_ADD:
+ m_op1 = gimple_call_arg (call, 0);
+ m_op2 = gimple_call_arg (call, 1);
+ m_valid = true;
+ m_int = &op_cfn_ubsan_add;
+ break;
+
+ case CFN_UBSAN_CHECK_SUB:
+ m_op1 = gimple_call_arg (call, 0);
+ m_op2 = gimple_call_arg (call, 1);
+ m_valid = true;
+ m_int = &op_cfn_ubsan_sub;
+ break;
+
+ case CFN_UBSAN_CHECK_MUL:
+ m_op1 = gimple_call_arg (call, 0);
+ m_op2 = gimple_call_arg (call, 1);
+ m_valid = true;
+ m_int = &op_cfn_ubsan_mul;
+ break;
+
default:
break;
}