+2018-09-04  Aldy Hernandez  <aldyh@redhat.com>
+
+ * tree-vrp.c (vrp_can_optimize_bit_op): Remove.
+ (extract_range_from_binary_expr_1): Do not call
+ vrp_can_optimize_bit_op.
+ * wide-int-range.cc (wide_int_range_can_optimize_bit_op): Make
+ static.
+ (wide_int_range_get_mask_and_bounds): New.
+ (wide_int_range_optimize_bit_op): New.
+ (wide_int_range_bit_ior): Call wide_int_range_optimize_bit_op.
+ (wide_int_range_bit_and): Same.
+ * wide-int-range.h (wide_int_range_can_optimize_bit_op): Remove.
+ (wide_int_range_optimize_bit_op): New.
+ (wide_int_range_get_mask_and_bounds): New.
+
2018-09-04  Richard Biener  <rguenther@suse.de>

 PR tree-optimization/87176
set_value_range_to_varying (vr);
}
-/* Value range wrapper for wide_int_range_can_optimize_bit_op.
-
- If a bit operation on two ranges can be easily optimized in terms
- of a mask, store the optimized new range in VR and return TRUE. */
-
-static bool
-vrp_can_optimize_bit_op (value_range *vr, enum tree_code code,
- const value_range *vr0, const value_range *vr1)
-{
- tree lower_bound, upper_bound, mask;
- if (code != BIT_AND_EXPR && code != BIT_IOR_EXPR)
- return false;
- if (range_int_cst_singleton_p (vr1))
- {
- if (!range_int_cst_p (vr0))
- return false;
- mask = vr1->min;
- lower_bound = vr0->min;
- upper_bound = vr0->max;
- }
- else if (range_int_cst_singleton_p (vr0))
- {
- if (!range_int_cst_p (vr1))
- return false;
- mask = vr0->min;
- lower_bound = vr1->min;
- upper_bound = vr1->max;
- }
- else
- return false;
- if (wide_int_range_can_optimize_bit_op (code,
- wi::to_wide (lower_bound),
- wi::to_wide (upper_bound),
- wi::to_wide (mask)))
- {
- tree min = int_const_binop (code, lower_bound, mask);
- tree max = int_const_binop (code, upper_bound, mask);
- set_value_range (vr, VR_RANGE, min, max, NULL);
- return true;
- }
- return false;
-}
-
/* If BOUND will include a symbolic bound, adjust it accordingly,
otherwise leave it as is.
}
else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
{
- if (vrp_can_optimize_bit_op (vr, code, &vr0, &vr1))
- return;
-
wide_int may_be_nonzero0, may_be_nonzero1;
wide_int must_be_nonzero0, must_be_nonzero1;
wide_int wmin, wmax;
It is up to the caller to perform the actual folding above. */
-bool
+static bool
wide_int_range_can_optimize_bit_op (tree_code code,
const wide_int &lb, const wide_int &ub,
const wide_int &mask)
return false;
}
+/* Helper function for wide_int_range_optimize_bit_op.
+
+ Calculate the mask and bounds for a pair of ranges: the mask is
+ the singleton among the two ranges, if either is one, and the
+ bounds are those of the remaining range. Return TRUE if a
+ singleton was found and the outputs were set, FALSE otherwise. */
+
+bool
+wide_int_range_get_mask_and_bounds (wide_int &mask,
+ wide_int &lower_bound,
+ wide_int &upper_bound,
+ const wide_int &vr0_min,
+ const wide_int &vr0_max,
+ const wide_int &vr1_min,
+ const wide_int &vr1_max)
+{
+ if (wi::eq_p (vr1_min, vr1_max))
+ {
+ mask = vr1_min;
+ lower_bound = vr0_min;
+ upper_bound = vr0_max;
+ return true;
+ }
+ else if (wi::eq_p (vr0_min, vr0_max))
+ {
+ mask = vr0_min;
+ lower_bound = vr1_min;
+ upper_bound = vr1_max;
+ return true;
+ }
+ return false;
+}
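+
+/* For instance (illustrative values, not taken from the patch):
+ given VR0 = [16, 31] and the singleton VR1 = [7, 7], MASK becomes
+ 7, [LOWER_BOUND, UPPER_BOUND] becomes [16, 31], and TRUE is
+ returned. If neither range is a singleton, FALSE is returned and
+ the output arguments are left untouched. */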
+
+/* Optimize a bit operation (BIT_AND_EXPR or BIT_IOR_EXPR) on the
+ ranges [VR0_MIN, VR0_MAX] and [VR1_MIN, VR1_MAX] if possible,
+ which requires one operand to be a singleton usable as a mask.
+ If so, return TRUE and store the result in [RES_LB, RES_UB]. */
+
+bool
+wide_int_range_optimize_bit_op (wide_int &res_lb, wide_int &res_ub,
+ enum tree_code code,
+ signop sign,
+ const wide_int &vr0_min,
+ const wide_int &vr0_max,
+ const wide_int &vr1_min,
+ const wide_int &vr1_max)
+{
+ gcc_assert (code == BIT_AND_EXPR || code == BIT_IOR_EXPR);
+
+ wide_int lower_bound, upper_bound, mask;
+ if (!wide_int_range_get_mask_and_bounds (mask, lower_bound, upper_bound,
+ vr0_min, vr0_max, vr1_min, vr1_max))
+ return false;
+ if (wide_int_range_can_optimize_bit_op (code,
+ lower_bound, upper_bound, mask))
+ {
+ wi::overflow_type ovf;
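+ /* OVF is ignored; bitwise AND and IOR never overflow. */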
+ wide_int_binop (res_lb, code, lower_bound, mask, sign, &ovf);
+ wide_int_binop (res_ub, code, upper_bound, mask, sign, &ovf);
+ return true;
+ }
+ return false;
+}
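+
+/* A worked example (values chosen for illustration; whether a pair
+ qualifies is decided by wide_int_range_can_optimize_bit_op): for
+ BIT_IOR_EXPR with VR0 = [4, 7] and the singleton VR1 = [8, 8],
+ the mask 8 shares no bits with the varying low bits of [4, 7],
+ so the result range is exactly [4 | 8, 7 | 8] = [12, 15]. */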
+
/* Calculate the XOR of two ranges and store the result in [WMIN,WMAX].
The two input ranges are described by their MUST_BE_NONZERO and
MAY_BE_NONZERO bit masks.
const wide_int &must_be_nonzero1,
const wide_int &may_be_nonzero1)
{
+ if (wide_int_range_optimize_bit_op (wmin, wmax, BIT_IOR_EXPR, sign,
+ vr0_min, vr0_max,
+ vr1_min, vr1_max))
+ return true;
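+ /* Otherwise fall back to the conservative nonzero-bits
+ computation below. */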
wmin = must_be_nonzero0 | must_be_nonzero1;
wmax = may_be_nonzero0 | may_be_nonzero1;
/* If the input ranges contain only positive values we can
const wide_int &must_be_nonzero1,
const wide_int &may_be_nonzero1)
{
+ if (wide_int_range_optimize_bit_op (wmin, wmax, BIT_AND_EXPR, sign,
+ vr0_min, vr0_max,
+ vr1_min, vr1_max))
+ return true;
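+ /* E.g. (illustrative): the call above folds [16, 31] AND the
+ singleton mask 0xf0 to the singleton [16, 16], since the mask
+ clears exactly the bits that vary within the range. */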
wmin = must_be_nonzero0 & must_be_nonzero1;
wmax = may_be_nonzero0 & may_be_nonzero1;
/* If both input ranges contain only negative values we can
const wide_int &ub,
wide_int &may_be_nonzero,
wide_int &must_be_nonzero);
-extern bool wide_int_range_can_optimize_bit_op (tree_code,
- const wide_int &lb,
- const wide_int &ub,
- const wide_int &mask);
+extern bool wide_int_range_optimize_bit_op (wide_int &res_lb, wide_int &res_ub,
+ enum tree_code code,
+ signop sign,
+ const wide_int &vr0_lb,
+ const wide_int &vr0_ub,
+ const wide_int &vr1_lb,
+ const wide_int &vr1_ub);
+extern bool wide_int_range_get_mask_and_bounds (wide_int &mask,
+ wide_int &lower_bound,
+ wide_int &upper_bound,
+ const wide_int &vr0_min,
+ const wide_int &vr0_max,
+ const wide_int &vr1_min,
+ const wide_int &vr1_max);
extern bool wide_int_range_bit_xor (wide_int &wmin, wide_int &wmax,
signop sign,
unsigned prec,