// See also the note in irange_bitmask::intersect.
irange_bitmask bm (type (), lower_bound (), upper_bound ());
if (!m_bitmask.unknown_p ())
- bm.intersect (m_bitmask);
+ {
+ bm.intersect (m_bitmask);
+ // If the new intersection is unknown, it means there are inconsistent
+ // bits, so simply return the original bitmask.
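+ // For instance, a range such as [4, 7] implies bit 2 is known to be
+ // set; if m_bitmask instead claims bit 2 is known to be clear, those
+ // known bits conflict and the intersection comes back unknown, so the
+ // original bitmask is the better value to return.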
+ if (bm.unknown_p ())
+ return m_bitmask;
+ }
return bm;
}
ASSERT_FALSE (ir1.varying_p ());
}
+// Test that range bounds are "snapped" to where they are expected to be.
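+// For example, with a range of [5, 15] and a bitmask allowing only even
+// values (mask 0xFFFFFFFE, value 0x0), the bounds should snap inward to
+// [6, 14].  If no value in the range satisfies the bitmask, the range
+// should collapse to undefined; callers indicate that by passing an
+// expected_ub smaller than expected_lb.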
+
+static void
+assert_snap_result (int lb_val, int ub_val,
+ int expected_lb, int expected_ub,
+ unsigned mask_val, unsigned value_val,
+ tree type)
+{
+ wide_int lb = wi::shwi (lb_val, TYPE_PRECISION (type));
+ wide_int ub = wi::shwi (ub_val, TYPE_PRECISION (type));
+
+ irange_bitmask bm (wi::uhwi (value_val, TYPE_PRECISION (type)),
+ wi::uhwi (mask_val, TYPE_PRECISION (type)));
+
+ int_range_max r (type);
+ r.set (type, lb, ub);
+ r.update_bitmask (bm);
+
+ if (TYPE_SIGN (type) == SIGNED && expected_ub < expected_lb)
+ gcc_checking_assert (r.undefined_p ());
+ else if (TYPE_SIGN (type) == UNSIGNED
+ && ((unsigned)expected_ub < (unsigned)expected_lb))
+ gcc_checking_assert (r.undefined_p ());
+ else
+ {
+ gcc_checking_assert (wi::eq_p (r.lower_bound (),
+ wi::shwi (expected_lb,
+ TYPE_PRECISION (type))));
+ gcc_checking_assert (wi::eq_p (r.upper_bound (),
+ wi::shwi (expected_ub,
+ TYPE_PRECISION (type))));
+ }
+}
+
+
+// Run a selection of tests that confirm bounds are snapped as expected.
+// We only test individual pairs; multiple pairs use the same snapping
+// routine as single pairs.
+
+static void
+test_irange_snap_bounds ()
+{
+ tree u32 = unsigned_type_node;
+ tree s32 = integer_type_node;
+ tree s8 = build_nonstandard_integer_type (8, /*unsigned=*/ 0);
+ tree s1 = build_nonstandard_integer_type (1, /*unsigned=*/ 0);
+ tree u1 = build_nonstandard_integer_type (1, /*unsigned=*/ 1);
+
+ // Basic aligned range: even-only
+ assert_snap_result (5, 15, 6, 14, 0xFFFFFFFE, 0x0, u32);
+ // Singleton that doesn't match mask: undefined.
+ assert_snap_result (7, 7, 1, 0, 0xFFFFFFFE, 0x0, u32);
+ // 8-bit signed char, mask 0xF0 (i.e. step of 16).
+ assert_snap_result (-100, 100, -96, 96, 0xF0, 0x00, s8);
+ // Already aligned range: no change.
+ assert_snap_result (0, 240, 0, 240, 0xF0, 0x00, u32);
+ // Negative range, step 16 alignment (s32).
+ assert_snap_result (-123, -17, -112, -32, 0xFFFFFFF0, 0x00, s32);
+ // s8, 16-alignment mask, value = 0 (valid).
+ assert_snap_result (-50, 10, -48, 0, 0xF0, 0x00, s8);
+ // No values in range [-3,2] match alignment except 0.
+ assert_snap_result (-3, 2, 0, 0, 0xF8, 0x00, s8);
+ // No values in range [-3,2] match the bitmask (value 0x04): undefined.
+ assert_snap_result (-3, 2, 1, 0, 0xF8, 0x04, s8);
+ // Already aligned range: no change.
+ assert_snap_result (0, 240, 0, 240, 0xF0, 0x00, s32);
+ // 1-bit signed: only -1 allowed (0b1).
+ assert_snap_result (-1, 0, -1, -1, 0x00, 0x01, s1);
+ // 1-bit signed: only 0 allowed (0b0).
+ assert_snap_result (-1, 0, 0, 0, 0x00, 0x00, s1);
+ // 1-bit signed: no match (invalid case).
+ assert_snap_result (-1, -1, 1, 0, 0x00, 0x00, s1);
+ // 1-bit signed: no match (invalid case).
+ assert_snap_result (0, 0, 1, 0, 0x00, 0x01, s1);
+ // 1-bit unsigned: only 1 allowed.
+ assert_snap_result (0, 1, 1, 1, 0x00, 0x01, u1);
+ // 1-bit unsigned: only 0 allowed.
+ assert_snap_result (0, 1, 0, 0, 0x00, 0x00, u1);
+ // 1-bit unsigned: no match (invalid case).
+ assert_snap_result (1, 1, 1, 0, 0x00, 0x00, u1);
+ // 1-bit unsigned: no match (invalid case).
+ assert_snap_result (0, 0, 1, 0, 0x00, 0x01, u1);
+ // Unsigned: Near overflow, even alignment.
+ assert_snap_result (UINT_MAX - 6, UINT_MAX, UINT_MAX - 5, UINT_MAX - 1,
+ 0xFFFFFFFE, 0x00, u32);
+ // Unsigned: range near UINT_MAX with no 16-aligned values: undefined.
+ assert_snap_result (UINT_MAX - 5, UINT_MAX, 1, 0, 0xFFFFFFF0, 0x00, u32);
+ // Signed: Near INT_MAX, 8-aligned.
+ assert_snap_result (INT_MAX - 18, INT_MAX, INT_MAX - 15, INT_MAX - 7,
+ 0xFFFFFFF8, 0x00, s32);
+ // Signed: Near INT_MIN, 16-aligned.
+ assert_snap_result (INT_MIN, INT_MIN + 30, INT_MIN, INT_MIN + 16,
+ 0xFFFFFFF0, 0x00, s32);
+ // Signed: Full domain, 4-aligned.
+ assert_snap_result (-128, 127, -128, 124, 0xFC, 0x00, s8);
+ // Singleton at INT_MIN that doesn't match the alignment: undefined.
+ assert_snap_result (INT_MIN, INT_MIN, 1, 0, 0xFFFFFFFE, 0x01, s32);
+ // Range at INT_MIN that doesn't match the alignment: undefined.
+ assert_snap_result (INT_MIN, INT_MIN + 10, 1, 0, 0xFFFFFFF0, 0x0F, s32);
+ // Unsigned: Full domain, 256-aligned.
+ assert_snap_result (0, UINT_MAX, 0, UINT_MAX & ~255, 0xFFFFFF00, 0x00, u32);
+}
+
static void
range_tests_misc ()
{
r0.set_zero (integer_type_node);
r0.set_nonzero_bits (INT (1));
ASSERT_TRUE (r0.zero_p ());
+
+ // Now test that range bounds are snapped to match bitmask alignments.
+ test_irange_snap_bounds ();
}
// Build an frange from string endpoints.