As mentioned in the PR, this hunk is guarded with !wi::neg_p (r1val | r1mask, sgn),
which means that if sgn is UNSIGNED it is always true, but r1val | r1mask in
widest_int is still sign-extended.  So for an arg with the most significant bit
set within width, wi::clz (arg) returns 0, wi::get_precision (arg) returns some
very large number (WIDE_INT_MAX_PRECISION, 576 bits on x86_64) and width is 64,
so we end up with an lzcount of -512 where the code afterwards expects a
non-negative lzcount.  For arg without the sign bit set the code works
correctly: those numbers are zero-extended, so wi::clz returns
wi::get_precision (arg) - width plus the number of leading zero bits within the
width precision.
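
To make the arithmetic concrete, here is a minimal standalone sketch (plain C,
not GCC code and not using the wide-int classes); it scales the problem down to
8-bit values widened to 32 bits instead of 64-bit values widened to the 576-bit
widest_int, with width and lzcount mirroring the names in the hunk:

#include <stdio.h>

int
main ()
{
  const int wide_prec = 32;   /* stands in for WIDE_INT_MAX_PRECISION */
  const int width = 8;        /* stands in for the 64-bit operand precision */
  unsigned char arg8 = 0xf0;  /* unsigned value with its MSB set */

  /* What widest_int effectively does: sign-extend into the wider type,
     so all high bits become 1 and the leading zero count is 0.  */
  unsigned int sext = (unsigned int) (int) (signed char) arg8;  /* 0xfffffff0 */
  int lzcount = __builtin_clz (sext) - (wide_prec - width);     /* 0 - 24 = -24 */
  printf ("sign-extended: lzcount = %d\n", lzcount);

  /* What the old code implicitly assumed: zero-extension, where the
     subtraction yields the leading-zero count within width.  */
  unsigned int zext = arg8;                                     /* 0x000000f0 */
  lzcount = __builtin_clz (zext) - (wide_prec - width);         /* 24 - 24 = 0 */
  printf ("zero-extended: lzcount = %d\n", lzcount);
  return 0;
}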
The patch fixes it by handling the sign-extended case specially.  It could also
be done with a wi::neg_p (arg) check, but testing lzcount == 0 works
identically, because wi::clz (arg) is 0 exactly when the sign-extended arg has
its most significant bit set.
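
For completeness, a small sketch of the fixed adjustment in the same scaled-down
8-bit/32-bit setting (again plain C, not the wide-int API); the lzcount == 0
guard and a sign test pick out the same cases, and when the MSB is set, 0 is
already the correct leading-zero count within width:

#include <stdio.h>

/* Leading zeros of an 8-bit value computed via its sign-extension to 32 bits,
   mirroring the patched hunk.  val8 must be nonzero: unlike wi::clz,
   __builtin_clz (0) is undefined.  */
static int
clz_in_width (unsigned char val8)
{
  const int wide_prec = 32, width = 8;
  unsigned int sext = (unsigned int) (int) (signed char) val8;
  int lzcount = __builtin_clz (sext);
  if (lzcount)    /* equivalently: if (!((signed char) val8 < 0)) */
    lzcount -= wide_prec - width;
  return lzcount;
}

int
main ()
{
  printf ("%d %d %d\n",
          clz_in_width (0xf0),   /* MSB set: 0 */
          clz_in_width (0x3c),   /* 2 */
          clz_in_width (0x01));  /* 7 */
  return 0;
}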
2021-08-31  Jakub Jelinek  <jakub@redhat.com>

	PR tree-optimization/102134
	* tree-ssa-ccp.c (bit_value_binop) <case RSHIFT_EXPR>: If sgn is
	UNSIGNED and r1val | r1mask has MSB set, ensure lzcount doesn't
	become negative.

	* gcc.c-torture/execute/pr102134.c: New test.
--- /dev/null
+++ gcc/testsuite/gcc.c-torture/execute/pr102134.c
+/* PR tree-optimization/102134 */
+
+typedef unsigned long long u64;
+
+u64 g;
+
+void
+foo (u64 a, u64 b, u64 c, u64 *r)
+{
+  b *= b;
+  u64 x = a && ((b >> (c & 63)) | ((b << (c & 63)) & g));
+  *r = x + a;
+}
+
+int
+main ()
+{
+  u64 x;
+  foo (1, 3000, 0, &x);
+  if (x != 2)
+    __builtin_abort ();
+  return 0;
+}
/* Logical right shift, or zero sign bit. */
widest_int arg = r1val | r1mask;
int lzcount = wi::clz (arg);
- lzcount -= wi::get_precision (arg) - width;
+ if (lzcount)
+ lzcount -= wi::get_precision (arg) - width;
widest_int tmp = wi::mask <widest_int> (width, false);
tmp = wi::lrshift (tmp, lzcount);
tmp = wi::lrshift (tmp, wi::bit_and_not (r2val, r2mask));
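
Finally, a rough sketch of what this hunk computes (scaled down to 8-bit values
in plain C, using the usual CCP bit-lattice convention that mask bits mark the
unknown bits and val holds the known bit values): an upper bound on which
result bits can still be set after the logical right shift, obtained by
shifting a width-bit all-ones mask right by the in-width leading-zero count of
the possibly-set operand bits and then by the minimum possible shift amount.
The operand and shift-count values below are just illustrative:

#include <stdio.h>

int
main ()
{
  const int wide_prec = 32, width = 8;

  /* Operand 1: bits 5-4 known to be 1, bits 3-2 unknown, rest known 0.  */
  unsigned int r1val = 0x30, r1mask = 0x0c;
  /* Operand 2 (shift count): bit 1 known 1, bit 0 unknown, i.e. 2 or 3.  */
  unsigned int r2val = 0x02, r2mask = 0x01;

  unsigned int arg = r1val | r1mask;            /* 0x3c: bits that may be 1 */
  int lzcount = __builtin_clz (arg);            /* 26 */
  if (lzcount)
    lzcount -= wide_prec - width;               /* 2 leading zeros within 8 bits */

  unsigned int tmp = 0xff;                      /* like wi::mask (width, false) */
  tmp >>= lzcount;                              /* 0x3f */
  tmp >>= r2val & ~r2mask;                      /* minimum shift is 2 -> 0x0f */

  /* tmp now covers every bit position the shifted result could have set.  */
  printf ("possible result bits: 0x%02x\n", tmp);
  return 0;
}

With the unfixed subtraction, an operand with the MSB set would make lzcount
negative at this point, and the subsequent shifts could no longer compute a
valid upper bound, which is how the testcase got miscompiled.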