When value-numbering an address expression like
&p_74(D)->a1x[4294967295].a1; we are accumulating the byte offset
in a 64-bit integer. When later exploiting the duality between
that and a POINTER_PLUS_EXPR we should avoid truncating that
offset to fit in the target-specific sizetype. While such
overflows are generally undefined behavior, exploiting this
may lead to spurious missing diagnostics.
2022-05-09 Richard Biener <rguenther@suse.de>
PR tree-optimization/105517
* tree-ssa-sccvn.cc (vn_reference_lookup): Make sure the accumulated
offset can be represented in the POINTER_PLUS_EXPR IL.
(vn_reference_insert): Likewise.
* poly-int.h (sext_hwi): Add poly version of sext_hwi.
}
}
+/* Poly version of sext_hwi, with the same interface: sign-extend the
+   low PRECISION bits of each coefficient of A and return the result
+   with HOST_WIDE_INT coefficients.  */
+
+template<unsigned int N, typename C>
+inline poly_int<N, HOST_WIDE_INT>
+sext_hwi (const poly_int<N, C> &a, unsigned int precision)
+{
+  poly_int_pod<N, HOST_WIDE_INT> r;
+  for (unsigned int i = 0; i < N; i++)
+    r.coeffs[i] = sext_hwi (a.coeffs[i], precision);
+  return r;
+}
+
+
/* Return true if a0 + a1 * x might equal b0 + b1 * x for some nonnegative
integer x. */
break;
off += vro->off;
}
- if (i == operands.length () - 1)
+ if (i == operands.length () - 1
+ /* Make sure the offset we accumulated in a 64-bit int
+ fits the address computation carried out in target
+ offset precision. */
+ && (off.coeffs[0]
+ == sext_hwi (off.coeffs[0], TYPE_PRECISION (sizetype))))
{
gcc_assert (operands[i-1].opcode == MEM_REF);
tree ops[2];
break;
off += vro->off;
}
- if (i == operands.length () - 1)
+ if (i == operands.length () - 1
+ /* Make sure the offset we accumulated in a 64-bit int
+ fits the address computation carried out in target
+ offset precision. */
+ && (off.coeffs[0]
+ == sext_hwi (off.coeffs[0], TYPE_PRECISION (sizetype))))
{
gcc_assert (operands[i-1].opcode == MEM_REF);
tree ops[2];