From: Jakub Jelinek
Date: Fri, 2 Feb 2024 10:25:13 +0000 (+0100)
Subject: tree-ssa-math-opts: Fix is_widening_mult_rhs_p - unbreak bootstrap [PR113705]
X-Git-Tag: basepoints/gcc-15~1397
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a8f335ccb61bf6105192a4197ef2d84900614dc1;p=thirdparty%2Fgcc.git

tree-ssa-math-opts: Fix is_widening_mult_rhs_p - unbreak bootstrap [PR113705]

On Tue, Jan 30, 2024 at 07:33:10AM -0000, Roger Sayle wrote:
> +	  wide_int bits = wide_int::from (tree_nonzero_bits (rhs),
> +					  prec,
> +					  TYPE_SIGN (TREE_TYPE (rhs)));
...
> +	  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
> +	      && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST
> +	      && wi::to_wide (gimple_assign_rhs2 (stmt))
> +		 == wi::mask (hprec, false, prec))

This change broke bootstrap on aarch64-linux.  The problem can be seen
even on the reduced testcase.  The IL on the unreduced testcase before
widening_mul has:
  # val_583 = PHI <...>
...
  pretmp_266 = MEM[(const struct wide_int_storage *)&D.160657].len;
  _264 = pretmp_266 & 65535;
...
  _176 = (sizetype) val_583;
  _439 = (sizetype) _264;
  _284 = _439 * 8;
  _115 = _176 + _284;
where 583/266/264 have unsigned int type and 176/439/284/115 have sizetype.
widening_mul first turns that into:
  # val_583 = PHI <...>
...
  pretmp_266 = MEM[(const struct wide_int_storage *)&D.160657].len;
  _264 = pretmp_266 & 65535;
...
  _176 = (sizetype) val_583;
  _439 = (sizetype) _264;
  _284 = _264 w* 8;
  _115 = _176 + _284;
and then is_widening_mult_rhs_p is called with type sizetype (64-bit),
rhs _264, hprec 32 and prec 64.  Now tree_nonzero_bits (rhs) is 65535,
so bits is the 64-bit wide_int 65535 and stmt is the BIT_AND_EXPR, but
we ICE on the
  wi::to_wide (gimple_assign_rhs2 (stmt)) == wi::mask (hprec, false, prec)
comparison, because wi::to_wide on gimple_assign_rhs2 (stmt) - the
unsigned int constant 65535 - gives a 32-bit wide_int 65535, while
wi::mask (hprec, false, prec) gives a 64-bit wide_int 0xffffffff, and
comparison between wide_ints of different precision is forbidden.

The following patch fixes it the same way bits is computed earlier, by
calling wide_int::from on the wi::to_wide (gimple_assign_rhs2 (stmt))
result, so we compare the 64-bit 65535 with the 64-bit 0xffffffff (a
short illustrative sketch follows the ChangeLog below).

2024-02-02  Jakub Jelinek

        PR middle-end/113705
        * tree-ssa-math-opts.cc (is_widening_mult_rhs_p): Use wide_int::from
        around wi::to_wide in order to compare value in prec precision.

        * g++.dg/opt/pr113705.C: New test.
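For reference, a minimal sketch of the wide-int precision rule involved here.
This is illustration only, not part of the patch; the helper name
equals_low_half_mask_p is made up, and it assumes GCC's internal wide-int.h /
tree.h API (wi::to_wide, wi::mask, wide_int::from), so it only builds inside
the GCC source tree:

  /* Illustration only: compare an INTEGER_CST against the low-half mask
     without mixing wide_int precisions.  */
  static bool
  equals_low_half_mask_p (tree cst, unsigned hprec, unsigned prec, signop sgn)
  {
    /* wi::to_wide (cst) has the precision of TREE_TYPE (cst), e.g. 32 bits
       for the unsigned int constant 65535, while wi::mask (hprec, false,
       prec) is a prec-bit (e.g. 64-bit) wide_int; comparing the two
       directly trips the different-precision assertion.  Extend the
       constant to prec first, as the patch does, and only then compare.  */
    return wide_int::from (wi::to_wide (cst), prec, sgn)
           == wi::mask (hprec, false, prec);
  }

With hprec 32 and prec 64 this compares the 64-bit 65535 against the 64-bit
0xffffffff and simply yields false instead of ICEing.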
---

diff --git a/gcc/testsuite/g++.dg/opt/pr113705.C b/gcc/testsuite/g++.dg/opt/pr113705.C
new file mode 100644
index 000000000000..39fb047077ef
--- /dev/null
+++ b/gcc/testsuite/g++.dg/opt/pr113705.C
@@ -0,0 +1,68 @@
+// PR middle-end/113705
+// { dg-do compile { target c++17 } }
+// { dg-options "-O2 -w" }
+
+void foo ();
+template <typename T> struct A : T { long bar () const; };
+int a;
+
+template <typename T>
+long
+A<T>::bar () const
+{
+  return this->baz ()[a];
+}
+
+struct B {
+  struct { long b[1]; long c; } u;
+  unsigned d;
+  int e;
+  B (const B &);
+  ~B ();
+  const long *baz () const;
+  unsigned qux () const;
+};
+
+B::B (const B &)
+{
+  if (__builtin_expect (e, 0))
+    u.c = 0;
+}
+
+B::~B ()
+{
+  if (__builtin_expect (e, 0))
+    foo ();
+}
+
+const long *
+B::baz () const
+{
+  return u.b;
+}
+
+unsigned
+B::qux () const
+{
+  return d;
+}
+
+struct C { A<B> corge () const; A<B> *f; };
+
+A<B>
+C::corge () const
+{
+  return f[1];
+}
+
+void
+test (C r, long *h, unsigned short *d)
+{
+  for (int j = 0; j < 8; ++j)
+    {
+      A<B> g = r.corge ();
+      *d = g.qux ();
+      for (unsigned i = 0; i < *d; ++i)
+        *h++ = g.bar ();
+    }
+}
diff --git a/gcc/tree-ssa-math-opts.cc b/gcc/tree-ssa-math-opts.cc
index cffe7573f794..aa9f7b579994 100644
--- a/gcc/tree-ssa-math-opts.cc
+++ b/gcc/tree-ssa-math-opts.cc
@@ -2572,7 +2572,8 @@ is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
       if (is_gimple_assign (stmt)
           && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
           && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST
-          && wi::to_wide (gimple_assign_rhs2 (stmt))
+          && wide_int::from (wi::to_wide (gimple_assign_rhs2 (stmt)),
+                             prec, TYPE_SIGN (TREE_TYPE (rhs)))
              == wi::mask (hprec, false, prec))
         *new_rhs_out = gimple_assign_rhs1 (stmt);
       else