The following testcase shows that we mishandle bit insertion for
info->bitsize >= 64. The problem is in using unsigned HOST_WIDE_INT
shift + subtraction + build_int_cst to compute mask, the shift invokes
UB at compile time for info->bitsize 64 and larger and e.g. on the testcase
with info->bitsize 70 happens to compute mask of 0x3f rather than
0x3f'ffffffff'ffffffff.
The patch fixes that by using wide_int wi::mask + wide_int_to_tree, so it
handles masks in any precision (up to WIDE_INT_MAX_PRECISION ;) ).
2023-08-30 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/111015
* gimple-ssa-store-merging.c
(imm_store_chain_info::output_merged_store): Use wi::mask and
wide_int_to_tree instead of unsigned HOST_WIDE_INT shift and
build_int_cst to build BIT_AND_EXPR mask.
* gcc.dg/pr111015.c: New test.
(cherry picked from commit 49a3b35c4068091900b657cd36e5cffd41ef0c47)
}
else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
{
- const unsigned HOST_WIDE_INT imask
- = (HOST_WIDE_INT_1U << info->bitsize) - 1;
+ wide_int imask
+ = wi::mask (info->bitsize, false,
+ TYPE_PRECISION (TREE_TYPE (tem)));
tem = gimple_build (&seq, loc,
BIT_AND_EXPR, TREE_TYPE (tem), tem,
- build_int_cst (TREE_TYPE (tem),
- imask));
+ wide_int_to_tree (TREE_TYPE (tem),
+ imask));
}
const HOST_WIDE_INT shift
= (BYTES_BIG_ENDIAN ? end_gap : start_gap);
--- /dev/null
+/* PR tree-optimization/111015 */
+/* { dg-do run { target int128 } } */
+/* { dg-options "-O2" } */
+
+struct S { unsigned a : 4, b : 4; unsigned __int128 c : 70; } d;
+
+__attribute__((noipa)) void
+foo (unsigned __int128 x, unsigned char y, unsigned char z)
+{
+ d.a = y;
+ d.b = z;
+ d.c = x;
+}
+
+int
+main ()
+{
+ foo (-1, 12, 5);
+ if (d.a != 12
+ || d.b != 5
+ || d.c != (-1ULL | (((unsigned __int128) 0x3f) << 64)))
+ __builtin_abort ();
+ foo (0x123456789abcdef0ULL | (((unsigned __int128) 26) << 64), 7, 11);
+ if (d.a != 7
+ || d.b != 11
+ || d.c != (0x123456789abcdef0ULL | (((unsigned __int128) 26) << 64)))
+ __builtin_abort ();
+}