--- /dev/null
+/* PR tree-optimization/113120 */
+/* { dg-do compile { target bitint } } */
+/* { dg-require-stack-check "generic" } */
+/* { dg-options "-std=c23 -O -fno-tree-fre --param=large-stack-frame=1024 -fstack-check=generic" } */
+
+#if __BITINT_MAXWIDTH__ >= 513
+typedef _BitInt(513) B;
+#else
+typedef int B;
+#endif
+
+static inline __attribute__((__always_inline__)) void
+bar (B x)
+{
+  B y = x;
+  if (y)
+    __builtin_abort ();
+}
+
+void
+foo (void)
+{
+  bar (0);
+}
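
(Not part of the patch, an interpretation of the reproducer: __always_inline__ forces bar into foo, and -fno-tree-fre keeps the y = x copy alive until SRA, which then has to build a scalar replacement for the 513-bit y; the stack-checking options are carried over from the original report.  The __BITINT_MAXWIDTH__ guard falls back to plain int so the test still compiles on targets whose _BitInt support is narrower than 513 bits.)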
--- a/gcc/tree-sra.cc
+++ b/gcc/tree-sra.cc
          For integral types this means the precision has to match.
	  Avoid assumptions based on the integral type kind, too.  */
       if (INTEGRAL_TYPE_P (root->type)
-	  && (TREE_CODE (root->type) != INTEGER_TYPE
+	  && ((TREE_CODE (root->type) != INTEGER_TYPE
+	       && TREE_CODE (root->type) != BITINT_TYPE)
	      || TYPE_PRECISION (root->type) != root->size)
	  /* But leave bitfield accesses alone.  */
	  && (TREE_CODE (root->expr) != COMPONENT_REF
	      || !DECL_BIT_FIELD (TREE_OPERAND (root->expr, 1))))
	{
	  tree rt = root->type;
	  gcc_assert ((root->offset % BITS_PER_UNIT) == 0
		      && (root->size % BITS_PER_UNIT) == 0);
-	  root->type = build_nonstandard_integer_type (root->size,
-						       TYPE_UNSIGNED (rt));
+	  if (TREE_CODE (root->type) == BITINT_TYPE)
+	    root->type = build_bitint_type (root->size, TYPE_UNSIGNED (rt));
+	  else
+	    root->type = build_nonstandard_integer_type (root->size,
+							 TYPE_UNSIGNED (rt));
	  root->expr = build_ref_for_offset (UNKNOWN_LOCATION, root->base,
					     root->offset, root->reverse,
					     root->type, NULL, false);
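
(Not part of the patch: the tree-sra.cc hunk is the fix itself.  For an access of BITINT_TYPE, SRA now leaves the type alone when its precision matches the access size, and otherwise builds the replacement type with build_bitint_type; previously it always asked build_nonstandard_integer_type for an INTEGER_TYPE of the access size, which for widths beyond the largest integer mode presumably cannot be laid out and is what ICEd in the PR.  Below is a hedged C sketch of another shape of code that plausibly reaches the changed path; struct S, baz, and the claim that SRA scalarizes s.b are assumptions for illustration, not taken from the patch.)

/* Illustrative sketch only; compile with the dg-options above.  */
#if __BITINT_MAXWIDTH__ >= 513
struct S { _BitInt(513) b; };	/* member wider than any integer mode */

int
baz (void)
{
  struct S s = { 0 };	/* aggregate local, a plausible SRA candidate */
  return s.b != 0;	/* the s.b access may get a scalar replacement */
}
#endif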