sra: Partial fix for BITINT_TYPEs [PR113120]
author    Jakub Jelinek <jakub@redhat.com>
          Wed, 10 Jan 2024 11:46:00 +0000 (12:46 +0100)
committer Jakub Jelinek <jakub@redhat.com>
          Wed, 10 Jan 2024 11:46:00 +0000 (12:46 +0100)
As has been done in other parts of the compiler, using
build_nonstandard_integer_type is not appropriate for arbitrary
precisions, especially if the precision comes from a BITINT_TYPE or
something based on it: build_nonstandard_integer_type relies on some
integral mode being supported that can hold the precision.

The following patch uses build_bitint_type instead for BITINT_TYPE
precisions.
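
For illustration (an editor's sketch, not part of the patch): a
minimal user-level C snippet, assuming a target with
__BITINT_MAXWIDTH__ >= 513 such as x86-64, where the precision
matches no integral machine mode:

  /* bitint-demo.c; compile with: gcc -std=c23 -c bitint-demo.c  */
  #if __BITINT_MAXWIDTH__ >= 513
  /* 513 bits fit in no integral machine mode, so internally GCC
     must represent this type as a BITINT_TYPE rather than as an
     INTEGER_TYPE from build_nonstandard_integer_type.  */
  typedef _BitInt(513) wide;

  wide
  add (wide a, wide b)
  {
    return a + b;
  }
  #endif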

Note, it would be good if we were able to punt on the optimization at
least in cases where building the replacement type would be invalid
(this code doesn't seem to be able to punt, so it would need to be
done somewhere earlier).  E.g. right now BITINT_TYPE can support
precisions up to 65535 (inclusive), but 65536 will not work anymore
(TYPE_PRECISION is a 16-bit field, so nothing wider can be
represented).  I tried replacing 513 with 65532 in the testcase and
it didn't ICE, so maybe it ran into some other SRA limit.
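
As a hedged sketch of that ceiling (assuming the 16-bit
TYPE_PRECISION field and x86-64's __BITINT_MAXWIDTH__ of 65535):

  #if __BITINT_MAXWIDTH__ >= 65535
  typedef _BitInt(65535) max_ok;  /* largest accepted precision */
  #endif
  /* typedef _BitInt(65536) too_wide;
     would be rejected at parse time: it exceeds __BITINT_MAXWIDTH__,
     and TYPE_PRECISION could not represent it anyway.  */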

2024-01-10  Jakub Jelinek  <jakub@redhat.com>

PR tree-optimization/113120
* tree-sra.cc (analyze_access_subtree): For a BITINT_TYPE whose
TYPE_PRECISION equals root->size, don't build anything new.
Otherwise, if root->type is a BITINT_TYPE, use build_bitint_type
rather than build_nonstandard_integer_type.

* gcc.dg/bitint-63.c: New test.


diff --git a/gcc/testsuite/gcc.dg/bitint-63.c b/gcc/testsuite/gcc.dg/bitint-63.c
new file mode 100644
index 0000000..96828f5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitint-63.c
@@ -0,0 +1,24 @@
+/* PR tree-optimization/113120 */
+/* { dg-do compile { target bitint } } */
+/* { dg-require-stack-check "generic" } */
+/* { dg-options "-std=c23 -O -fno-tree-fre --param=large-stack-frame=1024 -fstack-check=generic" } */
+
+#if __BITINT_MAXWIDTH__ >= 513
+typedef _BitInt(513) B;
+#else
+typedef int B;
+#endif
+
+static inline __attribute__((__always_inline__)) void
+bar (B x)
+{
+  B y = x;
+  if (y)
+    __builtin_abort ();
+}
+
+void
+foo (void)
+{
+  bar (0);
+}
diff --git a/gcc/tree-sra.cc b/gcc/tree-sra.cc
index 781ffdb22d744c81d891bd9591fceeb2ec5f7ff8..e786232f6b004c69af811c2dda91d4b92d4ab3ef 100644
--- a/gcc/tree-sra.cc
+++ b/gcc/tree-sra.cc
@@ -2733,7 +2733,8 @@ analyze_access_subtree (struct access *root, struct access *parent,
          For integral types this means the precision has to match.
         Avoid assumptions based on the integral type kind, too.  */
       if (INTEGRAL_TYPE_P (root->type)
-         && (TREE_CODE (root->type) != INTEGER_TYPE
+         && ((TREE_CODE (root->type) != INTEGER_TYPE
+              && TREE_CODE (root->type) != BITINT_TYPE)
              || TYPE_PRECISION (root->type) != root->size)
          /* But leave bitfield accesses alone.  */
          && (TREE_CODE (root->expr) != COMPONENT_REF
@@ -2742,8 +2743,11 @@ analyze_access_subtree (struct access *root, struct access *parent,
          tree rt = root->type;
          gcc_assert ((root->offset % BITS_PER_UNIT) == 0
                      && (root->size % BITS_PER_UNIT) == 0);
-         root->type = build_nonstandard_integer_type (root->size,
-                                                      TYPE_UNSIGNED (rt));
+         if (TREE_CODE (root->type) == BITINT_TYPE)
+           root->type = build_bitint_type (root->size, TYPE_UNSIGNED (rt));
+         else
+           root->type = build_nonstandard_integer_type (root->size,
+                                                        TYPE_UNSIGNED (rt));
          root->expr = build_ref_for_offset (UNKNOWN_LOCATION, root->base,
                                             root->offset, root->reverse,
                                             root->type, NULL, false);
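
For readers skimming the hunks above, a standalone sketch of the
replacement-type decision after this change; the enum and stub
function are purely illustrative (only build_bitint_type and
build_nonstandard_integer_type are real GCC functions, and the
bitfield-access exemption in the real condition is omitted):

  #include <stdio.h>

  enum type_kind { INTEGER_KIND, BITINT_KIND };

  static const char *
  pick_builder (enum type_kind kind, unsigned precision, unsigned size)
  {
    /* When the type is an INTEGER_TYPE or BITINT_TYPE whose precision
       already matches the access size, nothing new is built.  */
    if ((kind == INTEGER_KIND || kind == BITINT_KIND) && precision == size)
      return "keep root->type as-is";
    /* The fix: BITINT_TYPE replacements go through build_bitint_type.  */
    if (kind == BITINT_KIND)
      return "build_bitint_type (root->size, TYPE_UNSIGNED (rt))";
    return "build_nonstandard_integer_type (root->size, TYPE_UNSIGNED (rt))";
  }

  int
  main (void)
  {
    printf ("%s\n", pick_builder (BITINT_KIND, 513, 513));
    printf ("%s\n", pick_builder (BITINT_KIND, 513, 256));
    printf ("%s\n", pick_builder (INTEGER_KIND, 64, 32));
    return 0;
  }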