git.ipfire.org Git - thirdparty/gcc.git/commitdiff
sched: Make model scheduler more robust against stale live-in sets
author: Richard Sandiford <rdsandiford@googlemail.com>
Fri, 23 Jan 2026 14:31:48 +0000 (14:31 +0000)
committer: Richard Sandiford <rdsandiford@googlemail.com>
Fri, 23 Jan 2026 14:31:48 +0000 (14:31 +0000)
As the comment in the patch says, previous inter-block insn movement
can mean that the current block's live-in set becomes stale.  This is
somewhat undesirable, since it'll make estimates less conservative
than intended.  However, a fully accurate update would be too expensive
for something that is only supposed to be a heuristic.

gcc/
PR rtl-optimization/80357
PR rtl-optimization/94014
PR rtl-optimization/123144
* haifa-sched.cc (model_recompute): Ignore dying uses of registers
that are not assumed to be live.

gcc/testsuite/
PR rtl-optimization/123144
* gcc.dg/torture/pr123144.c: New file.

gcc/haifa-sched.cc
gcc/testsuite/gcc.dg/torture/pr123144.c [new file with mode: 0644]

index 4daa4d7b18a89502f29c3920baabc226764f4efa..58cda3af83903c345077bec4487c1e6cfe3b18fb 100644 (file)
@@ -2147,7 +2147,13 @@ model_recompute (rtx_insn *insn)
   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
     {
       new_last = model_last_use_except (use);
-      if (new_last < point && bitmap_set_bit (tmp_bitmap, use->regno))
+      if (new_last < point
+         && bitmap_set_bit (tmp_bitmap, use->regno)
+         /* df_get_live_in has not necessarily been updated to reflect the
+            effect of inter-block movement performed by earlier schedules.
+            Cope with stale live-in sets by ignoring registers that are not
+            currently assumed to be live.  */
+         && bitmap_bit_p (curr_reg_live, use->regno))
        {
          gcc_assert (num_uses < ARRAY_SIZE (uses));
          uses[num_uses].last_use = new_last;
diff --git a/gcc/testsuite/gcc.dg/torture/pr123144.c b/gcc/testsuite/gcc.dg/torture/pr123144.c
new file mode 100644 (file)
index 0000000..1aef841
--- /dev/null
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-w" } */
+/* { dg-additional-options "-mcpu=power8" { target powerpc64*-*-* } } */
+
+#include <stdint.h>
+#define BS_VEC(type, num) type __attribute__((vector_size(num * sizeof(type))))
+int8_t backsmith_snippet_893(int8_t BS_ARG_1, BS_VEC(uint8_t, 32) BS_ARG_5)
+{
+    BS_VEC(uint16_t, 8) BS_VAR_0;
+    for (;;)
+        for (uint64_t BS_INC_1 = 0; BS_INC_1 < 13; BS_INC_1 += 1)
+        {
+            if (BS_ARG_5[2])
+                for (;;)
+                    __builtin_convertvector((BS_VEC(int8_t, 16)){},
+                                            BS_VEC(uint8_t, 16));
+            uint8_t BS_TEMP_59 = __builtin_convertvector(
+                __builtin_shufflevector(
+                    __builtin_convertvector((BS_VEC(int8_t, 4)){},
+                                            BS_VEC(uint16_t, 4)),
+                    __builtin_convertvector((BS_VEC(int8_t, 4)){ BS_ARG_1 },
+                                            BS_VEC(uint16_t, 4)),
+                    4, 2, 2, 2, 6, 3, 0, 0, 3, 2, 1, 2, 4, 0, 1, 4, 2, 0, 3, 6,
+                    4, 3, 1, 0, 2, 5, 3, 7, 4, 2, 4, 2),
+                BS_VEC(uint8_t, 32))[(BS_INC_1 ? BS_VAR_0[3] : 2) ?: 2];
+            uint8_t BS_TEMP_60 = BS_TEMP_59;
+            for (; BS_TEMP_60;)
+                ;
+            BS_VEC(uint32_t, 8)
+            BS_TEMP_68 = __builtin_convertvector(
+                __builtin_convertvector(
+                    (BS_VEC(uint64_t, 8)){ BS_INC_1, BS_INC_1, BS_INC_1,
+                                           BS_INC_1, BS_INC_1, BS_INC_1,
+                                           BS_INC_1, BS_INC_1 },
+                    BS_VEC(uint16_t, 8)),
+                BS_VEC(uint32_t, 8));
+            BS_VAR_0[BS_INC_1 < 8 ? BS_INC_1 : 0] = BS_TEMP_68[0]
+                * BS_TEMP_68[1] * BS_TEMP_68[2] * BS_TEMP_68[3] * BS_TEMP_68[4]
+                * 6 * BS_TEMP_68[7];
+        }
+}