vect: Tighten check for SLP memory groups [PR103517]
author     Richard Sandiford <richard.sandiford@arm.com>
           Wed, 1 Dec 2021 14:36:24 +0000 (14:36 +0000)
committer  Richard Sandiford <richard.sandiford@arm.com>
           Wed, 1 Dec 2021 14:36:24 +0000 (14:36 +0000)
When checking for compatible stmts, vect_build_slp_tree_1 did:

       && !(STMT_VINFO_GROUPED_ACCESS (stmt_info)
            && (first_stmt_code == ARRAY_REF
                || first_stmt_code == BIT_FIELD_REF
                || first_stmt_code == INDIRECT_REF
                || first_stmt_code == COMPONENT_REF
                || first_stmt_code == MEM_REF)))

That is, it allowed any rhs_code as long as the first_stmt_code
looked valid.  This had the effect of allowing IFN_MASK_LOAD
to be paired with an earlier non-call code (but didn't allow
the reverse).
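
Concretely (an illustrative GIMPLE-style sketch, not taken from the
PR, with schematic .MASK_LOAD operands), a group whose first statement
was a plain array reference could absorb a later masked load, because
IFN_MASK_LOAD is a call and so has rhs_code CALL_EXPR:

       _1 = d[_2];                          <- first_stmt_code == ARRAY_REF
       _3 = .MASK_LOAD (&d[_4], 16B, _5);   <- rhs_code == CALL_EXPR, accepted

With the statements in the opposite order, first_stmt_code would be
CALL_EXPR, the ARRAY_REF check would fail, and the group was rejected.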

This patch makes the check symmetrical.
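
One way to read the tightened condition is that both codes must be
component references.  As a sketch only (component_ref_code_p is an
illustrative name, not a helper the patch adds; the patch open-codes
both checks):

       static bool
       component_ref_code_p (tree_code code)
       {
         return (code == ARRAY_REF
                 || code == BIT_FIELD_REF
                 || code == INDIRECT_REF
                 || code == COMPONENT_REF
                 || code == MEM_REF);
       }

which would make the condition:

       && !(STMT_VINFO_GROUPED_ACCESS (stmt_info)
            && component_ref_code_p (first_stmt_code)
            && component_ref_code_p (rhs_code))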

gcc/
PR tree-optimization/103517
* tree-vect-slp.c (vect_build_slp_tree_1): When allowing two
different component references, check the codes of both of them,
rather than just the first.

gcc/testsuite/
PR tree-optimization/103517
* gcc.dg/vect/pr103517.c: New test.

gcc/testsuite/gcc.dg/vect/pr103517.c [new file with mode: 0644]
gcc/tree-vect-slp.c

diff --git a/gcc/testsuite/gcc.dg/vect/pr103517.c b/gcc/testsuite/gcc.dg/vect/pr103517.c
new file mode 100644 (file)
index 0000000..de87fc4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr103517.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=skylake-avx512" { target x86_64-*-* i?86-*-* } } */
+
+int a;
+short b, c;
+extern short d[];
+void e() {
+  for (short f = 1; f < (short)a; f += 2)
+    if (d[f + 1]) {
+      b = d[f];
+      c = d[f + 1];
+    }
+}
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 7bff5118bd00c5ed660c35b02bf70b1bb4b31834..bc22ffeed82e6f2f34ae80398847b7bb685f0f73 100644 (file)
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -1121,7 +1121,12 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
                        || first_stmt_code == BIT_FIELD_REF
                        || first_stmt_code == INDIRECT_REF
                        || first_stmt_code == COMPONENT_REF
-                       || first_stmt_code == MEM_REF)))
+                       || first_stmt_code == MEM_REF)
+                   && (rhs_code == ARRAY_REF
+                       || rhs_code == BIT_FIELD_REF
+                       || rhs_code == INDIRECT_REF
+                       || rhs_code == COMPONENT_REF
+                       || rhs_code == MEM_REF)))
              || first_stmt_load_p != load_p
              || first_stmt_phi_p != phi_p)
            {