git.ipfire.org Git - thirdparty/gcc.git/commitdiff
tree-optimization/115652 - adjust insertion gsi for SLP
author: Richard Biener <rguenther@suse.de>
Wed, 26 Jun 2024 07:25:27 +0000 (09:25 +0200)
committer: Richard Biener <rguenth@gcc.gnu.org>
Wed, 26 Jun 2024 12:05:38 +0000 (14:05 +0200)
The following adjusts how SLP computes the insertion location.  In
particular it advanced the insert iterator of the found last_stmt.
The vectorizer will later insert stmts _before_ it.  But we also
have the constraint that possibly masked ops may not be scheduled
outside of the loop and as we do not model the loop mask in the
SLP graph we have to adjust for that.  The following moves this
to after the advance since it isn't compatible with that as the
current GIMPLE_COND exception shows.  The PR is about in-order
reduction vectorization which also isn't happy when that's the
very first stmt.

PR tree-optimization/115652
* tree-vect-slp.cc (vect_schedule_slp_node): Advance the
iterator based on last_stmt only for vector defs.

gcc/tree-vect-slp.cc

index b47b7e8c979c7f485e8fbff3b4113ae0871429fe..1f5b3fccf41aa1efde0f91e1bb31dcfb39fc4038 100644 (file)
@@ -9629,16 +9629,6 @@ vect_schedule_slp_node (vec_info *vinfo,
       /* Emit other stmts after the children vectorized defs which is
         earliest possible.  */
       gimple *last_stmt = NULL;
-      if (auto loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
-       if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
-           || LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo))
-         {
-           /* But avoid scheduling internal defs outside of the loop when
-              we might have only implicitly tracked loop mask/len defs.  */
-           gimple_stmt_iterator si
-             = gsi_after_labels (LOOP_VINFO_LOOP (loop_vinfo)->header);
-           last_stmt = *si;
-         }
       bool seen_vector_def = false;
       FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
        if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
@@ -9747,12 +9737,19 @@ vect_schedule_slp_node (vec_info *vinfo,
       else
        {
          si = gsi_for_stmt (last_stmt);
-         /* When we're getting gsi_after_labels from the starting
-            condition of a fully masked/len loop avoid insertion
-            after a GIMPLE_COND that can appear as the only header
-            stmt with early break vectorization.  */
-         if (gimple_code (last_stmt) != GIMPLE_COND)
-           gsi_next (&si);
+         gsi_next (&si);
+
+         /* Avoid scheduling internal defs outside of the loop when
+            we might have only implicitly tracked loop mask/len defs.  */
+         if (auto loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
+           if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
+               || LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo))
+             {
+               gimple_stmt_iterator si2
+                 = gsi_after_labels (LOOP_VINFO_LOOP (loop_vinfo)->header);
+               if (vect_stmt_dominates_stmt_p (last_stmt, *si2))
+                 si = si2;
+             }
        }
     }