git.ipfire.org Git - thirdparty/gcc.git/commitdiff
Remove STMT_VINFO_MEMORY_ACCESS_TYPE
author: Richard Biener <rguenther@suse.de>
Thu, 24 Jul 2025 13:20:02 +0000 (15:20 +0200)
committer: Richard Biener <rguenther@suse.de>
Thu, 31 Jul 2025 10:14:36 +0000 (12:14 +0200)
This should be present only on SLP nodes now.  The RISC-V changes
are mechanical along the line of the SLP_TREE_TYPE changes.

* tree-vectorizer.h (_stmt_vec_info::memory_access_type): Remove.
(STMT_VINFO_MEMORY_ACCESS_TYPE): Likewise.
(vect_mem_access_type): Likewise.
* tree-vect-stmts.cc (vectorizable_store): Do not set
STMT_VINFO_MEMORY_ACCESS_TYPE.  Fix SLP_TREE_MEMORY_ACCESS_TYPE
usage.
* tree-vect-loop.cc (update_epilogue_loop_vinfo): Remove
checking of memory access type.
* config/riscv/riscv-vector-costs.cc (costs::compute_local_live_ranges):
Use SLP_TREE_MEMORY_ACCESS_TYPE.
(costs::need_additional_vector_vars_p): Likewise.
(segment_loadstore_group_size): Get SLP node as argument,
use SLP_TREE_MEMORY_ACCESS_TYPE.
(costs::adjust_stmt_cost): Pass down SLP node.
* config/aarch64/aarch64.cc (aarch64_ld234_st234_vectors): Use
SLP_TREE_MEMORY_ACCESS_TYPE instead of vect_mem_access_type.
(aarch64_detect_vector_stmt_subtype): Likewise.
(aarch64_vector_costs::count_ops): Likewise.
(aarch64_vector_costs::add_stmt_cost): Likewise.

gcc/config/aarch64/aarch64.cc
gcc/config/riscv/riscv-vector-costs.cc
gcc/tree-vect-loop.cc
gcc/tree-vect-stmts.cc
gcc/tree-vectorizer.h

index 5502d0b48072089cab772726c1d3edb2af22e861..a761addc06cba5fec6ac24675a42d561cf8b1b2f 100644 (file)
@@ -17193,8 +17193,8 @@ aarch64_ld234_st234_vectors (vect_cost_for_stmt kind, stmt_vec_info stmt_info,
       && STMT_VINFO_DATA_REF (stmt_info))
     {
       stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
-      if (stmt_info
-         && vect_mem_access_type (stmt_info, node) == VMAT_LOAD_STORE_LANES)
+      if (node
+         && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_LOAD_STORE_LANES)
        return DR_GROUP_SIZE (stmt_info);
     }
   return 0;
@@ -17466,7 +17466,7 @@ aarch64_detect_vector_stmt_subtype (vec_info *vinfo, vect_cost_for_stmt kind,
      cost by the number of elements in the vector.  */
   if (kind == scalar_load
       && sve_costs
-      && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+      && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
     {
       unsigned int nunits = vect_nunits_for_cost (vectype);
       /* Test for VNx2 modes, which have 64-bit containers.  */
@@ -17479,7 +17479,7 @@ aarch64_detect_vector_stmt_subtype (vec_info *vinfo, vect_cost_for_stmt kind,
      in a scatter operation.  */
   if (kind == scalar_store
       && sve_costs
-      && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+      && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
     return sve_costs->scatter_store_elt_cost;
 
   /* Detect cases in which vec_to_scalar represents an in-loop reduction.  */
@@ -17735,7 +17735,7 @@ aarch64_vector_costs::count_ops (unsigned int count, vect_cost_for_stmt kind,
   if (stmt_info
       && kind == vec_to_scalar
       && (m_vec_flags & VEC_ADVSIMD)
-      && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+      && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
     {
       auto dr = STMT_VINFO_DATA_REF (stmt_info);
       tree dr_ref = DR_REF (dr);
@@ -17850,7 +17850,7 @@ aarch64_vector_costs::count_ops (unsigned int count, vect_cost_for_stmt kind,
   if (stmt_info
       && sve_issue
       && (kind == scalar_load || kind == scalar_store)
-      && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+      && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
     {
       unsigned int pairs = CEIL (count, 2);
       ops->pred_ops += sve_issue->gather_scatter_pair_pred_ops * pairs;
@@ -18005,9 +18005,10 @@ aarch64_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
 
       /* Check if we've seen an SVE gather/scatter operation and which size.  */
       if (kind == scalar_load
+         && !m_costing_for_scalar
          && vectype
          && aarch64_sve_mode_p (TYPE_MODE (vectype))
-         && vect_mem_access_type (stmt_info, node) == VMAT_GATHER_SCATTER)
+         && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
        {
          const sve_vec_cost *sve_costs = aarch64_tune_params.vec_costs->sve;
          if (sve_costs)
index 1c6bc25c2ad2964362ff84c446e4643b56d5d55a..44ef44a1435336f2e5fa8a47daa7f4f509f92ace 100644 (file)
@@ -400,7 +400,7 @@ costs::compute_local_live_ranges (
                  pair &live_range
                    = live_ranges->get_or_insert (lhs, &existed_p);
                  gcc_assert (!existed_p);
-                 if (STMT_VINFO_MEMORY_ACCESS_TYPE (program_point.stmt_info)
+                 if (SLP_TREE_MEMORY_ACCESS_TYPE (*node)
                      == VMAT_LOAD_STORE_LANES)
                    point = get_first_lane_point (program_points,
                                                  program_point.stmt_info);
@@ -418,8 +418,7 @@ costs::compute_local_live_ranges (
                      bool existed_p = false;
                      pair &live_range
                        = live_ranges->get_or_insert (var, &existed_p);
-                     if (STMT_VINFO_MEMORY_ACCESS_TYPE (
-                           program_point.stmt_info)
+                     if (SLP_TREE_MEMORY_ACCESS_TYPE (*node)
                          == VMAT_LOAD_STORE_LANES)
                        point = get_last_lane_point (program_points,
                                                     program_point.stmt_info);
@@ -608,7 +607,7 @@ costs::need_additional_vector_vars_p (stmt_vec_info stmt_info,
   if (type == load_vec_info_type || type == store_vec_info_type)
     {
       if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)
-         && STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) == VMAT_GATHER_SCATTER)
+         && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_GATHER_SCATTER)
        return true;
 
       machine_mode mode = TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info));
@@ -1086,7 +1085,7 @@ costs::better_main_loop_than_p (const vector_costs *uncast_other) const
    load/store.  */
 static int
 segment_loadstore_group_size (enum vect_cost_for_stmt kind,
-                             stmt_vec_info stmt_info)
+                             stmt_vec_info stmt_info, slp_tree node)
 {
   if (stmt_info
       && (kind == vector_load || kind == vector_store)
@@ -1094,7 +1093,7 @@ segment_loadstore_group_size (enum vect_cost_for_stmt kind,
     {
       stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
       if (stmt_info
-         && STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) == VMAT_LOAD_STORE_LANES)
+         && SLP_TREE_MEMORY_ACCESS_TYPE (node) == VMAT_LOAD_STORE_LANES)
        return DR_GROUP_SIZE (stmt_info);
     }
   return 0;
@@ -1108,7 +1107,7 @@ segment_loadstore_group_size (enum vect_cost_for_stmt kind,
 unsigned
 costs::adjust_stmt_cost (enum vect_cost_for_stmt kind, loop_vec_info loop,
                         stmt_vec_info stmt_info,
-                        slp_tree, tree vectype, int stmt_cost)
+                        slp_tree node, tree vectype, int stmt_cost)
 {
   const cpu_vector_cost *costs = get_vector_costs ();
   switch (kind)
@@ -1131,7 +1130,8 @@ costs::adjust_stmt_cost (enum vect_cost_for_stmt kind, loop_vec_info loop,
                 each vector in the group.  Here we additionally add permute
                 costs for each.  */
              /* TODO: Indexed and ordered/unordered cost.  */
-             int group_size = segment_loadstore_group_size (kind, stmt_info);
+             int group_size = segment_loadstore_group_size (kind, stmt_info,
+                                                            node);
              if (group_size > 1)
                {
                  switch (group_size)
index 9b4d200bb296957553c8528dfa01e47ea367a162..460de575fcc00faa489a7178df7f51e2368e3301 100644 (file)
@@ -11292,11 +11292,9 @@ update_epilogue_loop_vinfo (class loop *epilogue, tree advance)
         updated offset we set using ADVANCE.  Instead we have to make sure the
         reference in the data references point to the corresponding copy of
         the original in the epilogue.  Make sure to update both
-        gather/scatters recognized by dataref analysis and also other
-        refs that get_load_store_type classified as VMAT_GATHER_SCATTER.  */
+        gather/scatters recognized by dataref analysis.  */
       auto vstmt_vinfo = vect_stmt_to_vectorize (stmt_vinfo);
-      if (STMT_VINFO_MEMORY_ACCESS_TYPE (vstmt_vinfo) == VMAT_GATHER_SCATTER
-         || STMT_VINFO_STRIDED_P (vstmt_vinfo)
+      if (STMT_VINFO_STRIDED_P (vstmt_vinfo)
          || STMT_VINFO_GATHER_SCATTER_P (vstmt_vinfo))
        {
          /* ???  As we copy epilogues from the main loop incremental
index 47a6b6c558934af7e66be147dcd5fa5359e67b96..7fe9996b48ce5ab94eb3bb51a56ff0c13b8214a1 100644 (file)
@@ -7954,7 +7954,6 @@ vectorizable_store (vec_info *vinfo,
   bool costing_p = cost_vec;
   if (costing_p) /* transformation not required.  */
     {
-      STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
       SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type;
 
       if (loop_vinfo
@@ -7985,7 +7984,7 @@ vectorizable_store (vec_info *vinfo,
 
       SLP_TREE_TYPE (slp_node) = store_vec_info_type;
     }
-  gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (stmt_info));
+  gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (slp_node));
 
   /* Transform.  */
 
index 222a723485cb84572d5a1c31680580a524c1cf8a..095db66b94777f88699766878f564815ed865b86 100644 (file)
@@ -1438,10 +1438,6 @@ public:
   /* For both loads and stores.  */
   unsigned simd_lane_access_p : 3;
 
-  /* Classifies how the load or store is going to be implemented
-     for loop vectorization.  */
-  vect_memory_access_type memory_access_type;
-
   /* For INTEGER_INDUC_COND_REDUCTION, the initial value to be used.  */
   tree induc_cond_initial_val;
 
@@ -1584,7 +1580,6 @@ struct gather_scatter_info {
 #define STMT_VINFO_DATA_REF(S)             ((S)->dr_aux.dr + 0)
 #define STMT_VINFO_GATHER_SCATTER_P(S)    (S)->gather_scatter_p
 #define STMT_VINFO_STRIDED_P(S)                   (S)->strided_p
-#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
 #define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
 #define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL(S) (S)->induc_cond_initial_val
 #define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT(S) (S)->reduc_epilogue_adjustment
@@ -2833,18 +2828,6 @@ vect_is_reduction (stmt_vec_info stmt_info)
   return STMT_VINFO_REDUC_IDX (stmt_info) >= 0;
 }
 
-/* Returns the memory acccess type being used to vectorize the statement.  If
-   SLP this is read from NODE, otherwise it's read from the STMT_VINFO.  */
-
-inline vect_memory_access_type
-vect_mem_access_type (stmt_vec_info stmt_info, slp_tree node)
-{
-  if (node)
-    return SLP_TREE_MEMORY_ACCESS_TYPE (node);
-  else
-    return STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info);
-}
-
 /* If STMT_INFO describes a reduction, return the vect_reduction_type
    of the reduction it describes, otherwise return -1.  */
 inline int