]> git.ipfire.org Git - thirdparty/gcc.git/commitdiff
VECT: Support SLP MASK_LEN_GATHER_LOAD with conditional mask
authorJuzhe-Zhong <juzhe.zhong@rivai.ai>
Thu, 26 Oct 2023 11:50:19 +0000 (19:50 +0800)
committerRobin Dapp <rdapp@ventanamicro.com>
Tue, 31 Oct 2023 20:07:46 +0000 (21:07 +0100)
This patch leverages the current MASK_GATHER_LOAD to support SLP MASK_LEN_GATHER_LOAD with a conditional mask.

Unconditional MASK_LEN_GATHER_LOAD (base, offset, scale, zero, -1) SLP is not included in this patch
since it seems that we can't support it in the middle-end:

FAIL: gcc.dg/tree-ssa/pr44306.c (internal compiler error: in vectorizable_load, at tree-vect-stmts.cc:9885)

Maybe we should support GATHER_LOAD explicitly in the RISC-V backend to work around this issue.

I am going to support GATHER_LOAD explicitly as a workaround in the RISC-V backend.

This patch also adds conditional gather load test since there is no conditional gather load test.

Ok for trunk ?

gcc/ChangeLog:

* tree-vect-slp.cc (vect_get_operand_map): Add MASK_LEN_GATHER_LOAD.
(vect_build_slp_tree_1): Ditto.
(vect_build_slp_tree_2): Ditto.

gcc/testsuite/ChangeLog:

* gcc.dg/vect/vect-gather-6.c: New test.

gcc/testsuite/gcc.dg/vect/vect-gather-6.c [new file with mode: 0644]
gcc/tree-vect-slp.cc

diff --git a/gcc/testsuite/gcc.dg/vect/vect-gather-6.c b/gcc/testsuite/gcc.dg/vect/vect-gather-6.c
new file mode 100644 (file)
index 0000000..ff55f32
--- /dev/null
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+
+void
+f (int *restrict y, int *restrict x, int *restrict indices, int *restrict cond, int n)
+{
+  for (int i = 0; i < n; ++i)
+    {
+      if (cond[i * 2])
+       y[i * 2] = x[indices[i * 2]] + 1;
+      if (cond[i * 2 + 1])
+       y[i * 2 + 1] = x[indices[i * 2 + 1]] + 2;
+    }
+}
+
+/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" vect { target vect_gather_load_ifn } } } */
index 5eb310eceaf828dd583b3c21b36f18ccf5f375ec..b78133f204f5fe7793388d40df90ba4270aac48c 100644 (file)
@@ -564,6 +564,7 @@ vect_get_operand_map (const gimple *stmt, bool gather_scatter_p = false,
            return arg1_map;
 
          case IFN_MASK_GATHER_LOAD:
+         case IFN_MASK_LEN_GATHER_LOAD:
            return arg1_arg4_map;
 
          case IFN_MASK_STORE:
@@ -1158,7 +1159,8 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 
          if (cfn == CFN_MASK_LOAD
              || cfn == CFN_GATHER_LOAD
-             || cfn == CFN_MASK_GATHER_LOAD)
+             || cfn == CFN_MASK_GATHER_LOAD
+             || cfn == CFN_MASK_LEN_GATHER_LOAD)
            ldst_p = true;
          else if (cfn == CFN_MASK_STORE)
            {
@@ -1425,6 +1427,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
          if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info))
              && rhs_code != CFN_GATHER_LOAD
              && rhs_code != CFN_MASK_GATHER_LOAD
+             && rhs_code != CFN_MASK_LEN_GATHER_LOAD
              && !STMT_VINFO_GATHER_SCATTER_P (stmt_info)
              /* Not grouped loads are handled as externals for BB
                 vectorization.  For loop vectorization we can handle
@@ -1927,7 +1930,8 @@ vect_build_slp_tree_2 (vec_info *vinfo, slp_tree node,
       if (gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt))
        gcc_assert (gimple_call_internal_p (stmt, IFN_MASK_LOAD)
                    || gimple_call_internal_p (stmt, IFN_GATHER_LOAD)
-                   || gimple_call_internal_p (stmt, IFN_MASK_GATHER_LOAD));
+                   || gimple_call_internal_p (stmt, IFN_MASK_GATHER_LOAD)
+                   || gimple_call_internal_p (stmt, IFN_MASK_LEN_GATHER_LOAD));
       else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
        gcc_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
       else