return stmt_cost;
}
+/* Return true if STMT_INFO is part of a reduction that has the form:
+
+ r = r op ...;
+ r = r op ...;
+
+ with the single accumulator being read and written multiple times,
+ i.e. the vectorizer has forced a "single defuse cycle" so that every
+ vector copy feeds the same loop-carried dependency chain. */
+static bool
+aarch64_force_single_cycle (vec_info *vinfo, stmt_vec_info stmt_info)
+{
+ /* NOTE(review): non-live stmts are assumed not to be reduction roots,
+ so bail out before querying the reduction info — confirm this matches
+ info_for_reduction's preconditions. */
+ if (!STMT_VINFO_LIVE_P (stmt_info))
+ return false;
+
+ auto reduc_info = info_for_reduction (vinfo, stmt_info);
+ return STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info);
+}
+
/* COUNT, KIND and STMT_INFO are the same as for vector_costs::add_stmt_cost
and they describe an operation in the body of a vector loop. Record issue
information relating to the vector operation in OPS. */
{
unsigned int base
= aarch64_in_loop_reduction_latency (m_vinfo, stmt_info, m_vec_flags);
-
- /* ??? Ideally we'd do COUNT reductions in parallel, but unfortunately
- that's not yet the case. */
- ops->reduction_latency = MAX (ops->reduction_latency, base * count);
+ if (aarch64_force_single_cycle (m_vinfo, stmt_info))
+ /* ??? Ideally we'd use a tree to reduce the copies down to 1 vector,
+ and then accumulate that, but at the moment the loop-carried
+ dependency includes all copies. */
+ ops->reduction_latency = MAX (ops->reduction_latency, base * count);
+ else
+ ops->reduction_latency = MAX (ops->reduction_latency, base);
}
/* Assume that multiply-adds will become a single operation. */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-Ofast -mcpu=neoverse-n2 -fdump-tree-vect-details -fno-tree-slp-vectorize" } */
+/* { dg-final { scan-tree-dump-not "reduction latency = 8" "vect" } } */
+
+/* Do not increase the vector body cost due to the incorrect reduction latency.
+ Original vector body cost = 51
+ Scalar issue estimate:
+ ...
+ reduction latency = 2
+ estimated min cycles per iteration = 2.000000
+ estimated cycles per vector iteration (for VF 2) = 4.000000
+ Vector issue estimate:
+ ...
+ reduction latency = 8 <-- Too large
+ estimated min cycles per iteration = 8.000000
+ Increasing body cost to 102 because scalar code would issue more quickly
+ ...
+ missed: cost model: the vector iteration cost = 102 divided by the scalar iteration cost = 44 is greater or equal to the vectorization factor = 2.
+ missed: not vectorized: vectorization not profitable. */
+
+/* Input record: four narrow fields that get widened to double in FOO. */
+typedef struct
+{
+ unsigned short m1, m2, m3, m4;
+} the_struct_t;
+/* Accumulator record: FOO only ever writes m1..m4; m5 is untouched here. */
+typedef struct
+{
+ double m1, m2, m3, m4, m5;
+} the_struct2_t;
+
+/* Defined elsewhere; consumes the accumulated result. */
+double
+bar (the_struct2_t *);
+
+/* Four independent multiply-accumulate reductions, one per struct field.
+ NOTE(review): RESULT is read before being initialized (UB at the C
+ level) — kept as-is, since this testcase only inspects the
+ vectorizer's cost-model dump, not the program's runtime behavior. */
+double
+foo (double *k, unsigned int n, the_struct_t *the_struct)
+{
+ unsigned int u;
+ the_struct2_t result;
+ for (u = 0; u < n; u++, k--)
+ {
+ result.m1 += (*k) * the_struct[u].m1;
+ result.m2 += (*k) * the_struct[u].m2;
+ result.m3 += (*k) * the_struct[u].m3;
+ result.m4 += (*k) * the_struct[u].m4;
+ }
+ return bar (&result);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-Ofast -mcpu=neoverse-n2 -fdump-tree-vect-details -fno-tree-slp-vectorize" } */
+/* { dg-final { scan-tree-dump "reduction latency = 8" "vect" } } */
+
+/* The reduction latency should be multiplied by the count for
+ single_defuse_cycle. */
+
+/* Single accumulator RES carries the whole loop dependency, so this is
+ the single-defuse-cycle case: the dg-final above expects the dump to
+ still report "reduction latency = 8" (latency scaled by copy count). */
+long
+f (long res, short *ptr1, short *ptr2, int n)
+{
+ for (int i = 0; i < n; ++i)
+ res += (long) ptr1[i] << ptr2[i];
+ return res;
+}