--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O3 -funroll-loops -fno-tree-vectorize -fdump-tree-cunroll-details" } */
+
+typedef struct {
+ double real;
+ double imag;
+} complex;
+
+typedef struct { complex e[3][3]; } su3_matrix;
+
+void mult_su3_nn( su3_matrix *a, su3_matrix *b, su3_matrix *c )
+{
+ int i,j;
+ double t,ar,ai,br,bi,cr,ci;
+ for(i=0;i<3;i++)
+ for(j=0;j<3;j++){
+
+ ar=a->e[i][0].real; ai=a->e[i][0].imag;
+ br=b->e[0][j].real; bi=b->e[0][j].imag;
+ cr=ar*br; t=ai*bi; cr -= t;
+ ci=ar*bi; t=ai*br; ci += t;
+
+ ar=a->e[i][1].real; ai=a->e[i][1].imag;
+ br=b->e[1][j].real; bi=b->e[1][j].imag;
+ t=ar*br; cr += t; t=ai*bi; cr -= t;
+ t=ar*bi; ci += t; t=ai*br; ci += t;
+
+ ar=a->e[i][2].real; ai=a->e[i][2].imag;
+ br=b->e[2][j].real; bi=b->e[2][j].imag;
+ t=ar*br; cr += t; t=ai*bi; cr -= t;
+ t=ar*bi; ci += t; t=ai*br; ci += t;
+
+ c->e[i][j].real=cr;
+ c->e[i][j].imag=ci;
+ }
+}
+/* { dg-final { scan-tree-dump-times "optimized: loop with 2 iterations completely unrolled" 1 "cunroll" { target i?86-*-* x86_64-*-* } } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O3 -funroll-loops -fdump-tree-vect-details" } */
+/* { dg-require-effective-target vect_int } */
+/* { dg-require-effective-target vect_shift } */
+/* { dg-additional-options "-mavx2" { target x86_64-*-* i?86-*-* } } */
+/* { dg-additional-options "--param max-completely-peeled-insns=200" { target powerpc64*-*-* } } */
+
+typedef unsigned short ggml_fp16_t;
+static float table_f32_f16[1 << 16];
+
+inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
+ unsigned short s;
+ __builtin_memcpy(&s, &f, sizeof(unsigned short));
+ return table_f32_f16[s];
+}
+
+typedef struct {
+ ggml_fp16_t d;
+ ggml_fp16_t m;
+ unsigned char qh[4];
+ unsigned char qs[32 / 2];
+} block_q5_1;
+
+typedef struct {
+ float d;
+ float s;
+ char qs[32];
+} block_q8_1;
+
+void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = 32;
+ const int nb = n / qk;
+
+ const block_q5_1 * restrict x = vx;
+ const block_q8_1 * restrict y = vy;
+
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ unsigned qh;
+ __builtin_memcpy(&qh, x[i].qh, sizeof(qh));
+
+ int sumi = 0;
+
+ if (qh) {
+ for (int j = 0; j < qk/2; ++j) {
+ const unsigned char xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const unsigned char xh_1 = ((qh >> (j + 12)) ) & 0x10;
+
+ const int x0 = (x[i].qs[j] & 0xF) | xh_0;
+ const int x1 = (x[i].qs[j] >> 4) | xh_1;
+
+ sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
+ }
+ }
+ else {
+ for (int j = 0; j < qk/2; ++j) {
+ const int x0 = (x[i].qs[j] & 0xF);
+ const int x1 = (x[i].qs[j] >> 4);
+
+ sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
+ }
+ }
+
+ sumf += (ggml_lookup_fp16_to_fp32(x[i].d)*y[i].d)*sumi + ggml_lookup_fp16_to_fp32(x[i].m)*y[i].s;
+ }
+
+ *s = sumf;
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
1) It could increase register pressure.
2) A big loop, once completely unrolled, may not be vectorized
by the BB vectorizer. */
- else if ((cunrolli && !loop->inner
- ? unr_insns : unr_insns - est_eliminated)
+ else if ((cunrolli ? unr_insns : unr_insns - est_eliminated)
> (unsigned) param_max_completely_peeled_insns)
{
if (dump_file && (dump_flags & TDF_DETAILS))
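(For intuition, a minimal standalone sketch of the guard above, with
made-up numbers: the 270/90 split is hypothetical, and 200 is the usual
default of --param max-completely-peeled-insns, which some targets
override, as the powerpc line in the second test suggests.)

    #include <stdbool.h>

    /* Mirrors the guard: at cunrolli the full unrolled-size estimate
       is compared against the limit, while later cunroll first
       subtracts the insns expected to be folded away (the old "keep
       2/3 of the estimate" heuristic).  */
    static bool
    rejects_unroll (bool cunrolli, unsigned unr_insns,
                    unsigned est_eliminated, unsigned max_peeled_insns)
    {
      return (cunrolli ? unr_insns : unr_insns - est_eliminated)
             > max_peeled_insns;
    }

    /* rejects_unroll (true, 270, 90, 200)  -> true: cunrolli refuses.
       rejects_unroll (false, 270, 90, 200) -> false: cunroll unrolls.  */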
static bool
canonicalize_loop_induction_variables (class loop *loop,
bool create_iv, enum unroll_level ul,
- bool try_eval, bool allow_peel, bool cunrolli)
+ bool try_eval, bool allow_peel,
+ const_sbitmap innermost,
+ bool cunrolli)
{
edge exit = NULL;
tree niter;
modified |= remove_redundant_iv_tests (loop);
dump_user_location_t locus = find_loop_location (loop);
+
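+ /* Apply the stricter cunrolli limit only to loops recorded as
+ innermost before any unrolling took place; a loop numbered past
+ the snapshot was created later and keeps the relaxed estimate. */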
+ bool innermost_cunrolli_p
+ = cunrolli
+ && (unsigned) loop->num < SBITMAP_SIZE (innermost)
+ && bitmap_bit_p (innermost, loop->num);
+
if (try_unroll_loop_completely (loop, exit, niter, may_be_zero, ul,
- maxiter, locus, allow_peel, cunrolli))
+ maxiter, locus, allow_peel,
+ innermost_cunrolli_p))
return true;
if (create_iv
bool changed = false;
bool irred_invalidated = false;
bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
+ auto_sbitmap innermost (number_of_loops (cfun));
+ bitmap_clear (innermost);
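+ /* The snapshot stays empty here: CUNROLLI is false in the call
+ below, so canonicalize_loop_induction_variables never reads it. */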
estimate_numbers_of_iterations (cfun);
for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
{
- changed |= canonicalize_loop_induction_variables (loop,
- true, UL_SINGLE_ITER,
- true, false, false);
+ changed
+ |= canonicalize_loop_induction_variables (loop,
+ true, UL_SINGLE_ITER,
+ true, false,
+ (const_sbitmap) innermost,
+ false);
}
gcc_assert (!need_ssa_update_p (cfun));
static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
- bitmap father_bbs, class loop *loop, bool cunrolli)
+ bitmap father_bbs, class loop *loop,
+ const_sbitmap innermost, bool cunrolli)
{
class loop *loop_father;
bool changed = false;
if (!child_father_bbs)
child_father_bbs = BITMAP_ALLOC (NULL);
if (tree_unroll_loops_completely_1 (may_increase_size, unroll_outer,
- child_father_bbs, inner, cunrolli))
+ child_father_bbs, inner,
+ innermost, cunrolli))
{
bitmap_ior_into (father_bbs, child_father_bbs);
bitmap_clear (child_father_bbs);
ul = UL_NO_GROWTH;
if (canonicalize_loop_induction_variables
- (loop, false, ul, !flag_tree_loop_ivcanon, unroll_outer, cunrolli))
+ (loop, false, ul, !flag_tree_loop_ivcanon, unroll_outer,
+ innermost, cunrolli))
{
/* If we'll continue unrolling, we need to propagate constants
within the new basic blocks to fold away induction variable
/* Unroll LOOPS completely if they iterate just few times. Unless
MAY_INCREASE_SIZE is true, perform the unrolling only if the
- size of the code does not increase. */
+ size of the code does not increase.
+ CUNROLLI is true when the pass is cunrolli. */
static unsigned int
-tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
+tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer,
+ bool cunrolli)
{
bitmap father_bbs = BITMAP_ALLOC (NULL);
bool changed;
int iteration = 0;
bool irred_invalidated = false;
- bool cunrolli = true;
+ auto_sbitmap innermost (number_of_loops (cfun));
+ bitmap_clear (innermost);
estimate_numbers_of_iterations (cfun);
+ /* Mark all innermost loops at the beginning. */
+ for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
+ {
+ if (!loop->inner)
+ bitmap_set_bit (innermost, loop->num);
+ }
+
do
{
changed = false;
changed = tree_unroll_loops_completely_1 (may_increase_size,
unroll_outer, father_bbs,
current_loops->tree_root,
+ (const_sbitmap) innermost,
cunrolli);
if (changed)
{
unsigned i;
- /* For the outer loop, considering that the inner loop is completely
- unrolled, it would expose more optimization opportunities, so it's
- better to keep 2/3 reduction of estimated unrolled size. */
- cunrolli = false;
unloop_loops (loops_to_unloop, loops_to_unloop_nunroll,
edges_to_remove, loop_closed_ssa_invalidated,
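(A hypothetical illustration of why the innermost snapshot is taken
before the do-loop rather than recomputed per iteration: once a first
pass fully unrolls the j-loop below, the i-loop becomes innermost in
the loop tree, yet it stays unmarked in the snapshot and is therefore
still sized with the relaxed cunroll estimate.)

    void
    f (float *a, const float *b)
    {
      for (int i = 0; i < 16; i++)   /* not marked: has an inner loop */
        for (int j = 0; j < 4; j++)  /* marked innermost at pass entry */
          a[i] += b[i * 4 + j];
    }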
re-peeling the same loop multiple times. */
if (flag_peel_loops)
peeled_loops = BITMAP_ALLOC (NULL);
- unsigned int val = tree_unroll_loops_completely (flag_cunroll_grow_size, true);
+ unsigned int val = tree_unroll_loops_completely (flag_cunroll_grow_size,
+ true, false);
if (peeled_loops)
{
BITMAP_FREE (peeled_loops);
if (number_of_loops (fun) > 1)
{
scev_initialize ();
- ret = tree_unroll_loops_completely (optimize >= 3, false);
+ ret = tree_unroll_loops_completely (optimize >= 3, false, true);
scev_finalize ();
}
loop_optimizer_finalize ();