/* Loop unroll-and-jam.
- Copyright (C) 2017 Free Software Foundation, Inc.
+ Copyright (C) 2017-2021 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "params.h"
#include "tree-pass.h"
#include "backend.h"
#include "tree.h"
#include "tree-data-ref.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-vectorizer.h"
+#include "tree-ssa-sccvn.h"
/* Unroll and Jam transformation
to the OLD loop or the outer loop of OLD now is inside LOOP. */
static void
-merge_loop_tree (struct loop *loop, struct loop *old)
+merge_loop_tree (class loop *loop, class loop *old)
{
basic_block *bbs;
int i, n;
- struct loop *subloop;
+ class loop *subloop;
edge e;
edge_iterator ei;
for (i = 0; i < n; i++)
{
/* If the block was direct child of OLD loop it's now part
- of LOOP. If it was outside OLD, then it moved into LOOP
+ of LOOP. If it was outside OLD, then it moved into LOOP
as well. This avoids changing the loop father for BBs
in inner loops of OLD. */
if (bbs[i]->loop_father == old
free (bbs);
}
-/* BB exits the outer loop of an unroll-and-jam situation.
+/* BB is part of the outer loop of an unroll-and-jam situation.
Check if any statements therein would prevent the transformation. */
static bool
{
gimple_stmt_iterator gsi;
/* BB is duplicated by outer unrolling and then all N-1 first copies
- move into the body of the fused inner loop. The last copy remains
- the exit block of the outer loop and is still outside the inner loop
- also after fusion. We can't allow this for some effects of BB:
+ move into the body of the fused inner loop. If BB exits the outer loop
+ the last copy still does so, and the first N-1 copies are cancelled
+ by loop unrolling, so also after fusion it's the exit block.
+ But there might be other reasons that prevent fusion:
* stores or unknown side-effects prevent fusion
* loads don't
* computations into SSA names: these aren't problematic. Their
- result will be unused on the exit edges of the first N-1 copies
+ result will be unused on the exit edges of the first N-1 copies
(those aren't taken after unrolling). If they are used on the
other edge (the one leading to the outer latch block) they are
loop-carried (on the outer loop) and the Nth copy of BB will
If so return true, otherwise return false. */
static bool
-unroll_jam_possible_p (struct loop *outer, struct loop *loop)
+unroll_jam_possible_p (class loop *outer, class loop *loop)
{
basic_block *bbs;
int i, n;
- struct tree_niter_desc niter;
+ class tree_niter_desc niter;
/* When fusing the loops we skip the latch block
of the first one, so it mustn't have any effects to
if (outer->inner != loop || loop->next)
return false;
+  /* Prevent head-controlled inner loops, which we usually have.
+ The guard block would need to be accepted
+ (invariant condition either entering or skipping the loop),
+ without also accepting arbitrary control flow. When unswitching
+ ran before us (as with -O3) this won't be a problem because its
+ outer loop unswitching will have moved out the invariant condition.
+
+ If we do that we need to extend fuse_loops() to cope with this
+ by threading through the (still invariant) copied condition
+ between the two loop copies. */
+ if (!dominated_by_p (CDI_DOMINATORS, outer->latch, loop->header))
+ return false;
+
/* The number of iterations of the inner loop must be loop invariant
with respect to the outer loop. */
if (!number_of_iterations_exit (loop, single_exit (loop), &niter,
|| !expr_invariant_in_loop_p (outer, niter.niter))
return false;
+ /* If the inner loop produces any values that are used inside the
+ outer loop (except the virtual op) then it can flow
+ back (perhaps indirectly) into the inner loop. This prevents
+ fusion: without fusion the value at the last iteration is used,
+ with fusion the value after the initial iteration is used.
+
+ If all uses are outside the outer loop this doesn't prevent fusion;
+ the value of the last iteration is still used (and the values from
+ all intermediate iterations are dead). */
+ gphi_iterator psi;
+ for (psi = gsi_start_phis (single_exit (loop)->dest);
+ !gsi_end_p (psi); gsi_next (&psi))
+ {
+ imm_use_iterator imm_iter;
+ use_operand_p use_p;
+ tree op = gimple_phi_result (psi.phi ());
+ if (virtual_operand_p (op))
+ continue;
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
+ {
+ gimple *use_stmt = USE_STMT (use_p);
+ if (!is_gimple_debug (use_stmt)
+ && flow_bb_inside_loop_p (outer, gimple_bb (use_stmt)))
+ return false;
+ }
+ }
+
/* And check blocks belonging to just outer loop. */
bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
n = get_loop_body_with_size (outer, bbs, n_basic_blocks_for_fn (cfun));
for (i = 0; i < n; i++)
- {
- if (bbs[i]->loop_father == outer
- && bbs[i] != outer->latch && bbs[i] != outer->header
- && (!loop_exits_from_bb_p (outer, bbs[i])
- || bb_prevents_fusion_p (bbs[i])))
- break;
- /* XXX Note that the above disallows head-controlled inner loops,
- that we usually have. The guard block would need to be accepted
- (invariant condition either entering or skipping the loop),
- without also accepting arbitrary control flow. When unswitching
- ran before us (as with -O3) this won't be a problem because its
- outer loop unswitching will have moved out the invariant condition.
-
- If we do that we need to extend fuse_loops() to cope with this
- by threading through the (still invariant) copied condition
- between the two loop copies. */
- }
+ if (bbs[i]->loop_father == outer && bb_prevents_fusion_p (bbs[i]))
+ break;
free (bbs);
if (i != n)
return false;
body would be the after-iter value of the first body) if it's over
an associative and commutative operation. We wouldn't
be able to handle unknown cycles. */
- gphi_iterator psi;
for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
{
affine_iv iv;
if (!simple_iv (loop, loop, op, &iv, true))
return false;
/* The inductions must be regular, loop invariant step and initial
- value. */
+ value. */
if (!expr_invariant_in_loop_p (outer, iv.step)
|| !expr_invariant_in_loop_p (outer, iv.base))
return false;
/* XXX With more effort we could also be able to deal with inductions
- where the initial value is loop variant but a simple IV in the
+ where the initial value is loop variant but a simple IV in the
outer loop. The initial value for the second body would be
the original initial value plus iv.base.step. The next value
for the fused loop would be the original next value of the first
be in appropriate form. */
static void
-fuse_loops (struct loop *loop)
+fuse_loops (class loop *loop)
{
- struct loop *next = loop->next;
+ class loop *next = loop->next;
while (next)
{
gcc_assert (EDGE_COUNT (next->header->preds) == 1);
/* The PHI nodes of the second body (single-argument now)
- need adjustments to use the right values: either directly
+ need adjustments to use the right values: either directly
the value of the corresponding PHI in the first copy or
the one leaving the first body which unrolling did for us.
merge_loop_tree (loop, next);
gcc_assert (!next->num_nodes);
- struct loop *ln = next->next;
+ class loop *ln = next->next;
delete_loop (next);
next = ln;
}
rewrite_into_loop_closed_ssa_1 (NULL, 0, SSA_OP_USE, loop);
}
+/* Return true if any of the access functions for dataref A
+   isn't invariant with respect to loop LOOP_NEST.  Used by
+   adjust_unroll_factor to detect datarefs that are invariant in an
+   inner loop, for which the reordering implicit in loop fusion is
+   not captured by the distance vectors.  */
+static bool
+any_access_function_variant_p (const struct data_reference *a,
+			       const class loop *loop_nest)
+{
+  vec<tree> fns = DR_ACCESS_FNS (a);
+
+  /* Check every access function for invariance with respect to
+     the loop with number LOOP_NEST->num.  */
+  for (tree t : fns)
+    if (!evolution_function_is_invariant_p (t, loop_nest->num))
+      return true;
+
+  return false;
+}
+
/* Returns true if the distance in DDR can be determined and adjusts
the unroll factor in *UNROLL to make unrolling valid for that distance.
- Otherwise return false.
+ Otherwise return false. DDR is with respect to the outer loop of INNER.
If this data dep can lead to a removed memory reference, increment
*REMOVED and adjust *PROFIT_UNROLL to be the necessary unroll factor
for this to happen. */
static bool
-adjust_unroll_factor (struct data_dependence_relation *ddr,
+adjust_unroll_factor (class loop *inner, struct data_dependence_relation *ddr,
unsigned *unroll, unsigned *profit_unroll,
unsigned *removed)
{
gcc_unreachable ();
else if ((unsigned)dist >= *unroll)
;
- else if (lambda_vector_lexico_pos (dist_v + 1, DDR_NB_LOOPS (ddr) - 1)
- || (lambda_vector_zerop (dist_v + 1, DDR_NB_LOOPS (ddr) - 1)
- && dist > 0))
+ else if (lambda_vector_zerop (dist_v + 1, DDR_NB_LOOPS (ddr) - 1))
+ {
+ /* We have (a,0) with a < N, so this will be transformed into
+ (0,0) after unrolling by N. This might potentially be a
+ problem, if it's not a read-read dependency. */
+ if (DR_IS_READ (DDR_A (ddr)) && DR_IS_READ (DDR_B (ddr)))
+ ;
+ else
+ {
+ /* So, at least one is a write, and we might reduce the
+ distance vector to (0,0). This is still no problem
+ if both data-refs are affine with respect to the inner
+ loops. But if one of them is invariant with respect
+ to an inner loop our reordering implicit in loop fusion
+ corrupts the program, as our data dependences don't
+ capture this. E.g. for:
+ for (0 <= i < n)
+ for (0 <= j < m)
+ a[i][0] = a[i+1][0] + 2; // (1)
+ b[i][j] = b[i+1][j] + 2; // (2)
+ the distance vector for both statements is (-1,0),
+ but exchanging the order for (2) is okay, while
+ for (1) it is not. To see this, write out the original
+ accesses (assume m is 2):
+ a i j original
+ 0 0 0 r a[1][0] b[1][0]
+ 1 0 0 w a[0][0] b[0][0]
+ 2 0 1 r a[1][0] b[1][1]
+ 3 0 1 w a[0][0] b[0][1]
+ 4 1 0 r a[2][0] b[2][0]
+ 5 1 0 w a[1][0] b[1][0]
+ after unroll-by-2 and fusion the accesses are done in
+ this order (from column a): 0,1, 4,5, 2,3, i.e. this:
+ a i j transformed
+ 0 0 0 r a[1][0] b[1][0]
+ 1 0 0 w a[0][0] b[0][0]
+ 4 1 0 r a[2][0] b[2][0]
+ 5 1 0 w a[1][0] b[1][0]
+ 2 0 1 r a[1][0] b[1][1]
+ 3 0 1 w a[0][0] b[0][1]
+ Note how access 2 accesses the same element as access 5
+ for array 'a' but not for array 'b'. */
+ if (any_access_function_variant_p (DDR_A (ddr), inner)
+ && any_access_function_variant_p (DDR_B (ddr), inner))
+ ;
+ else
+	      /* And if any dataref of this pair is invariant with
+		 respect to the inner loop, we have no choice but
+		 to reduce the unroll factor.  */
+ *unroll = dist;
+ }
+ }
+ else if (lambda_vector_lexico_pos (dist_v + 1, DDR_NB_LOOPS (ddr) - 1))
;
else
*unroll = dist;
static unsigned int
tree_loop_unroll_and_jam (void)
{
- struct loop *loop;
- bool changed = false;
+ unsigned int todo = 0;
gcc_assert (scev_initialized_p ());
/* Go through all innermost loops. */
- FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
+ for (auto loop : loops_list (cfun, LI_ONLY_INNERMOST))
{
- struct loop *outer = loop_outer (loop);
+ class loop *outer = loop_outer (loop);
if (loop_depth (loop) < 2
|| optimize_loop_nest_for_size_p (outer))
if (!unroll_jam_possible_p (outer, loop))
continue;
- vec<data_reference_p> datarefs;
- vec<ddr_p> dependences;
+ vec<data_reference_p> datarefs = vNULL;
+ vec<ddr_p> dependences = vNULL;
unsigned unroll_factor, profit_unroll, removed;
- struct tree_niter_desc desc;
+ class tree_niter_desc desc;
bool unroll = false;
auto_vec<loop_p, 3> loop_nest;
- dependences.create (10);
- datarefs.create (10);
if (!compute_data_dependences_for_loop (outer, true, &loop_nest,
- &datarefs, &dependences))
+ &datarefs, &dependences))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Cannot analyze data dependencies\n");
free_data_refs (datarefs);
free_dependence_relations (dependences);
- return false;
+ continue;
}
if (!datarefs.length ())
continue;
/* Now check the distance vector, for determining a sensible
outer unroll factor, and for validity of merging the inner
loop copies. */
- if (!adjust_unroll_factor (ddr, &unroll_factor, &profit_unroll,
+ if (!adjust_unroll_factor (loop, ddr, &unroll_factor, &profit_unroll,
&removed))
{
/* Couldn't get the distance vector. For two reads that's
- harmless (we assume we should unroll). For at least
+ harmless (we assume we should unroll). For at least
one write this means we can't check the dependence direction
and hence can't determine safety. */
}
/* We regard a user-specified minimum percentage of zero as a request
- to ignore all profitability concerns and apply the transformation
+ to ignore all profitability concerns and apply the transformation
always. */
- if (!PARAM_VALUE (PARAM_UNROLL_JAM_MIN_PERCENT))
- profit_unroll = 2;
+ if (!param_unroll_jam_min_percent)
+ profit_unroll = MAX(2, profit_unroll);
else if (removed * 100 / datarefs.length ()
- < (unsigned)PARAM_VALUE (PARAM_UNROLL_JAM_MIN_PERCENT))
+ < (unsigned)param_unroll_jam_min_percent)
profit_unroll = 1;
if (unroll_factor > profit_unroll)
unroll_factor = profit_unroll;
- if (unroll_factor > (unsigned)PARAM_VALUE (PARAM_UNROLL_JAM_MAX_UNROLL))
- unroll_factor = PARAM_VALUE (PARAM_UNROLL_JAM_MAX_UNROLL);
+ if (unroll_factor > (unsigned)param_unroll_jam_max_unroll)
+ unroll_factor = param_unroll_jam_max_unroll;
unroll = (unroll_factor > 1
&& can_unroll_loop_p (outer, unroll_factor, &desc));
&desc);
free_original_copy_tables ();
fuse_loops (outer->inner);
- changed = true;
+ todo |= TODO_cleanup_cfg;
+
+ auto_bitmap exit_bbs;
+ bitmap_set_bit (exit_bbs, single_dom_exit (outer)->dest->index);
+ todo |= do_rpo_vn (cfun, loop_preheader_edge (outer), exit_bbs);
}
loop_nest.release ();
free_data_refs (datarefs);
}
- if (changed)
+ if (todo)
{
scev_reset ();
free_dominance_info (CDI_DOMINATORS);
- return TODO_cleanup_cfg;
}
- return 0;
+ return todo;
}
/* Pass boilerplate */