/* Array prefetching.
- Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+ Copyright (C) 2005-2013 Free Software Foundation, Inc.
This file is part of GCC.
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stor-layout.h"
#include "tm_p.h"
#include "basic-block.h"
-#include "output.h"
#include "tree-pretty-print.h"
-#include "tree-flow.h"
-#include "tree-dump.h"
-#include "timevar.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "gimple-iterator.h"
+#include "gimplify-me.h"
+#include "gimple-ssa.h"
+#include "tree-ssa-loop-ivopts.h"
+#include "tree-ssa-loop-manip.h"
+#include "tree-ssa-loop-niter.h"
+#include "tree-ssa-loop.h"
+#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "insn-config.h"
-#include "recog.h"
#include "hashtab.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
between the GIMPLE and RTL worlds. */
#include "expr.h"
#include "optabs.h"
+#include "recog.h"
/* This pass inserts prefetch instructions to optimize cache usage during
accesses to arrays in loops. It processes loops sequentially and:
(2) has PREFETCH_MOD 64
(3) has PREFETCH_MOD 4
(4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
- the cache line accessed by (4) is the same with probability only
+ the cache line accessed by (5) is the same with probability only
7/32.
(5) has PREFETCH_MOD 1 as well.
prefetch instructions with guards in cases where 5) was not sufficient
to satisfy the constraints?
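   As a hedged stand-in for the example loop elided from this excerpt
   (not the original example), a unit-stride reference illustrates the
   PREFETCH_MOD values discussed above:

     char a[N];
     for (i = 0; i < N; i++)
       a[i]++;

   With 64-byte cache lines, 64 consecutive accesses to a[i] fall into
   the same line, so one prefetch per 64 iterations suffices
   (PREFETCH_MOD 64); a reference whose stride is at least the line
   size would need a prefetch on every iteration (PREFETCH_MOD 1).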
- The function is_loop_prefetching_profitable() implements a cost model
- to determine if prefetching is profitable for a given loop. The cost
- model has two heuristcs:
- 1. A heuristic that determines whether the given loop has enough CPU
- ops that can be overlapped with cache missing memory ops.
- If not, the loop won't benefit from prefetching. This is implemented
- by requirung the ratio between the instruction count and the mem ref
- count to be above a certain minimum.
- 2. A heuristic that disables prefetching in a loop with an unknown trip
- count if the prefetching cost is above a certain limit. The relative
- prefetching cost is estimated by taking the ratio between the
- prefetch count and the total intruction count (this models the I-cache
- cost).
+ A cost model is implemented to determine whether or not prefetching is
+ profitable for a given loop. The cost model has three heuristics:
+
+ 1. Function trip_count_to_ahead_ratio_too_small_p implements a
+ heuristic that determines whether or not the loop has too few
+ iterations (compared to ahead). Prefetching is not likely to be
+ beneficial if the trip count to ahead ratio is below a certain
+ minimum.
+
+ 2. Function mem_ref_count_reasonable_p implements a heuristic that
+ determines whether the given loop has enough CPU ops that can be
+ overlapped with cache missing memory ops. If not, the loop
+ won't benefit from prefetching. In the implementation,
+ prefetching is not considered beneficial if the ratio between
+ the instruction count and the mem ref count is below a certain
+ minimum.
+
+ 3. Function insn_to_prefetch_ratio_too_small_p implements a
+ heuristic that disables prefetching in a loop if the prefetching
+ cost is above a certain limit. The relative prefetching cost is
+ estimated by taking the ratio between the prefetch count and the
+     total instruction count (this models the I-cache cost).
+
The limits used in these heuristics are defined as parameters with
reasonable default values. Machine-specific default values will be
added later.
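A minimal standalone sketch of how heuristics 2 and 3 combine is given
below.  The limit macros and their values are illustrative assumptions
for the sketch; the pass itself reads these limits from parameters.

/* Illustrative limits only (assumed values); the pass reads the real
   ones from --param-style parameters.  */
#define MIN_INSN_TO_MEM_RATIO       3
#define MIN_INSN_TO_PREFETCH_RATIO  9

/* Heuristic 2: require enough CPU ops per memory reference.  */
static int
enough_cpu_work_p (unsigned ninsns, unsigned mem_ref_count)
{
  return mem_ref_count != 0
	 && ninsns / mem_ref_count >= MIN_INSN_TO_MEM_RATIO;
}

/* Heuristic 3: require enough instructions per prefetch in the unrolled
   loop body, so that prefetch instructions do not crowd the I-cache.  */
static int
cheap_enough_prefetches_p (unsigned ninsns, unsigned prefetch_count,
			   unsigned unroll_factor)
{
  return prefetch_count == 0	/* Nothing to issue; trivially cheap.  */
	 || (unroll_factor * ninsns) / prefetch_count
	    >= MIN_INSN_TO_PREFETCH_RATIO;
}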
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif
+/* Some of the prefetch computations have quadratic complexity. We want to
+   avoid huge compile times and, therefore, limit the number of memory
+   references per loop where we consider prefetching.  */
+
+#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
+#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
+#endif
+
/* The memory reference. */
struct mem_ref
nontemporal one. */
};
-/* Dumps information about reference REF to FILE. */
-
+/* Dumps information about the memory reference described by BASE, STEP,
+   DELTA and WRITE_P to FILE.  */
static void
-dump_mem_ref (FILE *file, struct mem_ref *ref)
+dump_mem_details (FILE *file, tree base, tree step,
+ HOST_WIDE_INT delta, bool write_p)
{
- fprintf (file, "Reference %p:\n", (void *) ref);
-
- fprintf (file, " group %p (base ", (void *) ref->group);
- print_generic_expr (file, ref->group->base, TDF_SLIM);
+ fprintf (file, "(base ");
+ print_generic_expr (file, base, TDF_SLIM);
fprintf (file, ", step ");
- if (cst_and_fits_in_hwi (ref->group->step))
- fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
+ if (cst_fits_shwi_p (step))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
else
- print_generic_expr (file, ref->group->step, TDF_TREE);
+ print_generic_expr (file, step, TDF_TREE);
fprintf (file, ")\n");
-
fprintf (file, " delta ");
- fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
+ fprintf (file, "\n");
+ fprintf (file, " %s\n", write_p ? "write" : "read");
fprintf (file, "\n");
+}
- fprintf (file, " %s\n", ref->write_p ? "write" : "read");
+/* Dumps information about reference REF to FILE. */
- fprintf (file, "\n");
+static void
+dump_mem_ref (FILE *file, struct mem_ref *ref)
+{
+ fprintf (file, "Reference %p:\n", (void *) ref);
+
+ fprintf (file, " group %p ", (void *) ref->group);
+
+ dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
+ ref->write_p);
}
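For reference, the refactoring above keeps the old dump shape; for a
unit-stride int array read the output would look roughly like this
(pointer values hypothetical):

  Reference 0x2b5c310:
   group 0x2b5c2d0 (base a, step 4)
   delta 0
   read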
/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
/* If step is an integer constant, keep the list of groups sorted
by decreasing step. */
- if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
+ if (cst_fits_shwi_p ((*groups)->step) && cst_fits_shwi_p (step)
&& int_cst_value ((*groups)->step) < int_cst_value (step))
break;
}
HOST_WIDE_INT idelta = 0, imult = 1;
affine_iv iv;
- if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
- || TREE_CODE (base) == ALIGN_INDIRECT_REF)
- return false;
-
if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
*index, &iv, true))
return false;
step = iv.step;
if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
- && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
+ && cst_fits_shwi_p (TREE_OPERAND (ibase, 1)))
{
idelta = int_cst_value (TREE_OPERAND (ibase, 1));
ibase = TREE_OPERAND (ibase, 0);
}
- if (cst_and_fits_in_hwi (ibase))
+ if (cst_fits_shwi_p (ibase))
{
idelta += int_cst_value (ibase);
ibase = build_int_cst (TREE_TYPE (ibase), 0);
if (TREE_CODE (base) == ARRAY_REF)
{
stepsize = array_ref_element_size (base);
- if (!cst_and_fits_in_hwi (stepsize))
+ if (!cst_fits_shwi_p (stepsize))
return false;
imult = int_cst_value (stepsize);
step = fold_build2 (MULT_EXPR, sizetype,
*step = NULL_TREE;
*delta = 0;
- /* First strip off the component references. Ignore bitfields. */
- if (TREE_CODE (ref) == COMPONENT_REF
- && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
- ref = TREE_OPERAND (ref, 0);
+ /* First strip off the component references. Ignore bitfields.
+   Also strip off the real and imaginary parts of a complex, so that
+ they can have the same base. */
+ if (TREE_CODE (ref) == REALPART_EXPR
+ || TREE_CODE (ref) == IMAGPART_EXPR
+ || (TREE_CODE (ref) == COMPONENT_REF
+ && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
+ {
+ if (TREE_CODE (ref) == IMAGPART_EXPR)
+ *delta += int_size_in_bytes (TREE_TYPE (ref));
+ ref = TREE_OPERAND (ref, 0);
+ }
*ref_p = ref;
if (step == NULL_TREE)
return false;
+ /* Stop if the address of BASE could not be taken. */
+ if (may_be_nonaddressable_p (base))
+ return false;
+
+ /* Limit non-constant step prefetching only to the innermost loops and
+ only when the step is loop invariant in the entire loop nest. */
+ if (!cst_fits_shwi_p (step))
+ {
+ if (loop->inner != NULL)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+	      fprintf (dump_file, "Memory expression %p\n", (void *) ref);
+	      print_generic_expr (dump_file, ref, TDF_TREE);
+	      fprintf (dump_file, ":");
+	      dump_mem_details (dump_file, base, step, delta, write_p);
+	      fprintf (dump_file,
+		       "Ignoring %p, non-constant step prefetching is "
+		       "limited to innermost loops\n",
+ (void *) ref);
+ }
+ return false;
+ }
+ else
+ {
+ if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+		  fprintf (dump_file, "Memory expression %p\n", (void *) ref);
+		  print_generic_expr (dump_file, ref, TDF_TREE);
+		  fprintf (dump_file, ":");
+ dump_mem_details (dump_file, base, step, delta, write_p);
+ fprintf (dump_file,
+ "Not prefetching, ignoring %p due to "
+ "loop variant step\n",
+ (void *) ref);
+ }
+ return false;
+ }
+ }
+ }
+
/* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
are integer constants. */
agrp = find_or_create_group (refs, base, step);
bool backward;
/* If the step size is non constant, we cannot calculate prefetch_mod. */
- if (!cst_and_fits_in_hwi (ref->group->step))
+ if (!cst_fits_shwi_p (ref->group->step))
return;
step = int_cst_value (ref->group->step);
/* Given a CACHE_LINE_SIZE and two inductive memory references
with a common STEP greater than CACHE_LINE_SIZE and an address
difference DELTA, compute the probability that they will fall
- in different cache lines. DISTINCT_ITERS is the number of
- distinct iterations after which the pattern repeats itself.
+ in different cache lines. Return true if the computed miss rate
+ is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
+ number of distinct iterations after which the pattern repeats itself.
ALIGN_UNIT is the unit of alignment in bytes. */
-static int
-compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
+static bool
+is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
HOST_WIDE_INT step, HOST_WIDE_INT delta,
unsigned HOST_WIDE_INT distinct_iters,
int align_unit)
{
unsigned align, iter;
- int total_positions, miss_positions, miss_rate;
+ int total_positions, miss_positions, max_allowed_miss_positions;
int address1, address2, cache_line1, cache_line2;
- total_positions = 0;
+ /* It always misses if delta is greater than or equal to the cache
+ line size. */
+ if (delta >= (HOST_WIDE_INT) cache_line_size)
+ return false;
+
miss_positions = 0;
+ total_positions = (cache_line_size / align_unit) * distinct_iters;
+ max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
/* Iterate through all possible alignments of the first
memory reference within its cache line. */
address2 = address1 + delta;
cache_line1 = address1 / cache_line_size;
cache_line2 = address2 / cache_line_size;
- total_positions += 1;
if (cache_line1 != cache_line2)
- miss_positions += 1;
+ {
+ miss_positions += 1;
+ if (miss_positions > max_allowed_miss_positions)
+ return false;
+ }
}
- miss_rate = 1000 * miss_positions / total_positions;
- return miss_rate;
+ return true;
}
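As a worked example of the early-exit arithmetic, assuming the default
ACCEPTABLE_MISS_RATE of 50 (in thousandths, i.e. 5%): with
cache_line_size = 64, align_unit = 8 and distinct_iters = 4, we get
total_positions = (64 / 8) * 4 = 32 and max_allowed_miss_positions
= (50 * 32) / 1000 = 1, so the function returns false as soon as a
second alignment puts the two addresses in different cache lines.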
/* Prune the prefetch candidate REF using the reuse with BY.
HOST_WIDE_INT delta = delta_b - delta_r;
HOST_WIDE_INT hit_from;
unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
- int miss_rate;
HOST_WIDE_INT reduced_step;
unsigned HOST_WIDE_INT reduced_prefetch_block;
tree ref_type;
int align_unit;
/* If the step is non constant we cannot calculate prefetch_before. */
- if (!cst_and_fits_in_hwi (ref->group->step)) {
+ if (!cst_fits_shwi_p (ref->group->step)) {
return;
}
prefetch_before = (hit_from - delta_r + step - 1) / step;
/* Do not reduce prefetch_before if we meet beyond cache size. */
- if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
+ if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
prefetch_before = PREFETCH_ALL;
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
delta %= step;
ref_type = TREE_TYPE (ref->mem);
align_unit = TYPE_ALIGN (ref_type) / 8;
- miss_rate = compute_miss_rate(prefetch_block, step, delta,
- reduced_prefetch_block, align_unit);
- if (miss_rate <= ACCEPTABLE_MISS_RATE)
+ if (is_miss_rate_acceptable (prefetch_block, step, delta,
+ reduced_prefetch_block, align_unit))
{
/* Do not reduce prefetch_before if we meet beyond cache size. */
if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
/* Try also the following iteration. */
prefetch_before++;
delta = step - delta;
- miss_rate = compute_miss_rate(prefetch_block, step, delta,
- reduced_prefetch_block, align_unit);
- if (miss_rate <= ACCEPTABLE_MISS_RATE)
+ if (is_miss_rate_acceptable (prefetch_block, step, delta,
+ reduced_prefetch_block, align_unit))
{
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
return any;
}
-/* Estimate the number of prefetches in the given GROUPS. */
+/* Return TRUE if no prefetch is going to be generated in the given
+ GROUPS. */
+
+static bool
+nothing_to_prefetch_p (struct mem_ref_group *groups)
+{
+ struct mem_ref *ref;
+
+ for (; groups; groups = groups->next)
+ for (ref = groups->refs; ref; ref = ref->next)
+ if (should_issue_prefetch_p (ref))
+ return false;
+
+ return true;
+}
+
+/* Estimate the number of prefetches in the given GROUPS.
+ UNROLL_FACTOR is the factor by which LOOP was unrolled. */
static int
-estimate_prefetch_count (struct mem_ref_group *groups)
+estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
{
struct mem_ref *ref;
+ unsigned n_prefetches;
int prefetch_count = 0;
for (; groups; groups = groups->next)
for (ref = groups->refs; ref; ref = ref->next)
if (should_issue_prefetch_p (ref))
- prefetch_count++;
+ {
+ n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
+ / ref->prefetch_mod);
+ prefetch_count += n_prefetches;
+ }
return prefetch_count;
}
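The per-reference count is a ceiling division: with unroll_factor = 8,
a reference with prefetch_mod = 4 contributes (8 + 4 - 1) / 4 = 2
prefetches to the unrolled body, while one with prefetch_mod = 64
still contributes only (8 + 64 - 1) / 64 = 1.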
addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
true, NULL, true, GSI_SAME_STMT);
write_p = ref->write_p ? integer_one_node : integer_zero_node;
- local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
+ local = nontemporal ? integer_zero_node : integer_three_node;
for (ap = 0; ap < n_prefetches; ap++)
{
- if (cst_and_fits_in_hwi (ref->group->step))
+ if (cst_fits_shwi_p (ref->group->step))
{
/* Determine the address to prefetch. */
delta = (ahead + ap * ref->prefetch_mod) *
int_cst_value (ref->group->step);
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- addr_base, size_int (delta));
+ addr = fold_build_pointer_plus_hwi (addr_base, delta);
addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
true, GSI_SAME_STMT);
}
forward = fold_build2 (MULT_EXPR, sizetype,
fold_convert (sizetype, ref->group->step),
fold_convert (sizetype, size_int (ahead)));
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
- forward);
+ addr = fold_build_pointer_plus (addr_base, forward);
addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
NULL, true, GSI_SAME_STMT);
}
/* Create the prefetch instruction. */
- prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
+ prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
3, addr, write_p, local);
gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
}
if (mode == BLKmode)
return false;
- code = optab_handler (storent_optab, mode)->insn_code;
+ code = optab_handler (storent_optab, mode);
return code != CODE_FOR_nothing;
}
static void
emit_mfence_after_loop (struct loop *loop)
{
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
edge exit;
gimple call;
gimple_stmt_iterator bsi;
unsigned i;
- for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
+ FOR_EACH_VEC_ELT (exits, i, exit)
{
call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
bsi = gsi_after_labels (exit->dest);
gsi_insert_before (&bsi, call, GSI_NEW_STMT);
- mark_virtual_ops_for_renaming (call);
}
- VEC_free (edge, heap, exits);
+ exits.release ();
update_ssa (TODO_update_ssa_only_virtuals);
}
is a suitable place for it at each of the loop exits. */
if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
{
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
unsigned i;
edge exit;
- for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
+ FOR_EACH_VEC_ELT (exits, i, exit)
if ((exit->flags & EDGE_ABNORMAL)
- && exit->dest == EXIT_BLOCK_PTR)
+ && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
ret = false;
- VEC_free (edge, heap, exits);
+ exits.release ();
}
return ret;
if ((unsigned) loop_depth (aloop) <= min_depth)
continue;
- if (host_integerp (step, 0))
- astep = tree_low_cst (step, 0);
+ if (tree_fits_shwi_p (step))
+ astep = tree_to_shwi (step);
else
astep = L1_CACHE_LINE_SIZE;
{
tree stride, access_fn;
HOST_WIDE_INT *strides, astride;
- VEC (tree, heap) *access_fns;
+ vec<tree> access_fns;
tree ref = DR_REF (dr);
unsigned i, ret = ~0u;
strides = XCNEWVEC (HOST_WIDE_INT, n);
access_fns = DR_ACCESS_FNS (dr);
- for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
+ FOR_EACH_VEC_ELT (access_fns, i, access_fn)
{
/* Keep track of the reference corresponding to the subscript, so that we
know its stride. */
if (TREE_CODE (ref) == ARRAY_REF)
{
stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
- if (host_integerp (stride, 1))
- astride = tree_low_cst (stride, 1);
+ if (tree_fits_uhwi_p (stride))
+ astride = tree_to_uhwi (stride);
else
astride = L1_CACHE_LINE_SIZE;
/* Determines the distance till the first reuse of each reference in REFS
in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
- memory references in the loop. */
+ memory references in the loop. Return false if the analysis fails. */
-static void
+static bool
determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
bool no_other_refs)
{
struct loop *nest, *aloop;
- VEC (data_reference_p, heap) *datarefs = NULL;
- VEC (ddr_p, heap) *dependences = NULL;
+ vec<data_reference_p> datarefs = vNULL;
+ vec<ddr_p> dependences = vNULL;
struct mem_ref_group *gr;
struct mem_ref *ref, *refb;
- VEC (loop_p, heap) *vloops = NULL;
+ vec<loop_p> vloops = vNULL;
unsigned *loop_data_size;
unsigned i, j, n;
unsigned volume, dist, adist;
ddr_p dep;
if (loop->inner)
- return;
+ return true;
/* Find the outermost loop of the loop nest of loop (we require that
there are no sibling loops inside the nest). */
We use this to estimate whether the reference is evicted from the
cache before its reuse. */
find_loop_nest (nest, &vloops);
- n = VEC_length (loop_p, vloops);
+ n = vloops.length ();
loop_data_size = XNEWVEC (unsigned, n);
volume = volume_of_references (refs);
i = n;
if (volume > L2_CACHE_SIZE_BYTES)
continue;
- aloop = VEC_index (loop_p, vloops, i);
- vol = estimated_loop_iterations_int (aloop, false);
- if (vol < 0)
+ aloop = vloops[i];
+ vol = estimated_stmt_executions_int (aloop);
+ if (vol == -1)
vol = expected_loop_iterations (aloop);
volume *= vol;
}
for (gr = refs; gr; gr = gr->next)
for (ref = gr->refs; ref; ref = ref->next)
{
- dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
+ dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
+ ref->mem, ref->stmt, !ref->write_p);
if (dr)
{
ref->reuse_distance = volume;
dr->aux = ref;
- VEC_safe_push (data_reference_p, heap, datarefs, dr);
+ datarefs.safe_push (dr);
}
else
no_other_refs = false;
}
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
dist = self_reuse_distance (dr, loop_data_size, n, loop);
ref = (struct mem_ref *) dr->aux;
ref->independent_p = true;
}
- compute_all_dependences (datarefs, &dependences, vloops, true);
+ if (!compute_all_dependences (datarefs, &dependences, vloops, true))
+ return false;
- for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
+ FOR_EACH_VEC_ELT (dependences, i, dep)
{
if (DDR_ARE_DEPENDENT (dep) == chrec_known)
continue;
fprintf (dump_file, " ref %p distance %u\n",
(void *) ref, ref->reuse_distance);
}
+
+ return true;
}
-/* Do a cost-benefit analysis to determine if prefetching is profitable
- for the current loop given the following parameters:
+/* Determine whether or not the trip count to ahead ratio is too small based
+   on profitability considerations.
AHEAD: the iteration ahead distance,
- EST_NITER: the estimated trip count,
+ EST_NITER: the estimated trip count. */
+
+static bool
+trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
+{
+ /* Assume trip count to ahead ratio is big enough if the trip count could not
+ be estimated at compile time. */
+ if (est_niter < 0)
+ return false;
+
+ if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Not prefetching -- loop estimated to roll only %d times\n",
+ (int) est_niter);
+ return true;
+ }
+
+ return false;
+}
+
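   For example, assuming the default TRIP_COUNT_TO_AHEAD_RATIO of 4, a
   loop with ahead = 20 is rejected once its estimated trip count falls
   below 4 * 20 = 80, since most prefetched data would then arrive too
   close to, or after, the loop's exit to help.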
+/* Determine whether or not the number of memory references in the loop is
+   reasonable based on profitability and compilation-time considerations.
NINSNS: estimated number of instructions in the loop,
- PREFETCH_COUNT: an estimate of the number of prefetches
MEM_REF_COUNT: total number of memory references in the loop. */
static bool
-is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
- unsigned ninsns, unsigned prefetch_count,
- unsigned mem_ref_count, unsigned unroll_factor)
+mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
{
- int insn_to_mem_ratio, insn_to_prefetch_ratio;
+ int insn_to_mem_ratio;
if (mem_ref_count == 0)
return false;
+ /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
+     (compute_all_dependences) have costs quadratic in the reference count.
+ To avoid huge compilation time, we give up prefetching if mem_ref_count
+ is too large. */
+ if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
+ return false;
+
/* Prefetching improves performance by overlapping cache missing
memory accesses with CPU operations. If the loop does not have
enough CPU operations to overlap with memory operations, prefetching
return false;
}
+ return true;
+}
+
+/* Determine whether or not the instruction to prefetch ratio in the loop is
+   too small based on profitability considerations.
+ NINSNS: estimated number of instructions in the loop,
+ PREFETCH_COUNT: an estimate of the number of prefetches,
+ UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
+
+static bool
+insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
+ unsigned unroll_factor)
+{
+ int insn_to_prefetch_ratio;
+
/* Prefetching most likely causes performance degradation when the instruction
to prefetch ratio is too small. Too many prefetch instructions in a loop
may reduce the I-cache performance.
fprintf (dump_file,
"Not prefetching -- instruction to prefetch ratio (%d) too small\n",
insn_to_prefetch_ratio);
- return false;
+ return true;
}
- /* Could not do further estimation if the trip count is unknown. Just assume
- prefetching is profitable. Too aggressive??? */
- if (est_niter < 0)
- return true;
-
- if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file,
- "Not prefetching -- loop estimated to roll only %d times\n",
- (int) est_niter);
- return false;
- }
- return true;
+ return false;
}
return false;
}
+ /* FIXME: the time should be weighted by the probabilities of the blocks in
+ the loop body. */
+ time = tree_num_loop_insns (loop, &eni_time_weights);
+ if (time == 0)
+ return false;
+
+ ahead = (PREFETCH_LATENCY + time - 1) / time;
+ est_niter = estimated_stmt_executions_int (loop);
+ if (est_niter == -1)
+ est_niter = max_stmt_executions_int (loop);
+
+ /* Prefetching is not likely to be profitable if the trip count to ahead
+ ratio is too small. */
+ if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
+ return false;
+
+ ninsns = tree_num_loop_insns (loop, &eni_size_weights);
+
/* Step 1: gather the memory references. */
refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
+ /* Give up prefetching if the number of memory references in the
+    loop is not reasonable based on profitability and compilation time
+ considerations. */
+ if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
+ goto fail;
+
/* Step 2: estimate the reuse effects. */
prune_by_reuse (refs);
- prefetch_count = estimate_prefetch_count (refs);
- if (prefetch_count == 0)
+ if (nothing_to_prefetch_p (refs))
goto fail;
- determine_loop_nest_reuse (loop, refs, no_other_refs);
-
- /* Step 3: determine the ahead and unroll factor. */
-
- /* FIXME: the time should be weighted by the probabilities of the blocks in
- the loop body. */
- time = tree_num_loop_insns (loop, &eni_time_weights);
- ahead = (PREFETCH_LATENCY + time - 1) / time;
- est_niter = estimated_loop_iterations_int (loop, false);
+ if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
+ goto fail;
- ninsns = tree_num_loop_insns (loop, &eni_size_weights);
+ /* Step 3: determine unroll factor. */
unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
est_niter);
+
+ /* Estimate prefetch count for the unrolled loop. */
+ prefetch_count = estimate_prefetch_count (refs, unroll_factor);
+ if (prefetch_count == 0)
+ goto fail;
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
HOST_WIDE_INT_PRINT_DEC "\n"
ahead, unroll_factor, est_niter,
ninsns, mem_ref_count, prefetch_count);
- if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns, prefetch_count,
- mem_ref_count, unroll_factor))
+ /* Prefetching is not likely to be profitable if the instruction to prefetch
+ ratio is too small. */
+ if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
+ unroll_factor))
goto fail;
mark_nontemporal_stores (loop, refs);
unsigned int
tree_ssa_prefetch_arrays (void)
{
- loop_iterator li;
struct loop *loop;
bool unrolled = false;
int todo_flags = 0;
initialize_original_copy_tables ();
- if (!built_in_decls[BUILT_IN_PREFETCH])
+ if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
{
- tree type = build_function_type (void_type_node,
- tree_cons (NULL_TREE,
- const_ptr_type_node,
- NULL_TREE));
+ tree type = build_function_type_list (void_type_node,
+ const_ptr_type_node, NULL_TREE);
tree decl = add_builtin_function ("__builtin_prefetch", type,
BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
NULL, NULL_TREE);
DECL_IS_NOVOPS (decl) = true;
- built_in_decls[BUILT_IN_PREFETCH] = decl;
+ set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
}
/* We assume that size of cache line is a power of two, so verify this
here. */
gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Processing loop %d:\n", loop->num);
free_original_copy_tables ();
return todo_flags;
}
+
+/* Prefetching. */
+
+static unsigned int
+tree_ssa_loop_prefetch (void)
+{
+ if (number_of_loops (cfun) <= 1)
+ return 0;
+
+ return tree_ssa_prefetch_arrays ();
+}
+
+static bool
+gate_tree_ssa_loop_prefetch (void)
+{
+ return flag_prefetch_loop_arrays > 0;
+}
+
+namespace {
+
+const pass_data pass_data_loop_prefetch =
+{
+ GIMPLE_PASS, /* type */
+ "aprefetch", /* name */
+ OPTGROUP_LOOP, /* optinfo_flags */
+ true, /* has_gate */
+ true, /* has_execute */
+ TV_TREE_PREFETCH, /* tv_id */
+ ( PROP_cfg | PROP_ssa ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_loop_prefetch : public gimple_opt_pass
+{
+public:
+ pass_loop_prefetch (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ bool gate () { return gate_tree_ssa_loop_prefetch (); }
+ unsigned int execute () { return tree_ssa_loop_prefetch (); }
+
+}; // class pass_loop_prefetch
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_loop_prefetch (gcc::context *ctxt)
+{
+ return new pass_loop_prefetch (ctxt);
+}
+
+
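For completeness, the new pass object keeps the existing gate and the
"aprefetch" dump name, so (as a usage sketch, with foo.c a placeholder)
the pass can still be exercised and its dump inspected with:

  gcc -O2 -fprefetch-loop-arrays -fdump-tree-aprefetch-details foo.c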