/* gcc/tree-ssa-loop-prefetch.c  */
/* Array prefetching.
   Copyright (C) 2005, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "output.h"
#include "tree-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "recog.h"
#include "hashtab.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "diagnostic-core.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-data-ref.h"


/* FIXME: Needed for optabs, but this should all be moved to a TBD interface
   between the GIMPLE and RTL worlds.  */
#include "expr.h"
#include "optabs.h"

/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the single loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
        {
          a[255] = ...;             (0)
          a[i] = ...;               (1)
          a[i + 64] = ...;          (2)
          a[16*i] = ...;            (3)
          a[187*i] = ...;           (4)
          a[187*i + 50] = ...;      (5)
        }

      (0) obviously has PREFETCH_BEFORE 1
      (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
          location 64 iterations before it, and PREFETCH_MOD 64 (since
          it hits the same cache line otherwise).
      (2) has PREFETCH_MOD 64
      (3) has PREFETCH_MOD 4
      (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
          the cache line accessed by (5) is the same with probability only
          7/32.
      (5) has PREFETCH_MOD 1 as well.
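          (With a 64-byte cache line and a delta of 50 bytes, the two
          accesses share a line only when the first one falls within the
          first 64 - 50 = 14 bytes of its line, i.e. with probability
          14/64 = 7/32.)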

      Additionally, we use data dependence analysis to determine for each
      reference the distance till the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.

   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop.
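
      For instance, if a memory fetch takes roughly 200 cycles and one
      iteration of the loop roughly 50 cycles (both numbers made up purely
      for illustration), we would want to issue each prefetch about
      200 / 50 = 4 iterations ahead of the corresponding access.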

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We prefetch as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?

   A cost model is implemented to determine whether or not prefetching is
   profitable for a given loop.  The cost model has three heuristics:

   1. Function trip_count_to_ahead_ratio_too_small_p implements a
      heuristic that determines whether or not the loop has too few
      iterations (compared to ahead).  Prefetching is not likely to be
      beneficial if the trip count to ahead ratio is below a certain
      minimum.

   2. Function mem_ref_count_reasonable_p implements a heuristic that
      determines whether the given loop has enough CPU ops that can be
      overlapped with cache missing memory ops.  If not, the loop
      won't benefit from prefetching.  In the implementation,
      prefetching is not considered beneficial if the ratio between
      the instruction count and the mem ref count is below a certain
      minimum.

   3. Function insn_to_prefetch_ratio_too_small_p implements a
      heuristic that disables prefetching in a loop if the prefetching
      cost is above a certain limit.  The relative prefetching cost is
      estimated by taking the ratio between the prefetch count and the
      total instruction count (this models the I-cache cost).

   The limits used in these heuristics are defined as parameters with
   reasonable default values.  Machine-specific default values will be
   added later.

   Some other TODO:
      -- write and use more general reuse analysis (that could be also used
         in other cache aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by user
         (now we just ignore them; at the very least we should avoid
         optimizing loops in which the user put his own prefetches)
      -- we assume cache line size alignment of arrays; this could be
         improved.  */

/* Magic constants follow.  These should be replaced by machine specific
   numbers.  */

/* True if write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have a forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have a backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if single memory location
   is accessed several times in a single iteration of the loop.  */
#define NONTEMPORAL_FRACTION 16
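
/* For instance, on a target with a (merely assumed here) 32 kB L1 cache,
   reuses closer than 32768 / 16 = 2048 bytes of accessed memory are
   disregarded by the reuse-distance computation above.  */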

/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif

/* It is not profitable to prefetch when the trip count is not at
   least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
   For example, in a loop with a prefetch ahead distance of 10,
   supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
   profitable to prefetch when the trip count is greater or equal to
   40.  In that case, 30 out of the 40 iterations will benefit from
   prefetching.  */

#ifndef TRIP_COUNT_TO_AHEAD_RATIO
#define TRIP_COUNT_TO_AHEAD_RATIO 4
#endif

/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;                    /* Base of the reference.  */
  tree step;                    /* Step of the reference.  */
  struct mem_ref *refs;         /* References in the group.  */
  struct mem_ref_group *next;   /* Next group of references.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)

/* Do not generate a prefetch if the unroll factor is significantly less
   than what is required by the prefetch.  This is to avoid redundant
   prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
   2, prefetching requires unrolling the loop 16 times, but
   the loop is actually unrolled twice.  In this case (ratio = 8),
   prefetching is not likely to be beneficial.  */

#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif

/* Some of the prefetch computations have quadratic complexity.  We want to
   avoid huge compile times and, therefore, want to limit the amount of
   memory references per loop where we consider prefetching.  */

#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
#endif

/* The memory reference.  */

struct mem_ref
{
  gimple stmt;                  /* Statement in which the reference appears.  */
  tree mem;                     /* The reference.  */
  HOST_WIDE_INT delta;          /* Constant offset of the reference.  */
  struct mem_ref_group *group;  /* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
                                /* Prefetch only each PREFETCH_MOD-th
                                   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
                                /* Prefetch only first PREFETCH_BEFORE
                                   iterations.  */
  unsigned reuse_distance;      /* The amount of data accessed before the first
                                   reuse of this value.  */
  struct mem_ref *next;         /* The next reference in the group.  */
  unsigned write_p : 1;         /* Is it a write?  */
  unsigned independent_p : 1;   /* True if the reference is independent of
                                   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;/* Should we really issue the prefetch?  */
  unsigned storent_p : 1;       /* True if we changed the store to a
                                   nontemporal one.  */
};

/* Dumps information about reference REF to FILE.  */

static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
{
  fprintf (file, "Reference %p:\n", (void *) ref);

  fprintf (file, " group %p (base ", (void *) ref->group);
  print_generic_expr (file, ref->group->base, TDF_SLIM);
  fprintf (file, ", step ");
  if (cst_and_fits_in_hwi (ref->group->step))
    fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
  else
    print_generic_expr (file, ref->group->step, TDF_TREE);
  fprintf (file, ")\n");

  fprintf (file, " delta ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
  fprintf (file, "\n");

  fprintf (file, " %s\n", ref->write_p ? "write" : "read");

  fprintf (file, "\n");
}

/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */

static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if (operand_equal_p ((*groups)->step, step, 0)
          && operand_equal_p ((*groups)->base, base, 0))
        return *groups;

      /* If step is an integer constant, keep the list of groups sorted
         by decreasing step.  */
      if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
          && int_cst_value ((*groups)->step) < int_cst_value (step))
        break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->next = *groups;
  *groups = group;

  return group;
}

/* Records a memory reference MEM in GROUP with offset DELTA and write status
   WRITE_P.  The reference occurs in statement STMT.  */

static void
record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
            HOST_WIDE_INT delta, bool write_p)
{
  struct mem_ref **aref;

  /* Do not record the same address twice.  */
  for (aref = &group->refs; *aref; aref = &(*aref)->next)
    {
      /* It does not have to be possible for write reference to reuse the read
         prefetch, or vice versa.  */
      if (!WRITE_CAN_USE_READ_PREFETCH
          && write_p
          && !(*aref)->write_p)
        continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
          && !write_p
          && (*aref)->write_p)
        continue;

      if ((*aref)->delta == delta)
        return;
    }

  (*aref) = XNEW (struct mem_ref);
  (*aref)->stmt = stmt;
  (*aref)->mem = mem;
  (*aref)->delta = delta;
  (*aref)->write_p = write_p;
  (*aref)->prefetch_before = PREFETCH_ALL;
  (*aref)->prefetch_mod = 1;
  (*aref)->reuse_distance = 0;
  (*aref)->issue_prefetch_p = false;
  (*aref)->group = group;
  (*aref)->next = NULL;
  (*aref)->independent_p = false;
  (*aref)->storent_p = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_mem_ref (dump_file, *aref);
}

/* Release memory references in GROUPS.  */

static void
release_mem_refs (struct mem_ref_group *groups)
{
  struct mem_ref_group *next_g;
  struct mem_ref *ref, *next_r;

  for (; groups; groups = next_g)
    {
      next_g = groups->next;
      for (ref = groups->refs; ref; ref = next_r)
        {
          next_r = ref->next;
          free (ref);
        }
      free (groups);
    }
}

/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;            /* Loop of the reference.  */
  gimple stmt;                  /* Statement of the reference.  */
  tree *step;                   /* Step of the memory reference.  */
  HOST_WIDE_INT *delta;         /* Offset of the memory reference.  */
};

/* Analyzes a single INDEX of a memory reference to obtain information
   described at analyze_ref.  Callback for for_each_index.  */

static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
  struct ar_data *ar_data = (struct ar_data *) data;
  tree ibase, step, stepsize;
  HOST_WIDE_INT idelta = 0, imult = 1;
  affine_iv iv;

  if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
                  *index, &iv, true))
    return false;
  ibase = iv.base;
  step = iv.step;

  if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
      && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
    {
      idelta = int_cst_value (TREE_OPERAND (ibase, 1));
      ibase = TREE_OPERAND (ibase, 0);
    }
  if (cst_and_fits_in_hwi (ibase))
    {
      idelta += int_cst_value (ibase);
      ibase = build_int_cst (TREE_TYPE (ibase), 0);
    }

  if (TREE_CODE (base) == ARRAY_REF)
    {
      stepsize = array_ref_element_size (base);
      if (!cst_and_fits_in_hwi (stepsize))
        return false;
      imult = int_cst_value (stepsize);
      step = fold_build2 (MULT_EXPR, sizetype,
                          fold_convert (sizetype, step),
                          fold_convert (sizetype, stepsize));
      idelta *= imult;
    }

  if (*ar_data->step == NULL_TREE)
    *ar_data->step = step;
  else
    *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
                                  fold_convert (sizetype, *ar_data->step),
                                  fold_convert (sizetype, step));
  *ar_data->delta += idelta;
  *index = ibase;

  return true;
}

/* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
   STEP are integer constants and iter is number of iterations of LOOP.  The
   reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */

static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
             tree *step, HOST_WIDE_INT *delta,
             gimple stmt)
{
  struct ar_data ar_data;
  tree off;
  HOST_WIDE_INT bit_offset;
  tree ref = *ref_p;

  *step = NULL_TREE;
  *delta = 0;

  /* First strip off the component references.  Ignore bitfields.
     Also strip off the real and imaginary parts of a complex, so that
     they can have the same base.  */
  if (TREE_CODE (ref) == REALPART_EXPR
      || TREE_CODE (ref) == IMAGPART_EXPR
      || (TREE_CODE (ref) == COMPONENT_REF
          && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
    {
      if (TREE_CODE (ref) == IMAGPART_EXPR)
        *delta += int_size_in_bytes (TREE_TYPE (ref));
      ref = TREE_OPERAND (ref, 0);
    }

  *ref_p = ref;

  for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
    {
      off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
      bit_offset = TREE_INT_CST_LOW (off);
      gcc_assert (bit_offset % BITS_PER_UNIT == 0);

      *delta += bit_offset / BITS_PER_UNIT;
    }

  *base = unshare_expr (ref);
  ar_data.loop = loop;
  ar_data.stmt = stmt;
  ar_data.step = step;
  ar_data.delta = delta;
  return for_each_index (base, idx_analyze_ref, &ar_data);
}

/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */

static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
                              tree ref, bool write_p, gimple stmt)
{
  tree base, step;
  HOST_WIDE_INT delta;
  struct mem_ref_group *agrp;

  if (get_base_address (ref) == NULL)
    return false;

  if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
    return false;
  /* If analyze_ref fails the default is a NULL_TREE.  We can stop here.  */
  if (step == NULL_TREE)
    return false;

  /* Stop if the address of BASE could not be taken.  */
  if (may_be_nonaddressable_p (base))
    return false;

  /* Limit non-constant step prefetching only to the innermost loops.  */
  if (!cst_and_fits_in_hwi (step) && loop->inner != NULL)
    return false;

  /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
     are integer constants.  */
  agrp = find_or_create_group (refs, base, step);
  record_ref (agrp, stmt, ref, delta, write_p);

  return true;
}

/* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
   true if there are no other memory references inside the loop.  */

static struct mem_ref_group *
gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  gimple_stmt_iterator bsi;
  gimple stmt;
  tree lhs, rhs;
  struct mem_ref_group *refs = NULL;

  *no_other_refs = true;
  *ref_count = 0;

  /* Scan the loop body in order, so that the former references precede the
     later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
        continue;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          stmt = gsi_stmt (bsi);

          if (gimple_code (stmt) != GIMPLE_ASSIGN)
            {
              if (gimple_vuse (stmt)
                  || (is_gimple_call (stmt)
                      && !(gimple_call_flags (stmt) & ECF_CONST)))
                *no_other_refs = false;
              continue;
            }

          lhs = gimple_assign_lhs (stmt);
          rhs = gimple_assign_rhs1 (stmt);

          if (REFERENCE_CLASS_P (rhs))
            {
              *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                              rhs, false, stmt);
              *ref_count += 1;
            }
          if (REFERENCE_CLASS_P (lhs))
            {
              *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                              lhs, true, stmt);
              *ref_count += 1;
            }
        }
    }
  free (body);

  return refs;
}

/* Prune the prefetch candidate REF using the self-reuse.  */

static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
  HOST_WIDE_INT step;
  bool backward;

  /* If the step size is non constant, we cannot calculate prefetch_mod.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (step == 0)
    {
      /* Prefetch references to invariant address just once.  */
      ref->prefetch_before = 1;
      return;
    }

  if (backward)
    step = -step;

  if (step > PREFETCH_BLOCK)
    return;

  if ((backward && HAVE_BACKWARD_PREFETCH)
      || (!backward && HAVE_FORWARD_PREFETCH))
    {
      ref->prefetch_before = 1;
      return;
    }

  ref->prefetch_mod = PREFETCH_BLOCK / step;
}

/* Divides X by BY, rounding down.  */

static HOST_WIDE_INT
ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
{
  gcc_assert (by > 0);

  if (x >= 0)
    return x / by;
  else
    return (x + by - 1) / by;
}

/* Given a CACHE_LINE_SIZE and two inductive memory references
   with a common STEP greater than CACHE_LINE_SIZE and an address
   difference DELTA, compute the probability that they will fall
   in different cache lines.  Return true if the computed miss rate
   is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
   number of distinct iterations after which the pattern repeats itself.
   ALIGN_UNIT is the unit of alignment in bytes.  */

static bool
is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
                         HOST_WIDE_INT step, HOST_WIDE_INT delta,
                         unsigned HOST_WIDE_INT distinct_iters,
                         int align_unit)
{
  unsigned align, iter;
  int total_positions, miss_positions, max_allowed_miss_positions;
  int address1, address2, cache_line1, cache_line2;

  /* It always misses if delta is greater than or equal to the cache
     line size.  */
  if (delta >= (HOST_WIDE_INT) cache_line_size)
    return false;

  miss_positions = 0;
  total_positions = (cache_line_size / align_unit) * distinct_iters;
  max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;

  /* Iterate through all possible alignments of the first
     memory reference within its cache line.  */
  for (align = 0; align < cache_line_size; align += align_unit)

    /* Iterate through all distinct iterations.  */
    for (iter = 0; iter < distinct_iters; iter++)
      {
        address1 = align + step * iter;
        address2 = address1 + delta;
        cache_line1 = address1 / cache_line_size;
        cache_line2 = address2 / cache_line_size;
        if (cache_line1 != cache_line2)
          {
            miss_positions += 1;
            if (miss_positions > max_allowed_miss_positions)
              return false;
          }
      }
  return true;
}
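
/* Note that with the default ACCEPTABLE_MISS_RATE of 50, the function above
   tolerates at most 50/1000 = 5% of the simulated (alignment, iteration)
   positions falling into different cache lines.  */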

/* Prune the prefetch candidate REF using the reuse with BY.
   If BY_IS_BEFORE is true, BY is before REF in the loop.  */

static void
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
                          bool by_is_before)
{
  HOST_WIDE_INT step;
  bool backward;
  HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
  HOST_WIDE_INT delta = delta_b - delta_r;
  HOST_WIDE_INT hit_from;
  unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
  HOST_WIDE_INT reduced_step;
  unsigned HOST_WIDE_INT reduced_prefetch_block;
  tree ref_type;
  int align_unit;

  /* If the step is non constant we cannot calculate prefetch_before.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    {
      return;
    }

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (delta == 0)
    {
      /* If the references have the same address, only prefetch the
         former.  */
      if (by_is_before)
        ref->prefetch_before = 0;

      return;
    }

  if (!step)
    {
      /* If the reference addresses are invariant and fall into the
         same cache line, prefetch just the first one.  */
      if (!by_is_before)
        return;

      if (ddown (ref->delta, PREFETCH_BLOCK)
          != ddown (by->delta, PREFETCH_BLOCK))
        return;

      ref->prefetch_before = 0;
      return;
    }

  /* Only prune the reference that is behind in the array.  */
  if (backward)
    {
      if (delta > 0)
        return;

      /* Transform the data so that we may assume that the accesses
         are forward.  */
      delta = - delta;
      step = -step;
      delta_r = PREFETCH_BLOCK - 1 - delta_r;
      delta_b = PREFETCH_BLOCK - 1 - delta_b;
    }
  else
    {
      if (delta < 0)
        return;
    }

  /* Check whether the two references are likely to hit the same cache
     line, and how distant the iterations in which it occurs are from
     each other.  */

  if (step <= PREFETCH_BLOCK)
    {
      /* The accesses are sure to meet.  Let us check when.  */
      hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
      prefetch_before = (hit_from - delta_r + step - 1) / step;

      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
        prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* A more complicated case with step > prefetch_block.  First reduce
     the ratio between the step and the cache line size to its simplest
     terms.  The resulting denominator will then represent the number of
     distinct iterations after which each address will go back to its
     initial location within the cache line.  This computation assumes
     that PREFETCH_BLOCK is a power of two.  */
  prefetch_block = PREFETCH_BLOCK;
  reduced_prefetch_block = prefetch_block;
  reduced_step = step;
  while ((reduced_step & 1) == 0
         && reduced_prefetch_block > 1)
    {
      reduced_step >>= 1;
      reduced_prefetch_block >>= 1;
    }

  prefetch_before = delta / step;
  delta %= step;
  ref_type = TREE_TYPE (ref->mem);
  align_unit = TYPE_ALIGN (ref_type) / 8;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
                               reduced_prefetch_block, align_unit))
    {
      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
        prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* Try also the following iteration.  */
  prefetch_before++;
  delta = step - delta;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
                               reduced_prefetch_block, align_unit))
    {
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* The ref probably does not reuse by.  */
  return;
}

/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */

static void
prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
{
  struct mem_ref *prune_by;
  bool before = true;

  prune_ref_by_self_reuse (ref);

  for (prune_by = refs; prune_by; prune_by = prune_by->next)
    {
      if (prune_by == ref)
        {
          before = false;
          continue;
        }

      if (!WRITE_CAN_USE_READ_PREFETCH
          && ref->write_p
          && !prune_by->write_p)
        continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
          && !ref->write_p
          && prune_by->write_p)
        continue;

      prune_ref_by_group_reuse (ref, prune_by, before);
    }
}

/* Prune the prefetch candidates in GROUP using the reuse analysis.  */

static void
prune_group_by_reuse (struct mem_ref_group *group)
{
  struct mem_ref *ref_pruned;

  for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
    {
      prune_ref_by_reuse (ref_pruned, group->refs);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Reference %p:", (void *) ref_pruned);

          if (ref_pruned->prefetch_before == PREFETCH_ALL
              && ref_pruned->prefetch_mod == 1)
            fprintf (dump_file, " no restrictions");
          else if (ref_pruned->prefetch_before == 0)
            fprintf (dump_file, " do not prefetch");
          else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
            fprintf (dump_file, " prefetch once");
          else
            {
              if (ref_pruned->prefetch_before != PREFETCH_ALL)
                {
                  fprintf (dump_file, " prefetch before ");
                  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
                           ref_pruned->prefetch_before);
                }
              if (ref_pruned->prefetch_mod != 1)
                {
                  fprintf (dump_file, " prefetch mod ");
                  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
                           ref_pruned->prefetch_mod);
                }
            }
          fprintf (dump_file, "\n");
        }
    }
}

/* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */

static void
prune_by_reuse (struct mem_ref_group *groups)
{
  for (; groups; groups = groups->next)
    prune_group_by_reuse (groups);
}

/* Returns true if we should issue prefetch for REF.  */

static bool
should_issue_prefetch_p (struct mem_ref *ref)
{
  /* For now do not issue prefetches for only first few of the
     iterations.  */
  if (ref->prefetch_before != PREFETCH_ALL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
                 (void *) ref);
      return false;
    }

  /* Do not prefetch nontemporal stores.  */
  if (ref->storent_p)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
      return false;
    }

  return true;
}

/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */

static bool
schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
                     unsigned ahead)
{
  unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
  unsigned slots_per_prefetch;
  struct mem_ref *ref;
  bool any = false;

  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;

  /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
     AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
     it will need a prefetch slot.  */
  slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
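  /* For instance (purely illustrative numbers), ahead == 10 and
     unroll_factor == 4 give (10 + 2) / 4 == 3 slots per prefetch.  */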
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
             slots_per_prefetch);

  /* For now we just take memory references one by one and issue
     prefetches for as many as possible.  The groups are sorted
     starting with the largest step, since the references with
     large step are more likely to cause many cache misses.  */

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      {
        if (!should_issue_prefetch_p (ref))
          continue;

        /* The loop is far from being sufficiently unrolled for this
           prefetch.  Do not generate prefetch to avoid many redundant
           prefetches.  */
        if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
          continue;

        /* If we need to prefetch the reference each PREFETCH_MOD iterations,
           and we unroll the loop UNROLL_FACTOR times, we need to insert
           ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
           iteration.  */
        n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                        / ref->prefetch_mod);
        prefetch_slots = n_prefetches * slots_per_prefetch;

        /* If more than half of the prefetches would be lost anyway, do not
           issue the prefetch.  */
        if (2 * remaining_prefetch_slots < prefetch_slots)
          continue;

        ref->issue_prefetch_p = true;

        if (remaining_prefetch_slots <= prefetch_slots)
          return true;
        remaining_prefetch_slots -= prefetch_slots;
        any = true;
      }

  return any;
}

/* Return TRUE if no prefetch is going to be generated in the given
   GROUPS.  */

static bool
nothing_to_prefetch_p (struct mem_ref_group *groups)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        return false;

  return true;
}

/* Estimate the number of prefetches in the given GROUPS.
   UNROLL_FACTOR is the factor by which LOOP was unrolled.  */

static int
estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
{
  struct mem_ref *ref;
  unsigned n_prefetches;
  int prefetch_count = 0;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        {
          n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                          / ref->prefetch_mod);
          prefetch_count += n_prefetches;
        }

  return prefetch_count;
}

/* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */

static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
  HOST_WIDE_INT delta;
  tree addr, addr_base, write_p, local, forward;
  gimple prefetch;
  gimple_stmt_iterator bsi;
  unsigned n_prefetches, ap;
  bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Issued%s prefetch for %p.\n",
             nontemporal ? " nontemporal" : "",
             (void *) ref);

  bsi = gsi_for_stmt (ref->stmt);

  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                  / ref->prefetch_mod);
  addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
  addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
                                        true, NULL, true, GSI_SAME_STMT);
  write_p = ref->write_p ? integer_one_node : integer_zero_node;
  local = nontemporal ? integer_zero_node : integer_three_node;

  for (ap = 0; ap < n_prefetches; ap++)
    {
      if (cst_and_fits_in_hwi (ref->group->step))
        {
          /* Determine the address to prefetch.  */
          delta = (ahead + ap * ref->prefetch_mod) *
                  int_cst_value (ref->group->step);
          addr = fold_build_pointer_plus_hwi (addr_base, delta);
          addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
                                           true, GSI_SAME_STMT);
        }
      else
        {
          /* The step size is non-constant but loop-invariant.  We use the
             heuristic to simply prefetch AHEAD iterations ahead.  */
          forward = fold_build2 (MULT_EXPR, sizetype,
                                 fold_convert (sizetype, ref->group->step),
                                 fold_convert (sizetype, size_int (ahead)));
          addr = fold_build_pointer_plus (addr_base, forward);
          addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
                                           NULL, true, GSI_SAME_STMT);
        }
      /* Create the prefetch instruction.  */
      prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
                                    3, addr, write_p, local);
      gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
    }
}

/* Issue prefetches for the references in GROUPS into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
   factor by which LOOP was unrolled.  */

static void
issue_prefetches (struct mem_ref_group *groups,
                  unsigned unroll_factor, unsigned ahead)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (ref->issue_prefetch_p)
        issue_prefetch_ref (ref, unroll_factor, ahead);
}

/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */

static bool
nontemporal_store_p (struct mem_ref *ref)
{
  enum machine_mode mode;
  enum insn_code code;

  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
  if (!ref->write_p
      || !ref->independent_p
      || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
    return false;

  /* Check that we have the storent instruction for the mode.  */
  mode = TYPE_MODE (TREE_TYPE (ref->mem));
  if (mode == BLKmode)
    return false;

  code = optab_handler (storent_optab, mode);
  return code != CODE_FOR_nothing;
}

/* If REF is a nontemporal store, we mark the corresponding modify statement
   and return true.  Otherwise, we return false.  */

static bool
mark_nontemporal_store (struct mem_ref *ref)
{
  if (!nontemporal_store_p (ref))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
             (void *) ref);

  gimple_assign_set_nontemporal_move (ref->stmt, true);
  ref->storent_p = true;

  return true;
}

/* Issue a memory fence instruction after LOOP.  */

static void
emit_mfence_after_loop (struct loop *loop)
{
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);
  edge exit;
  gimple call;
  gimple_stmt_iterator bsi;
  unsigned i;

  FOR_EACH_VEC_ELT (edge, exits, i, exit)
    {
      call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);

      if (!single_pred_p (exit->dest)
          /* If possible, we prefer not to insert the fence on other paths
             in cfg.  */
          && !(exit->flags & EDGE_ABNORMAL))
        split_loop_exit_edge (exit);
      bsi = gsi_after_labels (exit->dest);

      gsi_insert_before (&bsi, call, GSI_NEW_STMT);
      mark_virtual_ops_for_renaming (call);
    }

  VEC_free (edge, heap, exits);
  update_ssa (TODO_update_ssa_only_virtuals);
}

/* Returns true if we can use storent in loop, false otherwise.  */

static bool
may_use_storent_in_loop_p (struct loop *loop)
{
  bool ret = true;

  if (loop->inner != NULL)
    return false;

  /* If we must issue a mfence insn after using storent, check that there
     is a suitable place for it at each of the loop exits.  */
  if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
    {
      VEC (edge, heap) *exits = get_loop_exit_edges (loop);
      unsigned i;
      edge exit;

      FOR_EACH_VEC_ELT (edge, exits, i, exit)
        if ((exit->flags & EDGE_ABNORMAL)
            && exit->dest == EXIT_BLOCK_PTR)
          ret = false;

      VEC_free (edge, heap, exits);
    }

  return ret;
}

/* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
   references in the loop.  */

static void
mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
{
  struct mem_ref *ref;
  bool any = false;

  if (!may_use_storent_in_loop_p (loop))
    return;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      any |= mark_nontemporal_store (ref);

  if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
    emit_mfence_after_loop (loop);
}

/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fill in DESC by the description of number of
   iterations.  */

static bool
should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
                      unsigned factor)
{
  if (!can_unroll_loop_p (loop, factor, desc))
    return false;

  /* We only consider loops without control flow for unrolling.  This is not
     a hard restriction -- tree_unroll_loop works with arbitrary loops
     as well; but the unrolling/prefetching is usually more profitable for
     loops consisting of a single basic block, and we want to limit the
     code growth.  */
  if (loop->num_nodes > 2)
    return false;

  return true;
}

/* Determine the coefficient by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  Description of
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */

static unsigned
determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
                         unsigned ninsns, struct tree_niter_desc *desc,
                         HOST_WIDE_INT est_niter)
{
  unsigned upper_bound;
  unsigned nfactor, factor, mod_constraint;
  struct mem_ref_group *agp;
  struct mem_ref *ref;

  /* First check whether the loop is not too large to unroll.  We ignore
     PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
     from unrolling them enough to make exactly one cache line covered by each
     iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
     us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;

  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would be never entered.  */
  if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
    upper_bound = est_niter;

  if (upper_bound <= 1)
    return 1;

  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
  factor = 1;
  for (agp = refs; agp; agp = agp->next)
    for (ref = agp->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        {
          mod_constraint = ref->prefetch_mod;
          nfactor = least_common_multiple (mod_constraint, factor);
          if (nfactor <= upper_bound)
            factor = nfactor;
        }
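
  /* For example (illustrative values only): prefetch_mod constraints of 2 and
     8 with an upper_bound of 16 would yield factor 8, the least common
     multiple that still respects the bound.  */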
b076a3fd
ZD
1333
1334 if (!should_unroll_loop_p (loop, desc, factor))
1335 return 1;
1336
1337 return factor;
1338}
1339
5417e022
ZD
1340/* Returns the total volume of the memory references REFS, taking into account
1341 reuses in the innermost loop and cache line size. TODO -- we should also
1342 take into account reuses across the iterations of the loops in the loop
1343 nest. */
1344
1345static unsigned
1346volume_of_references (struct mem_ref_group *refs)
1347{
1348 unsigned volume = 0;
1349 struct mem_ref_group *gr;
1350 struct mem_ref *ref;
1351
1352 for (gr = refs; gr; gr = gr->next)
1353 for (ref = gr->refs; ref; ref = ref->next)
1354 {
1355 /* Almost always reuses another value? */
1356 if (ref->prefetch_before != PREFETCH_ALL)
1357 continue;
1358
1359 /* If several iterations access the same cache line, use the size of
1360 the line divided by this number. Otherwise, a cache line is
1361 accessed in each iteration. TODO -- in the latter case, we should
1362 take the size of the reference into account, rounding it up on cache
1363 line size multiple. */
1364 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1365 }
1366 return volume;
1367}
1368
1369/* Returns the volume of memory references accessed across VEC iterations of
1370 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1371 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1372
1373static unsigned
1374volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1375{
1376 unsigned i;
1377
1378 for (i = 0; i < n; i++)
1379 if (vec[i] != 0)
1380 break;
1381
1382 if (i == n)
1383 return 0;
1384
1385 gcc_assert (vec[i] > 0);
1386
1387 /* We ignore the parts of the distance vector in subloops, since usually
1388 the numbers of iterations are much smaller. */
1389 return loop_sizes[i] * vec[i];
1390}
1391
1392/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDE
1393 at the position corresponding to the loop of the step. N is the depth
1394 of the considered loop nest, and, LOOP is its innermost loop. */
1395
1396static void
1397add_subscript_strides (tree access_fn, unsigned stride,
1398 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1399{
1400 struct loop *aloop;
1401 tree step;
1402 HOST_WIDE_INT astep;
1403 unsigned min_depth = loop_depth (loop) - n;
1404
1405 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1406 {
1407 aloop = get_chrec_loop (access_fn);
1408 step = CHREC_RIGHT (access_fn);
1409 access_fn = CHREC_LEFT (access_fn);
1410
1411 if ((unsigned) loop_depth (aloop) <= min_depth)
1412 continue;
1413
1414 if (host_integerp (step, 0))
1415 astep = tree_low_cst (step, 0);
1416 else
1417 astep = L1_CACHE_LINE_SIZE;
1418
1419 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1420
1421 }
1422}
1423
1424/* Returns the volume of memory references accessed between two consecutive
1425 self-reuses of the reference DR. We consider the subscripts of DR in N
1426 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1427 loops. LOOP is the innermost loop of the current loop nest. */
1428
1429static unsigned
1430self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1431 struct loop *loop)
1432{
1433 tree stride, access_fn;
1434 HOST_WIDE_INT *strides, astride;
1435 VEC (tree, heap) *access_fns;
1436 tree ref = DR_REF (dr);
1437 unsigned i, ret = ~0u;
1438
1439 /* In the following example:
1440
1441 for (i = 0; i < N; i++)
1442 for (j = 0; j < N; j++)
1443 use (a[j][i]);
1444 the same cache line is accessed each N steps (except if the change from
1445 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1446 we cannot rely purely on the results of the data dependence analysis.
1447
1448 Instead, we compute the stride of the reference in each loop, and consider
1449 the innermost loop in that the stride is less than cache size. */
1450
1451 strides = XCNEWVEC (HOST_WIDE_INT, n);
1452 access_fns = DR_ACCESS_FNS (dr);
1453
ac47786e 1454 FOR_EACH_VEC_ELT (tree, access_fns, i, access_fn)
5417e022
ZD
1455 {
1456 /* Keep track of the reference corresponding to the subscript, so that we
1457 know its stride. */
1458 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1459 ref = TREE_OPERAND (ref, 0);
b8698a0f 1460
5417e022
ZD
1461 if (TREE_CODE (ref) == ARRAY_REF)
1462 {
1463 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1464 if (host_integerp (stride, 1))
1465 astride = tree_low_cst (stride, 1);
1466 else
1467 astride = L1_CACHE_LINE_SIZE;
1468
1469 ref = TREE_OPERAND (ref, 0);
1470 }
1471 else
1472 astride = 1;
1473
1474 add_subscript_strides (access_fn, astride, strides, n, loop);
1475 }
1476
1477 for (i = n; i-- > 0; )
1478 {
1479 unsigned HOST_WIDE_INT s;
1480
1481 s = strides[i] < 0 ? -strides[i] : strides[i];
1482
1483 if (s < (unsigned) L1_CACHE_LINE_SIZE
1484 && (loop_sizes[i]
1485 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1486 {
1487 ret = loop_sizes[i];
1488 break;
1489 }
1490 }
1491
1492 free (strides);
1493 return ret;
1494}
1495
1496/* Determines the distance till the first reuse of each reference in REFS
79f5e442
ZD
1497 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1498 memory references in the loop. */
5417e022
ZD
1499
1500static void
79f5e442
ZD
1501determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1502 bool no_other_refs)
5417e022
ZD
1503{
1504 struct loop *nest, *aloop;
1505 VEC (data_reference_p, heap) *datarefs = NULL;
1506 VEC (ddr_p, heap) *dependences = NULL;
1507 struct mem_ref_group *gr;
79f5e442 1508 struct mem_ref *ref, *refb;
5417e022
ZD
1509 VEC (loop_p, heap) *vloops = NULL;
1510 unsigned *loop_data_size;
1511 unsigned i, j, n;
1512 unsigned volume, dist, adist;
1513 HOST_WIDE_INT vol;
1514 data_reference_p dr;
1515 ddr_p dep;
1516
1517 if (loop->inner)
1518 return;
1519
1520 /* Find the outermost loop of the loop nest of loop (we require that
1521 there are no sibling loops inside the nest). */
1522 nest = loop;
1523 while (1)
1524 {
1525 aloop = loop_outer (nest);
1526
1527 if (aloop == current_loops->tree_root
1528 || aloop->inner->next)
1529 break;
1530
1531 nest = aloop;
1532 }
1533
1534 /* For each loop, determine the amount of data accessed in each iteration.
1535 We use this to estimate whether the reference is evicted from the
1536 cache before its reuse. */
1537 find_loop_nest (nest, &vloops);
1538 n = VEC_length (loop_p, vloops);
1539 loop_data_size = XNEWVEC (unsigned, n);
1540 volume = volume_of_references (refs);
1541 i = n;
1542 while (i-- != 0)
1543 {
1544 loop_data_size[i] = volume;
1545 /* Bound the volume by the L2 cache size, since above this bound,
1546 all dependence distances are equivalent. */
1547 if (volume > L2_CACHE_SIZE_BYTES)
1548 continue;
1549
1550 aloop = VEC_index (loop_p, vloops, i);
b4a9343c 1551 vol = max_stmt_executions_int (aloop, false);
5417e022
ZD
1552 if (vol < 0)
1553 vol = expected_loop_iterations (aloop);
1554 volume *= vol;
1555 }
1556
1557 /* Prepare the references in the form suitable for data dependence
0d52bcc1 1558 analysis. We ignore unanalyzable data references (the results
5417e022
ZD
1559 are used just as a heuristics to estimate temporality of the
1560 references, hence we do not need to worry about correctness). */
1561 for (gr = refs; gr; gr = gr->next)
1562 for (ref = gr->refs; ref; ref = ref->next)
1563 {
5c640e29
SP
1564 dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
1565 ref->mem, ref->stmt, !ref->write_p);
5417e022
ZD
1566
1567 if (dr)
1568 {
1569 ref->reuse_distance = volume;
1570 dr->aux = ref;
1571 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1572 }
79f5e442
ZD
1573 else
1574 no_other_refs = false;
5417e022
ZD
1575 }
1576
ac47786e 1577 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
5417e022
ZD
1578 {
1579 dist = self_reuse_distance (dr, loop_data_size, n, loop);
3d9a9f94 1580 ref = (struct mem_ref *) dr->aux;
5417e022
ZD
1581 if (ref->reuse_distance > dist)
1582 ref->reuse_distance = dist;
79f5e442
ZD
1583
1584 if (no_other_refs)
1585 ref->independent_p = true;
5417e022
ZD
1586 }
1587
1588 compute_all_dependences (datarefs, &dependences, vloops, true);
1589
ac47786e 1590 FOR_EACH_VEC_ELT (ddr_p, dependences, i, dep)
5417e022
ZD
1591 {
1592 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1593 continue;
1594
3d9a9f94
KG
1595 ref = (struct mem_ref *) DDR_A (dep)->aux;
1596 refb = (struct mem_ref *) DDR_B (dep)->aux;
79f5e442 1597
5417e022
ZD
1598 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1599 || DDR_NUM_DIST_VECTS (dep) == 0)
1600 {
0d52bcc1 1601 /* If the dependence cannot be analyzed, assume that there might be
5417e022
ZD
1602 a reuse. */
1603 dist = 0;
b8698a0f 1604
79f5e442
ZD
1605 ref->independent_p = false;
1606 refb->independent_p = false;
5417e022
ZD
1607 }
1608 else
1609 {
0d52bcc1 1610 /* The distance vectors are normalized to be always lexicographically
5417e022
ZD
1611 positive, hence we cannot tell just from them whether DDR_A comes
1612 before DDR_B or vice versa. However, it is not important,
1613 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1614 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1615 in cache (and marking it as nontemporal would not affect
1616 anything). */
1617
1618 dist = volume;
1619 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1620 {
1621 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1622 loop_data_size, n);
1623
79f5e442
ZD
1624 /* If this is a dependence in the innermost loop (i.e., the
1625 distances in all superloops are zero) and it is not
1626 the trivial self-dependence with distance zero, record that
1627 the references are not completely independent. */
1628 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1629 && (ref != refb
1630 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1631 {
1632 ref->independent_p = false;
1633 refb->independent_p = false;
1634 }
1635
5417e022
ZD
1636 /* Ignore accesses closer than
1637 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1638 so that we use nontemporal prefetches e.g. if single memory
1639 location is accessed several times in a single iteration of
1640 the loop. */
1641 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1642 continue;
1643
1644 if (adist < dist)
1645 dist = adist;
1646 }
1647 }
1648
5417e022
ZD
1649 if (ref->reuse_distance > dist)
1650 ref->reuse_distance = dist;
79f5e442
ZD
1651 if (refb->reuse_distance > dist)
1652 refb->reuse_distance = dist;
1653 }
1654
1655 free_dependence_relations (dependences);
1656 free_data_refs (datarefs);
1657 free (loop_data_size);
1658
1659 if (dump_file && (dump_flags & TDF_DETAILS))
1660 {
1661 fprintf (dump_file, "Reuse distances:\n");
1662 for (gr = refs; gr; gr = gr->next)
1663 for (ref = gr->refs; ref; ref = ref->next)
1664 fprintf (dump_file, " ref %p distance %u\n",
1665 (void *) ref, ref->reuse_distance);
1666 }
1667}
1668
1669/* Determine whether or not the trip count to ahead ratio is too small based
1670 on profitability considerations.
db34470d 1671 AHEAD: the iteration ahead distance,
1672 EST_NITER: the estimated trip count. */
1673
1674static bool
1675trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1676{
1677 /* Assume trip count to ahead ratio is big enough if the trip count could not
1678 be estimated at compile time. */
1679 if (est_niter < 0)
1680 return false;
1681
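  /* Illustration (TRIP_COUNT_TO_AHEAD_RATIO is a fixed heuristic constant;
     assume it is 4): with AHEAD = 4, loops expected to roll fewer than 16
     times are rejected, since most of the issued prefetches would target
     iterations that never execute.  */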
1682 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1683 {
1684 if (dump_file && (dump_flags & TDF_DETAILS))
1685 fprintf (dump_file,
1686 "Not prefetching -- loop estimated to roll only %d times\n",
1687 (int) est_niter);
1688 return true;
1689 }
1690
1691 return false;
1692}
1693
1694/* Determine whether or not the number of memory references in the loop is
1695 reasonable based on profitability and compilation time considerations.
db34470d 1696 NINSNS: estimated number of instructions in the loop,
1697 MEM_REF_COUNT: total number of memory references in the loop. */
1698
b8698a0f 1699static bool
0bbe50f6 1700mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
db34470d 1701{
0bbe50f6 1702 int insn_to_mem_ratio;
1703
1704 if (mem_ref_count == 0)
1705 return false;
1706
1707 /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1708 (compute_all_dependences) have costs that are quadratic in the number of references.
1709 To avoid huge compilation time, we give up prefetching if mem_ref_count
1710 is too large. */
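  /* For instance, 200 analyzable references already imply on the order of
     20,000 reference pairs for compute_all_dependences, which is why the
     cap below is deliberately conservative.  */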
1711 if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1712 return false;
1713
1714 /* Prefetching improves performance by overlapping cache-missing
1715 memory accesses with CPU operations. If the loop does not have
1716 enough CPU operations to overlap with memory operations, prefetching
1717 won't give a significant benefit. One approximate way of checking
1718 this is to require the ratio of instructions to memory references to
1719 be above a certain limit. This approximation works well in practice.
1720 TODO: Implement a more precise computation by estimating the time
1721 for each CPU or memory op in the loop. Time estimates for memory ops
1722 should account for cache misses. */
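  /* Worked example (the threshold value is hypothetical): a loop body of
     40 insns containing 8 memory references gives a ratio of 5 and would
     pass a minimum ratio of, say, 3; a memory-copy style body of 6 insns
     with 4 references gives 1 and would be rejected.  */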
b8698a0f 1723 insn_to_mem_ratio = ninsns / mem_ref_count;
1724
1725 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1726 {
1727 if (dump_file && (dump_flags & TDF_DETAILS))
1728 fprintf (dump_file,
1729 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1730 insn_to_mem_ratio);
1731 return false;
1732 }
db34470d 1733
1734 return true;
1735}
1736
1737/* Determine whether or not the instruction to prefetch ratio in the loop is
1738 too small based on profitability considerations.
1739 NINSNS: estimated number of instructions in the loop,
1740 PREFETCH_COUNT: an estimate of the number of prefetches,
1741 UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
1742
1743static bool
1744insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1745 unsigned unroll_factor)
1746{
1747 int insn_to_prefetch_ratio;
1748
1749 /* Prefetching most likely causes performance degradation when the instruction
1750 to prefetch ratio is too small. Too many prefetch instructions in a loop
1751 may reduce the I-cache performance.
1752 (unroll_factor * ninsns) is used to estimate the number of instructions in
1753 the unrolled loop. This implementation is a bit simplistic -- the number
1754 of issued prefetch instructions is also affected by unrolling. So,
1755 prefetch_mod and the unroll factor should be taken into account when
1756 determining prefetch_count. Also, the number of insns of the unrolled
1757 loop will usually be significantly smaller than the number of insns of the
1758 original loop * unroll_factor (at least the induction variable increases
1759 and the exit branches will get eliminated), so it might be better to use
1760 tree_estimate_loop_size + estimated_unrolled_size. */
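  /* Illustrative numbers only: unroll_factor = 4 and ninsns = 30 estimate
     the unrolled body at 120 insns; prefetch_count = 6 then yields a ratio
     of 20, whereas prefetch_count = 30 would yield 4 and, with a minimum
     ratio of, say, 9, cause prefetching to be abandoned.  */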
1761 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1762 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
db34470d 1763 {
1764 if (dump_file && (dump_flags & TDF_DETAILS))
1765 fprintf (dump_file,
1766 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1767 insn_to_prefetch_ratio);
0bbe50f6 1768 return true;
db34470d 1769 }
b8698a0f 1770
0bbe50f6 1771 return false;
1772}
1773
1774
b076a3fd 1775/* Issue prefetch instructions for array references in LOOP. Returns
d73be268 1776 true if the LOOP was unrolled. */
1777
1778static bool
d73be268 1779loop_prefetch_arrays (struct loop *loop)
1780{
1781 struct mem_ref_group *refs;
1782 unsigned ahead, ninsns, time, unroll_factor;
1783 HOST_WIDE_INT est_niter;
b076a3fd 1784 struct tree_niter_desc desc;
79f5e442 1785 bool unrolled = false, no_other_refs;
1786 unsigned prefetch_count;
1787 unsigned mem_ref_count;
b076a3fd 1788
efd8f750 1789 if (optimize_loop_nest_for_size_p (loop))
1790 {
1791 if (dump_file && (dump_flags & TDF_DETAILS))
1792 fprintf (dump_file, " ignored (cold area)\n");
1793 return false;
1794 }
1795
1796 /* FIXME: the time should be weighted by the probabilities of the blocks in
1797 the loop body. */
1798 time = tree_num_loop_insns (loop, &eni_time_weights);
1799 if (time == 0)
1800 return false;
1801
1802 ahead = (PREFETCH_LATENCY + time - 1) / time;
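  /* AHEAD is the prefetch distance in iterations: the prefetch latency
     rounded up to whole loop iterations.  E.g. (hypothetical costs) a
     latency of 200 and a body costing 60 time units give ahead = 4.  */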
b4a9343c 1803 est_niter = max_stmt_executions_int (loop, false);
1804
1805 /* Prefetching is not likely to be profitable if the trip count to ahead
1806 ratio is too small. */
1807 if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1808 return false;
1809
1810 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1811
b076a3fd 1812 /* Step 1: gather the memory references. */
db34470d 1813 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
b076a3fd 1814
1815 /* Give up prefetching if the number of memory references in the
1816 loop is not reasonable based on profitability and compilation time
1817 considerations. */
1818 if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1819 goto fail;
1820
1821 /* Step 2: estimate the reuse effects. */
1822 prune_by_reuse (refs);
1823
d5058523 1824 if (nothing_to_prefetch_p (refs))
1825 goto fail;
1826
79f5e442 1827 determine_loop_nest_reuse (loop, refs, no_other_refs);
5417e022 1828
0bbe50f6 1829 /* Step 3: determine unroll factor. */
1830 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1831 est_niter);
1832
1833 /* Estimate prefetch count for the unrolled loop. */
1834 prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1835 if (prefetch_count == 0)
1836 goto fail;
1837
2711355f 1838 if (dump_file && (dump_flags & TDF_DETAILS))
b8698a0f 1839 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
d81f5387 1840 HOST_WIDE_INT_PRINT_DEC "\n"
1841 "insn count %d, mem ref count %d, prefetch count %d\n",
1842 ahead, unroll_factor, est_niter,
1843 ninsns, mem_ref_count, prefetch_count);
db34470d 1844
1845 /* Prefetching is not likely to be profitable if the instruction to prefetch
1846 ratio is too small. */
1847 if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1848 unroll_factor))
1849 goto fail;
1850
1851 mark_nontemporal_stores (loop, refs);
2711355f 1852
1853 /* Step 4: what to prefetch? */
1854 if (!schedule_prefetches (refs, unroll_factor, ahead))
1855 goto fail;
1856
1857 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1858 iterations so that we do not issue superfluous prefetches. */
1859 if (unroll_factor != 1)
1860 {
d73be268 1861 tree_unroll_loop (loop, unroll_factor,
1862 single_dom_exit (loop), &desc);
1863 unrolled = true;
1864 }
1865
1866 /* Step 6: issue the prefetches. */
1867 issue_prefetches (refs, unroll_factor, ahead);
1868
1869fail:
1870 release_mem_refs (refs);
1871 return unrolled;
1872}
1873
d73be268 1874/* Issue prefetch instructions for array references in loops. */
b076a3fd 1875
c7f965b6 1876unsigned int
d73be268 1877tree_ssa_prefetch_arrays (void)
b076a3fd 1878{
42fd6772 1879 loop_iterator li;
1880 struct loop *loop;
1881 bool unrolled = false;
c7f965b6 1882 int todo_flags = 0;
1883
1884 if (!HAVE_prefetch
1885 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1886 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1887 of processor costs and i486 does not have prefetch, but
1888 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1889 || PREFETCH_BLOCK == 0)
c7f965b6 1890 return 0;
b076a3fd 1891
1892 if (dump_file && (dump_flags & TDF_DETAILS))
1893 {
1894 fprintf (dump_file, "Prefetching parameters:\n");
1895 fprintf (dump_file, " simultaneous prefetches: %d\n",
1896 SIMULTANEOUS_PREFETCHES);
1897 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
47eb5b32 1898 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1899 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1900 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
5417e022 1901 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1902 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1903 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
db34470d 1904 MIN_INSN_TO_PREFETCH_RATIO);
b8698a0f 1905 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
db34470d 1906 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1907 fprintf (dump_file, "\n");
1908 }
1909
1910 initialize_original_copy_tables ();
1911
e79983f4 1912 if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
b076a3fd 1913 {
1914 tree type = build_function_type_list (void_type_node,
1915 const_ptr_type_node, NULL_TREE);
1916 tree decl = add_builtin_function ("__builtin_prefetch", type,
1917 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1918 NULL, NULL_TREE);
b076a3fd 1919 DECL_IS_NOVOPS (decl) = true;
e79983f4 1920 set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
1921 }
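  /* The prefetches emitted later by issue_prefetches are calls to this
     declaration of the form __builtin_prefetch (addr, rw, locality), with
     the locality hint lowered for references classified as nontemporal.  */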
1922
1923 /* We assume that the size of a cache line is a power of two, so verify this
1924 here. */
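 /* A nonzero value X is a power of two iff (X & (X - 1)) == 0; e.g.
    64 & 63 == 0, whereas 48 & 47 == 32.  PREFETCH_BLOCK == 0 was already
    rejected above.  */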
1925 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1926
42fd6772 1927 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
b076a3fd 1928 {
1929 if (dump_file && (dump_flags & TDF_DETAILS))
1930 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1931
d73be268 1932 unrolled |= loop_prefetch_arrays (loop);
1933
1934 if (dump_file && (dump_flags & TDF_DETAILS))
1935 fprintf (dump_file, "\n\n");
1936 }
1937
1938 if (unrolled)
1939 {
1940 scev_reset ();
c7f965b6 1941 todo_flags |= TODO_cleanup_cfg;
1942 }
1943
1944 free_original_copy_tables ();
c7f965b6 1945 return todo_flags;
b076a3fd 1946}