/* Array prefetching.
   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "tm_p.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "diagnostic-core.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-data-ref.h"


/* FIXME: Needed for optabs, but this should all be moved to a TBD interface
   between the GIMPLE and RTL worlds.  */
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "recog.h"

/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the single loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
        {
          a[255] = ...;         (0)
          a[i] = ...;           (1)
          a[i + 64] = ...;      (2)
          a[16*i] = ...;        (3)
          a[187*i] = ...;       (4)
          a[187*i + 50] = ...;  (5)
        }

       (0) obviously has PREFETCH_BEFORE 1
       (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
           location 64 iterations before it, and PREFETCH_MOD 64 (since
           it hits the same cache line otherwise).
       (2) has PREFETCH_MOD 64
       (3) has PREFETCH_MOD 4
       (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
           the cache line accessed by (5) is the same with probability only
           7/32.
       (5) has PREFETCH_MOD 1 as well.

      Additionally, we use data dependence analysis to determine for each
      reference the distance till the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.

   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop.

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We prefetch as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?

   A cost model is implemented to determine whether or not prefetching is
   profitable for a given loop.  The cost model has three heuristics:

   1. Function trip_count_to_ahead_ratio_too_small_p implements a
      heuristic that determines whether or not the loop has too few
      iterations (compared to ahead).  Prefetching is not likely to be
      beneficial if the trip count to ahead ratio is below a certain
      minimum.

   2. Function mem_ref_count_reasonable_p implements a heuristic that
      determines whether the given loop has enough CPU ops that can be
      overlapped with cache missing memory ops.  If not, the loop
      won't benefit from prefetching.  In the implementation,
      prefetching is not considered beneficial if the ratio between
      the instruction count and the mem ref count is below a certain
      minimum.

   3. Function insn_to_prefetch_ratio_too_small_p implements a
      heuristic that disables prefetching in a loop if the prefetching
      cost is above a certain limit.  The relative prefetching cost is
      estimated by taking the ratio between the prefetch count and the
      total instruction count (this models the I-cache cost).

   The limits used in these heuristics are defined as parameters with
   reasonable default values.  Machine-specific default values will be
   added later.

   Some other TODO:
      -- write and use more general reuse analysis (that could also be used
         in other cache-aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by the user
         (now we just ignore them; at the very least we should avoid
         optimizing loops in which the user put his own prefetches)
      -- we assume cache line size alignment of arrays; this could be
         improved.  */

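/* Illustrative sketch (not part of the pass): for the example loop above,
   and assuming a hypothetical ahead distance of 4 iterations with no
   unrolling, the transformation conceptually amounts to inserting calls like
   the ones below.  The real pass emits BUILT_IN_PREFETCH calls in GIMPLE,
   honors PREFETCH_MOD by unrolling the loop, and derives the distances from
   target-specific parameters.  */
#if 0
void
example (char *a, unsigned max)
{
  unsigned i;
  for (i = 0; i < max; i++)
    {
      __builtin_prefetch (&a[i + 4], 1, 3);            /* for (1)/(2)  */
      __builtin_prefetch (&a[16 * (i + 4)], 1, 3);     /* for (3)      */
      a[i] = 0;
      a[i + 64] = 0;
      a[16 * i] = 0;
    }
}
#endif
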
/* Magic constants follow.  These should be replaced by machine specific
   numbers.  */

/* True if write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have a forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have a backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if single memory location
   is accessed several times in a single iteration of the loop.  */
#define NONTEMPORAL_FRACTION 16

/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif

/* It is not profitable to prefetch when the trip count is not at
   least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
   For example, in a loop with a prefetch ahead distance of 10,
   supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
   profitable to prefetch when the trip count is greater or equal to
   40.  In that case, 30 out of the 40 iterations will benefit from
   prefetching.  */

#ifndef TRIP_COUNT_TO_AHEAD_RATIO
#define TRIP_COUNT_TO_AHEAD_RATIO 4
#endif

/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;                    /* Base of the reference.  */
  tree step;                    /* Step of the reference.  */
  struct mem_ref *refs;         /* References in the group.  */
  struct mem_ref_group *next;   /* Next group of references.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)

/* Do not generate a prefetch if the unroll factor is significantly less
   than what is required by the prefetch.  This is to avoid redundant
   prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
   2, prefetching requires unrolling the loop 16 times, but
   the loop is actually unrolled twice.  In this case (ratio = 8),
   prefetching is not likely to be beneficial.  */

#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif

/* Some of the prefetch computations have quadratic complexity.  We want to
   avoid huge compile times and, therefore, want to limit the amount of
   memory references per loop where we consider prefetching.  */

#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
#endif

/* The memory reference.  */

struct mem_ref
{
  gimple stmt;                  /* Statement in which the reference appears.  */
  tree mem;                     /* The reference.  */
  HOST_WIDE_INT delta;          /* Constant offset of the reference.  */
  struct mem_ref_group *group;  /* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
                                /* Prefetch only each PREFETCH_MOD-th
                                   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
                                /* Prefetch only first PREFETCH_BEFORE
                                   iterations.  */
  unsigned reuse_distance;      /* The amount of data accessed before the first
                                   reuse of this value.  */
  struct mem_ref *next;         /* The next reference in the group.  */
  unsigned write_p : 1;         /* Is it a write?  */
  unsigned independent_p : 1;   /* True if the reference is independent of
                                   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;        /* Should we really issue the prefetch?  */
  unsigned storent_p : 1;       /* True if we changed the store to a
                                   nontemporal one.  */
};

/* Dumps information about a memory reference.  */

static void
dump_mem_details (FILE *file, tree base, tree step,
                  HOST_WIDE_INT delta, bool write_p)
{
  fprintf (file, "(base ");
  print_generic_expr (file, base, TDF_SLIM);
  fprintf (file, ", step ");
  if (cst_and_fits_in_hwi (step))
    fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
  else
    print_generic_expr (file, step, TDF_TREE);
  fprintf (file, ")\n");
  fprintf (file, " delta ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
  fprintf (file, "\n");
  fprintf (file, " %s\n", write_p ? "write" : "read");
  fprintf (file, "\n");
}

/* Dumps information about reference REF to FILE.  */

static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
{
  fprintf (file, "Reference %p:\n", (void *) ref);

  fprintf (file, " group %p ", (void *) ref->group);

  dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
                    ref->write_p);
}

/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */

static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if (operand_equal_p ((*groups)->step, step, 0)
          && operand_equal_p ((*groups)->base, base, 0))
        return *groups;

      /* If step is an integer constant, keep the list of groups sorted
         by decreasing step.  */
      if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
          && int_cst_value ((*groups)->step) < int_cst_value (step))
        break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->next = *groups;
  *groups = group;

  return group;
}

/* Records a memory reference MEM in GROUP with offset DELTA and write status
   WRITE_P.  The reference occurs in statement STMT.  */

static void
record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
            HOST_WIDE_INT delta, bool write_p)
{
  struct mem_ref **aref;

  /* Do not record the same address twice.  */
  for (aref = &group->refs; *aref; aref = &(*aref)->next)
    {
      /* It does not have to be possible for a write reference to reuse the
         read prefetch, or vice versa.  */
      if (!WRITE_CAN_USE_READ_PREFETCH
          && write_p
          && !(*aref)->write_p)
        continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
          && !write_p
          && (*aref)->write_p)
        continue;

      if ((*aref)->delta == delta)
        return;
    }

  (*aref) = XNEW (struct mem_ref);
  (*aref)->stmt = stmt;
  (*aref)->mem = mem;
  (*aref)->delta = delta;
  (*aref)->write_p = write_p;
  (*aref)->prefetch_before = PREFETCH_ALL;
  (*aref)->prefetch_mod = 1;
  (*aref)->reuse_distance = 0;
  (*aref)->issue_prefetch_p = false;
  (*aref)->group = group;
  (*aref)->next = NULL;
  (*aref)->independent_p = false;
  (*aref)->storent_p = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_mem_ref (dump_file, *aref);
}

/* Release memory references in GROUPS.  */

static void
release_mem_refs (struct mem_ref_group *groups)
{
  struct mem_ref_group *next_g;
  struct mem_ref *ref, *next_r;

  for (; groups; groups = next_g)
    {
      next_g = groups->next;
      for (ref = groups->refs; ref; ref = next_r)
        {
          next_r = ref->next;
          free (ref);
        }
      free (groups);
    }
}

/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;            /* Loop of the reference.  */
  gimple stmt;                  /* Statement of the reference.  */
  tree *step;                   /* Step of the memory reference.  */
  HOST_WIDE_INT *delta;         /* Offset of the memory reference.  */
};

/* Analyzes a single INDEX of a memory reference to obtain information
   described at analyze_ref.  Callback for for_each_index.  */

static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
  struct ar_data *ar_data = (struct ar_data *) data;
  tree ibase, step, stepsize;
  HOST_WIDE_INT idelta = 0, imult = 1;
  affine_iv iv;

  if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
                  *index, &iv, true))
    return false;
  ibase = iv.base;
  step = iv.step;

  if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
      && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
    {
      idelta = int_cst_value (TREE_OPERAND (ibase, 1));
      ibase = TREE_OPERAND (ibase, 0);
    }
  if (cst_and_fits_in_hwi (ibase))
    {
      idelta += int_cst_value (ibase);
      ibase = build_int_cst (TREE_TYPE (ibase), 0);
    }

  if (TREE_CODE (base) == ARRAY_REF)
    {
      stepsize = array_ref_element_size (base);
      if (!cst_and_fits_in_hwi (stepsize))
        return false;
      imult = int_cst_value (stepsize);
      step = fold_build2 (MULT_EXPR, sizetype,
                          fold_convert (sizetype, step),
                          fold_convert (sizetype, stepsize));
      idelta *= imult;
    }

  if (*ar_data->step == NULL_TREE)
    *ar_data->step = step;
  else
    *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
                                  fold_convert (sizetype, *ar_data->step),
                                  fold_convert (sizetype, step));
  *ar_data->delta += idelta;
  *index = ibase;

  return true;
}

/* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
   STEP are integer constants and iter is number of iterations of LOOP.  The
   reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */

static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
             tree *step, HOST_WIDE_INT *delta,
             gimple stmt)
{
  struct ar_data ar_data;
  tree off;
  HOST_WIDE_INT bit_offset;
  tree ref = *ref_p;

  *step = NULL_TREE;
  *delta = 0;

  /* First strip off the component references.  Ignore bitfields.
     Also strip off the real and imaginary parts of a complex, so that
     they can have the same base.  */
  if (TREE_CODE (ref) == REALPART_EXPR
      || TREE_CODE (ref) == IMAGPART_EXPR
      || (TREE_CODE (ref) == COMPONENT_REF
          && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
    {
      if (TREE_CODE (ref) == IMAGPART_EXPR)
        *delta += int_size_in_bytes (TREE_TYPE (ref));
      ref = TREE_OPERAND (ref, 0);
    }

  *ref_p = ref;

  for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
    {
      off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
      bit_offset = TREE_INT_CST_LOW (off);
      gcc_assert (bit_offset % BITS_PER_UNIT == 0);

      *delta += bit_offset / BITS_PER_UNIT;
    }

  *base = unshare_expr (ref);
  ar_data.loop = loop;
  ar_data.stmt = stmt;
  ar_data.step = step;
  ar_data.delta = delta;
  return for_each_index (base, idx_analyze_ref, &ar_data);
}

/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */

static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
                              tree ref, bool write_p, gimple stmt)
{
  tree base, step;
  HOST_WIDE_INT delta;
  struct mem_ref_group *agrp;

  if (get_base_address (ref) == NULL)
    return false;

  if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
    return false;
  /* If analyze_ref fails the default is a NULL_TREE.  We can stop here.  */
  if (step == NULL_TREE)
    return false;

  /* Stop if the address of BASE could not be taken.  */
  if (may_be_nonaddressable_p (base))
    return false;

  /* Limit non-constant step prefetching only to the innermost loops and
     only when the step is loop invariant in the entire loop nest.  */
  if (!cst_and_fits_in_hwi (step))
    {
      if (loop->inner != NULL)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Memory expression %p\n",(void *) ref );
              print_generic_expr (dump_file, ref, TDF_TREE);
              fprintf (dump_file,":");
              dump_mem_details (dump_file, base, step, delta, write_p);
              fprintf (dump_file,
                       "Ignoring %p, non-constant step prefetching is "
                       "limited to inner most loops \n",
                       (void *) ref);
            }
          return false;
        }
      else
        {
          if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "Memory expression %p\n",(void *) ref );
                  print_generic_expr (dump_file, ref, TDF_TREE);
                  fprintf (dump_file,":");
                  dump_mem_details (dump_file, base, step, delta, write_p);
                  fprintf (dump_file,
                           "Not prefetching, ignoring %p due to "
                           "loop variant step\n",
                           (void *) ref);
                }
              return false;
            }
        }
    }

  /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
     are integer constants.  */
  agrp = find_or_create_group (refs, base, step);
  record_ref (agrp, stmt, ref, delta, write_p);

  return true;
}

/* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
   true if there are no other memory references inside the loop.  */

static struct mem_ref_group *
gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  gimple_stmt_iterator bsi;
  gimple stmt;
  tree lhs, rhs;
  struct mem_ref_group *refs = NULL;

  *no_other_refs = true;
  *ref_count = 0;

  /* Scan the loop body in order, so that the former references precede the
     later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
        continue;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          stmt = gsi_stmt (bsi);

          if (gimple_code (stmt) != GIMPLE_ASSIGN)
            {
              if (gimple_vuse (stmt)
                  || (is_gimple_call (stmt)
                      && !(gimple_call_flags (stmt) & ECF_CONST)))
                *no_other_refs = false;
              continue;
            }

          lhs = gimple_assign_lhs (stmt);
          rhs = gimple_assign_rhs1 (stmt);

          if (REFERENCE_CLASS_P (rhs))
            {
              *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                              rhs, false, stmt);
              *ref_count += 1;
            }
          if (REFERENCE_CLASS_P (lhs))
            {
              *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                              lhs, true, stmt);
              *ref_count += 1;
            }
        }
    }
  free (body);

  return refs;
}

/* Prune the prefetch candidate REF using the self-reuse.  */

static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
  HOST_WIDE_INT step;
  bool backward;

  /* If the step size is non constant, we cannot calculate prefetch_mod.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (step == 0)
    {
      /* Prefetch references to invariant address just once.  */
      ref->prefetch_before = 1;
      return;
    }

  if (backward)
    step = -step;

  if (step > PREFETCH_BLOCK)
    return;

  if ((backward && HAVE_BACKWARD_PREFETCH)
      || (!backward && HAVE_FORWARD_PREFETCH))
    {
      ref->prefetch_before = 1;
      return;
    }

  ref->prefetch_mod = PREFETCH_BLOCK / step;
}

/* Divides X by BY, rounding down.  */

static HOST_WIDE_INT
ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
{
  gcc_assert (by > 0);

  if (x >= 0)
    return x / by;
  else
    return (x + by - 1) / by;
}

/* Given a CACHE_LINE_SIZE and two inductive memory references
   with a common STEP greater than CACHE_LINE_SIZE and an address
   difference DELTA, compute the probability that they will fall
   in different cache lines.  Return true if the computed miss rate
   is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
   number of distinct iterations after which the pattern repeats itself.
   ALIGN_UNIT is the unit of alignment in bytes.  */

static bool
is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
                         HOST_WIDE_INT step, HOST_WIDE_INT delta,
                         unsigned HOST_WIDE_INT distinct_iters,
                         int align_unit)
{
  unsigned align, iter;
  int total_positions, miss_positions, max_allowed_miss_positions;
  int address1, address2, cache_line1, cache_line2;

  /* It always misses if delta is greater than or equal to the cache
     line size.  */
  if (delta >= (HOST_WIDE_INT) cache_line_size)
    return false;

  miss_positions = 0;
  total_positions = (cache_line_size / align_unit) * distinct_iters;
  max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;

  /* Iterate through all possible alignments of the first
     memory reference within its cache line.  */
  for (align = 0; align < cache_line_size; align += align_unit)

    /* Iterate through all distinct iterations.  */
    for (iter = 0; iter < distinct_iters; iter++)
      {
        address1 = align + step * iter;
        address2 = address1 + delta;
        cache_line1 = address1 / cache_line_size;
        cache_line2 = address2 / cache_line_size;
        if (cache_line1 != cache_line2)
          {
            miss_positions += 1;
            if (miss_positions > max_allowed_miss_positions)
              return false;
          }
      }
  return true;
}

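/* A worked example (illustrative, using the default ACCEPTABLE_MISS_RATE of
   50 per thousand): for references (4) and (5) from the comment at the top
   of the file we have cache_line_size = 64, step = 187, delta = 50 and
   align_unit = 1.  Two addresses that differ by 50 fall into the same
   64-byte line for only 14 of the 64 possible alignments, i.e. with
   probability 14/64 = 7/32, so roughly 78% of the sampled positions miss.
   That is far above the allowed 5%, hence is_miss_rate_acceptable returns
   false and both references keep being prefetched.  */
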
/* Prune the prefetch candidate REF using the reuse with BY.
   If BY_IS_BEFORE is true, BY is before REF in the loop.  */

static void
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
                          bool by_is_before)
{
  HOST_WIDE_INT step;
  bool backward;
  HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
  HOST_WIDE_INT delta = delta_b - delta_r;
  HOST_WIDE_INT hit_from;
  unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
  HOST_WIDE_INT reduced_step;
  unsigned HOST_WIDE_INT reduced_prefetch_block;
  tree ref_type;
  int align_unit;

  /* If the step is non constant we cannot calculate prefetch_before.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (delta == 0)
    {
      /* If the references have the same address, only prefetch the
         former.  */
      if (by_is_before)
        ref->prefetch_before = 0;

      return;
    }

  if (!step)
    {
      /* If the reference addresses are invariant and fall into the
         same cache line, prefetch just the first one.  */
      if (!by_is_before)
        return;

      if (ddown (ref->delta, PREFETCH_BLOCK)
          != ddown (by->delta, PREFETCH_BLOCK))
        return;

      ref->prefetch_before = 0;
      return;
    }

  /* Only prune the reference that is behind in the array.  */
  if (backward)
    {
      if (delta > 0)
        return;

      /* Transform the data so that we may assume that the accesses
         are forward.  */
      delta = - delta;
      step = -step;
      delta_r = PREFETCH_BLOCK - 1 - delta_r;
      delta_b = PREFETCH_BLOCK - 1 - delta_b;
    }
  else
    {
      if (delta < 0)
        return;
    }

  /* Check whether the two references are likely to hit the same cache
     line, and how distant the iterations in which that occurs are from
     each other.  */

  if (step <= PREFETCH_BLOCK)
    {
      /* The accesses are sure to meet.  Let us check when.  */
      hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
      prefetch_before = (hit_from - delta_r + step - 1) / step;

      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
        prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* A more complicated case with step > prefetch_block.  First reduce
     the ratio between the step and the cache line size to its simplest
     terms.  The resulting denominator will then represent the number of
     distinct iterations after which each address will go back to its
     initial location within the cache line.  This computation assumes
     that PREFETCH_BLOCK is a power of two.  */
  prefetch_block = PREFETCH_BLOCK;
  reduced_prefetch_block = prefetch_block;
  reduced_step = step;
  while ((reduced_step & 1) == 0
         && reduced_prefetch_block > 1)
    {
      reduced_step >>= 1;
      reduced_prefetch_block >>= 1;
    }

  prefetch_before = delta / step;
  delta %= step;
  ref_type = TREE_TYPE (ref->mem);
  align_unit = TYPE_ALIGN (ref_type) / 8;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
                               reduced_prefetch_block, align_unit))
    {
      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
        prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* Try also the following iteration.  */
  prefetch_before++;
  delta = step - delta;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
                               reduced_prefetch_block, align_unit))
    {
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* The ref probably does not reuse by.  */
  return;
}

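/* A worked example (illustrative): take references (1) a[i] and (2) a[i + 64]
   from the comment at the top of the file, so step = 1, delta_r = 0,
   delta_b = 64 and PREFETCH_BLOCK = 64.  Since step <= PREFETCH_BLOCK the
   accesses are sure to meet: hit_from = ddown (64, 64) * 64 = 64 and
   prefetch_before = (64 - 0 + 1 - 1) / 1 = 64, which is how (1) ends up
   with PREFETCH_BEFORE 64.  */
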
/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */

static void
prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
{
  struct mem_ref *prune_by;
  bool before = true;

  prune_ref_by_self_reuse (ref);

  for (prune_by = refs; prune_by; prune_by = prune_by->next)
    {
      if (prune_by == ref)
        {
          before = false;
          continue;
        }

      if (!WRITE_CAN_USE_READ_PREFETCH
          && ref->write_p
          && !prune_by->write_p)
        continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
          && !ref->write_p
          && prune_by->write_p)
        continue;

      prune_ref_by_group_reuse (ref, prune_by, before);
    }
}

/* Prune the prefetch candidates in GROUP using the reuse analysis.  */

static void
prune_group_by_reuse (struct mem_ref_group *group)
{
  struct mem_ref *ref_pruned;

  for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
    {
      prune_ref_by_reuse (ref_pruned, group->refs);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Reference %p:", (void *) ref_pruned);

          if (ref_pruned->prefetch_before == PREFETCH_ALL
              && ref_pruned->prefetch_mod == 1)
            fprintf (dump_file, " no restrictions");
          else if (ref_pruned->prefetch_before == 0)
            fprintf (dump_file, " do not prefetch");
          else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
            fprintf (dump_file, " prefetch once");
          else
            {
              if (ref_pruned->prefetch_before != PREFETCH_ALL)
                {
                  fprintf (dump_file, " prefetch before ");
                  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
                           ref_pruned->prefetch_before);
                }
              if (ref_pruned->prefetch_mod != 1)
                {
                  fprintf (dump_file, " prefetch mod ");
                  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
                           ref_pruned->prefetch_mod);
                }
            }
          fprintf (dump_file, "\n");
        }
    }
}

/* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */

static void
prune_by_reuse (struct mem_ref_group *groups)
{
  for (; groups; groups = groups->next)
    prune_group_by_reuse (groups);
}

/* Returns true if we should issue prefetch for REF.  */

static bool
should_issue_prefetch_p (struct mem_ref *ref)
{
  /* For now do not issue prefetches for only first few of the
     iterations.  */
  if (ref->prefetch_before != PREFETCH_ALL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
                 (void *) ref);
      return false;
    }

  /* Do not prefetch nontemporal stores.  */
  if (ref->storent_p)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
      return false;
    }

  return true;
}

/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */

static bool
schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
                     unsigned ahead)
{
  unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
  unsigned slots_per_prefetch;
  struct mem_ref *ref;
  bool any = false;

  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;

  /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
     AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
     it will need a prefetch slot.  */
  slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
             slots_per_prefetch);

  /* For now we just take memory references one by one and issue
     prefetches for as many as possible.  The groups are sorted
     starting with the largest step, since the references with
     large step are more likely to cause many cache misses.  */

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      {
        if (!should_issue_prefetch_p (ref))
          continue;

        /* The loop is far from being sufficiently unrolled for this
           prefetch.  Do not generate prefetch to avoid many redundant
           prefetches.  */
        if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
          continue;

        /* If we need to prefetch the reference each PREFETCH_MOD iterations,
           and we unroll the loop UNROLL_FACTOR times, we need to insert
           ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
           iteration.  */
        n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                        / ref->prefetch_mod);
        prefetch_slots = n_prefetches * slots_per_prefetch;

        /* If more than half of the prefetches would be lost anyway, do not
           issue the prefetch.  */
        if (2 * remaining_prefetch_slots < prefetch_slots)
          continue;

        ref->issue_prefetch_p = true;

        if (remaining_prefetch_slots <= prefetch_slots)
          return true;
        remaining_prefetch_slots -= prefetch_slots;
        any = true;
      }

  return any;
}

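/* A worked example (illustrative): with ahead = 10 and unroll_factor = 4,
   slots_per_prefetch = (10 + 2) / 4 = 3.  A reference with prefetch_mod 2
   then needs n_prefetches = (4 + 2 - 1) / 2 = 2 prefetch instructions per
   unrolled iteration, i.e. prefetch_slots = 6, so it is only scheduled while
   at least 3 of the SIMULTANEOUS_PREFETCHES slots remain available.  */
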
/* Return TRUE if no prefetch is going to be generated in the given
   GROUPS.  */

static bool
nothing_to_prefetch_p (struct mem_ref_group *groups)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        return false;

  return true;
}

/* Estimate the number of prefetches in the given GROUPS.
   UNROLL_FACTOR is the factor by which LOOP was unrolled.  */

static int
estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
{
  struct mem_ref *ref;
  unsigned n_prefetches;
  int prefetch_count = 0;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        {
          n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                          / ref->prefetch_mod);
          prefetch_count += n_prefetches;
        }

  return prefetch_count;
}

/* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */

static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
  HOST_WIDE_INT delta;
  tree addr, addr_base, write_p, local, forward;
  gimple prefetch;
  gimple_stmt_iterator bsi;
  unsigned n_prefetches, ap;
  bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Issued%s prefetch for %p.\n",
             nontemporal ? " nontemporal" : "",
             (void *) ref);

  bsi = gsi_for_stmt (ref->stmt);

  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                  / ref->prefetch_mod);
  addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
  addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
                                        true, NULL, true, GSI_SAME_STMT);
  write_p = ref->write_p ? integer_one_node : integer_zero_node;
  local = nontemporal ? integer_zero_node : integer_three_node;

  for (ap = 0; ap < n_prefetches; ap++)
    {
      if (cst_and_fits_in_hwi (ref->group->step))
        {
          /* Determine the address to prefetch.  */
          delta = (ahead + ap * ref->prefetch_mod) *
                   int_cst_value (ref->group->step);
          addr = fold_build_pointer_plus_hwi (addr_base, delta);
          addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
                                           true, GSI_SAME_STMT);
        }
      else
        {
          /* The step size is non-constant but loop-invariant.  We use the
             heuristic to simply prefetch ahead iterations ahead.  */
          forward = fold_build2 (MULT_EXPR, sizetype,
                                 fold_convert (sizetype, ref->group->step),
                                 fold_convert (sizetype, size_int (ahead)));
          addr = fold_build_pointer_plus (addr_base, forward);
          addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
                                           NULL, true, GSI_SAME_STMT);
        }
      /* Create the prefetch instruction.  */
      prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
                                    3, addr, write_p, local);
      gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
    }
}

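/* For instance (illustrative): for a reference with a constant step of 8
   bytes, prefetch_mod 1, unroll_factor 1 and an ahead distance of 10, a
   single BUILT_IN_PREFETCH call is emitted in front of the statement, with
   the address &REF + 10 * 8 = &REF + 80, the write flag computed above and
   locality 3 (or 0 when the reference was classified as nontemporal).  */
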
/* Issue prefetches for the references in GROUPS into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
   factor by which LOOP was unrolled.  */

static void
issue_prefetches (struct mem_ref_group *groups,
                  unsigned unroll_factor, unsigned ahead)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (ref->issue_prefetch_p)
        issue_prefetch_ref (ref, unroll_factor, ahead);
}

/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */

static bool
nontemporal_store_p (struct mem_ref *ref)
{
  machine_mode mode;
  enum insn_code code;

  /* REF must be a write that is not reused.  We require it to be independent
     on all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
  if (!ref->write_p
      || !ref->independent_p
      || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
    return false;

  /* Check that we have the storent instruction for the mode.  */
  mode = TYPE_MODE (TREE_TYPE (ref->mem));
  if (mode == BLKmode)
    return false;

  code = optab_handler (storent_optab, mode);
  return code != CODE_FOR_nothing;
}

/* If REF is a nontemporal store, we mark the corresponding modify statement
   and return true.  Otherwise, we return false.  */

static bool
mark_nontemporal_store (struct mem_ref *ref)
{
  if (!nontemporal_store_p (ref))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
             (void *) ref);

  gimple_assign_set_nontemporal_move (ref->stmt, true);
  ref->storent_p = true;

  return true;
}

/* Issue a memory fence instruction after LOOP.  */

static void
emit_mfence_after_loop (struct loop *loop)
{
  vec<edge> exits = get_loop_exit_edges (loop);
  edge exit;
  gimple call;
  gimple_stmt_iterator bsi;
  unsigned i;

  FOR_EACH_VEC_ELT (exits, i, exit)
    {
      call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);

      if (!single_pred_p (exit->dest)
          /* If possible, we prefer not to insert the fence on other paths
             in cfg.  */
          && !(exit->flags & EDGE_ABNORMAL))
        split_loop_exit_edge (exit);
      bsi = gsi_after_labels (exit->dest);

      gsi_insert_before (&bsi, call, GSI_NEW_STMT);
    }

  exits.release ();
  update_ssa (TODO_update_ssa_only_virtuals);
}

/* Returns true if we can use storent in loop, false otherwise.  */

static bool
may_use_storent_in_loop_p (struct loop *loop)
{
  bool ret = true;

  if (loop->inner != NULL)
    return false;

  /* If we must issue a mfence insn after using storent, check that there
     is a suitable place for it at each of the loop exits.  */
  if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
    {
      vec<edge> exits = get_loop_exit_edges (loop);
      unsigned i;
      edge exit;

      FOR_EACH_VEC_ELT (exits, i, exit)
        if ((exit->flags & EDGE_ABNORMAL)
            && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
          ret = false;

      exits.release ();
    }

  return ret;
}

/* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
   references in the loop.  */

static void
mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
{
  struct mem_ref *ref;
  bool any = false;

  if (!may_use_storent_in_loop_p (loop))
    return;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      any |= mark_nontemporal_store (ref);

  if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
    emit_mfence_after_loop (loop);
}

/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fill in DESC by the description of number of
   iterations.  */

static bool
should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
                      unsigned factor)
{
  if (!can_unroll_loop_p (loop, factor, desc))
    return false;

  /* We only consider loops without control flow for unrolling.  This is not
     a hard restriction -- tree_unroll_loop works with arbitrary loops
     as well; but the unrolling/prefetching is usually more profitable for
     loops consisting of a single basic block, and we want to limit the
     code growth.  */
  if (loop->num_nodes > 2)
    return false;

  return true;
}

/* Determine the coefficient by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  Description of
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */

static unsigned
determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
                         unsigned ninsns, struct tree_niter_desc *desc,
                         HOST_WIDE_INT est_niter)
{
  unsigned upper_bound;
  unsigned nfactor, factor, mod_constraint;
  struct mem_ref_group *agp;
  struct mem_ref *ref;

  /* First check whether the loop is not too large to unroll.  We ignore
     PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
     from unrolling them enough to make exactly one cache line covered by each
     iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
     us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;

  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would be never entered.  */
  if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
    upper_bound = est_niter;

  if (upper_bound <= 1)
    return 1;

  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
  factor = 1;
  for (agp = refs; agp; agp = agp->next)
    for (ref = agp->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        {
          mod_constraint = ref->prefetch_mod;
          nfactor = least_common_multiple (mod_constraint, factor);
          if (nfactor <= upper_bound)
            factor = nfactor;
        }

  if (!should_unroll_loop_p (loop, desc, factor))
    return 1;

  return factor;
}

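/* A worked example (illustrative): suppose ninsns and the
   PARAM_MAX_UNROLLED_INSNS parameter yield upper_bound = 16, and the loop
   has two prefetched references with prefetch_mod 4 and 6.  Starting from
   factor = 1, the least common multiples are 4 and then 12, both within the
   bound, so the loop is unrolled 12 times, a multiple of both prefetch_mod
   values.  */
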
5417e022
ZD
1402/* Returns the total volume of the memory references REFS, taking into account
1403 reuses in the innermost loop and cache line size. TODO -- we should also
1404 take into account reuses across the iterations of the loops in the loop
1405 nest. */
1406
1407static unsigned
1408volume_of_references (struct mem_ref_group *refs)
1409{
1410 unsigned volume = 0;
1411 struct mem_ref_group *gr;
1412 struct mem_ref *ref;
1413
1414 for (gr = refs; gr; gr = gr->next)
1415 for (ref = gr->refs; ref; ref = ref->next)
1416 {
1417 /* Almost always reuses another value? */
1418 if (ref->prefetch_before != PREFETCH_ALL)
1419 continue;
1420
1421 /* If several iterations access the same cache line, use the size of
1422 the line divided by this number. Otherwise, a cache line is
1423 accessed in each iteration. TODO -- in the latter case, we should
1424 take the size of the reference into account, rounding it up on cache
1425 line size multiple. */
1426 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1427 }
1428 return volume;
1429}
1430
1431/* Returns the volume of memory references accessed across VEC iterations of
1432 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1433 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1434
1435static unsigned
1436volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1437{
1438 unsigned i;
1439
1440 for (i = 0; i < n; i++)
1441 if (vec[i] != 0)
1442 break;
1443
1444 if (i == n)
1445 return 0;
1446
1447 gcc_assert (vec[i] > 0);
1448
1449 /* We ignore the parts of the distance vector in subloops, since usually
1450 the numbers of iterations are much smaller. */
1451 return loop_sizes[i] * vec[i];
1452}
1453
1454/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDE
1455 at the position corresponding to the loop of the step. N is the depth
1456 of the considered loop nest, and, LOOP is its innermost loop. */
1457
1458static void
1459add_subscript_strides (tree access_fn, unsigned stride,
1460 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1461{
1462 struct loop *aloop;
1463 tree step;
1464 HOST_WIDE_INT astep;
1465 unsigned min_depth = loop_depth (loop) - n;
1466
1467 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1468 {
1469 aloop = get_chrec_loop (access_fn);
1470 step = CHREC_RIGHT (access_fn);
1471 access_fn = CHREC_LEFT (access_fn);
1472
1473 if ((unsigned) loop_depth (aloop) <= min_depth)
1474 continue;
1475
9541ffee 1476 if (tree_fits_shwi_p (step))
9439e9a1 1477 astep = tree_to_shwi (step);
5417e022
ZD
1478 else
1479 astep = L1_CACHE_LINE_SIZE;
1480
1481 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1482
1483 }
1484}
1485
1486/* Returns the volume of memory references accessed between two consecutive
1487 self-reuses of the reference DR. We consider the subscripts of DR in N
1488 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1489 loops. LOOP is the innermost loop of the current loop nest. */
1490
1491static unsigned
1492self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1493 struct loop *loop)
1494{
1495 tree stride, access_fn;
1496 HOST_WIDE_INT *strides, astride;
9771b263 1497 vec<tree> access_fns;
5417e022
ZD
1498 tree ref = DR_REF (dr);
1499 unsigned i, ret = ~0u;
1500
1501 /* In the following example:
1502
1503 for (i = 0; i < N; i++)
1504 for (j = 0; j < N; j++)
1505 use (a[j][i]);
1506 the same cache line is accessed each N steps (except if the change from
1507 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1508 we cannot rely purely on the results of the data dependence analysis.
1509
1510 Instead, we compute the stride of the reference in each loop, and consider
1511 the innermost loop in that the stride is less than cache size. */
1512
1513 strides = XCNEWVEC (HOST_WIDE_INT, n);
1514 access_fns = DR_ACCESS_FNS (dr);
1515
9771b263 1516 FOR_EACH_VEC_ELT (access_fns, i, access_fn)
5417e022
ZD
1517 {
1518 /* Keep track of the reference corresponding to the subscript, so that we
1519 know its stride. */
1520 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1521 ref = TREE_OPERAND (ref, 0);
b8698a0f 1522
5417e022
ZD
1523 if (TREE_CODE (ref) == ARRAY_REF)
1524 {
1525 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
cc269bb6 1526 if (tree_fits_uhwi_p (stride))
ae7e9ddd 1527 astride = tree_to_uhwi (stride);
5417e022
ZD
1528 else
1529 astride = L1_CACHE_LINE_SIZE;
1530
1531 ref = TREE_OPERAND (ref, 0);
1532 }
1533 else
1534 astride = 1;
1535
1536 add_subscript_strides (access_fn, astride, strides, n, loop);
1537 }
1538
1539 for (i = n; i-- > 0; )
1540 {
1541 unsigned HOST_WIDE_INT s;
1542
1543 s = strides[i] < 0 ? -strides[i] : strides[i];
1544
1545 if (s < (unsigned) L1_CACHE_LINE_SIZE
1546 && (loop_sizes[i]
1547 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1548 {
1549 ret = loop_sizes[i];
1550 break;
1551 }
1552 }
1553
1554 free (strides);
1555 return ret;
1556}
1557
1558/* Determines the distance till the first reuse of each reference in REFS
79f5e442 1559 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1e373390 1560 memory references in the loop. Return false if the analysis fails. */
5417e022 1561
1e373390 1562static bool
79f5e442
ZD
1563determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1564 bool no_other_refs)
5417e022
ZD
1565{
1566 struct loop *nest, *aloop;
6e1aa848
DN
1567 vec<data_reference_p> datarefs = vNULL;
1568 vec<ddr_p> dependences = vNULL;
5417e022 1569 struct mem_ref_group *gr;
79f5e442 1570 struct mem_ref *ref, *refb;
6e1aa848 1571 vec<loop_p> vloops = vNULL;
5417e022
ZD
1572 unsigned *loop_data_size;
1573 unsigned i, j, n;
1574 unsigned volume, dist, adist;
1575 HOST_WIDE_INT vol;
1576 data_reference_p dr;
1577 ddr_p dep;
1578
1579 if (loop->inner)
1e373390 1580 return true;
5417e022
ZD
1581
1582 /* Find the outermost loop of the loop nest of loop (we require that
1583 there are no sibling loops inside the nest). */
1584 nest = loop;
1585 while (1)
1586 {
1587 aloop = loop_outer (nest);
1588
1589 if (aloop == current_loops->tree_root
1590 || aloop->inner->next)
1591 break;
1592
1593 nest = aloop;
1594 }
1595
1596 /* For each loop, determine the amount of data accessed in each iteration.
1597 We use this to estimate whether the reference is evicted from the
1598 cache before its reuse. */
1599 find_loop_nest (nest, &vloops);
9771b263 1600 n = vloops.length ();
5417e022
ZD
1601 loop_data_size = XNEWVEC (unsigned, n);
1602 volume = volume_of_references (refs);
1603 i = n;
1604 while (i-- != 0)
1605 {
1606 loop_data_size[i] = volume;
1607 /* Bound the volume by the L2 cache size, since above this bound,
1608 all dependence distances are equivalent. */
1609 if (volume > L2_CACHE_SIZE_BYTES)
1610 continue;
1611
9771b263 1612 aloop = vloops[i];
652c4c71 1613 vol = estimated_stmt_executions_int (aloop);
e5b332cd 1614 if (vol == -1)
5417e022
ZD
1615 vol = expected_loop_iterations (aloop);
1616 volume *= vol;
1617 }
1618
1619 /* Prepare the references in the form suitable for data dependence
0d52bcc1 1620 analysis. We ignore unanalyzable data references (the results
5417e022
ZD
1621 are used just as a heuristics to estimate temporality of the
1622 references, hence we do not need to worry about correctness). */
1623 for (gr = refs; gr; gr = gr->next)
1624 for (ref = gr->refs; ref; ref = ref->next)
1625 {
5c640e29
SP
1626 dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
1627 ref->mem, ref->stmt, !ref->write_p);
5417e022
ZD
1628
1629 if (dr)
1630 {
1631 ref->reuse_distance = volume;
1632 dr->aux = ref;
9771b263 1633 datarefs.safe_push (dr);
5417e022 1634 }
79f5e442
ZD
1635 else
1636 no_other_refs = false;
5417e022
ZD
1637 }
1638
  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      dist = self_reuse_distance (dr, loop_data_size, n, loop);
      ref = (struct mem_ref *) dr->aux;
      if (ref->reuse_distance > dist)
        ref->reuse_distance = dist;

      if (no_other_refs)
        ref->independent_p = true;
    }

  if (!compute_all_dependences (datarefs, &dependences, vloops, true))
    return false;

  FOR_EACH_VEC_ELT (dependences, i, dep)
    {
      if (DDR_ARE_DEPENDENT (dep) == chrec_known)
        continue;

      ref = (struct mem_ref *) DDR_A (dep)->aux;
      refb = (struct mem_ref *) DDR_B (dep)->aux;

      if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
          || DDR_NUM_DIST_VECTS (dep) == 0)
        {
          /* If the dependence cannot be analyzed, assume that there might be
             a reuse.  */
          dist = 0;

          ref->independent_p = false;
          refb->independent_p = false;
        }
      else
        {
          /* The distance vectors are normalized to be always lexicographically
             positive, hence we cannot tell just from them whether DDR_A comes
             before DDR_B or vice versa.  However, it is not important,
             anyway -- if DDR_A is close to DDR_B, then it is either reused in
             DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
             in cache (and marking it as nontemporal would not affect
             anything).  */

          dist = volume;
          for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
            {
              adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
                                             loop_data_size, n);

              /* If this is a dependence in the innermost loop (i.e., the
                 distances in all superloops are zero) and it is not
                 the trivial self-dependence with distance zero, record that
                 the references are not completely independent.  */
              if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
                  && (ref != refb
                      || DDR_DIST_VECT (dep, j)[n-1] != 0))
                {
                  ref->independent_p = false;
                  refb->independent_p = false;
                }

              /* Ignore accesses closer than
                 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
                 so that we use nontemporal prefetches e.g. if a single memory
                 location is accessed several times in a single iteration of
                 the loop.  */
              if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
                continue;

              if (adist < dist)
                dist = adist;
            }
        }

      if (ref->reuse_distance > dist)
        ref->reuse_distance = dist;
      if (refb->reuse_distance > dist)
        refb->reuse_distance = dist;
    }

  free_dependence_relations (dependences);
  free_data_refs (datarefs);
  free (loop_data_size);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reuse distances:\n");
      for (gr = refs; gr; gr = gr->next)
        for (ref = gr->refs; ref; ref = ref->next)
          fprintf (dump_file, " ref %p distance %u\n",
                   (void *) ref, ref->reuse_distance);
    }

  return true;
}

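/* Added illustration (schematic example, not from the original sources):
   in a nest such as

     for (i = 0; i < n; i++)
       for (j = 0; j < m; j++)
         sum += a[j];

   a[j] is read again in the next iteration of the i loop, so its reuse
   distance is roughly the amount of data touched by one i iteration (the
   accumulation of loop_data_size above stops growing once the volume
   exceeds the L2 size).  References whose reuse distance stays large feed
   the nontemporal-store decision made later (mark_nontemporal_stores).  */
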
/* Determine whether or not the trip count to ahead ratio is too small based
   on profitability considerations.
   AHEAD: the iteration ahead distance,
   EST_NITER: the estimated trip count.  */

static bool
trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
{
  /* Assume the trip count to ahead ratio is big enough if the trip count
     could not be estimated at compile time.  */
  if (est_niter < 0)
    return false;

  if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- loop estimated to roll only %d times\n",
                 (int) est_niter);
      return true;
    }

  return false;
}

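/* Added illustration (numbers are assumptions, not the parameter defaults):
   if AHEAD is 8 iterations and TRIP_COUNT_TO_AHEAD_RATIO is 4, any loop
   estimated to roll fewer than 32 times is rejected -- with so few
   iterations, most prefetches would target data the loop never reaches,
   or would be issued too late to hide the latency.  */
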
/* Determine whether or not the number of memory references in the loop is
   reasonable based on the profitability and compilation time considerations.
   NINSNS: estimated number of instructions in the loop,
   MEM_REF_COUNT: total number of memory references in the loop.  */

static bool
mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
{
  int insn_to_mem_ratio;

  if (mem_ref_count == 0)
    return false;

  /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
     (compute_all_dependences) have high costs based on quadratic complexity.
     To avoid huge compilation time, we give up prefetching if mem_ref_count
     is too large.  */
  if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
    return false;

  /* Prefetching improves performance by overlapping cache-missing
     memory accesses with CPU operations.  If the loop does not have
     enough CPU operations to overlap with memory operations, prefetching
     won't give a significant benefit.  One approximate way of checking
     this is to require the ratio of instructions to memory references to
     be above a certain limit.  This approximation works well in practice.
     TODO: Implement a more precise computation by estimating the time
     for each CPU or memory op in the loop.  Time estimates for memory ops
     should account for cache misses.  */
  insn_to_mem_ratio = ninsns / mem_ref_count;

  if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
                 insn_to_mem_ratio);
      return false;
    }

  return true;
}

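/* Added illustration (assumed numbers): a loop with ninsns == 20 and
   mem_ref_count == 8 has insn_to_mem_ratio == 2; if
   PREFETCH_MIN_INSN_TO_MEM_RATIO were 3, prefetching would be rejected
   because there is too little computation to overlap with the memory
   traffic.  Note that the division above is integer division, so the
   ratio rounds down.  */
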
/* Determine whether or not the instruction to prefetch ratio in the loop is
   too small based on the profitability consideration.
   NINSNS: estimated number of instructions in the loop,
   PREFETCH_COUNT: an estimate of the number of prefetches,
   UNROLL_FACTOR: the factor to unroll the loop if prefetching.  */

static bool
insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
                                    unsigned unroll_factor)
{
  int insn_to_prefetch_ratio;

  /* Prefetching most likely causes performance degradation when the instruction
     to prefetch ratio is too small.  Too many prefetch instructions in a loop
     may reduce the I-cache performance.
     (unroll_factor * ninsns) is used to estimate the number of instructions in
     the unrolled loop.  This implementation is a bit simplistic -- the number
     of issued prefetch instructions is also affected by unrolling.  So,
     prefetch_mod and the unroll factor should be taken into account when
     determining prefetch_count.  Also, the number of insns of the unrolled
     loop will usually be significantly smaller than the number of insns of the
     original loop * unroll_factor (at least the induction variable increases
     and the exit branches will get eliminated), so it might be better to use
     tree_estimate_loop_size + estimated_unrolled_size.  */
  insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
  if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
                 insn_to_prefetch_ratio);
      return true;
    }

  return false;
}

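/* Added illustration (assumed numbers): with ninsns == 10, an unroll factor
   of 4 and prefetch_count == 8, insn_to_prefetch_ratio == (4 * 10) / 8 == 5;
   if MIN_INSN_TO_PREFETCH_RATIO were larger than 5, the loop would be
   rejected because the prefetch instructions themselves would make up too
   large a fraction of the unrolled body.  */
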
/* Issue prefetch instructions for array references in LOOP.  Returns
   true if the LOOP was unrolled.  */

static bool
loop_prefetch_arrays (struct loop *loop)
{
  struct mem_ref_group *refs;
  unsigned ahead, ninsns, time, unroll_factor;
  HOST_WIDE_INT est_niter;
  struct tree_niter_desc desc;
  bool unrolled = false, no_other_refs;
  unsigned prefetch_count;
  unsigned mem_ref_count;

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, " ignored (cold area)\n");
      return false;
    }

  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
  time = tree_num_loop_insns (loop, &eni_time_weights);
  if (time == 0)
    return false;

  ahead = (PREFETCH_LATENCY + time - 1) / time;
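  /* Added note: the expression above is a ceiling division, so AHEAD is the
     smallest number of iterations whose estimated execution time covers
     PREFETCH_LATENCY.  For illustration only (assumed numbers, not the
     actual parameter defaults): with a latency of 200 cycles and a loop
     body costing 30, AHEAD = (200 + 30 - 1) / 30 = 7 iterations.  */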
  est_niter = estimated_stmt_executions_int (loop);
  if (est_niter == -1)
    est_niter = max_stmt_executions_int (loop);

  /* Prefetching is not likely to be profitable if the trip count to ahead
     ratio is too small.  */
  if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
    return false;

  ninsns = tree_num_loop_insns (loop, &eni_size_weights);

  /* Step 1: gather the memory references.  */
  refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);

  /* Give up prefetching if the number of memory references in the
     loop is not reasonable based on profitability and compilation time
     considerations.  */
  if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
    goto fail;

  /* Step 2: estimate the reuse effects.  */
  prune_by_reuse (refs);

  if (nothing_to_prefetch_p (refs))
    goto fail;

  if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
    goto fail;

  /* Step 3: determine unroll factor.  */
  unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
                                           est_niter);

  /* Estimate prefetch count for the unrolled loop.  */
  prefetch_count = estimate_prefetch_count (refs, unroll_factor);
  if (prefetch_count == 0)
    goto fail;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
             HOST_WIDE_INT_PRINT_DEC "\n"
             "insn count %d, mem ref count %d, prefetch count %d\n",
             ahead, unroll_factor, est_niter,
             ninsns, mem_ref_count, prefetch_count);

  /* Prefetching is not likely to be profitable if the instruction to prefetch
     ratio is too small.  */
  if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
                                          unroll_factor))
    goto fail;

  mark_nontemporal_stores (loop, refs);

  /* Step 4: what to prefetch?  */
  if (!schedule_prefetches (refs, unroll_factor, ahead))
    goto fail;

  /* Step 5: unroll the loop.  TODO -- peeling of first and last few
     iterations so that we do not issue superfluous prefetches.  */
  if (unroll_factor != 1)
    {
      tree_unroll_loop (loop, unroll_factor,
                        single_dom_exit (loop), &desc);
      unrolled = true;
    }

  /* Step 6: issue the prefetches.  */
  issue_prefetches (refs, unroll_factor, ahead);

fail:
  release_mem_refs (refs);
  return unrolled;
}

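/* Added illustration (schematic only; the exact code generated depends on
   the target parameters and the helpers above): for a simple loop

     for (i = 0; i < n; i++)
       sum += a[i];

   schedule_prefetches and issue_prefetches conceptually turn the (possibly
   unrolled) body into something like

     __builtin_prefetch (&a[i + ahead], 0, 3);
     sum += a[i];

   so that the cache line needed AHEAD iterations from now is already being
   fetched while the current iterations execute.  */
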
/* Issue prefetch instructions for array references in loops.  */

unsigned int
tree_ssa_prefetch_arrays (void)
{
  struct loop *loop;
  bool unrolled = false;
  int todo_flags = 0;

  if (!HAVE_prefetch
      /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
         -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
         of processor costs and i486 does not have prefetch, but
         -march=pentium4 causes HAVE_prefetch to be true.  Ugh.  */
      || PREFETCH_BLOCK == 0)
    return 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Prefetching parameters:\n");
      fprintf (dump_file, " simultaneous prefetches: %d\n",
               SIMULTANEOUS_PREFETCHES);
      fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
      fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
      fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
               L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
      fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
      fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
      fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
               MIN_INSN_TO_PREFETCH_RATIO);
      fprintf (dump_file, " min insn-to-mem ratio: %d \n",
               PREFETCH_MIN_INSN_TO_MEM_RATIO);
      fprintf (dump_file, "\n");
    }

  initialize_original_copy_tables ();

  if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
    {
      tree type = build_function_type_list (void_type_node,
                                            const_ptr_type_node, NULL_TREE);
      tree decl = add_builtin_function ("__builtin_prefetch", type,
                                        BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
                                        NULL, NULL_TREE);
      DECL_IS_NOVOPS (decl) = true;
      set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
    }

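  /* Added note: GCC's __builtin_prefetch is documented as taking an address
     plus optional read/write and locality hints.  The fallback declaration
     built above only spells out the mandatory "const void *" argument; it
     is created only when the front end has not already registered the
     builtin (see the builtin_decl_explicit_p guard).  */
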
  /* We assume that the size of a cache line is a power of two, so verify
     this here.  */
  gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);

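  /* Added note: the bit test above is the usual power-of-two check -- a
     power of two has a single bit set, so x & (x - 1) clears it and yields
     zero (e.g. 64 & 63 == 0), while a non-power such as 48 gives
     48 & 47 == 32 != 0.  */
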
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Processing loop %d:\n", loop->num);

      unrolled |= loop_prefetch_arrays (loop);

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "\n\n");
    }

  if (unrolled)
    {
      scev_reset ();
      todo_flags |= TODO_cleanup_cfg;
    }

  free_original_copy_tables ();
  return todo_flags;
}

/* Prefetching.  */

namespace {

const pass_data pass_data_loop_prefetch =
{
  GIMPLE_PASS, /* type */
  "aprefetch", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_PREFETCH, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_loop_prefetch : public gimple_opt_pass
{
public:
  pass_loop_prefetch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_prefetch_loop_arrays > 0; }
  virtual unsigned int execute (function *);

}; // class pass_loop_prefetch

unsigned int
pass_loop_prefetch::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return tree_ssa_prefetch_arrays ();
}

} // anon namespace

gimple_opt_pass *
make_pass_loop_prefetch (gcc::context *ctxt)
{
  return new pass_loop_prefetch (ctxt);
}
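
/* Added usage note: the pass is gated on flag_prefetch_loop_arrays, i.e. it
   runs when -fprefetch-loop-arrays is in effect and the target provides a
   prefetch instruction; its per-loop decisions can be inspected through the
   "aprefetch" dump (e.g. -fdump-tree-aprefetch-details).  */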