/* Array prefetching.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "optabs-query.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "ssa.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-data-ref.h"
#include "diagnostic-core.h"
#include "dbgcnt.h"

/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the single loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
	{
	  a[255] = ...;		(0)
	  a[i] = ...;		(1)
	  a[i + 64] = ...;	(2)
	  a[16*i] = ...;	(3)
	  a[187*i] = ...;	(4)
	  a[187*i + 50] = ...;	(5)
	}

       (0) obviously has PREFETCH_BEFORE 1
       (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
	   location 64 iterations before it, and PREFETCH_MOD 64 (since
	   it hits the same cache line otherwise).
       (2) has PREFETCH_MOD 64
       (3) has PREFETCH_MOD 4
       (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
	   the cache line accessed by (5) is the same with probability only
	   7/32.
       (5) has PREFETCH_MOD 1 as well.

      Additionally, we use data dependence analysis to determine for each
      reference the distance till the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.

   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop.

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We prefetch as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?

   A cost model is implemented to determine whether or not prefetching is
   profitable for a given loop.  The cost model has three heuristics:

   1. Function trip_count_to_ahead_ratio_too_small_p implements a
      heuristic that determines whether or not the loop has too few
      iterations (compared to ahead).  Prefetching is not likely to be
      beneficial if the trip count to ahead ratio is below a certain
      minimum.

   2. Function mem_ref_count_reasonable_p implements a heuristic that
      determines whether the given loop has enough CPU ops that can be
      overlapped with cache missing memory ops.  If not, the loop
      won't benefit from prefetching.  In the implementation,
      prefetching is not considered beneficial if the ratio between
      the instruction count and the mem ref count is below a certain
      minimum.

   3. Function insn_to_prefetch_ratio_too_small_p implements a
      heuristic that disables prefetching in a loop if the prefetching
      cost is above a certain limit.  The relative prefetching cost is
      estimated by taking the ratio between the prefetch count and the
      total instruction count (this models the I-cache cost).

   The limits used in these heuristics are defined as parameters with
   reasonable default values.  Machine-specific default values will be
   added later.

   Some other TODO:
      -- write and use more general reuse analysis (that could be also used
	 in other cache aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by user
	 (now we just ignore them; at the very least we should avoid
	 optimizing loops in which the user put their own prefetches)
      -- we assume cache line size alignment of arrays; this could be
	 improved.  */

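/* For illustration only (a sketch of the intended effect, not literal
   output of this pass): for a simple loop such as

     for (i = 0; i < n; i++)
       sum += a[i];

   the pass conceptually rewrites the body to

     for (i = 0; i < n; i++)
       {
	 __builtin_prefetch (&a[i + AHEAD], 0, 3);
	 sum += a[i];
       }

   where AHEAD stands for the prefetch distance chosen in step 3) above;
   the actually emitted statement is a call to BUILT_IN_PREFETCH built in
   issue_prefetch_ref below.  */
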
/* Magic constants follow.  These should be replaced by machine specific
   numbers.  */

/* True if write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have a forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have a backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if single memory location
   is accessed several times in a single iteration of the loop.  */
#define NONTEMPORAL_FRACTION 16

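/* For example (illustrative numbers only, not necessarily the target's
   defaults): with a 32 kB L1 cache and NONTEMPORAL_FRACTION 16, reuses
   closer than 32768 / 16 == 2048 bytes of accessed data apart are
   disregarded when deciding whether an access is nontemporal.  */
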
/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif

/* It is not profitable to prefetch when the trip count is not at
   least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
   For example, in a loop with a prefetch ahead distance of 10,
   supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
   profitable to prefetch when the trip count is greater than or equal to
   40.  In that case, 30 out of the 40 iterations will benefit from
   prefetching.  */

#ifndef TRIP_COUNT_TO_AHEAD_RATIO
#define TRIP_COUNT_TO_AHEAD_RATIO 4
#endif

/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;			/* Base of the reference.  */
  tree step;			/* Step of the reference.  */
  struct mem_ref *refs;		/* References in the group.  */
  struct mem_ref_group *next;	/* Next group of references.  */
  unsigned int uid;		/* Group UID, used only for debugging.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL		HOST_WIDE_INT_M1U

/* Do not generate a prefetch if the unroll factor is significantly less
   than what is required by the prefetch.  This is to avoid redundant
   prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
   2, prefetching requires unrolling the loop 16 times, but
   the loop is actually unrolled twice.  In this case (ratio = 8),
   prefetching is not likely to be beneficial.  */

#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif

/* Some of the prefetch computations have quadratic complexity.  We want to
   avoid huge compile times and, therefore, want to limit the amount of
   memory references per loop where we consider prefetching.  */

#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
#endif

/* The memory reference.  */

struct mem_ref
{
  gimple *stmt;			/* Statement in which the reference appears.  */
  tree mem;			/* The reference.  */
  HOST_WIDE_INT delta;		/* Constant offset of the reference.  */
  struct mem_ref_group *group;	/* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
				/* Prefetch only each PREFETCH_MOD-th
				   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
				/* Prefetch only first PREFETCH_BEFORE
				   iterations.  */
  unsigned reuse_distance;	/* The amount of data accessed before the first
				   reuse of this value.  */
  struct mem_ref *next;		/* The next reference in the group.  */
  unsigned int uid;		/* Ref UID, used only for debugging.  */
  unsigned write_p : 1;		/* Is it a write?  */
  unsigned independent_p : 1;	/* True if the reference is independent of
				   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch?  */
  unsigned storent_p : 1;	/* True if we changed the store to a
				   nontemporal one.  */
};

a5497b12 283/* Dumps information about memory reference */
b076a3fd 284static void
a5497b12
VK
285dump_mem_details (FILE *file, tree base, tree step,
286 HOST_WIDE_INT delta, bool write_p)
b076a3fd 287{
a5497b12
VK
288 fprintf (file, "(base ");
289 print_generic_expr (file, base, TDF_SLIM);
b076a3fd 290 fprintf (file, ", step ");
a5497b12
VK
291 if (cst_and_fits_in_hwi (step))
292 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
81f32326 293 else
8264c84d 294 print_generic_expr (file, step, TDF_SLIM);
b076a3fd 295 fprintf (file, ")\n");
23b0f9f8
MK
296 fprintf (file, " delta " HOST_WIDE_INT_PRINT_DEC "\n", delta);
297 fprintf (file, " %s\n\n", write_p ? "write" : "read");
a5497b12 298}
b076a3fd 299
a5497b12 300/* Dumps information about reference REF to FILE. */
b076a3fd 301
a5497b12
VK
302static void
303dump_mem_ref (FILE *file, struct mem_ref *ref)
304{
23b0f9f8
MK
305 fprintf (file, "reference %u:%u (", ref->group->uid, ref->uid);
306 print_generic_expr (file, ref->mem, TDF_SLIM);
307 fprintf (file, ")\n");
b076a3fd
ZD
308}
309
310/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
311 exist. */
312
313static struct mem_ref_group *
81f32326 314find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
b076a3fd 315{
23b0f9f8
MK
316 /* Global count for setting struct mem_ref_group->uid. */
317 static unsigned int last_mem_ref_group_uid = 0;
318
b076a3fd
ZD
319 struct mem_ref_group *group;
320
321 for (; *groups; groups = &(*groups)->next)
322 {
81f32326 323 if (operand_equal_p ((*groups)->step, step, 0)
b076a3fd
ZD
324 && operand_equal_p ((*groups)->base, base, 0))
325 return *groups;
326
81f32326
CB
327 /* If step is an integer constant, keep the list of groups sorted
328 by decreasing step. */
21c0a521
DM
329 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
330 && int_cst_value ((*groups)->step) < int_cst_value (step))
b076a3fd
ZD
331 break;
332 }
333
5417e022 334 group = XNEW (struct mem_ref_group);
b076a3fd
ZD
335 group->base = base;
336 group->step = step;
337 group->refs = NULL;
23b0f9f8 338 group->uid = ++last_mem_ref_group_uid;
b076a3fd
ZD
339 group->next = *groups;
340 *groups = group;
341
342 return group;
343}
344
/* Records a memory reference MEM in GROUP with offset DELTA and write status
   WRITE_P.  The reference occurs in statement STMT.  */

static void
record_ref (struct mem_ref_group *group, gimple *stmt, tree mem,
	    HOST_WIDE_INT delta, bool write_p)
{
  unsigned int last_mem_ref_uid = 0;
  struct mem_ref **aref;

  /* Do not record the same address twice.  */
  for (aref = &group->refs; *aref; aref = &(*aref)->next)
    {
      last_mem_ref_uid = (*aref)->uid;

      /* It does not have to be possible for write reference to reuse the read
	 prefetch, or vice versa.  */
      if (!WRITE_CAN_USE_READ_PREFETCH
	  && write_p
	  && !(*aref)->write_p)
	continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
	  && !write_p
	  && (*aref)->write_p)
	continue;

      if ((*aref)->delta == delta)
	return;
    }

  (*aref) = XNEW (struct mem_ref);
  (*aref)->stmt = stmt;
  (*aref)->mem = mem;
  (*aref)->delta = delta;
  (*aref)->write_p = write_p;
  (*aref)->prefetch_before = PREFETCH_ALL;
  (*aref)->prefetch_mod = 1;
  (*aref)->reuse_distance = 0;
  (*aref)->issue_prefetch_p = false;
  (*aref)->group = group;
  (*aref)->next = NULL;
  (*aref)->independent_p = false;
  (*aref)->storent_p = false;
  (*aref)->uid = last_mem_ref_uid + 1;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_mem_ref (dump_file, *aref);

      fprintf (dump_file, "  group %u ", group->uid);
      dump_mem_details (dump_file, group->base, group->step, delta,
			write_p);
    }
}

/* Release memory references in GROUPS.  */

static void
release_mem_refs (struct mem_ref_group *groups)
{
  struct mem_ref_group *next_g;
  struct mem_ref *ref, *next_r;

  for (; groups; groups = next_g)
    {
      next_g = groups->next;
      for (ref = groups->refs; ref; ref = next_r)
	{
	  next_r = ref->next;
	  free (ref);
	}
      free (groups);
    }
}

/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;		/* Loop of the reference.  */
  gimple *stmt;			/* Statement of the reference.  */
  tree *step;			/* Step of the memory reference.  */
  HOST_WIDE_INT *delta;		/* Offset of the memory reference.  */
};

/* Analyzes a single INDEX of a memory reference to obtain information
   described at analyze_ref.  Callback for for_each_index.  */

static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
  struct ar_data *ar_data = (struct ar_data *) data;
  tree ibase, step, stepsize;
  HOST_WIDE_INT idelta = 0, imult = 1;
  affine_iv iv;

  if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
		  *index, &iv, true))
    return false;
  ibase = iv.base;
  step = iv.step;

  if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
      && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
    {
      idelta = int_cst_value (TREE_OPERAND (ibase, 1));
      ibase = TREE_OPERAND (ibase, 0);
    }
  if (cst_and_fits_in_hwi (ibase))
    {
      idelta += int_cst_value (ibase);
      ibase = build_int_cst (TREE_TYPE (ibase), 0);
    }

  if (TREE_CODE (base) == ARRAY_REF)
    {
      stepsize = array_ref_element_size (base);
      if (!cst_and_fits_in_hwi (stepsize))
	return false;
      imult = int_cst_value (stepsize);
      step = fold_build2 (MULT_EXPR, sizetype,
			  fold_convert (sizetype, step),
			  fold_convert (sizetype, stepsize));
      idelta *= imult;
    }

  if (*ar_data->step == NULL_TREE)
    *ar_data->step = step;
  else
    *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
				  fold_convert (sizetype, *ar_data->step),
				  fold_convert (sizetype, step));
  *ar_data->delta += idelta;
  *index = ibase;

  return true;
}

/* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
   STEP are integer constants and iter is number of iterations of LOOP.  The
   reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */

static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
	     tree *step, HOST_WIDE_INT *delta,
	     gimple *stmt)
{
  struct ar_data ar_data;
  tree off;
  HOST_WIDE_INT bit_offset;
  tree ref = *ref_p;

  *step = NULL_TREE;
  *delta = 0;

  /* First strip off the component references.  Ignore bitfields.
     Also strip off the real and imaginary parts of a complex, so that
     they can have the same base.  */
  if (TREE_CODE (ref) == REALPART_EXPR
      || TREE_CODE (ref) == IMAGPART_EXPR
      || (TREE_CODE (ref) == COMPONENT_REF
	  && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
    {
      if (TREE_CODE (ref) == IMAGPART_EXPR)
	*delta += int_size_in_bytes (TREE_TYPE (ref));
      ref = TREE_OPERAND (ref, 0);
    }

  *ref_p = ref;

  for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
    {
      off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
      bit_offset = TREE_INT_CST_LOW (off);
      gcc_assert (bit_offset % BITS_PER_UNIT == 0);

      *delta += bit_offset / BITS_PER_UNIT;
    }

  *base = unshare_expr (ref);
  ar_data.loop = loop;
  ar_data.stmt = stmt;
  ar_data.step = step;
  ar_data.delta = delta;
  return for_each_index (base, idx_analyze_ref, &ar_data);
}

/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is a write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */

static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
			      tree ref, bool write_p, gimple *stmt)
{
  tree base, step;
  HOST_WIDE_INT delta;
  struct mem_ref_group *agrp;

  if (get_base_address (ref) == NULL)
    return false;

  if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
    return false;
  /* If analyze_ref fails the default is a NULL_TREE.  We can stop here.  */
  if (step == NULL_TREE)
    return false;

  /* Stop if the address of BASE could not be taken.  */
  if (may_be_nonaddressable_p (base))
    return false;

  /* Limit non-constant step prefetching only to the innermost loops and
     only when the step is loop invariant in the entire loop nest.  */
  if (!cst_and_fits_in_hwi (step))
    {
      if (loop->inner != NULL)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Memory expression %p\n", (void *) ref);
	      print_generic_expr (dump_file, ref, TDF_SLIM);
	      fprintf (dump_file, ":");
	      dump_mem_details (dump_file, base, step, delta, write_p);
	      fprintf (dump_file,
		       "Ignoring %p, non-constant step prefetching is "
		       "limited to innermost loops\n",
		       (void *) ref);
	    }
	  return false;
	}
      else
	{
	  if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Memory expression %p\n", (void *) ref);
		  print_generic_expr (dump_file, ref, TDF_SLIM);
		  fprintf (dump_file, ":");
		  dump_mem_details (dump_file, base, step, delta, write_p);
		  fprintf (dump_file,
			   "Not prefetching, ignoring %p due to "
			   "loop variant step\n",
			   (void *) ref);
		}
	      return false;
	    }
	}
    }

  /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
     are integer constants.  */
  agrp = find_or_create_group (refs, base, step);
  record_ref (agrp, stmt, ref, delta, write_p);

  return true;
}

/* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
   true if there are no other memory references inside the loop.  */

static struct mem_ref_group *
gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  gimple_stmt_iterator bsi;
  gimple *stmt;
  tree lhs, rhs;
  struct mem_ref_group *refs = NULL;

  *no_other_refs = true;
  *ref_count = 0;

  /* Scan the loop body in order, so that the former references precede the
     later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
	continue;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  stmt = gsi_stmt (bsi);

	  if (gimple_code (stmt) != GIMPLE_ASSIGN)
	    {
	      if (gimple_vuse (stmt)
		  || (is_gimple_call (stmt)
		      && !(gimple_call_flags (stmt) & ECF_CONST)))
		*no_other_refs = false;
	      continue;
	    }

	  if (! gimple_vuse (stmt))
	    continue;

	  lhs = gimple_assign_lhs (stmt);
	  rhs = gimple_assign_rhs1 (stmt);

	  if (REFERENCE_CLASS_P (rhs))
	    {
	      *no_other_refs &= gather_memory_references_ref (loop, &refs,
							      rhs, false, stmt);
	      *ref_count += 1;
	    }
	  if (REFERENCE_CLASS_P (lhs))
	    {
	      *no_other_refs &= gather_memory_references_ref (loop, &refs,
							      lhs, true, stmt);
	      *ref_count += 1;
	    }
	}
    }
  free (body);

  return refs;
}

/* Prune the prefetch candidate REF using the self-reuse.  */

static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
  HOST_WIDE_INT step;
  bool backward;

  /* If the step size is non constant, we cannot calculate prefetch_mod.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (step == 0)
    {
      /* Prefetch references to invariant address just once.  */
      ref->prefetch_before = 1;
      return;
    }

  if (backward)
    step = -step;

  if (step > PREFETCH_BLOCK)
    return;

  if ((backward && HAVE_BACKWARD_PREFETCH)
      || (!backward && HAVE_FORWARD_PREFETCH))
    {
      ref->prefetch_before = 1;
      return;
    }

  ref->prefetch_mod = PREFETCH_BLOCK / step;
}

/* Divides X by BY, rounding down.  */

static HOST_WIDE_INT
ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
{
  gcc_assert (by > 0);

  if (x >= 0)
    return x / (HOST_WIDE_INT) by;
  else
    return (x + (HOST_WIDE_INT) by - 1) / (HOST_WIDE_INT) by;
}

/* Given a CACHE_LINE_SIZE and two inductive memory references
   with a common STEP greater than CACHE_LINE_SIZE and an address
   difference DELTA, compute the probability that they will fall
   in different cache lines.  Return true if the computed miss rate
   is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
   number of distinct iterations after which the pattern repeats itself.
   ALIGN_UNIT is the unit of alignment in bytes.  */

static bool
is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
			 HOST_WIDE_INT step, HOST_WIDE_INT delta,
			 unsigned HOST_WIDE_INT distinct_iters,
			 int align_unit)
{
  unsigned align, iter;
  int total_positions, miss_positions, max_allowed_miss_positions;
  int address1, address2, cache_line1, cache_line2;

  /* It always misses if delta is greater than or equal to the cache
     line size.  */
  if (delta >= (HOST_WIDE_INT) cache_line_size)
    return false;

  miss_positions = 0;
  total_positions = (cache_line_size / align_unit) * distinct_iters;
  max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;

  /* Iterate through all possible alignments of the first
     memory reference within its cache line.  */
  for (align = 0; align < cache_line_size; align += align_unit)

    /* Iterate through all distinct iterations.  */
    for (iter = 0; iter < distinct_iters; iter++)
      {
	address1 = align + step * iter;
	address2 = address1 + delta;
	cache_line1 = address1 / cache_line_size;
	cache_line2 = address2 / cache_line_size;
	if (cache_line1 != cache_line2)
	  {
	    miss_positions += 1;
	    if (miss_positions > max_allowed_miss_positions)
	      return false;
	  }
      }
  return true;
}

/* Prune the prefetch candidate REF using the reuse with BY.
   If BY_IS_BEFORE is true, BY is before REF in the loop.  */

static void
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
			  bool by_is_before)
{
  HOST_WIDE_INT step;
  bool backward;
  HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
  HOST_WIDE_INT delta = delta_b - delta_r;
  HOST_WIDE_INT hit_from;
  unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
  HOST_WIDE_INT reduced_step;
  unsigned HOST_WIDE_INT reduced_prefetch_block;
  tree ref_type;
  int align_unit;

  /* If the step is non constant we cannot calculate prefetch_before.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (delta == 0)
    {
      /* If the references have the same address, only prefetch the
	 former.  */
      if (by_is_before)
	ref->prefetch_before = 0;

      return;
    }

  if (!step)
    {
      /* If the reference addresses are invariant and fall into the
	 same cache line, prefetch just the first one.  */
      if (!by_is_before)
	return;

      if (ddown (ref->delta, PREFETCH_BLOCK)
	  != ddown (by->delta, PREFETCH_BLOCK))
	return;

      ref->prefetch_before = 0;
      return;
    }

  /* Only prune the reference that is behind in the array.  */
  if (backward)
    {
      if (delta > 0)
	return;

      /* Transform the data so that we may assume that the accesses
	 are forward.  */
      delta = - delta;
      step = -step;
      delta_r = PREFETCH_BLOCK - 1 - delta_r;
      delta_b = PREFETCH_BLOCK - 1 - delta_b;
    }
  else
    {
      if (delta < 0)
	return;
    }

  /* Check whether the two references are likely to hit the same cache
     line, and how distant the iterations in which that occurs are from
     each other.  */

  if (step <= PREFETCH_BLOCK)
    {
      /* The accesses are sure to meet.  Let us check when.  */
      hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
      prefetch_before = (hit_from - delta_r + step - 1) / step;

      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
	prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* A more complicated case with step > prefetch_block.  First reduce
     the ratio between the step and the cache line size to its simplest
     terms.  The resulting denominator will then represent the number of
     distinct iterations after which each address will go back to its
     initial location within the cache line.  This computation assumes
     that PREFETCH_BLOCK is a power of two.  */
  prefetch_block = PREFETCH_BLOCK;
  reduced_prefetch_block = prefetch_block;
  reduced_step = step;
  while ((reduced_step & 1) == 0
	 && reduced_prefetch_block > 1)
    {
      reduced_step >>= 1;
      reduced_prefetch_block >>= 1;
    }

  prefetch_before = delta / step;
  delta %= step;
  ref_type = TREE_TYPE (ref->mem);
  align_unit = TYPE_ALIGN (ref_type) / 8;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
			       reduced_prefetch_block, align_unit))
    {
      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
	prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* Try also the following iteration.  */
  prefetch_before++;
  delta = step - delta;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
			       reduced_prefetch_block, align_unit))
    {
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* The ref probably does not reuse by.  */
  return;
}

/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */

static void
prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
{
  struct mem_ref *prune_by;
  bool before = true;

  prune_ref_by_self_reuse (ref);

  for (prune_by = refs; prune_by; prune_by = prune_by->next)
    {
      if (prune_by == ref)
	{
	  before = false;
	  continue;
	}

      if (!WRITE_CAN_USE_READ_PREFETCH
	  && ref->write_p
	  && !prune_by->write_p)
	continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
	  && !ref->write_p
	  && prune_by->write_p)
	continue;

      prune_ref_by_group_reuse (ref, prune_by, before);
    }
}

/* Prune the prefetch candidates in GROUP using the reuse analysis.  */

static void
prune_group_by_reuse (struct mem_ref_group *group)
{
  struct mem_ref *ref_pruned;

  for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
    {
      prune_ref_by_reuse (ref_pruned, group->refs);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  dump_mem_ref (dump_file, ref_pruned);

	  if (ref_pruned->prefetch_before == PREFETCH_ALL
	      && ref_pruned->prefetch_mod == 1)
	    fprintf (dump_file, " no restrictions");
	  else if (ref_pruned->prefetch_before == 0)
	    fprintf (dump_file, " do not prefetch");
	  else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
	    fprintf (dump_file, " prefetch once");
	  else
	    {
	      if (ref_pruned->prefetch_before != PREFETCH_ALL)
		{
		  fprintf (dump_file, " prefetch before ");
		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
			   ref_pruned->prefetch_before);
		}
	      if (ref_pruned->prefetch_mod != 1)
		{
		  fprintf (dump_file, " prefetch mod ");
		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
			   ref_pruned->prefetch_mod);
		}
	    }
	  fprintf (dump_file, "\n");
	}
    }
}

/* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */

static void
prune_by_reuse (struct mem_ref_group *groups)
{
  for (; groups; groups = groups->next)
    prune_group_by_reuse (groups);
}

/* Returns true if we should issue prefetch for REF.  */

static bool
should_issue_prefetch_p (struct mem_ref *ref)
{
  /* Do we want to issue prefetches for non-constant strides?  */
  if (!cst_and_fits_in_hwi (ref->group->step) && PREFETCH_DYNAMIC_STRIDES == 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Skipping non-constant step for reference %u:%u\n",
		 ref->group->uid, ref->uid);
      return false;
    }

  /* Some processors may have a hardware prefetcher that may conflict with
     prefetch hints for a range of strides.  Make sure we don't issue
     prefetches for such cases if the stride is within this particular
     range.  */
  if (cst_and_fits_in_hwi (ref->group->step)
      && abs_hwi (int_cst_value (ref->group->step))
	 < (HOST_WIDE_INT) PREFETCH_MINIMUM_STRIDE)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Step for reference %u:%u (" HOST_WIDE_INT_PRINT_DEC
		 ") is less than the minimum required stride of %d\n",
		 ref->group->uid, ref->uid, int_cst_value (ref->group->step),
		 PREFETCH_MINIMUM_STRIDE);
      return false;
    }

  /* For now do not issue prefetches for only first few of the
     iterations.  */
  if (ref->prefetch_before != PREFETCH_ALL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Ignoring reference %u:%u due to prefetch_before\n",
		 ref->group->uid, ref->uid);
      return false;
    }

  /* Do not prefetch nontemporal stores.  */
  if (ref->storent_p)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Ignoring nontemporal store reference %u:%u\n",
		 ref->group->uid, ref->uid);
      return false;
    }

  return true;
}

/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */

static bool
schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
		     unsigned ahead)
{
  unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
  unsigned slots_per_prefetch;
  struct mem_ref *ref;
  bool any = false;

  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;

  /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
     AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
     it will need a prefetch slot.  */
  slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
	     slots_per_prefetch);

  /* For now we just take memory references one by one and issue
     prefetches for as many as possible.  The groups are sorted
     starting with the largest step, since the references with
     large step are more likely to cause many cache misses.  */

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      {
	if (!should_issue_prefetch_p (ref))
	  continue;

	/* The loop is far from being sufficiently unrolled for this
	   prefetch.  Do not generate prefetch to avoid many redundant
	   prefetches.  */
	if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
	  continue;

	/* If we need to prefetch the reference each PREFETCH_MOD iterations,
	   and we unroll the loop UNROLL_FACTOR times, we need to insert
	   ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
	   iteration.  */
	n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
			/ ref->prefetch_mod);
	prefetch_slots = n_prefetches * slots_per_prefetch;

	/* If more than half of the prefetches would be lost anyway, do not
	   issue the prefetch.  */
	if (2 * remaining_prefetch_slots < prefetch_slots)
	  continue;

	/* Stop prefetching if debug counter is activated.  */
	if (!dbg_cnt (prefetch))
	  continue;

	ref->issue_prefetch_p = true;
	if (dump_file && (dump_flags & TDF_DETAILS))
	  fprintf (dump_file, "Decided to issue prefetch for reference %u:%u\n",
		   ref->group->uid, ref->uid);

	if (remaining_prefetch_slots <= prefetch_slots)
	  return true;
	remaining_prefetch_slots -= prefetch_slots;
	any = true;
      }

  return any;
}
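
/* A worked example of the slot accounting in schedule_prefetches above
   (illustrative values only, not target defaults): with ahead == 8 and
   unroll_factor == 4, a prefetch issued in one unrolled iteration is
   still in flight for roughly 8 / 4 == 2 unrolled iterations, so each
   prefetch instruction occupies (8 + 4 / 2) / 4 == 2 of the
   SIMULTANEOUS_PREFETCHES slots.  */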

/* Return TRUE if no prefetch is going to be generated in the given
   GROUPS.  */

static bool
nothing_to_prefetch_p (struct mem_ref_group *groups)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	return false;

  return true;
}

/* Estimate the number of prefetches in the given GROUPS.
   UNROLL_FACTOR is the factor by which LOOP was unrolled.  */

static int
estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
{
  struct mem_ref *ref;
  unsigned n_prefetches;
  int prefetch_count = 0;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	{
	  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
			  / ref->prefetch_mod);
	  prefetch_count += n_prefetches;
	}

  return prefetch_count;
}

/* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */

static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
  HOST_WIDE_INT delta;
  tree addr, addr_base, write_p, local, forward;
  gcall *prefetch;
  gimple_stmt_iterator bsi;
  unsigned n_prefetches, ap;
  bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Issued%s prefetch for reference %u:%u.\n",
	     nontemporal ? " nontemporal" : "",
	     ref->group->uid, ref->uid);

  bsi = gsi_for_stmt (ref->stmt);

  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
		  / ref->prefetch_mod);
  addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
  addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
					true, NULL, true, GSI_SAME_STMT);
  write_p = ref->write_p ? integer_one_node : integer_zero_node;
  local = nontemporal ? integer_zero_node : integer_three_node;

  for (ap = 0; ap < n_prefetches; ap++)
    {
      if (cst_and_fits_in_hwi (ref->group->step))
	{
	  /* Determine the address to prefetch.  */
	  delta = (ahead + ap * ref->prefetch_mod) *
		   int_cst_value (ref->group->step);
	  addr = fold_build_pointer_plus_hwi (addr_base, delta);
	  addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
					   NULL, true, GSI_SAME_STMT);
	}
      else
	{
	  /* The step size is non-constant but loop-invariant.  We use the
	     heuristic to simply prefetch ahead iterations ahead.  */
	  forward = fold_build2 (MULT_EXPR, sizetype,
				 fold_convert (sizetype, ref->group->step),
				 fold_convert (sizetype, size_int (ahead)));
	  addr = fold_build_pointer_plus (addr_base, forward);
	  addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
					   NULL, true, GSI_SAME_STMT);
	}

      if (addr_base != addr
	  && TREE_CODE (addr_base) == SSA_NAME
	  && TREE_CODE (addr) == SSA_NAME)
	{
	  duplicate_ssa_name_ptr_info (addr, SSA_NAME_PTR_INFO (addr_base));
	  /* As this isn't a plain copy we have to reset alignment
	     information.  */
	  if (SSA_NAME_PTR_INFO (addr))
	    mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr));
	}

      /* Create the prefetch instruction.  */
      prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
				    3, addr, write_p, local);
      gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
    }
}

/* Issue prefetches for the references in GROUPS into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
   factor by which LOOP was unrolled.  */

static void
issue_prefetches (struct mem_ref_group *groups,
		  unsigned unroll_factor, unsigned ahead)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (ref->issue_prefetch_p)
	issue_prefetch_ref (ref, unroll_factor, ahead);
}

/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */

static bool
nontemporal_store_p (struct mem_ref *ref)
{
  machine_mode mode;
  enum insn_code code;

  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
  if (!ref->write_p
      || !ref->independent_p
      || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
    return false;

  /* Check that we have the storent instruction for the mode.  */
  mode = TYPE_MODE (TREE_TYPE (ref->mem));
  if (mode == BLKmode)
    return false;

  code = optab_handler (storent_optab, mode);
  return code != CODE_FOR_nothing;
}

/* If REF is a nontemporal store, we mark the corresponding modify statement
   and return true.  Otherwise, we return false.  */

static bool
mark_nontemporal_store (struct mem_ref *ref)
{
  if (!nontemporal_store_p (ref))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Marked reference %u:%u as a nontemporal store.\n",
	     ref->group->uid, ref->uid);

  gimple_assign_set_nontemporal_move (ref->stmt, true);
  ref->storent_p = true;

  return true;
}

/* Issue a memory fence instruction after LOOP.  */

static void
emit_mfence_after_loop (struct loop *loop)
{
  vec<edge> exits = get_loop_exit_edges (loop);
  edge exit;
  gcall *call;
  gimple_stmt_iterator bsi;
  unsigned i;

  FOR_EACH_VEC_ELT (exits, i, exit)
    {
      call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);

      if (!single_pred_p (exit->dest)
	  /* If possible, we prefer not to insert the fence on other paths
	     in cfg.  */
	  && !(exit->flags & EDGE_ABNORMAL))
	split_loop_exit_edge (exit);
      bsi = gsi_after_labels (exit->dest);

      gsi_insert_before (&bsi, call, GSI_NEW_STMT);
    }

  exits.release ();
  update_ssa (TODO_update_ssa_only_virtuals);
}

/* Returns true if we can use storent in loop, false otherwise.  */

static bool
may_use_storent_in_loop_p (struct loop *loop)
{
  bool ret = true;

  if (loop->inner != NULL)
    return false;

  /* If we must issue a mfence insn after using storent, check that there
     is a suitable place for it at each of the loop exits.  */
  if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
    {
      vec<edge> exits = get_loop_exit_edges (loop);
      unsigned i;
      edge exit;

      FOR_EACH_VEC_ELT (exits, i, exit)
	if ((exit->flags & EDGE_ABNORMAL)
	    && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	  ret = false;

      exits.release ();
    }

  return ret;
}

/* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
   references in the loop.  */

static void
mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
{
  struct mem_ref *ref;
  bool any = false;

  if (!may_use_storent_in_loop_p (loop))
    return;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      any |= mark_nontemporal_store (ref);

  if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
    emit_mfence_after_loop (loop);
}

/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fill in DESC by the description of number of
   iterations.  */

static bool
should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
		      unsigned factor)
{
  if (!can_unroll_loop_p (loop, factor, desc))
    return false;

  /* We only consider loops without control flow for unrolling.  This is not
     a hard restriction -- tree_unroll_loop works with arbitrary loops
     as well; but the unrolling/prefetching is usually more profitable for
     loops consisting of a single basic block, and we want to limit the
     code growth.  */
  if (loop->num_nodes > 2)
    return false;

  return true;
}

/* Determine the coefficient by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  Description of
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */

static unsigned
determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
			 unsigned ninsns, struct tree_niter_desc *desc,
			 HOST_WIDE_INT est_niter)
{
  unsigned upper_bound;
  unsigned nfactor, factor, mod_constraint;
  struct mem_ref_group *agp;
  struct mem_ref *ref;

  /* First check whether the loop is not too large to unroll.  We ignore
     PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
     from unrolling them enough to make exactly one cache line covered by each
     iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
     us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;

  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would be never entered.  */
  if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
    upper_bound = est_niter;

  if (upper_bound <= 1)
    return 1;

  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
  factor = 1;
  for (agp = refs; agp; agp = agp->next)
    for (ref = agp->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	{
	  mod_constraint = ref->prefetch_mod;
	  nfactor = least_common_multiple (mod_constraint, factor);
	  if (nfactor <= upper_bound)
	    factor = nfactor;
	}

  if (!should_unroll_loop_p (loop, desc, factor))
    return 1;

  return factor;
}

/* Returns the total volume of the memory references REFS, taking into account
   reuses in the innermost loop and cache line size.  TODO -- we should also
   take into account reuses across the iterations of the loops in the loop
   nest.  */

static unsigned
volume_of_references (struct mem_ref_group *refs)
{
  unsigned volume = 0;
  struct mem_ref_group *gr;
  struct mem_ref *ref;

  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
	/* Almost always reuses another value?  */
	if (ref->prefetch_before != PREFETCH_ALL)
	  continue;

	/* If several iterations access the same cache line, use the size of
	   the line divided by this number.  Otherwise, a cache line is
	   accessed in each iteration.  TODO -- in the latter case, we should
	   take the size of the reference into account, rounding it up on cache
	   line size multiple.  */
	volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
      }
  return volume;
}

/* Returns the volume of memory references accessed across VEC iterations of
   loops, whose sizes are described in the LOOP_SIZES array.  N is the number
   of the loops in the nest (length of VEC and LOOP_SIZES vectors).  */

static unsigned
volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
{
  unsigned i;

  for (i = 0; i < n; i++)
    if (vec[i] != 0)
      break;

  if (i == n)
    return 0;

  gcc_assert (vec[i] > 0);

  /* We ignore the parts of the distance vector in subloops, since usually
     the numbers of iterations are much smaller.  */
  return loop_sizes[i] * vec[i];
}

/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and, LOOP is its innermost loop.  */

static void
add_subscript_strides (tree access_fn, unsigned stride,
		       HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
{
  struct loop *aloop;
  tree step;
  HOST_WIDE_INT astep;
  unsigned min_depth = loop_depth (loop) - n;

  while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
    {
      aloop = get_chrec_loop (access_fn);
      step = CHREC_RIGHT (access_fn);
      access_fn = CHREC_LEFT (access_fn);

      if ((unsigned) loop_depth (aloop) <= min_depth)
	continue;

      if (tree_fits_shwi_p (step))
	astep = tree_to_shwi (step);
      else
	astep = L1_CACHE_LINE_SIZE;

      strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
    }
}

/* Returns the volume of memory references accessed between two consecutive
   self-reuses of the reference DR.  We consider the subscripts of DR in N
   loops, and LOOP_SIZES contains the volumes of accesses in each of the
   loops.  LOOP is the innermost loop of the current loop nest.  */

static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
		     struct loop *loop)
{
  tree stride, access_fn;
  HOST_WIDE_INT *strides, astride;
  vec<tree> access_fns;
  tree ref = DR_REF (dr);
  unsigned i, ret = ~0u;

  /* In the following example:

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
	 use (a[j][i]);
     the same cache line is accessed each N steps (except if the change from
     i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
     we cannot rely purely on the results of the data dependence analysis.

     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than the cache size.  */

  strides = XCNEWVEC (HOST_WIDE_INT, n);
  access_fns = DR_ACCESS_FNS (dr);

  FOR_EACH_VEC_ELT (access_fns, i, access_fn)
    {
      /* Keep track of the reference corresponding to the subscript, so that we
	 know its stride.  */
      while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
	ref = TREE_OPERAND (ref, 0);

      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
	  if (tree_fits_uhwi_p (stride))
	    astride = tree_to_uhwi (stride);
	  else
	    astride = L1_CACHE_LINE_SIZE;

	  ref = TREE_OPERAND (ref, 0);
	}
      else
	astride = 1;

      add_subscript_strides (access_fn, astride, strides, n, loop);
    }

  for (i = n; i-- > 0; )
    {
      unsigned HOST_WIDE_INT s;

      s = strides[i] < 0 ? -strides[i] : strides[i];

      if (s < (unsigned) L1_CACHE_LINE_SIZE
	  && (loop_sizes[i]
	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
	{
	  ret = loop_sizes[i];
	  break;
	}
    }

  free (strides);
  return ret;
}

1594/* Determines the distance till the first reuse of each reference in REFS
79f5e442 1595 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1e373390 1596 memory references in the loop. Return false if the analysis fails. */
5417e022 1597
1e373390 1598static bool
79f5e442
ZD
1599determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1600 bool no_other_refs)
5417e022
ZD
1601{
1602 struct loop *nest, *aloop;
6e1aa848
DN
1603 vec<data_reference_p> datarefs = vNULL;
1604 vec<ddr_p> dependences = vNULL;
5417e022 1605 struct mem_ref_group *gr;
79f5e442 1606 struct mem_ref *ref, *refb;
843374f2 1607 auto_vec<loop_p> vloops;
5417e022
ZD
1608 unsigned *loop_data_size;
1609 unsigned i, j, n;
1610 unsigned volume, dist, adist;
1611 HOST_WIDE_INT vol;
1612 data_reference_p dr;
1613 ddr_p dep;
1614
1615 if (loop->inner)
1e373390 1616 return true;
1617
1618 /* Find the outermost loop of the loop nest of loop (we require that
1619 there are no sibling loops inside the nest). */
1620 nest = loop;
1621 while (1)
1622 {
1623 aloop = loop_outer (nest);
1624
1625 if (aloop == current_loops->tree_root
1626 || aloop->inner->next)
1627 break;
1628
1629 nest = aloop;
1630 }
1631
1632 /* For each loop, determine the amount of data accessed in each iteration.
1633 We use this to estimate whether the reference is evicted from the
1634 cache before its reuse. */
1635 find_loop_nest (nest, &vloops);
9771b263 1636 n = vloops.length ();
1637 loop_data_size = XNEWVEC (unsigned, n);
1638 volume = volume_of_references (refs);
1639 i = n;
1640 while (i-- != 0)
1641 {
1642 loop_data_size[i] = volume;
1643 /* Bound the volume by the L2 cache size, since above this bound,
1644 all dependence distances are equivalent. */
1645 if (volume > L2_CACHE_SIZE_BYTES)
1646 continue;
1647
9771b263 1648 aloop = vloops[i];
652c4c71 1649 vol = estimated_stmt_executions_int (aloop);
e5b332cd 1650 if (vol == -1)
1651 vol = expected_loop_iterations (aloop);
1652 volume *= vol;
1653 }
1654
1655 /* Prepare the references in the form suitable for data dependence
0d52bcc1 1656 analysis. We ignore unanalyzable data references (the results
1657 are used just as a heuristic to estimate temporality of the
1658 references, hence we do not need to worry about correctness). */
1659 for (gr = refs; gr; gr = gr->next)
1660 for (ref = gr->refs; ref; ref = ref->next)
1661 {
1662 dr = create_data_ref (loop_preheader_edge (nest),
1663 loop_containing_stmt (ref->stmt),
62c8a2cf 1664 ref->mem, ref->stmt, !ref->write_p, false);
1665
1666 if (dr)
1667 {
1668 ref->reuse_distance = volume;
1669 dr->aux = ref;
9771b263 1670 datarefs.safe_push (dr);
5417e022 1671 }
1672 else
1673 no_other_refs = false;
1674 }
1675
9771b263 1676 FOR_EACH_VEC_ELT (datarefs, i, dr)
1677 {
1678 dist = self_reuse_distance (dr, loop_data_size, n, loop);
3d9a9f94 1679 ref = (struct mem_ref *) dr->aux;
1680 if (ref->reuse_distance > dist)
1681 ref->reuse_distance = dist;
1682
1683 if (no_other_refs)
1684 ref->independent_p = true;
1685 }
1686
1687 if (!compute_all_dependences (datarefs, &dependences, vloops, true))
1688 return false;
5417e022 1689
9771b263 1690 FOR_EACH_VEC_ELT (dependences, i, dep)
1691 {
1692 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1693 continue;
1694
1695 ref = (struct mem_ref *) DDR_A (dep)->aux;
1696 refb = (struct mem_ref *) DDR_B (dep)->aux;
79f5e442 1697
5417e022 1698 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
dfbddbeb 1699 || DDR_COULD_BE_INDEPENDENT_P (dep)
1700 || DDR_NUM_DIST_VECTS (dep) == 0)
1701 {
0d52bcc1 1702 /* If the dependence cannot be analyzed, assume that there might be
1703 a reuse. */
1704 dist = 0;
b8698a0f 1705
1706 ref->independent_p = false;
1707 refb->independent_p = false;
1708 }
1709 else
1710 {
0d52bcc1 1711 /* The distance vectors are normalized to be always lexicographically
1712 positive, hence we cannot tell just from them whether DDR_A comes
1713 before DDR_B or vice versa. However, it is not important,
1714 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1715 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1716 in cache (and marking it as nontemporal would not affect
1717 anything). */
1718
1719 dist = volume;
1720 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1721 {
1722 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1723 loop_data_size, n);
1724
1725 /* If this is a dependence in the innermost loop (i.e., the
1726 distances in all superloops are zero) and it is not
1727 the trivial self-dependence with distance zero, record that
1728 the references are not completely independent. */
1729 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1730 && (ref != refb
1731 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1732 {
1733 ref->independent_p = false;
1734 refb->independent_p = false;
1735 }
1736
1737 /* Ignore accesses closer than
1738 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1739 so that we use nontemporal prefetches e.g. if a single memory
1740 location is accessed several times in a single iteration of
1741 the loop. */
1742 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1743 continue;
1744
1745 if (adist < dist)
1746 dist = adist;
1747 }
1748 }
1749
1750 if (ref->reuse_distance > dist)
1751 ref->reuse_distance = dist;
1752 if (refb->reuse_distance > dist)
1753 refb->reuse_distance = dist;
1754 }
1755
1756 free_dependence_relations (dependences);
1757 free_data_refs (datarefs);
1758 free (loop_data_size);
1759
1760 if (dump_file && (dump_flags & TDF_DETAILS))
1761 {
1762 fprintf (dump_file, "Reuse distances:\n");
1763 for (gr = refs; gr; gr = gr->next)
1764 for (ref = gr->refs; ref; ref = ref->next)
1765 fprintf (dump_file, " reference %u:%u distance %u\n",
1766 ref->group->uid, ref->uid, ref->reuse_distance);
5417e022 1767 }
1768
1769 return true;
1770}
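/* Illustration only (not used by the pass): a small standalone sketch, with
   made-up numbers, of how the per-loop data volumes above are accumulated.
   We start from the volume accessed in one iteration of the innermost loop
   and multiply outwards by the expected trip counts, and stop scaling once
   the L2 size is exceeded, since all larger reuse distances are equivalent.
   The trip counts, per-iteration volume and L2 size below are assumptions
   for the example only.  */

static unsigned
loop_volume_sketch (void)
{
  const unsigned iters[3] = { 100, 100, 100 };	/* assumed trip counts, outermost first */
  const unsigned l2_bytes = 512 * 1024;		/* assumed L2 cache size */
  unsigned volume = 256;	/* bytes touched per innermost iteration */
  unsigned sizes[3];
  unsigned i = 3;

  while (i-- != 0)
    {
      sizes[i] = volume;
      /* Once the volume exceeds the L2 size, stop scaling.  */
      if (volume > l2_bytes)
	continue;
      volume *= iters[i];
    }

  /* Here sizes[2] == 256, sizes[1] == 25600 and sizes[0] == 2560000.  */
  return sizes[0];
}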
1771
1772/* Determine whether or not the trip count to ahead ratio is too small based
1773 on profitability considerations.
db34470d 1774 AHEAD: the iteration ahead distance,
1775 EST_NITER: the estimated trip count. */
1776
1777static bool
1778trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1779{
1780 /* Assume trip count to ahead ratio is big enough if the trip count could not
1781 be estimated at compile time. */
1782 if (est_niter < 0)
1783 return false;
1784
1785 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1786 {
1787 if (dump_file && (dump_flags & TDF_DETAILS))
1788 fprintf (dump_file,
1789 "Not prefetching -- loop estimated to roll only %d times\n",
1790 (int) est_niter);
1791 return true;
1792 }
1793
1794 return false;
1795}
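/* Illustration only (not used by the pass): a worked example of the check
   above with made-up numbers.  With an assumed prefetch latency of 200 and
   an assumed per-iteration cost of 50, the caller would compute
   AHEAD = (200 + 50 - 1) / 50 = 4; with an assumed ratio of 4, the loop then
   has to be expected to roll at least 16 times for prefetching to be
   considered at all.  */

static bool
trip_count_large_enough_sketch (long est_niter)
{
  const unsigned latency = 200;	/* assumed prefetch latency */
  const unsigned time = 50;	/* assumed cost of one loop iteration */
  const unsigned ratio = 4;	/* assumed trip count to ahead ratio */
  const unsigned ahead = (latency + time - 1) / time;

  /* An unknown trip count is optimistically treated as large enough, as in
     trip_count_to_ahead_ratio_too_small_p above.  */
  if (est_niter < 0)
    return true;

  return est_niter >= (long) (ratio * ahead);
}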
1796
1797/* Determine whether or not the number of memory references in the loop is
1798 reasonable based on the profitability and compilation time considerations.
db34470d 1799 NINSNS: estimated number of instructions in the loop,
1800 MEM_REF_COUNT: total number of memory references in the loop. */
1801
b8698a0f 1802static bool
0bbe50f6 1803mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
db34470d 1804{
0bbe50f6 1805 int insn_to_mem_ratio;
1806
1807 if (mem_ref_count == 0)
1808 return false;
1809
1810 /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1811 (compute_all_dependences) have high costs based on quadratic complexity.
1812 To avoid huge compilation time, we give up prefetching if mem_ref_count
1813 is too large. */
1814 if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1815 return false;
1816
1817 /* Prefetching improves performance by overlapping cache-missing
1818 memory accesses with CPU operations. If the loop does not have
1819 enough CPU operations to overlap with memory operations, prefetching
1820 won't give a significant benefit. One approximate way of checking
1821 this is to require the ratio of instructions to memory references to
1822 be above a certain limit. This approximation works well in practice.
1823 TODO: Implement a more precise computation by estimating the time
1824 for each CPU or memory op in the loop. Time estimates for memory ops
1825 should account for cache misses. */
b8698a0f 1826 insn_to_mem_ratio = ninsns / mem_ref_count;
1827
1828 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1829 {
1830 if (dump_file && (dump_flags & TDF_DETAILS))
1831 fprintf (dump_file,
1832 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1833 insn_to_mem_ratio);
1834 return false;
1835 }
db34470d 1836
1837 return true;
1838}
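/* Illustration only (not used by the pass): the ratio test above with made-up
   numbers.  A loop body of 12 instructions containing 4 memory references has
   an instruction-to-memory ratio of 3; with an assumed minimum ratio of 3 it
   would still be considered for prefetching, while a body of 8 instructions
   with the same 4 references (ratio 2) would not, since there is too little
   computation to hide the misses behind.  */

static bool
enough_compute_per_mem_ref_sketch (unsigned ninsns, unsigned mem_ref_count)
{
  const unsigned min_ratio = 3;	/* assumed minimum insn-to-mem ratio */

  if (mem_ref_count == 0)
    return false;

  return ninsns / mem_ref_count >= min_ratio;
}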
1839
1840/* Determine whether or not the instruction to prefetch ratio in the loop is
1841 too small based on profitability considerations.
1842 NINSNS: estimated number of instructions in the loop,
1843 PREFETCH_COUNT: an estimate of the number of prefetches,
1844 UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
1845
1846static bool
1847insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1848 unsigned unroll_factor)
1849{
1850 int insn_to_prefetch_ratio;
1851
1852 /* Prefetching most likely causes performance degradation when the instruction
1853 to prefetch ratio is too small. Too many prefetch instructions in a loop
1854 may reduce the I-cache performance.
1855 (unroll_factor * ninsns) is used to estimate the number of instructions in
1856 the unrolled loop. This implementation is a bit simplistic -- the number
1857 of issued prefetch instructions is also affected by unrolling. So,
1858 prefetch_mod and the unroll factor should be taken into account when
1859 determining prefetch_count. Also, the number of insns of the unrolled
1860 loop will usually be significantly smaller than the number of insns of the
1861 original loop * unroll_factor (at least the induction variable increases
1862 and the exit branches will get eliminated), so it might be better to use
1863 tree_estimate_loop_size + estimated_unrolled_size. */
1864 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1865 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
db34470d 1866 {
1867 if (dump_file && (dump_flags & TDF_DETAILS))
1868 fprintf (dump_file,
1869 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1870 insn_to_prefetch_ratio);
0bbe50f6 1871 return true;
db34470d 1872 }
b8698a0f 1873
0bbe50f6 1874 return false;
1875}
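/* Illustration only (not used by the pass): the unrolled-loop ratio test
   above with made-up numbers.  A 20-instruction body unrolled by 4 gives
   roughly 80 instructions; if 10 prefetches would be issued per unrolled
   iteration, the instruction-to-prefetch ratio is 8, which an assumed
   minimum of 9 would reject as likely to clog the issue bandwidth and the
   I-cache.  */

static bool
enough_insns_per_prefetch_sketch (unsigned ninsns, unsigned prefetch_count,
				  unsigned unroll_factor)
{
  const unsigned min_ratio = 9;	/* assumed minimum insn-to-prefetch ratio */

  if (prefetch_count == 0)
    return false;

  return (unroll_factor * ninsns) / prefetch_count >= min_ratio;
}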
1876
1877
b076a3fd 1878/* Issue prefetch instructions for array references in LOOP. Returns
d73be268 1879 true if the LOOP was unrolled. */
1880
1881static bool
d73be268 1882loop_prefetch_arrays (struct loop *loop)
1883{
1884 struct mem_ref_group *refs;
1885 unsigned ahead, ninsns, time, unroll_factor;
1886 HOST_WIDE_INT est_niter;
b076a3fd 1887 struct tree_niter_desc desc;
79f5e442 1888 bool unrolled = false, no_other_refs;
1889 unsigned prefetch_count;
1890 unsigned mem_ref_count;
b076a3fd 1891
efd8f750 1892 if (optimize_loop_nest_for_size_p (loop))
1893 {
1894 if (dump_file && (dump_flags & TDF_DETAILS))
1895 fprintf (dump_file, " ignored (cold area)\n");
1896 return false;
1897 }
1898
1899 /* FIXME: the time should be weighted by the probabilities of the blocks in
1900 the loop body. */
1901 time = tree_num_loop_insns (loop, &eni_time_weights);
1902 if (time == 0)
1903 return false;
1904
1905 ahead = (PREFETCH_LATENCY + time - 1) / time;
652c4c71 1906 est_niter = estimated_stmt_executions_int (loop);
e5b332cd 1907 if (est_niter == -1)
00022058 1908 est_niter = likely_max_stmt_executions_int (loop);
1909
1910 /* Prefetching is not likely to be profitable if the trip count to ahead
1911 ratio is too small. */
1912 if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1913 return false;
1914
1915 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1916
b076a3fd 1917 /* Step 1: gather the memory references. */
db34470d 1918 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
b076a3fd 1919
1920 /* Give up prefetching if the number of memory references in the
1921 loop is not reasonable based on profitability and compilation time
1922 considerations. */
1923 if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1924 goto fail;
1925
1926 /* Step 2: estimate the reuse effects. */
1927 prune_by_reuse (refs);
1928
d5058523 1929 if (nothing_to_prefetch_p (refs))
1930 goto fail;
1931
1932 if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
1933 goto fail;
5417e022 1934
0bbe50f6 1935 /* Step 3: determine unroll factor. */
1936 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1937 est_niter);
1938
1939 /* Estimate prefetch count for the unrolled loop. */
1940 prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1941 if (prefetch_count == 0)
1942 goto fail;
1943
2711355f 1944 if (dump_file && (dump_flags & TDF_DETAILS))
b8698a0f 1945 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
d81f5387 1946 HOST_WIDE_INT_PRINT_DEC "\n"
1947 "insn count %d, mem ref count %d, prefetch count %d\n",
1948 ahead, unroll_factor, est_niter,
1949 ninsns, mem_ref_count, prefetch_count);
db34470d 1950
1951 /* Prefetching is not likely to be profitable if the instruction to prefetch
1952 ratio is too small. */
1953 if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1954 unroll_factor))
1955 goto fail;
1956
1957 mark_nontemporal_stores (loop, refs);
2711355f 1958
1959 /* Step 4: what to prefetch? */
1960 if (!schedule_prefetches (refs, unroll_factor, ahead))
1961 goto fail;
1962
1963 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1964 iterations so that we do not issue superfluous prefetches. */
1965 if (unroll_factor != 1)
1966 {
d73be268 1967 tree_unroll_loop (loop, unroll_factor,
1968 single_dom_exit (loop), &desc);
1969 unrolled = true;
1970 }
1971
1972 /* Step 6: issue the prefetches. */
1973 issue_prefetches (refs, unroll_factor, ahead);
1974
1975fail:
1976 release_mem_refs (refs);
1977 return unrolled;
1978}
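/* Illustration only (not produced by this pass): a hand-written sketch of the
   loop shape the steps above aim for on a simple streaming loop -- unroll by
   the chosen factor and issue __builtin_prefetch a fixed distance ahead.  The
   unroll factor of 4 and the prefetch distance of 64 elements are made-up
   example values; the pass derives the real ones from the target's prefetch
   latency, cache line size and the loop's cost.  Prefetching slightly past
   the end of the arrays is harmless, since data prefetches do not fault.  */

static void
prefetched_loop_shape_sketch (const double *a, double *b, unsigned n)
{
  unsigned i = 0;

  for (; i + 4 <= n; i += 4)
    {
      __builtin_prefetch (&a[i + 64], 0, 3);	/* read, keep in cache */
      __builtin_prefetch (&b[i + 64], 1, 3);	/* written */
      b[i] = a[i] * 2.0;
      b[i + 1] = a[i + 1] * 2.0;
      b[i + 2] = a[i + 2] * 2.0;
      b[i + 3] = a[i + 3] * 2.0;
    }

  /* Remainder iterations left over after unrolling.  */
  for (; i < n; i++)
    b[i] = a[i] * 2.0;
}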
1979
d73be268 1980/* Issue prefetch instructions for array references in loops. */
b076a3fd 1981
c7f965b6 1982unsigned int
d73be268 1983tree_ssa_prefetch_arrays (void)
b076a3fd 1984{
1985 struct loop *loop;
1986 bool unrolled = false;
c7f965b6 1987 int todo_flags = 0;
b076a3fd 1988
134b044d 1989 if (!targetm.have_prefetch ()
1990 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1991 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1992 of processor costs and i486 does not have prefetch, but
134b044d 1993 -march=pentium4 causes targetm.have_prefetch to be true. Ugh. */
b076a3fd 1994 || PREFETCH_BLOCK == 0)
c7f965b6 1995 return 0;
b076a3fd 1996
1997 if (dump_file && (dump_flags & TDF_DETAILS))
1998 {
1999 fprintf (dump_file, "Prefetching parameters:\n");
2000 fprintf (dump_file, " simultaneous prefetches: %d\n",
2001 SIMULTANEOUS_PREFETCHES);
2002 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
47eb5b32 2003 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
2004 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
2005 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
5417e022 2006 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
2007 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
2008 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
db34470d 2009 MIN_INSN_TO_PREFETCH_RATIO);
b8698a0f 2010 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
db34470d 2011 PREFETCH_MIN_INSN_TO_MEM_RATIO);
2012 fprintf (dump_file, "\n");
2013 }
2014
2015 initialize_original_copy_tables ();
2016
e79983f4 2017 if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
b076a3fd 2018 {
2019 tree type = build_function_type_list (void_type_node,
2020 const_ptr_type_node, NULL_TREE);
2021 tree decl = add_builtin_function ("__builtin_prefetch", type,
2022 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
2023 NULL, NULL_TREE);
b076a3fd 2024 DECL_IS_NOVOPS (decl) = true;
e79983f4 2025 set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
2026 }
2027
f0bd40b1 2028 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
b076a3fd 2029 {
2030 if (dump_file && (dump_flags & TDF_DETAILS))
2031 fprintf (dump_file, "Processing loop %d:\n", loop->num);
2032
d73be268 2033 unrolled |= loop_prefetch_arrays (loop);
2034
2035 if (dump_file && (dump_flags & TDF_DETAILS))
2036 fprintf (dump_file, "\n\n");
2037 }
2038
2039 if (unrolled)
2040 {
2041 scev_reset ();
c7f965b6 2042 todo_flags |= TODO_cleanup_cfg;
2043 }
2044
2045 free_original_copy_tables ();
c7f965b6 2046 return todo_flags;
b076a3fd 2047}
2048
2049/* Prefetching. */
2050
2051namespace {
2052
2053const pass_data pass_data_loop_prefetch =
2054{
2055 GIMPLE_PASS, /* type */
2056 "aprefetch", /* name */
2057 OPTGROUP_LOOP, /* optinfo_flags */
2058 TV_TREE_PREFETCH, /* tv_id */
2059 ( PROP_cfg | PROP_ssa ), /* properties_required */
2060 0, /* properties_provided */
2061 0, /* properties_destroyed */
2062 0, /* todo_flags_start */
2063 0, /* todo_flags_finish */
2064};
2065
2066class pass_loop_prefetch : public gimple_opt_pass
2067{
2068public:
2069 pass_loop_prefetch (gcc::context *ctxt)
2070 : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
2071 {}
2072
2073 /* opt_pass methods: */
1a3d085c 2074 virtual bool gate (function *) { return flag_prefetch_loop_arrays > 0; }
be55bfe6 2075 virtual unsigned int execute (function *);
2076
2077}; // class pass_loop_prefetch
2078
2079unsigned int
2080pass_loop_prefetch::execute (function *fun)
2081{
2082 if (number_of_loops (fun) <= 1)
2083 return 0;
2084
2085 if ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) != 0)
2086 {
2087 static bool warned = false;
2088
2089 if (!warned)
2090 {
2091 warning (OPT_Wdisabled_optimization,
2092 "%<l1-cache-size%> parameter is not a power of two %d",
2093 PREFETCH_BLOCK);
2094 warned = true;
2095 }
2096 return 0;
2097 }
2098
2099 return tree_ssa_prefetch_arrays ();
2100}
2101
2102} // anon namespace
2103
2104gimple_opt_pass *
2105make_pass_loop_prefetch (gcc::context *ctxt)
2106{
2107 return new pass_loop_prefetch (ctxt);
2108}
2109
2110
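/* Usage note (illustration only): the pass is gated on
   flag_prefetch_loop_arrays, i.e. it runs when -fprefetch-loop-arrays is given
   and the target supports prefetching.  A loop such as the one below,
   compiled with something like
       gcc -O2 -fprefetch-loop-arrays -fdump-tree-aprefetch-details f.c
   should show this pass's per-loop decisions in the "aprefetch" dump; the
   exact command line is an example, not something this file prescribes.  */

static double
candidate_loop_sketch (const double *a, unsigned n)
{
  double sum = 0.0;
  unsigned i;

  for (i = 0; i < n; i++)
    sum += a[i];

  return sum;
}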