8dfbf380 1/* Array prefetching.
fbd26352 2 Copyright (C) 2005-2019 Free Software Foundation, Inc.
48e1416a 3
8dfbf380 4This file is part of GCC.
48e1416a 5
8dfbf380 6GCC is free software; you can redistribute it and/or modify it
7under the terms of the GNU General Public License as published by the
8c4c00c1 8Free Software Foundation; either version 3, or (at your option) any
8dfbf380 9later version.
48e1416a 10
8dfbf380 11GCC is distributed in the hope that it will be useful, but WITHOUT
12ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
48e1416a 15
8dfbf380 16You should have received a copy of the GNU General Public License
8c4c00c1 17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
8dfbf380 19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
9ef16211 23#include "backend.h"
7c29e30e 24#include "target.h"
25#include "rtl.h"
8dfbf380 26#include "tree.h"
9ef16211 27#include "gimple.h"
7c29e30e 28#include "predict.h"
29#include "tree-pass.h"
7c29e30e 30#include "gimple-ssa.h"
7c29e30e 31#include "optabs-query.h"
7c29e30e 32#include "tree-pretty-print.h"
b20a8bb4 33#include "fold-const.h"
9ed99284 34#include "stor-layout.h"
a8783bee 35#include "gimplify.h"
dcf1a1ec 36#include "gimple-iterator.h"
e795d6e1 37#include "gimplify-me.h"
05d9c18a 38#include "tree-ssa-loop-ivopts.h"
39#include "tree-ssa-loop-manip.h"
40#include "tree-ssa-loop-niter.h"
073c1fd5 41#include "tree-ssa-loop.h"
fce0c3a6 42#include "ssa.h"
073c1fd5 43#include "tree-into-ssa.h"
8dfbf380 44#include "cfgloop.h"
8dfbf380 45#include "tree-scalar-evolution.h"
8dfbf380 46#include "params.h"
47#include "langhooks.h"
bc8bb825 48#include "tree-inline.h"
5c205353 49#include "tree-data-ref.h"
c159e770 50#include "diagnostic-core.h"
35673d39 51#include "dbgcnt.h"
8e3cb73b 52
8dfbf380 53/* This pass inserts prefetch instructions to optimize cache usage during
54 accesses to arrays in loops. It processes loops sequentially and:
55
56 1) Gathers all memory references in the single loop.
57 2) For each of the references it decides when it is profitable to prefetch
58 it. To do that, we evaluate the reuse among the accesses and determine
59 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
60 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
61 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
62 iterations of the loop that are zero modulo PREFETCH_MOD). For example
63 (assuming cache line size is 64 bytes, char has size 1 byte and there
64 is no hardware sequential prefetch):
65
66 char *a;
67 for (i = 0; i < max; i++)
68 {
69 a[255] = ...; (0)
70 a[i] = ...; (1)
71 a[i + 64] = ...; (2)
72 a[16*i] = ...; (3)
73 a[187*i] = ...; (4)
74 a[187*i + 50] = ...; (5)
75 }
76
77 (0) obviously has PREFETCH_BEFORE 1
78 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
79 location 64 iterations before it, and PREFETCH_MOD 64 (since
80 it hits the same cache line otherwise).
81 (2) has PREFETCH_MOD 64
82 (3) has PREFETCH_MOD 4
 83 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
66f19dbb 84 the cache line accessed by (5) is the same with probability only
8dfbf380 85 7/32 (the accesses are 50 bytes apart; they share a 64-byte line
 only if the first one lands in its first 14 bytes, and 14/64 = 7/32).
86 (5) has PREFETCH_MOD 1 as well.
87
5c205353 88 Additionally, we use data dependence analysis to determine for each
89 reference the distance till the first reuse; this information is used
90 to determine the temporality of the issued prefetch instruction.
91
8dfbf380 92 3) We determine how much ahead we need to prefetch. The number of
93 iterations needed is time to fetch / time spent in one iteration of
94 the loop. The problem is that we do not know either of these values,
 95 so we just make a heuristic guess based on a magic (possibly)
 96 target-specific constant and the size of the loop. E.g., if a fetch
 takes roughly 200 cycles and one iteration of the loop roughly 50,
 we want to prefetch about 200 / 50 = 4 iterations ahead.
97
98 4) Determine which of the references we prefetch. We take into account
99 that there is a maximum number of simultaneous prefetches (provided
 100 by machine description). We issue as many prefetches as possible
101 while still within this bound (starting with those with lowest
102 prefetch_mod, since they are responsible for most of the cache
103 misses).
48e1416a 104
8dfbf380 105 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
106 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
107 prefetching nonaccessed memory.
108 TODO -- actually implement peeling.
48e1416a 109
8dfbf380 110 6) We actually emit the prefetch instructions. ??? Perhaps emit the
111 prefetch instructions with guards in cases where 5) was not sufficient
112 to satisfy the constraints?
113
76595608 114 A cost model is implemented to determine whether or not prefetching is
115 profitable for a given loop. The cost model has three heuristics:
116
117 1. Function trip_count_to_ahead_ratio_too_small_p implements a
118 heuristic that determines whether or not the loop has too few
119 iterations (compared to ahead). Prefetching is not likely to be
120 beneficial if the trip count to ahead ratio is below a certain
121 minimum.
122
123 2. Function mem_ref_count_reasonable_p implements a heuristic that
124 determines whether the given loop has enough CPU ops that can be
 125 overlapped with cache-missing memory ops. If not, the loop
126 won't benefit from prefetching. In the implementation,
127 prefetching is not considered beneficial if the ratio between
128 the instruction count and the mem ref count is below a certain
129 minimum.
130
131 3. Function insn_to_prefetch_ratio_too_small_p implements a
132 heuristic that disables prefetching in a loop if the prefetching
133 cost is above a certain limit. The relative prefetching cost is
134 estimated by taking the ratio between the prefetch count and the
 135 total instruction count (this models the I-cache cost).
136
0ab353e1 137 The limits used in these heuristics are defined as parameters with
48e1416a 138 reasonable default values. Machine-specific default values will be
0ab353e1 139 added later.
48e1416a 140
8dfbf380 141 Some other TODO:
 142 -- write and use a more general reuse analysis (that could also be used
 143 in other cache-aimed loop optimizations)
 144 -- make it behave sanely together with the prefetches given by the user
 145 (now we just ignore them; at the very least we should avoid
 146 optimizing loops in which the user put his own prefetches)
147 -- we assume cache line size alignment of arrays; this could be
148 improved. */
149
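/* For illustration, the net effect of the pass on a simple loop (leaving
   aside unrolling and nontemporal hints) is roughly to turn

     for (i = 0; i < n; i++)
       sum += a[i];

   into

     for (i = 0; i < n; i++)
       {
         __builtin_prefetch (&a[i + ahead], 0, 3);
         sum += a[i];
       }

   where ahead is the prefetch distance determined in step 3).  */
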
150/* Magic constants follow. These should be replaced by machine specific
151 numbers. */
152
8dfbf380 153/* True if write can be prefetched by a read prefetch. */
154
155#ifndef WRITE_CAN_USE_READ_PREFETCH
156#define WRITE_CAN_USE_READ_PREFETCH 1
157#endif
158
159/* True if read can be prefetched by a write prefetch. */
160
161#ifndef READ_CAN_USE_WRITE_PREFETCH
162#define READ_CAN_USE_WRITE_PREFETCH 0
163#endif
164
07804af5 165/* The size of the block loaded by a single prefetch. Usually, this is
166 the same as cache line size (at the moment, we only consider one level
167 of cache hierarchy). */
8dfbf380 168
169#ifndef PREFETCH_BLOCK
07804af5 170#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
8dfbf380 171#endif
172
 173/* Do we have forward hardware sequential prefetching? */
174
175#ifndef HAVE_FORWARD_PREFETCH
176#define HAVE_FORWARD_PREFETCH 0
177#endif
178
 179/* Do we have backward hardware sequential prefetching? */
180
181#ifndef HAVE_BACKWARD_PREFETCH
182#define HAVE_BACKWARD_PREFETCH 0
183#endif
184
185/* In some cases we are only able to determine that there is a certain
186 probability that the two accesses hit the same cache line. In this
187 case, we issue the prefetches for both of them if this probability
f0b5f617 188 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
8dfbf380 189
190#ifndef ACCEPTABLE_MISS_RATE
191#define ACCEPTABLE_MISS_RATE 50
192#endif
193
0c916a7b 194#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
195#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
5c205353 196
197/* We consider a memory access nontemporal if it is not reused sooner than
198 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
199 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
 200 so that we use nontemporal prefetches e.g. if a single memory location
201 is accessed several times in a single iteration of the loop. */
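/* E.g., with a hypothetical 32 kB L1 cache, reuses within
   32768 / 16 = 2048 bytes of accessed data are disregarded, so they do
   not prevent an access from being treated as nontemporal.  */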
202#define NONTEMPORAL_FRACTION 16
203
5b5037b3 204/* In case we have to emit a memory fence instruction after the loop that
205 uses nontemporal stores, this defines the builtin to use. */
206
207#ifndef FENCE_FOLLOWING_MOVNT
208#define FENCE_FOLLOWING_MOVNT NULL_TREE
209#endif
210
e20bb126 211/* It is not profitable to prefetch when the trip count is not at
212 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
213 For example, in a loop with a prefetch ahead distance of 10,
214 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
215 profitable to prefetch when the trip count is greater or equal to
216 40. In that case, 30 out of the 40 iterations will benefit from
217 prefetching. */
218
219#ifndef TRIP_COUNT_TO_AHEAD_RATIO
220#define TRIP_COUNT_TO_AHEAD_RATIO 4
221#endif
222
8dfbf380 223/* A group of references among which reuse may occur. */
224
225struct mem_ref_group
226{
227 tree base; /* Base of the reference. */
81d2a38f 228 tree step; /* Step of the reference. */
8dfbf380 229 struct mem_ref *refs; /* References in the group. */
230 struct mem_ref_group *next; /* Next group of references. */
76f32cd9 231 unsigned int uid; /* Group UID, used only for debugging. */
8dfbf380 232};
233
234/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
235
7097b942 236#define PREFETCH_ALL HOST_WIDE_INT_M1U
8dfbf380 237
c0a0de5e 238/* Do not generate a prefetch if the unroll factor is significantly less
239 than what is required by the prefetch. This is to avoid redundant
1aabe697 240 prefetches. For example, when prefetch_mod is 16 and unroll_factor is
241 2, prefetching requires unrolling the loop 16 times, but
242 the loop is actually unrolled twice. In this case (ratio = 8),
c0a0de5e 243 prefetching is not likely to be beneficial. */
244
245#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
1aabe697 246#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
c0a0de5e 247#endif
248
76595608 249/* Some of the prefetch computations have quadratic complexity. We want to
 250 avoid huge compile times and, therefore, want to limit the number of
251 memory references per loop where we consider prefetching. */
252
253#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
254#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
255#endif
256
8dfbf380 257/* The memory reference. */
258
259struct mem_ref
260{
42acab1c 261 gimple *stmt; /* Statement in that the reference appears. */
8dfbf380 262 tree mem; /* The reference. */
263 HOST_WIDE_INT delta; /* Constant offset of the reference. */
8dfbf380 264 struct mem_ref_group *group; /* The group of references it belongs to. */
265 unsigned HOST_WIDE_INT prefetch_mod;
266 /* Prefetch only each PREFETCH_MOD-th
267 iteration. */
268 unsigned HOST_WIDE_INT prefetch_before;
269 /* Prefetch only first PREFETCH_BEFORE
270 iterations. */
5c205353 271 unsigned reuse_distance; /* The amount of data accessed before the first
272 reuse of this value. */
8dfbf380 273 struct mem_ref *next; /* The next reference in the group. */
76f32cd9 274 unsigned int uid; /* Ref UID, used only for debugging. */
5b5037b3 275 unsigned write_p : 1; /* Is it a write? */
 276 unsigned independent_p : 1; /* True if the reference is independent of
277 all other references inside the loop. */
278 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
279 unsigned storent_p : 1; /* True if we changed the store to a
280 nontemporal one. */
8dfbf380 281};
282
7b64e7e0 283/* Dumps information about a memory reference. */
8dfbf380 284static void
7b64e7e0 285dump_mem_details (FILE *file, tree base, tree step,
286 HOST_WIDE_INT delta, bool write_p)
8dfbf380 287{
7b64e7e0 288 fprintf (file, "(base ");
289 print_generic_expr (file, base, TDF_SLIM);
8dfbf380 290 fprintf (file, ", step ");
7b64e7e0 291 if (cst_and_fits_in_hwi (step))
292 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
81d2a38f 293 else
ffdaf8f1 294 print_generic_expr (file, step, TDF_SLIM);
8dfbf380 295 fprintf (file, ")\n");
76f32cd9 296 fprintf (file, " delta " HOST_WIDE_INT_PRINT_DEC "\n", delta);
297 fprintf (file, " %s\n\n", write_p ? "write" : "read");
7b64e7e0 298}
8dfbf380 299
7b64e7e0 300/* Dumps information about reference REF to FILE. */
8dfbf380 301
7b64e7e0 302static void
303dump_mem_ref (FILE *file, struct mem_ref *ref)
304{
76f32cd9 305 fprintf (file, "reference %u:%u (", ref->group->uid, ref->uid);
306 print_generic_expr (file, ref->mem, TDF_SLIM);
307 fprintf (file, ")\n");
8dfbf380 308}
309
310/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
311 exist. */
312
313static struct mem_ref_group *
81d2a38f 314find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
8dfbf380 315{
76f32cd9 316 /* Global count for setting struct mem_ref_group->uid. */
317 static unsigned int last_mem_ref_group_uid = 0;
318
8dfbf380 319 struct mem_ref_group *group;
320
321 for (; *groups; groups = &(*groups)->next)
322 {
81d2a38f 323 if (operand_equal_p ((*groups)->step, step, 0)
8dfbf380 324 && operand_equal_p ((*groups)->base, base, 0))
325 return *groups;
326
81d2a38f 327 /* If step is an integer constant, keep the list of groups sorted
328 by decreasing step. */
5c6f6a61 329 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
330 && int_cst_value ((*groups)->step) < int_cst_value (step))
8dfbf380 331 break;
332 }
333
5c205353 334 group = XNEW (struct mem_ref_group);
8dfbf380 335 group->base = base;
336 group->step = step;
337 group->refs = NULL;
76f32cd9 338 group->uid = ++last_mem_ref_group_uid;
8dfbf380 339 group->next = *groups;
340 *groups = group;
341
342 return group;
343}
344
345/* Records a memory reference MEM in GROUP with offset DELTA and write status
346 WRITE_P. The reference occurs in statement STMT. */
347
348static void
42acab1c 349record_ref (struct mem_ref_group *group, gimple *stmt, tree mem,
8dfbf380 350 HOST_WIDE_INT delta, bool write_p)
351{
76f32cd9 352 unsigned int last_mem_ref_uid = 0;
8dfbf380 353 struct mem_ref **aref;
354
355 /* Do not record the same address twice. */
356 for (aref = &group->refs; *aref; aref = &(*aref)->next)
357 {
76f32cd9 358 last_mem_ref_uid = (*aref)->uid;
359
8dfbf380 360 /* It need not be possible for a write reference to reuse the read
 361 prefetch, or vice versa. */
362 if (!WRITE_CAN_USE_READ_PREFETCH
363 && write_p
364 && !(*aref)->write_p)
365 continue;
366 if (!READ_CAN_USE_WRITE_PREFETCH
367 && !write_p
368 && (*aref)->write_p)
369 continue;
370
371 if ((*aref)->delta == delta)
372 return;
373 }
374
5c205353 375 (*aref) = XNEW (struct mem_ref);
8dfbf380 376 (*aref)->stmt = stmt;
377 (*aref)->mem = mem;
378 (*aref)->delta = delta;
379 (*aref)->write_p = write_p;
380 (*aref)->prefetch_before = PREFETCH_ALL;
381 (*aref)->prefetch_mod = 1;
5c205353 382 (*aref)->reuse_distance = 0;
8dfbf380 383 (*aref)->issue_prefetch_p = false;
384 (*aref)->group = group;
385 (*aref)->next = NULL;
5b5037b3 386 (*aref)->independent_p = false;
387 (*aref)->storent_p = false;
76f32cd9 388 (*aref)->uid = last_mem_ref_uid + 1;
8dfbf380 389
390 if (dump_file && (dump_flags & TDF_DETAILS))
76f32cd9 391 {
392 dump_mem_ref (dump_file, *aref);
393
394 fprintf (dump_file, " group %u ", group->uid);
395 dump_mem_details (dump_file, group->base, group->step, delta,
396 write_p);
397 }
8dfbf380 398}
399
400/* Release memory references in GROUPS. */
401
402static void
403release_mem_refs (struct mem_ref_group *groups)
404{
405 struct mem_ref_group *next_g;
406 struct mem_ref *ref, *next_r;
407
408 for (; groups; groups = next_g)
409 {
410 next_g = groups->next;
411 for (ref = groups->refs; ref; ref = next_r)
412 {
413 next_r = ref->next;
414 free (ref);
415 }
416 free (groups);
417 }
418}
419
420/* A structure used to pass arguments to idx_analyze_ref. */
421
422struct ar_data
423{
2e966e2a 424 class loop *loop; /* Loop of the reference. */
42acab1c 425 gimple *stmt; /* Statement of the reference. */
81d2a38f 426 tree *step; /* Step of the memory reference. */
8dfbf380 427 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
428};
429
430/* Analyzes a single INDEX of a memory reference to obtain information
 431 described in analyze_ref. Callback for for_each_index. */
432
433static bool
434idx_analyze_ref (tree base, tree *index, void *data)
435{
f0d6e81c 436 struct ar_data *ar_data = (struct ar_data *) data;
8dfbf380 437 tree ibase, step, stepsize;
81d2a38f 438 HOST_WIDE_INT idelta = 0, imult = 1;
8dfbf380 439 affine_iv iv;
440
76610704 441 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
81d2a38f 442 *index, &iv, true))
8dfbf380 443 return false;
444 ibase = iv.base;
445 step = iv.step;
446
0de36bdb 447 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
8dfbf380 448 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
449 {
450 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
451 ibase = TREE_OPERAND (ibase, 0);
452 }
453 if (cst_and_fits_in_hwi (ibase))
454 {
455 idelta += int_cst_value (ibase);
05db596e 456 ibase = build_int_cst (TREE_TYPE (ibase), 0);
8dfbf380 457 }
458
459 if (TREE_CODE (base) == ARRAY_REF)
460 {
461 stepsize = array_ref_element_size (base);
462 if (!cst_and_fits_in_hwi (stepsize))
463 return false;
464 imult = int_cst_value (stepsize);
f547ca12 465 step = fold_build2 (MULT_EXPR, sizetype,
466 fold_convert (sizetype, step),
467 fold_convert (sizetype, stepsize));
8dfbf380 468 idelta *= imult;
469 }
470
f547ca12 471 if (*ar_data->step == NULL_TREE)
472 *ar_data->step = step;
473 else
474 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
475 fold_convert (sizetype, *ar_data->step),
476 fold_convert (sizetype, step));
8dfbf380 477 *ar_data->delta += idelta;
478 *index = ibase;
479
480 return true;
481}
482
6dce7ee9 483/* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
8dfbf380 484 STEP are integer constants and iter is number of iterations of LOOP. The
6dce7ee9 485 reference occurs in statement STMT. Strips nonaddressable component
486 references from REF_P. */
8dfbf380 487
488static bool
2e966e2a 489analyze_ref (class loop *loop, tree *ref_p, tree *base,
81d2a38f 490 tree *step, HOST_WIDE_INT *delta,
42acab1c 491 gimple *stmt)
8dfbf380 492{
493 struct ar_data ar_data;
494 tree off;
495 HOST_WIDE_INT bit_offset;
6dce7ee9 496 tree ref = *ref_p;
8dfbf380 497
81d2a38f 498 *step = NULL_TREE;
8dfbf380 499 *delta = 0;
500
0e948838 501 /* First strip off the component references. Ignore bitfields.
 502 Also strip off the real and imaginary parts of a complex, so that
503 they can have the same base. */
504 if (TREE_CODE (ref) == REALPART_EXPR
505 || TREE_CODE (ref) == IMAGPART_EXPR
506 || (TREE_CODE (ref) == COMPONENT_REF
507 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
508 {
509 if (TREE_CODE (ref) == IMAGPART_EXPR)
510 *delta += int_size_in_bytes (TREE_TYPE (ref));
511 ref = TREE_OPERAND (ref, 0);
512 }
8dfbf380 513
6dce7ee9 514 *ref_p = ref;
515
8dfbf380 516 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
517 {
518 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
519 bit_offset = TREE_INT_CST_LOW (off);
520 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
48e1416a 521
8dfbf380 522 *delta += bit_offset / BITS_PER_UNIT;
523 }
524
525 *base = unshare_expr (ref);
526 ar_data.loop = loop;
527 ar_data.stmt = stmt;
528 ar_data.step = step;
529 ar_data.delta = delta;
530 return for_each_index (base, idx_analyze_ref, &ar_data);
531}
532
533/* Record a memory reference REF to the list REFS. The reference occurs in
5b5037b3 534 LOOP in statement STMT and it is a write if WRITE_P. Returns true if the
535 reference was recorded, false otherwise. */
8dfbf380 536
5b5037b3 537static bool
2e966e2a 538gather_memory_references_ref (class loop *loop, struct mem_ref_group **refs,
42acab1c 539 tree ref, bool write_p, gimple *stmt)
8dfbf380 540{
81d2a38f 541 tree base, step;
542 HOST_WIDE_INT delta;
8dfbf380 543 struct mem_ref_group *agrp;
544
5d4305e1 545 if (get_base_address (ref) == NULL)
546 return false;
547
6dce7ee9 548 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
5b5037b3 549 return false;
81d2a38f 550 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
551 if (step == NULL_TREE)
552 return false;
8dfbf380 553
bd62669e 554 /* Stop if the address of BASE could not be taken. */
09a6f6f5 555 if (may_be_nonaddressable_p (base))
556 return false;
557
7b64e7e0 558 /* Limit non-constant step prefetching only to the innermost loops and
559 only when the step is loop invariant in the entire loop nest. */
560 if (!cst_and_fits_in_hwi (step))
561 {
562 if (loop->inner != NULL)
563 {
564 if (dump_file && (dump_flags & TDF_DETAILS))
565 {
 566 fprintf (dump_file, "Memory expression %p\n", (void *) ref);
ffdaf8f1 567 print_generic_expr (dump_file, ref, TDF_SLIM);
 568 fprintf (dump_file, ":");
9af5ce0c 569 dump_mem_details (dump_file, base, step, delta, write_p);
7b64e7e0 570 fprintf (dump_file,
 571 "Ignoring %p, non-constant step prefetching is "
 572 "limited to innermost loops\n",
573 (void *) ref);
574 }
575 return false;
576 }
577 else
578 {
579 if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
580 {
581 if (dump_file && (dump_flags & TDF_DETAILS))
582 {
 583 fprintf (dump_file, "Memory expression %p\n", (void *) ref);
ffdaf8f1 584 print_generic_expr (dump_file, ref, TDF_SLIM);
7b64e7e0 585 fprintf (dump_file, ":");
9af5ce0c 586 dump_mem_details (dump_file, base, step, delta, write_p);
7b64e7e0 587 fprintf (dump_file,
 588 "Not prefetching, ignoring %p due to "
 589 "loop-variant step\n",
590 (void *) ref);
591 }
592 return false;
593 }
594 }
595 }
94ce9ff0 596
8dfbf380 597 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
598 are integer constants. */
599 agrp = find_or_create_group (refs, base, step);
600 record_ref (agrp, stmt, ref, delta, write_p);
5b5037b3 601
602 return true;
8dfbf380 603}
604
5b5037b3 605/* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
606 true if there are no other memory references inside the loop. */
8dfbf380 607
608static struct mem_ref_group *
2e966e2a 609gather_memory_references (class loop *loop, bool *no_other_refs, unsigned *ref_count)
8dfbf380 610{
611 basic_block *body = get_loop_body_in_dom_order (loop);
612 basic_block bb;
613 unsigned i;
75a70cf9 614 gimple_stmt_iterator bsi;
42acab1c 615 gimple *stmt;
75a70cf9 616 tree lhs, rhs;
8dfbf380 617 struct mem_ref_group *refs = NULL;
618
5b5037b3 619 *no_other_refs = true;
0ab353e1 620 *ref_count = 0;
5b5037b3 621
8dfbf380 622 /* Scan the loop body in order, so that earlier references precede the
623 later ones. */
624 for (i = 0; i < loop->num_nodes; i++)
625 {
626 bb = body[i];
627 if (bb->loop_father != loop)
628 continue;
629
75a70cf9 630 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
8dfbf380 631 {
75a70cf9 632 stmt = gsi_stmt (bsi);
5b5037b3 633
75a70cf9 634 if (gimple_code (stmt) != GIMPLE_ASSIGN)
5b5037b3 635 {
dd277d48 636 if (gimple_vuse (stmt)
75a70cf9 637 || (is_gimple_call (stmt)
638 && !(gimple_call_flags (stmt) & ECF_CONST)))
5b5037b3 639 *no_other_refs = false;
640 continue;
641 }
8dfbf380 642
adade307 643 if (! gimple_vuse (stmt))
644 continue;
645
75a70cf9 646 lhs = gimple_assign_lhs (stmt);
647 rhs = gimple_assign_rhs1 (stmt);
8dfbf380 648
649 if (REFERENCE_CLASS_P (rhs))
0ab353e1 650 {
5b5037b3 651 *no_other_refs &= gather_memory_references_ref (loop, &refs,
652 rhs, false, stmt);
0ab353e1 653 *ref_count += 1;
654 }
8dfbf380 655 if (REFERENCE_CLASS_P (lhs))
0ab353e1 656 {
5b5037b3 657 *no_other_refs &= gather_memory_references_ref (loop, &refs,
658 lhs, true, stmt);
0ab353e1 659 *ref_count += 1;
660 }
8dfbf380 661 }
662 }
663 free (body);
664
665 return refs;
666}
667
668/* Prune the prefetch candidate REF using the self-reuse. */
669
670static void
671prune_ref_by_self_reuse (struct mem_ref *ref)
672{
81d2a38f 673 HOST_WIDE_INT step;
674 bool backward;
675
 676 /* If the step size is non-constant, we cannot calculate prefetch_mod. */
677 if (!cst_and_fits_in_hwi (ref->group->step))
678 return;
679
680 step = int_cst_value (ref->group->step);
681
682 backward = step < 0;
8dfbf380 683
684 if (step == 0)
685 {
686 /* Prefetch references to invariant address just once. */
687 ref->prefetch_before = 1;
688 return;
689 }
690
691 if (backward)
692 step = -step;
693
694 if (step > PREFETCH_BLOCK)
695 return;
696
697 if ((backward && HAVE_BACKWARD_PREFETCH)
698 || (!backward && HAVE_FORWARD_PREFETCH))
699 {
700 ref->prefetch_before = 1;
701 return;
702 }
703
704 ref->prefetch_mod = PREFETCH_BLOCK / step;
705}
706
707/* Divides X by BY, rounding down. */
708
709static HOST_WIDE_INT
710ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
711{
712 gcc_assert (by > 0);
713
714 if (x >= 0)
a191a43c 715 return x / (HOST_WIDE_INT) by;
8dfbf380 716 else
a191a43c 717 return (x + (HOST_WIDE_INT) by - 1) / (HOST_WIDE_INT) by;
8dfbf380 718}
719
48e1416a 720/* Given a CACHE_LINE_SIZE and two inductive memory references
721 with a common STEP greater than CACHE_LINE_SIZE and an address
722 difference DELTA, compute the probability that they will fall
3a2f43cf 723 in different cache lines. Return true if the computed miss rate
724 is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
725 number of distinct iterations after which the pattern repeats itself.
e17cf2c8 726 ALIGN_UNIT is the unit of alignment in bytes. */
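/* For instance (hypothetical values): with CACHE_LINE_SIZE = 64,
   STEP = 96, DELTA = 32, DISTINCT_ITERS = 2 and ALIGN_UNIT = 32, there
   are (64 / 32) * 2 = 4 positions and at most (50 * 4) / 1000 = 0 of
   them may miss; already the pair of addresses (96, 128) falls into
   different cache lines, so the miss rate is rejected.  */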
727
3a2f43cf 728static bool
729is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
e17cf2c8 730 HOST_WIDE_INT step, HOST_WIDE_INT delta,
731 unsigned HOST_WIDE_INT distinct_iters,
732 int align_unit)
733{
734 unsigned align, iter;
3a2f43cf 735 int total_positions, miss_positions, max_allowed_miss_positions;
e17cf2c8 736 int address1, address2, cache_line1, cache_line2;
737
5a91155f 738 /* It always misses if delta is greater than or equal to the cache
739 line size. */
3a2f43cf 740 if (delta >= (HOST_WIDE_INT) cache_line_size)
741 return false;
5a91155f 742
e17cf2c8 743 miss_positions = 0;
3a2f43cf 744 total_positions = (cache_line_size / align_unit) * distinct_iters;
745 max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
48e1416a 746
e17cf2c8 747 /* Iterate through all possible alignments of the first
748 memory reference within its cache line. */
749 for (align = 0; align < cache_line_size; align += align_unit)
750
751 /* Iterate through all distinct iterations. */
752 for (iter = 0; iter < distinct_iters; iter++)
753 {
754 address1 = align + step * iter;
755 address2 = address1 + delta;
756 cache_line1 = address1 / cache_line_size;
757 cache_line2 = address2 / cache_line_size;
e17cf2c8 758 if (cache_line1 != cache_line2)
3a2f43cf 759 {
760 miss_positions += 1;
761 if (miss_positions > max_allowed_miss_positions)
762 return false;
763 }
e17cf2c8 764 }
3a2f43cf 765 return true;
e17cf2c8 766}
767
8dfbf380 768/* Prune the prefetch candidate REF using the reuse with BY.
769 If BY_IS_BEFORE is true, BY is before REF in the loop. */
770
771static void
772prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
773 bool by_is_before)
774{
81d2a38f 775 HOST_WIDE_INT step;
776 bool backward;
8dfbf380 777 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
778 HOST_WIDE_INT delta = delta_b - delta_r;
779 HOST_WIDE_INT hit_from;
780 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
e17cf2c8 781 HOST_WIDE_INT reduced_step;
782 unsigned HOST_WIDE_INT reduced_prefetch_block;
783 tree ref_type;
784 int align_unit;
8dfbf380 785
81d2a38f 786 /* If the step is non-constant we cannot calculate prefetch_before. */
 787 if (!cst_and_fits_in_hwi (ref->group->step))
 788 return;
790
791 step = int_cst_value (ref->group->step);
792
793 backward = step < 0;
 794
8dfbf380 796 if (delta == 0)
797 {
 798 /* If the references have the same address, prefetch only the
 799 former. */
800 if (by_is_before)
801 ref->prefetch_before = 0;
48e1416a 802
8dfbf380 803 return;
804 }
805
806 if (!step)
807 {
808 /* If the reference addresses are invariant and fall into the
809 same cache line, prefetch just the first one. */
810 if (!by_is_before)
811 return;
812
813 if (ddown (ref->delta, PREFETCH_BLOCK)
814 != ddown (by->delta, PREFETCH_BLOCK))
815 return;
816
817 ref->prefetch_before = 0;
818 return;
819 }
820
821 /* Only prune the reference that is behind in the array. */
822 if (backward)
823 {
824 if (delta > 0)
825 return;
826
827 /* Transform the data so that we may assume that the accesses
828 are forward. */
829 delta = - delta;
830 step = -step;
831 delta_r = PREFETCH_BLOCK - 1 - delta_r;
832 delta_b = PREFETCH_BLOCK - 1 - delta_b;
833 }
834 else
835 {
836 if (delta < 0)
837 return;
838 }
839
840 /* Check whether the two references are likely to hit the same cache
 841 line, and how distant the iterations in which that occurs are from
 842 each other. */
843
844 if (step <= PREFETCH_BLOCK)
845 {
846 /* The accesses are sure to meet. Let us check when. */
847 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
848 prefetch_before = (hit_from - delta_r + step - 1) / step;
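      /* E.g., with PREFETCH_BLOCK = 64, delta_r = 0, delta_b = 100 and
         step = 16: hit_from = 64 and prefetch_before = (64 + 15) / 16 = 4,
         i.e. prefetching REF pays off only in the first four iterations. */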
849
8234f090 850 /* Do not reduce prefetch_before if we meet beyond cache size. */
b1757d46 851 if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
8234f090 852 prefetch_before = PREFETCH_ALL;
8dfbf380 853 if (prefetch_before < ref->prefetch_before)
854 ref->prefetch_before = prefetch_before;
855
856 return;
857 }
858
48e1416a 859 /* A more complicated case with step > prefetch_block. First reduce
e17cf2c8 860 the ratio between the step and the cache line size to its simplest
48e1416a 861 terms. The resulting denominator will then represent the number of
862 distinct iterations after which each address will go back to its
863 initial location within the cache line. This computation assumes
e17cf2c8 864 that PREFETCH_BLOCK is a power of two. */
8dfbf380 865 prefetch_block = PREFETCH_BLOCK;
e17cf2c8 866 reduced_prefetch_block = prefetch_block;
867 reduced_step = step;
868 while ((reduced_step & 1) == 0
869 && reduced_prefetch_block > 1)
8dfbf380 870 {
e17cf2c8 871 reduced_step >>= 1;
872 reduced_prefetch_block >>= 1;
8dfbf380 873 }
874
8dfbf380 875 prefetch_before = delta / step;
876 delta %= step;
e17cf2c8 877 ref_type = TREE_TYPE (ref->mem);
878 align_unit = TYPE_ALIGN (ref_type) / 8;
3a2f43cf 879 if (is_miss_rate_acceptable (prefetch_block, step, delta,
880 reduced_prefetch_block, align_unit))
8dfbf380 881 {
8234f090 882 /* Do not reduce prefetch_before if we meet beyond cache size. */
883 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
884 prefetch_before = PREFETCH_ALL;
8dfbf380 885 if (prefetch_before < ref->prefetch_before)
886 ref->prefetch_before = prefetch_before;
887
888 return;
889 }
890
891 /* Try also the following iteration. */
892 prefetch_before++;
893 delta = step - delta;
3a2f43cf 894 if (is_miss_rate_acceptable (prefetch_block, step, delta,
895 reduced_prefetch_block, align_unit))
8dfbf380 896 {
897 if (prefetch_before < ref->prefetch_before)
898 ref->prefetch_before = prefetch_before;
899
900 return;
901 }
902
 903 /* The ref probably does not reuse BY. */
904 return;
905}
906
907/* Prune the prefetch candidate REF using the reuses with other references
908 in REFS. */
909
910static void
911prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
912{
913 struct mem_ref *prune_by;
914 bool before = true;
915
916 prune_ref_by_self_reuse (ref);
917
918 for (prune_by = refs; prune_by; prune_by = prune_by->next)
919 {
920 if (prune_by == ref)
921 {
922 before = false;
923 continue;
924 }
925
926 if (!WRITE_CAN_USE_READ_PREFETCH
927 && ref->write_p
928 && !prune_by->write_p)
929 continue;
930 if (!READ_CAN_USE_WRITE_PREFETCH
931 && !ref->write_p
932 && prune_by->write_p)
933 continue;
934
935 prune_ref_by_group_reuse (ref, prune_by, before);
936 }
937}
938
939/* Prune the prefetch candidates in GROUP using the reuse analysis. */
940
941static void
942prune_group_by_reuse (struct mem_ref_group *group)
943{
944 struct mem_ref *ref_pruned;
945
946 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
947 {
948 prune_ref_by_reuse (ref_pruned, group->refs);
949
950 if (dump_file && (dump_flags & TDF_DETAILS))
951 {
76f32cd9 952 dump_mem_ref (dump_file, ref_pruned);
8dfbf380 953
954 if (ref_pruned->prefetch_before == PREFETCH_ALL
955 && ref_pruned->prefetch_mod == 1)
956 fprintf (dump_file, " no restrictions");
957 else if (ref_pruned->prefetch_before == 0)
958 fprintf (dump_file, " do not prefetch");
959 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
960 fprintf (dump_file, " prefetch once");
961 else
962 {
963 if (ref_pruned->prefetch_before != PREFETCH_ALL)
964 {
965 fprintf (dump_file, " prefetch before ");
966 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
967 ref_pruned->prefetch_before);
968 }
969 if (ref_pruned->prefetch_mod != 1)
970 {
971 fprintf (dump_file, " prefetch mod ");
972 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
973 ref_pruned->prefetch_mod);
974 }
975 }
976 fprintf (dump_file, "\n");
977 }
978 }
979}
980
981/* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
982
983static void
984prune_by_reuse (struct mem_ref_group *groups)
985{
986 for (; groups; groups = groups->next)
987 prune_group_by_reuse (groups);
988}
989
990/* Returns true if we should issue prefetch for REF. */
991
992static bool
993should_issue_prefetch_p (struct mem_ref *ref)
994{
48956da3 995 /* Do we want to issue prefetches for non-constant strides? */
996 if (!cst_and_fits_in_hwi (ref->group->step) && PREFETCH_DYNAMIC_STRIDES == 0)
997 {
998 if (dump_file && (dump_flags & TDF_DETAILS))
999 fprintf (dump_file,
1000 "Skipping non-constant step for reference %u:%u\n",
1001 ref->group->uid, ref->uid);
1002 return false;
1003 }
1004
6dc01178 1005 /* Some processors may have a hardware prefetcher that may conflict with
1006 prefetch hints for a range of strides. Make sure we don't issue
1007 prefetches for such cases if the stride is within this particular
1008 range. */
1009 if (cst_and_fits_in_hwi (ref->group->step)
1010 && abs_hwi (int_cst_value (ref->group->step))
1011 < (HOST_WIDE_INT) PREFETCH_MINIMUM_STRIDE)
1012 {
1013 if (dump_file && (dump_flags & TDF_DETAILS))
1014 fprintf (dump_file,
28c2948a 1015 "Step for reference %u:%u (" HOST_WIDE_INT_PRINT_DEC
 1016 ") is less than the minimum required stride of %d\n",
6dc01178 1017 ref->group->uid, ref->uid, int_cst_value (ref->group->step),
1018 PREFETCH_MINIMUM_STRIDE);
1019 return false;
1020 }
1021
8dfbf380 1022 /* For now do not issue prefetches for references whose prefetching
 1023 would pay off only in the first few iterations. */
1024 if (ref->prefetch_before != PREFETCH_ALL)
5d68c00f 1025 {
1026 if (dump_file && (dump_flags & TDF_DETAILS))
76f32cd9 1027 fprintf (dump_file, "Ignoring reference %u:%u due to prefetch_before\n",
1028 ref->group->uid, ref->uid);
5d68c00f 1029 return false;
1030 }
8dfbf380 1031
5b5037b3 1032 /* Do not prefetch nontemporal stores. */
1033 if (ref->storent_p)
5d68c00f 1034 {
1035 if (dump_file && (dump_flags & TDF_DETAILS))
76f32cd9 1036 fprintf (dump_file, "Ignoring nontemporal store reference %u:%u\n",
 ref->group->uid, ref->uid);
5d68c00f 1037 return false;
1038 }
5b5037b3 1039
8dfbf380 1040 return true;
1041}
1042
1043/* Decide which of the prefetch candidates in GROUPS to prefetch.
1044 AHEAD is the number of iterations to prefetch ahead (which corresponds
1045 to the number of simultaneous instances of one prefetch running at a
 1046 time). UNROLL_FACTOR is the factor by which the loop is going to be
1047 unrolled. Returns true if there is anything to prefetch. */
1048
1049static bool
1050schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
1051 unsigned ahead)
1052{
53d4d5cc 1053 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
1054 unsigned slots_per_prefetch;
8dfbf380 1055 struct mem_ref *ref;
1056 bool any = false;
1057
53d4d5cc 1058 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
1059 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
8dfbf380 1060
53d4d5cc 1061 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
1062 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
1063 it will need a prefetch slot. */
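  /* E.g., AHEAD = 8 and UNROLL_FACTOR = 4 give (8 + 2) / 4 = 2 slots per
     prefetch instruction; adding UNROLL_FACTOR / 2 rounds the division
     to the nearest integer. */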
1064 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
8dfbf380 1065 if (dump_file && (dump_flags & TDF_DETAILS))
53d4d5cc 1066 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
1067 slots_per_prefetch);
8dfbf380 1068
1069 /* For now we just take memory references one by one and issue
1070 prefetches for as many as possible. The groups are sorted
1071 starting with the largest step, since the references with
334ec2d8 1072 large step are more likely to cause many cache misses. */
8dfbf380 1073
1074 for (; groups; groups = groups->next)
1075 for (ref = groups->refs; ref; ref = ref->next)
1076 {
1077 if (!should_issue_prefetch_p (ref))
1078 continue;
1079
c0a0de5e 1080 /* The loop is far from being sufficiently unrolled for this
 1081 prefetch. Do not generate the prefetch to avoid many redundant
1082 prefetches. */
1083 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1084 continue;
1085
53d4d5cc 1086 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
1087 and we unroll the loop UNROLL_FACTOR times, we need to insert
1088 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1089 iteration. */
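      /* E.g., UNROLL_FACTOR = 8 and PREFETCH_MOD = 3 give
         ceil (8 / 3) = 3 prefetch instructions per unrolled iteration. */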
8dfbf380 1090 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1091 / ref->prefetch_mod);
53d4d5cc 1092 prefetch_slots = n_prefetches * slots_per_prefetch;
1093
1094 /* If more than half of the prefetches would be lost anyway, do not
1095 issue the prefetch. */
1096 if (2 * remaining_prefetch_slots < prefetch_slots)
1097 continue;
1098
35673d39 1099 /* Stop prefetching if debug counter is activated. */
1100 if (!dbg_cnt (prefetch))
1101 continue;
1102
53d4d5cc 1103 ref->issue_prefetch_p = true;
76f32cd9 1104 if (dump_file && (dump_flags & TDF_DETAILS))
1105 fprintf (dump_file, "Decided to issue prefetch for reference %u:%u\n",
1106 ref->group->uid, ref->uid);
8dfbf380 1107
53d4d5cc 1108 if (remaining_prefetch_slots <= prefetch_slots)
1109 return true;
1110 remaining_prefetch_slots -= prefetch_slots;
8dfbf380 1111 any = true;
1112 }
1113
1114 return any;
1115}
1116
5da8318c 1117/* Return TRUE if no prefetch is going to be generated in the given
1118 GROUPS. */
1119
1120static bool
1121nothing_to_prefetch_p (struct mem_ref_group *groups)
1122{
1123 struct mem_ref *ref;
1124
1125 for (; groups; groups = groups->next)
1126 for (ref = groups->refs; ref; ref = ref->next)
1127 if (should_issue_prefetch_p (ref))
1128 return false;
1129
1130 return true;
1131}
1132
1133/* Estimate the number of prefetches in the given GROUPS.
1134 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
8dfbf380 1135
0ab353e1 1136static int
5da8318c 1137estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
8dfbf380 1138{
1139 struct mem_ref *ref;
5da8318c 1140 unsigned n_prefetches;
0ab353e1 1141 int prefetch_count = 0;
8dfbf380 1142
1143 for (; groups; groups = groups->next)
1144 for (ref = groups->refs; ref; ref = ref->next)
1145 if (should_issue_prefetch_p (ref))
5da8318c 1146 {
1147 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1148 / ref->prefetch_mod);
1149 prefetch_count += n_prefetches;
1150 }
8dfbf380 1151
0ab353e1 1152 return prefetch_count;
8dfbf380 1153}
1154
1155/* Issue prefetches for the reference REF into loop as decided before.
 1156 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
9ca2c29a 1157 is the factor by which LOOP was unrolled. */
8dfbf380 1158
1159static void
1160issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1161{
1162 HOST_WIDE_INT delta;
81d2a38f 1163 tree addr, addr_base, write_p, local, forward;
1a91d914 1164 gcall *prefetch;
75a70cf9 1165 gimple_stmt_iterator bsi;
8dfbf380 1166 unsigned n_prefetches, ap;
5c205353 1167 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
8dfbf380 1168
1169 if (dump_file && (dump_flags & TDF_DETAILS))
76f32cd9 1170 fprintf (dump_file, "Issued%s prefetch for reference %u:%u.\n",
5c205353 1171 nontemporal ? " nontemporal" : "",
76f32cd9 1172 ref->group->uid, ref->uid);
8dfbf380 1173
75a70cf9 1174 bsi = gsi_for_stmt (ref->stmt);
8dfbf380 1175
1176 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1177 / ref->prefetch_mod);
1178 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
75a70cf9 1179 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1180 true, NULL, true, GSI_SAME_STMT);
53d4d5cc 1181 write_p = ref->write_p ? integer_one_node : integer_zero_node;
2512209b 1182 local = nontemporal ? integer_zero_node : integer_three_node;
8dfbf380 1183
1184 for (ap = 0; ap < n_prefetches; ap++)
1185 {
81d2a38f 1186 if (cst_and_fits_in_hwi (ref->group->step))
1187 {
1188 /* Determine the address to prefetch. */
1189 delta = (ahead + ap * ref->prefetch_mod) *
1190 int_cst_value (ref->group->step);
2cc66f2a 1191 addr = fold_build_pointer_plus_hwi (addr_base, delta);
76f32cd9 1192 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1193 NULL, true, GSI_SAME_STMT);
81d2a38f 1194 }
1195 else
1196 {
 1197 /* The step size is non-constant but loop-invariant. We use the
 1198 heuristic of simply prefetching AHEAD iterations ahead. */
1199 forward = fold_build2 (MULT_EXPR, sizetype,
1200 fold_convert (sizetype, ref->group->step),
1201 fold_convert (sizetype, size_int (ahead)));
2cc66f2a 1202 addr = fold_build_pointer_plus (addr_base, forward);
81d2a38f 1203 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1204 NULL, true, GSI_SAME_STMT);
1205 }
fce0c3a6 1206
e62363b1 1207 if (addr_base != addr
1208 && TREE_CODE (addr_base) == SSA_NAME
fce0c3a6 1209 && TREE_CODE (addr) == SSA_NAME)
1210 {
1211 duplicate_ssa_name_ptr_info (addr, SSA_NAME_PTR_INFO (addr_base));
1212 /* As this isn't a plain copy we have to reset alignment
1213 information. */
1214 if (SSA_NAME_PTR_INFO (addr))
1215 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr));
1216 }
1217
8dfbf380 1218 /* Create the prefetch instruction. */
b9a16870 1219 prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
75a70cf9 1220 3, addr, write_p, local);
1221 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
8dfbf380 1222 }
1223}
1224
1225/* Issue prefetches for the references in GROUPS into loop as decided before.
 1226 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the
 1227 factor by which LOOP was unrolled. */
1228
1229static void
1230issue_prefetches (struct mem_ref_group *groups,
1231 unsigned unroll_factor, unsigned ahead)
1232{
1233 struct mem_ref *ref;
1234
1235 for (; groups; groups = groups->next)
1236 for (ref = groups->refs; ref; ref = ref->next)
1237 if (ref->issue_prefetch_p)
1238 issue_prefetch_ref (ref, unroll_factor, ahead);
1239}
1240
5b5037b3 1241/* Returns true if REF is a memory write for which a nontemporal store insn
1242 can be used. */
1243
1244static bool
1245nontemporal_store_p (struct mem_ref *ref)
1246{
3754d046 1247 machine_mode mode;
5b5037b3 1248 enum insn_code code;
1249
1250 /* REF must be a write that is not reused. We require it to be independent
 1251 of all other memory references in the loop, as the nontemporal stores may
1252 be reordered with respect to other memory references. */
1253 if (!ref->write_p
1254 || !ref->independent_p
1255 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1256 return false;
1257
1258 /* Check that we have the storent instruction for the mode. */
1259 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1260 if (mode == BLKmode)
1261 return false;
1262
d6bf3b14 1263 code = optab_handler (storent_optab, mode);
5b5037b3 1264 return code != CODE_FOR_nothing;
1265}
1266
1267/* If REF is a nontemporal store, we mark the corresponding modify statement
1268 and return true. Otherwise, we return false. */
1269
1270static bool
1271mark_nontemporal_store (struct mem_ref *ref)
1272{
1273 if (!nontemporal_store_p (ref))
1274 return false;
1275
1276 if (dump_file && (dump_flags & TDF_DETAILS))
76f32cd9 1277 fprintf (dump_file, "Marked reference %u:%u as a nontemporal store.\n",
1278 ref->group->uid, ref->uid);
5b5037b3 1279
75a70cf9 1280 gimple_assign_set_nontemporal_move (ref->stmt, true);
5b5037b3 1281 ref->storent_p = true;
1282
1283 return true;
1284}
1285
1286/* Issue a memory fence instruction after LOOP. */
1287
1288static void
2e966e2a 1289emit_mfence_after_loop (class loop *loop)
5b5037b3 1290{
f1f41a6c 1291 vec<edge> exits = get_loop_exit_edges (loop);
5b5037b3 1292 edge exit;
1a91d914 1293 gcall *call;
75a70cf9 1294 gimple_stmt_iterator bsi;
5b5037b3 1295 unsigned i;
1296
f1f41a6c 1297 FOR_EACH_VEC_ELT (exits, i, exit)
5b5037b3 1298 {
75a70cf9 1299 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
5b5037b3 1300
1301 if (!single_pred_p (exit->dest)
1302 /* If possible, we prefer not to insert the fence on other paths
 1303 in the CFG. */
1304 && !(exit->flags & EDGE_ABNORMAL))
1305 split_loop_exit_edge (exit);
75a70cf9 1306 bsi = gsi_after_labels (exit->dest);
5b5037b3 1307
75a70cf9 1308 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
5b5037b3 1309 }
1310
f1f41a6c 1311 exits.release ();
5b5037b3 1312 update_ssa (TODO_update_ssa_only_virtuals);
1313}
1314
1315/* Returns true if we can use storent in loop, false otherwise. */
1316
1317static bool
2e966e2a 1318may_use_storent_in_loop_p (class loop *loop)
5b5037b3 1319{
1320 bool ret = true;
1321
1322 if (loop->inner != NULL)
1323 return false;
1324
1325 /* If we must issue a mfence insn after using storent, check that there
1326 is a suitable place for it at each of the loop exits. */
1327 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1328 {
f1f41a6c 1329 vec<edge> exits = get_loop_exit_edges (loop);
5b5037b3 1330 unsigned i;
1331 edge exit;
1332
f1f41a6c 1333 FOR_EACH_VEC_ELT (exits, i, exit)
5b5037b3 1334 if ((exit->flags & EDGE_ABNORMAL)
34154e27 1335 && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
5b5037b3 1336 ret = false;
1337
f1f41a6c 1338 exits.release ();
5b5037b3 1339 }
1340
1341 return ret;
1342}
1343
1344/* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1345 references in the loop. */
1346
1347static void
2e966e2a 1348mark_nontemporal_stores (class loop *loop, struct mem_ref_group *groups)
5b5037b3 1349{
1350 struct mem_ref *ref;
1351 bool any = false;
1352
1353 if (!may_use_storent_in_loop_p (loop))
1354 return;
1355
1356 for (; groups; groups = groups->next)
1357 for (ref = groups->refs; ref; ref = ref->next)
1358 any |= mark_nontemporal_store (ref);
1359
1360 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1361 emit_mfence_after_loop (loop);
1362}
1363
8dfbf380 1364/* Determines whether we can profitably unroll LOOP FACTOR times, and if
 1365 this is the case, fills in DESC with the description of the number of
 1366 iterations. */
1367
1368static bool
2e966e2a 1369should_unroll_loop_p (class loop *loop, class tree_niter_desc *desc,
8dfbf380 1370 unsigned factor)
1371{
1372 if (!can_unroll_loop_p (loop, factor, desc))
1373 return false;
1374
1375 /* We only consider loops without control flow for unrolling. This is not
1376 a hard restriction -- tree_unroll_loop works with arbitrary loops
1377 as well; but the unrolling/prefetching is usually more profitable for
1378 loops consisting of a single basic block, and we want to limit the
1379 code growth. */
1380 if (loop->num_nodes > 2)
1381 return false;
1382
1383 return true;
1384}
1385
 1386/* Determine the factor by which to unroll LOOP, from the information
 1387 contained in the list of memory references REFS. The description of the
76f32cd9 1388 number of iterations of LOOP is stored in DESC. NINSNS is the number of
78f46d45 1389 insns of the LOOP. EST_NITER is the estimated number of iterations of
1390 the loop, or -1 if no estimate is available. */
8dfbf380 1391
1392static unsigned
2e966e2a 1393determine_unroll_factor (class loop *loop, struct mem_ref_group *refs,
1394 unsigned ninsns, class tree_niter_desc *desc,
78f46d45 1395 HOST_WIDE_INT est_niter)
8dfbf380 1396{
53d4d5cc 1397 unsigned upper_bound;
1398 unsigned nfactor, factor, mod_constraint;
8dfbf380 1399 struct mem_ref_group *agp;
1400 struct mem_ref *ref;
1401
53d4d5cc 1402 /* First check whether the loop is not too large to unroll. We ignore
1403 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1404 from unrolling them enough to make exactly one cache line covered by each
1405 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1406 us from unrolling the loops too many times in cases where we only expect
1407 gains from better scheduling and decreasing loop overhead, which is not
1408 the case here. */
1409 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
78f46d45 1410
1411 /* If we unrolled the loop more times than it iterates, the unrolled version
 1412 of the loop would never be entered. */
1413 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1414 upper_bound = est_niter;
1415
53d4d5cc 1416 if (upper_bound <= 1)
8dfbf380 1417 return 1;
1418
53d4d5cc 1419 /* Choose the factor so that we may prefetch each cache line just once,
1420 but bound the unrolling by UPPER_BOUND. */
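  /* E.g., references with PREFETCH_MOD 2 and 3 ask for an unroll factor
     of lcm (2, 3) = 6, as long as that does not exceed UPPER_BOUND. */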
1421 factor = 1;
8dfbf380 1422 for (agp = refs; agp; agp = agp->next)
1423 for (ref = agp->refs; ref; ref = ref->next)
53d4d5cc 1424 if (should_issue_prefetch_p (ref))
1425 {
1426 mod_constraint = ref->prefetch_mod;
1427 nfactor = least_common_multiple (mod_constraint, factor);
1428 if (nfactor <= upper_bound)
1429 factor = nfactor;
1430 }
8dfbf380 1431
1432 if (!should_unroll_loop_p (loop, desc, factor))
1433 return 1;
1434
1435 return factor;
1436}
1437
5c205353 1438/* Returns the total volume of the memory references REFS, taking into account
1439 reuses in the innermost loop and cache line size. TODO -- we should also
1440 take into account reuses across the iterations of the loops in the loop
1441 nest. */
1442
1443static unsigned
1444volume_of_references (struct mem_ref_group *refs)
1445{
1446 unsigned volume = 0;
1447 struct mem_ref_group *gr;
1448 struct mem_ref *ref;
1449
1450 for (gr = refs; gr; gr = gr->next)
1451 for (ref = gr->refs; ref; ref = ref->next)
1452 {
1453 /* Almost always reuses another value? */
1454 if (ref->prefetch_before != PREFETCH_ALL)
1455 continue;
1456
1457 /* If several iterations access the same cache line, use the size of
1458 the line divided by this number. Otherwise, a cache line is
1459 accessed in each iteration. TODO -- in the latter case, we should
 1460 take the size of the reference into account, rounding it up to a
 1461 multiple of the cache line size. */
1462 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1463 }
1464 return volume;
1465}
1466
1467/* Returns the volume of memory references accessed across VEC iterations of
1468 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1469 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
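/* E.g., for a distance vector (0, 2, 3), the first nonzero entry belongs
   to the second loop, so the reuse spans roughly 2 * LOOP_SIZES[1] of
   accessed data; the subloop entries of the vector are ignored.  */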
1470
1471static unsigned
1472volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1473{
1474 unsigned i;
1475
1476 for (i = 0; i < n; i++)
1477 if (vec[i] != 0)
1478 break;
1479
1480 if (i == n)
1481 return 0;
1482
1483 gcc_assert (vec[i] > 0);
1484
1485 /* We ignore the parts of the distance vector in subloops, since usually
1486 the numbers of iterations are much smaller. */
1487 return loop_sizes[i] * vec[i];
1488}
1489
 1490/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
 1491 at the positions corresponding to the loops of the steps. N is the depth
 1492 of the considered loop nest, and LOOP is its innermost loop. */
1493
1494static void
1495add_subscript_strides (tree access_fn, unsigned stride,
2e966e2a 1496 HOST_WIDE_INT *strides, unsigned n, class loop *loop)
5c205353 1497{
2e966e2a 1498 class loop *aloop;
5c205353 1499 tree step;
1500 HOST_WIDE_INT astep;
1501 unsigned min_depth = loop_depth (loop) - n;
1502
1503 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1504 {
1505 aloop = get_chrec_loop (access_fn);
1506 step = CHREC_RIGHT (access_fn);
1507 access_fn = CHREC_LEFT (access_fn);
1508
1509 if ((unsigned) loop_depth (aloop) <= min_depth)
1510 continue;
1511
35ec552a 1512 if (tree_fits_shwi_p (step))
fcb97e84 1513 astep = tree_to_shwi (step);
5c205353 1514 else
1515 astep = L1_CACHE_LINE_SIZE;
1516
1517 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1519 }
1520}
1521
1522/* Returns the volume of memory references accessed between two consecutive
1523 self-reuses of the reference DR. We consider the subscripts of DR in N
1524 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1525 loops. LOOP is the innermost loop of the current loop nest. */
1526
1527static unsigned
1528self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
2e966e2a 1529 class loop *loop)
5c205353 1530{
1531 tree stride, access_fn;
1532 HOST_WIDE_INT *strides, astride;
f1f41a6c 1533 vec<tree> access_fns;
5c205353 1534 tree ref = DR_REF (dr);
1535 unsigned i, ret = ~0u;
1536
1537 /* In the following example:
1538
1539 for (i = 0; i < N; i++)
1540 for (j = 0; j < N; j++)
1541 use (a[j][i]);
1542 the same cache line is accessed each N steps (except if the change from
1543 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1544 we cannot rely purely on the results of the data dependence analysis.
1545
1546 Instead, we compute the stride of the reference in each loop, and consider
 1547 the innermost loop in which the stride is less than the cache size. */
1548
1549 strides = XCNEWVEC (HOST_WIDE_INT, n);
1550 access_fns = DR_ACCESS_FNS (dr);
1551
f1f41a6c 1552 FOR_EACH_VEC_ELT (access_fns, i, access_fn)
5c205353 1553 {
1554 /* Keep track of the reference corresponding to the subscript, so that we
1555 know its stride. */
1556 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1557 ref = TREE_OPERAND (ref, 0);
48e1416a 1558
5c205353 1559 if (TREE_CODE (ref) == ARRAY_REF)
1560 {
1561 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
cd4547bf 1562 if (tree_fits_uhwi_p (stride))
6a0712d4 1563 astride = tree_to_uhwi (stride);
5c205353 1564 else
1565 astride = L1_CACHE_LINE_SIZE;
1566
1567 ref = TREE_OPERAND (ref, 0);
1568 }
1569 else
1570 astride = 1;
1571
1572 add_subscript_strides (access_fn, astride, strides, n, loop);
1573 }
1574
1575 for (i = n; i-- > 0; )
1576 {
1577 unsigned HOST_WIDE_INT s;
1578
1579 s = strides[i] < 0 ? -strides[i] : strides[i];
1580
1581 if (s < (unsigned) L1_CACHE_LINE_SIZE
1582 && (loop_sizes[i]
1583 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1584 {
1585 ret = loop_sizes[i];
1586 break;
1587 }
1588 }
1589
1590 free (strides);
1591 return ret;
1592}
1593
1594/* Determines the distance till the first reuse of each reference in REFS
5b5037b3 1595 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
b920ee38 1596 memory references in the loop. Return false if the analysis fails. */
5c205353 1597
b920ee38 1598static bool
2e966e2a 1599determine_loop_nest_reuse (class loop *loop, struct mem_ref_group *refs,
5b5037b3 1600 bool no_other_refs)
5c205353 1601{
2e966e2a 1602 class loop *nest, *aloop;
1e094109 1603 vec<data_reference_p> datarefs = vNULL;
1604 vec<ddr_p> dependences = vNULL;
5c205353 1605 struct mem_ref_group *gr;
5b5037b3 1606 struct mem_ref *ref, *refb;
cf58e90a 1607 auto_vec<loop_p> vloops;
5c205353 1608 unsigned *loop_data_size;
1609 unsigned i, j, n;
1610 unsigned volume, dist, adist;
1611 HOST_WIDE_INT vol;
1612 data_reference_p dr;
1613 ddr_p dep;
1614
1615 if (loop->inner)
b920ee38 1616 return true;
5c205353 1617
 1618 /* Find the outermost loop of the loop nest of LOOP (we require that
1619 there are no sibling loops inside the nest). */
1620 nest = loop;
1621 while (1)
1622 {
1623 aloop = loop_outer (nest);
1624
1625 if (aloop == current_loops->tree_root
1626 || aloop->inner->next)
1627 break;
1628
1629 nest = aloop;
1630 }

  /* For each loop, determine the amount of data accessed in each iteration.
     We use this to estimate whether the reference is evicted from the
     cache before its reuse.  */
  find_loop_nest (nest, &vloops);
  n = vloops.length ();
  loop_data_size = XNEWVEC (unsigned, n);
  volume = volume_of_references (refs);
  i = n;
  while (i-- != 0)
    {
      loop_data_size[i] = volume;
      /* Bound the volume by the L2 cache size, since above this bound,
         all dependence distances are equivalent.  */
      if (volume > L2_CACHE_SIZE_BYTES)
        continue;

      aloop = vloops[i];
      vol = estimated_stmt_executions_int (aloop);
      if (vol == -1)
        vol = expected_loop_iterations (aloop);
      volume *= vol;
    }

  /* Prepare the references in the form suitable for data dependence
     analysis.  We ignore unanalyzable data references (the results
     are used just as a heuristic to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
        dr = create_data_ref (loop_preheader_edge (nest),
                              loop_containing_stmt (ref->stmt),
                              ref->mem, ref->stmt, !ref->write_p, false);

        if (dr)
          {
            ref->reuse_distance = volume;
            dr->aux = ref;
            datarefs.safe_push (dr);
          }
        else
          no_other_refs = false;
      }

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      dist = self_reuse_distance (dr, loop_data_size, n, loop);
      ref = (struct mem_ref *) dr->aux;
      if (ref->reuse_distance > dist)
        ref->reuse_distance = dist;

      if (no_other_refs)
        ref->independent_p = true;
    }

  if (!compute_all_dependences (datarefs, &dependences, vloops, true))
    return false;

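  /* Refine the reuse distances using the dependence distances between
     pairs of references, and record which references are independent.  */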
  FOR_EACH_VEC_ELT (dependences, i, dep)
    {
      if (DDR_ARE_DEPENDENT (dep) == chrec_known)
        continue;

      ref = (struct mem_ref *) DDR_A (dep)->aux;
      refb = (struct mem_ref *) DDR_B (dep)->aux;

      if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
          || DDR_COULD_BE_INDEPENDENT_P (dep)
          || DDR_NUM_DIST_VECTS (dep) == 0)
        {
          /* If the dependence cannot be analyzed, assume that there might be
             a reuse.  */
          dist = 0;

          ref->independent_p = false;
          refb->independent_p = false;
        }
      else
        {
          /* The distance vectors are normalized to be always lexicographically
             positive, hence we cannot tell just from them whether DDR_A comes
             before DDR_B or vice versa.  However, it does not matter here --
             if DDR_A is close to DDR_B, then it is either reused in
             DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
             in cache (and marking it as nontemporal would not affect
             anything).  */

          dist = volume;
          for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
            {
              adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
                                             loop_data_size, n);

              /* If this is a dependence in the innermost loop (i.e., the
                 distances in all superloops are zero) and it is not
                 the trivial self-dependence with distance zero, record that
                 the references are not completely independent.  */
              if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
                  && (ref != refb
                      || DDR_DIST_VECT (dep, j)[n-1] != 0))
                {
                  ref->independent_p = false;
                  refb->independent_p = false;
                }

              /* Ignore accesses closer than
                 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
                 so that we use nontemporal prefetches e.g. if a single memory
                 location is accessed several times in a single iteration of
                 the loop.  */
              if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
                continue;

              if (adist < dist)
                dist = adist;
            }
        }

      if (ref->reuse_distance > dist)
        ref->reuse_distance = dist;
      if (refb->reuse_distance > dist)
        refb->reuse_distance = dist;
    }

  free_dependence_relations (dependences);
  free_data_refs (datarefs);
  free (loop_data_size);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reuse distances:\n");
      for (gr = refs; gr; gr = gr->next)
        for (ref = gr->refs; ref; ref = ref->next)
          fprintf (dump_file, " reference %u:%u distance %u\n",
                   ref->group->uid, ref->uid, ref->reuse_distance);
    }

  return true;
}

/* Determine whether or not the trip count to ahead ratio is too small based
   on profitability considerations.
   AHEAD: the iteration ahead distance,
   EST_NITER: the estimated trip count.  */

static bool
trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
{
  /* Assume the trip count to ahead ratio is big enough if the trip count
     could not be estimated at compile time.  */
  if (est_niter < 0)
    return false;

  if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- loop estimated to roll only %d times\n",
                 (int) est_niter);
      return true;
    }

  return false;
}
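
/* As an illustration: with TRIP_COUNT_TO_AHEAD_RATIO == 4 and AHEAD == 20
   (hypothetical values), a loop with an estimated trip count below 80 is
   rejected, since a large fraction of the prefetches would target
   iterations past the end of the loop.  */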

/* Determine whether or not the number of memory references in the loop is
   reasonable based on profitability and compilation time considerations.
   NINSNS: estimated number of instructions in the loop,
   MEM_REF_COUNT: total number of memory references in the loop.  */

static bool
mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
{
  int insn_to_mem_ratio;

  if (mem_ref_count == 0)
    return false;

  /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
     (compute_all_dependences) have high costs based on quadratic complexity.
     To avoid huge compilation time, we give up prefetching if mem_ref_count
     is too large.  */
  if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
    return false;

  /* Prefetching improves performance by overlapping cache-missing
     memory accesses with CPU operations.  If the loop does not have
     enough CPU operations to overlap with memory operations, prefetching
     won't give a significant benefit.  One approximate way of checking
     this is to require the ratio of instructions to memory references to
     be above a certain limit.  This approximation works well in practice.
     TODO: Implement a more precise computation by estimating the time
     for each CPU or memory op in the loop.  Time estimates for memory ops
     should account for cache misses.  */
  insn_to_mem_ratio = ninsns / mem_ref_count;

  if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
                 insn_to_mem_ratio);
      return false;
    }

  return true;
}
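
/* For example, with PREFETCH_MIN_INSN_TO_MEM_RATIO == 3 (a hypothetical
   value), a loop with NINSNS == 20 and MEM_REF_COUNT == 8 has a ratio of 2
   and is rejected: there is too little computation to hide the misses
   behind.  */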

/* Determine whether or not the instruction to prefetch ratio in the loop is
   too small based on profitability considerations.
   NINSNS: estimated number of instructions in the loop,
   PREFETCH_COUNT: an estimate of the number of prefetches,
   UNROLL_FACTOR: the factor to unroll the loop if prefetching.  */

static bool
insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
                                    unsigned unroll_factor)
{
  int insn_to_prefetch_ratio;

  /* Prefetching most likely causes performance degradation when the instruction
     to prefetch ratio is too small.  Too many prefetch instructions in a loop
     may reduce the I-cache performance.
     (unroll_factor * ninsns) is used to estimate the number of instructions in
     the unrolled loop.  This implementation is a bit simplistic -- the number
     of issued prefetch instructions is also affected by unrolling.  So,
     prefetch_mod and the unroll factor should be taken into account when
     determining prefetch_count.  Also, the number of insns of the unrolled
     loop will usually be significantly smaller than the number of insns of the
     original loop * unroll_factor (at least the induction variable increases
     and the exit branches will get eliminated), so it might be better to use
     tree_estimate_loop_size + estimated_unrolled_size.  */
  insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
  if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
                 insn_to_prefetch_ratio);
      return true;
    }

  return false;
}
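
/* For example, with MIN_INSN_TO_PREFETCH_RATIO == 9 (a hypothetical value),
   UNROLL_FACTOR == 4, NINSNS == 20 and PREFETCH_COUNT == 10, the ratio is
   (4 * 20) / 10 == 8, so prefetching would be rejected.  */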


/* Issue prefetch instructions for array references in LOOP.  Returns
   true if the LOOP was unrolled.  */

static bool
loop_prefetch_arrays (class loop *loop)
{
  struct mem_ref_group *refs;
  unsigned ahead, ninsns, time, unroll_factor;
  HOST_WIDE_INT est_niter;
  class tree_niter_desc desc;
  bool unrolled = false, no_other_refs;
  unsigned prefetch_count;
  unsigned mem_ref_count;

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "  ignored (cold area)\n");
      return false;
    }

  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
  time = tree_num_loop_insns (loop, &eni_time_weights);
  if (time == 0)
    return false;

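  /* AHEAD is the number of iterations ahead at which a prefetch must be
     issued so that the data arrives in time: the ceiling of the prefetch
     latency divided by the estimated time of one loop iteration.  */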
  ahead = (PREFETCH_LATENCY + time - 1) / time;
  est_niter = estimated_stmt_executions_int (loop);
  if (est_niter == -1)
    est_niter = likely_max_stmt_executions_int (loop);

  /* Prefetching is not likely to be profitable if the trip count to ahead
     ratio is too small.  */
  if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
    return false;

  ninsns = tree_num_loop_insns (loop, &eni_size_weights);

  /* Step 1: gather the memory references.  */
  refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);

  /* Give up prefetching if the number of memory references in the
     loop is not reasonable based on profitability and compilation time
     considerations.  */
  if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
    goto fail;

  /* Step 2: estimate the reuse effects.  */
  prune_by_reuse (refs);

  if (nothing_to_prefetch_p (refs))
    goto fail;

  if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
    goto fail;

  /* Step 3: determine the unroll factor.  */
  unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
                                           est_niter);

  /* Estimate the prefetch count for the unrolled loop.  */
  prefetch_count = estimate_prefetch_count (refs, unroll_factor);
  if (prefetch_count == 0)
    goto fail;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
             HOST_WIDE_INT_PRINT_DEC "\n"
             "insn count %d, mem ref count %d, prefetch count %d\n",
             ahead, unroll_factor, est_niter,
             ninsns, mem_ref_count, prefetch_count);

  /* Prefetching is not likely to be profitable if the instruction to prefetch
     ratio is too small.  */
  if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
                                          unroll_factor))
    goto fail;

  mark_nontemporal_stores (loop, refs);

  /* Step 4: what to prefetch?  */
  if (!schedule_prefetches (refs, unroll_factor, ahead))
    goto fail;

  /* Step 5: unroll the loop.  TODO -- peeling of first and last few
     iterations so that we do not issue superfluous prefetches.  */
  if (unroll_factor != 1)
    {
      tree_unroll_loop (loop, unroll_factor,
                        single_dom_exit (loop), &desc);
      unrolled = true;
    }

  /* Step 6: issue the prefetches.  */
  issue_prefetches (refs, unroll_factor, ahead);

fail:
  release_mem_refs (refs);
  return unrolled;
}
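
/* As an illustration of the overall transformation (a sketch, not the exact
   output of this pass): for

     for (i = 0; i < n; i++)
       sum += a[i];

   with AHEAD == 8 and no unrolling, steps 4--6 conceptually produce

     for (i = 0; i < n; i++)
       {
         __builtin_prefetch (&a[i + 8], 0, 3);
         sum += a[i];
       }  */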

/* Issue prefetch instructions for array references in loops.  */

unsigned int
tree_ssa_prefetch_arrays (void)
{
  class loop *loop;
  bool unrolled = false;
  int todo_flags = 0;

  if (!targetm.have_prefetch ()
      /* It is possible to ask the compiler for, say, -mtune=i486
         -march=pentium4.  -mtune=i486 causes us to have PREFETCH_BLOCK 0,
         since this is part of the processor costs and the i486 does not have
         prefetch, but -march=pentium4 causes targetm.have_prefetch to be
         true.  Ugh.  */
      || PREFETCH_BLOCK == 0)
    return 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Prefetching parameters:\n");
      fprintf (dump_file, "    simultaneous prefetches: %d\n",
               SIMULTANEOUS_PREFETCHES);
      fprintf (dump_file, "    prefetch latency: %d\n", PREFETCH_LATENCY);
      fprintf (dump_file, "    prefetch block size: %d\n", PREFETCH_BLOCK);
      fprintf (dump_file, "    L1 cache size: %d lines, %d kB\n",
               L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
      fprintf (dump_file, "    L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
      fprintf (dump_file, "    L2 cache size: %d kB\n", L2_CACHE_SIZE);
      fprintf (dump_file, "    min insn-to-prefetch ratio: %d\n",
               MIN_INSN_TO_PREFETCH_RATIO);
      fprintf (dump_file, "    min insn-to-mem ratio: %d\n",
               PREFETCH_MIN_INSN_TO_MEM_RATIO);
      fprintf (dump_file, "\n");
    }

  initialize_original_copy_tables ();

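  /* Make sure a declaration of __builtin_prefetch is available, creating
     one if the front end did not provide it.  */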
  if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
    {
      tree type = build_function_type_list (void_type_node,
                                            const_ptr_type_node, NULL_TREE);
      tree decl = add_builtin_function ("__builtin_prefetch", type,
                                        BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
                                        NULL, NULL_TREE);
      DECL_IS_NOVOPS (decl) = true;
      set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
    }

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Processing loop %d:\n", loop->num);

      unrolled |= loop_prefetch_arrays (loop);

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "\n\n");
    }

  if (unrolled)
    {
      scev_reset ();
      todo_flags |= TODO_cleanup_cfg;
    }

  free_original_copy_tables ();
  return todo_flags;
}

/* Prefetching.  */

namespace {

const pass_data pass_data_loop_prefetch =
{
  GIMPLE_PASS, /* type */
  "aprefetch", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_PREFETCH, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_loop_prefetch : public gimple_opt_pass
{
public:
  pass_loop_prefetch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_prefetch_loop_arrays > 0; }
  virtual unsigned int execute (function *);

}; // class pass_loop_prefetch

unsigned int
pass_loop_prefetch::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  if ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) != 0)
    {
      static bool warned = false;

      if (!warned)
        {
          warning (OPT_Wdisabled_optimization,
                   "%<l1-cache-size%> parameter value %d is not a power of two",
                   PREFETCH_BLOCK);
          warned = true;
        }
      return 0;
    }

  return tree_ssa_prefetch_arrays ();
}

} // anon namespace

gimple_opt_pass *
make_pass_loop_prefetch (gcc::context *ctxt)
{
  return new pass_loop_prefetch (ctxt);
}