/* Array prefetching.
   Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "output.h"
#include "tree-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "recog.h"
#include "hashtab.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-data-ref.h"


/* FIXME: Needed for optabs, but this should all be moved to a TBD interface
   between the GIMPLE and RTL worlds.  */
#include "expr.h"
#include "optabs.h"

/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
        {
          a[255] = ...;         (0)
          a[i] = ...;           (1)
          a[i + 64] = ...;      (2)
          a[16*i] = ...;        (3)
          a[187*i] = ...;       (4)
          a[187*i + 50] = ...;  (5)
        }

       (0) obviously has PREFETCH_BEFORE 1
       (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
           location 64 iterations before it, and PREFETCH_MOD 64 (since
           it hits the same cache line otherwise).
       (2) has PREFETCH_MOD 64
       (3) has PREFETCH_MOD 4, since its step of 16 bytes covers a 64-byte
           cache line in 4 iterations (see also the illustration after this
           comment).
       (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
           the cache line accessed by (4) is the same with probability only
           7/32.
       (5) has PREFETCH_MOD 1 as well.

      Additionally, we use data dependence analysis to determine for each
      reference the distance until the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.

   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and the size of the loop.

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by the machine description).  We issue as many prefetches as possible
      while still within this bound (starting with those with the lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?

   A cost model is implemented to determine whether or not prefetching is
   profitable for a given loop.  The cost model has three heuristics:

   1. Function trip_count_to_ahead_ratio_too_small_p implements a
      heuristic that determines whether or not the loop has too few
      iterations (compared to ahead).  Prefetching is not likely to be
      beneficial if the trip count to ahead ratio is below a certain
      minimum.

   2. Function mem_ref_count_reasonable_p implements a heuristic that
      determines whether the given loop has enough CPU ops that can be
      overlapped with cache missing memory ops.  If not, the loop
      won't benefit from prefetching.  In the implementation,
      prefetching is not considered beneficial if the ratio between
      the instruction count and the mem ref count is below a certain
      minimum.

   3. Function insn_to_prefetch_ratio_too_small_p implements a
      heuristic that disables prefetching in a loop if the prefetching
      cost is above a certain limit.  The relative prefetching cost is
      estimated by taking the ratio between the prefetch count and the
      total instruction count (this models the I-cache cost).

   The limits used in these heuristics are defined as parameters with
   reasonable default values.  Machine-specific default values will be
   added later.

   Some other TODO:
      -- write and use more general reuse analysis (that could be also used
         in other cache aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by the user
         (now we just ignore them; at the very least we should avoid
         optimizing loops in which the user put their own prefetches)
      -- we assume cache line size alignment of arrays; this could be
         improved.  */

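/* For illustration only: with the 64-byte cache line assumed above,
   reference (3) from the example, a[16*i], has PREFETCH_MOD 4, so the
   effect the pass aims for is roughly

     for (i = 0; i < max; i++)
       {
         if (i % 4 == 0)
           __builtin_prefetch (&a[16 * (i + ahead)], 1, 3);
         a[16*i] = ...;
       }

   where "ahead" is the prefetch distance chosen in step 3).  In practice
   the pass unrolls the loop by PREFETCH_MOD instead of emitting the
   conditional, and the locality hint depends on the reuse distance.  */
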
/* Magic constants follow.  These should be replaced by machine specific
   numbers.  */

/* True if write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have a forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have a backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if a single memory location
   is accessed several times in a single iteration of the loop.  */
#define NONTEMPORAL_FRACTION 16

/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif

/* It is not profitable to prefetch when the trip count is not at
   least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
   For example, in a loop with a prefetch ahead distance of 10,
   supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
   profitable to prefetch when the trip count is greater than or equal to
   40.  In that case, 30 out of the 40 iterations will benefit from
   prefetching.  */

#ifndef TRIP_COUNT_TO_AHEAD_RATIO
#define TRIP_COUNT_TO_AHEAD_RATIO 4
#endif

/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;                    /* Base of the reference.  */
  tree step;                    /* Step of the reference.  */
  struct mem_ref *refs;         /* References in the group.  */
  struct mem_ref_group *next;   /* Next group of references.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)

/* Do not generate a prefetch if the unroll factor is significantly less
   than what is required by the prefetch.  This is to avoid redundant
   prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
   2, prefetching requires unrolling the loop 16 times, but
   the loop is actually unrolled twice.  In this case (ratio = 8),
   prefetching is not likely to be beneficial.  */

#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif

/* Some of the prefetch computations have quadratic complexity.  We want to
   avoid huge compile times and, therefore, want to limit the number of
   memory references per loop where we consider prefetching.  */

#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
#endif

/* The memory reference.  */

struct mem_ref
{
  gimple stmt;                  /* Statement in which the reference appears.  */
  tree mem;                     /* The reference.  */
  HOST_WIDE_INT delta;          /* Constant offset of the reference.  */
  struct mem_ref_group *group;  /* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
                                /* Prefetch only each PREFETCH_MOD-th
                                   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
                                /* Prefetch only first PREFETCH_BEFORE
                                   iterations.  */
  unsigned reuse_distance;      /* The amount of data accessed before the first
                                   reuse of this value.  */
  struct mem_ref *next;         /* The next reference in the group.  */
  unsigned write_p : 1;         /* Is it a write?  */
  unsigned independent_p : 1;   /* True if the reference is independent of
                                   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;        /* Should we really issue the prefetch?  */
  unsigned storent_p : 1;       /* True if we changed the store to a
                                   nontemporal one.  */
};

75c40d56 284/* Dumps information about reference REF to FILE. */
b076a3fd
ZD
285
286static void
287dump_mem_ref (FILE *file, struct mem_ref *ref)
288{
289 fprintf (file, "Reference %p:\n", (void *) ref);
290
291 fprintf (file, " group %p (base ", (void *) ref->group);
292 print_generic_expr (file, ref->group->base, TDF_SLIM);
293 fprintf (file, ", step ");
81f32326
CB
294 if (cst_and_fits_in_hwi (ref->group->step))
295 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
296 else
297 print_generic_expr (file, ref->group->step, TDF_TREE);
b076a3fd
ZD
298 fprintf (file, ")\n");
299
e324a72f 300 fprintf (file, " delta ");
b076a3fd
ZD
301 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
302 fprintf (file, "\n");
303
304 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
305
306 fprintf (file, "\n");
307}
308
309/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
310 exist. */
311
312static struct mem_ref_group *
81f32326 313find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
b076a3fd
ZD
314{
315 struct mem_ref_group *group;
316
317 for (; *groups; groups = &(*groups)->next)
318 {
81f32326 319 if (operand_equal_p ((*groups)->step, step, 0)
b076a3fd
ZD
320 && operand_equal_p ((*groups)->base, base, 0))
321 return *groups;
322
81f32326
CB
323 /* If step is an integer constant, keep the list of groups sorted
324 by decreasing step. */
325 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
326 && int_cst_value ((*groups)->step) < int_cst_value (step))
b076a3fd
ZD
327 break;
328 }
329
5417e022 330 group = XNEW (struct mem_ref_group);
b076a3fd
ZD
331 group->base = base;
332 group->step = step;
333 group->refs = NULL;
334 group->next = *groups;
335 *groups = group;
336
337 return group;
338}
339
340/* Records a memory reference MEM in GROUP with offset DELTA and write status
341 WRITE_P. The reference occurs in statement STMT. */
342
343static void
726a989a 344record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
b076a3fd
ZD
345 HOST_WIDE_INT delta, bool write_p)
346{
347 struct mem_ref **aref;
348
349 /* Do not record the same address twice. */
350 for (aref = &group->refs; *aref; aref = &(*aref)->next)
351 {
      /* It need not be possible for a write reference to reuse a read
         prefetch, or vice versa.  */
354 if (!WRITE_CAN_USE_READ_PREFETCH
355 && write_p
356 && !(*aref)->write_p)
357 continue;
358 if (!READ_CAN_USE_WRITE_PREFETCH
359 && !write_p
360 && (*aref)->write_p)
361 continue;
362
363 if ((*aref)->delta == delta)
364 return;
365 }
366
5417e022 367 (*aref) = XNEW (struct mem_ref);
b076a3fd
ZD
368 (*aref)->stmt = stmt;
369 (*aref)->mem = mem;
370 (*aref)->delta = delta;
371 (*aref)->write_p = write_p;
372 (*aref)->prefetch_before = PREFETCH_ALL;
373 (*aref)->prefetch_mod = 1;
5417e022 374 (*aref)->reuse_distance = 0;
b076a3fd
ZD
375 (*aref)->issue_prefetch_p = false;
376 (*aref)->group = group;
377 (*aref)->next = NULL;
79f5e442
ZD
378 (*aref)->independent_p = false;
379 (*aref)->storent_p = false;
b076a3fd
ZD
380
381 if (dump_file && (dump_flags & TDF_DETAILS))
382 dump_mem_ref (dump_file, *aref);
383}
384
385/* Release memory references in GROUPS. */
386
387static void
388release_mem_refs (struct mem_ref_group *groups)
389{
390 struct mem_ref_group *next_g;
391 struct mem_ref *ref, *next_r;
392
393 for (; groups; groups = next_g)
394 {
395 next_g = groups->next;
396 for (ref = groups->refs; ref; ref = next_r)
397 {
398 next_r = ref->next;
399 free (ref);
400 }
401 free (groups);
402 }
403}
404
405/* A structure used to pass arguments to idx_analyze_ref. */
406
407struct ar_data
408{
409 struct loop *loop; /* Loop of the reference. */
726a989a 410 gimple stmt; /* Statement of the reference. */
81f32326 411 tree *step; /* Step of the memory reference. */
b076a3fd
ZD
412 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
413};
414
/* Analyzes a single INDEX of a memory reference to obtain the information
   described in analyze_ref.  Callback for for_each_index.  */
417
418static bool
419idx_analyze_ref (tree base, tree *index, void *data)
420{
c22940cd 421 struct ar_data *ar_data = (struct ar_data *) data;
b076a3fd 422 tree ibase, step, stepsize;
81f32326 423 HOST_WIDE_INT idelta = 0, imult = 1;
b076a3fd
ZD
424 affine_iv iv;
425
75421dcd 426 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF)
b076a3fd
ZD
427 return false;
428
f017bf5e 429 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
81f32326 430 *index, &iv, true))
b076a3fd
ZD
431 return false;
432 ibase = iv.base;
433 step = iv.step;
434
5be014d5 435 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
b076a3fd
ZD
436 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
437 {
438 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
439 ibase = TREE_OPERAND (ibase, 0);
440 }
441 if (cst_and_fits_in_hwi (ibase))
442 {
443 idelta += int_cst_value (ibase);
ff5e9a94 444 ibase = build_int_cst (TREE_TYPE (ibase), 0);
b076a3fd
ZD
445 }
446
447 if (TREE_CODE (base) == ARRAY_REF)
448 {
449 stepsize = array_ref_element_size (base);
450 if (!cst_and_fits_in_hwi (stepsize))
451 return false;
452 imult = int_cst_value (stepsize);
8fde8b40
CB
453 step = fold_build2 (MULT_EXPR, sizetype,
454 fold_convert (sizetype, step),
455 fold_convert (sizetype, stepsize));
b076a3fd
ZD
456 idelta *= imult;
457 }
458
8fde8b40
CB
459 if (*ar_data->step == NULL_TREE)
460 *ar_data->step = step;
461 else
462 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
463 fold_convert (sizetype, *ar_data->step),
464 fold_convert (sizetype, step));
b076a3fd
ZD
465 *ar_data->delta += idelta;
466 *index = ibase;
467
468 return true;
469}
470
/* Tries to express REF_P in the shape &BASE + STEP * iter + DELTA, where DELTA
   and STEP are integer constants and iter is the number of iterations of LOOP.
   The reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */
475
476static bool
aac8b8ed 477analyze_ref (struct loop *loop, tree *ref_p, tree *base,
81f32326 478 tree *step, HOST_WIDE_INT *delta,
726a989a 479 gimple stmt)
b076a3fd
ZD
480{
481 struct ar_data ar_data;
482 tree off;
483 HOST_WIDE_INT bit_offset;
aac8b8ed 484 tree ref = *ref_p;
b076a3fd 485
81f32326 486 *step = NULL_TREE;
b076a3fd
ZD
487 *delta = 0;
488
  /* First strip off the component references.  Ignore bitfields.
     Also strip off the real and imaginary parts of a complex, so that
     they can have the same base.  */
492 if (TREE_CODE (ref) == REALPART_EXPR
493 || TREE_CODE (ref) == IMAGPART_EXPR
494 || (TREE_CODE (ref) == COMPONENT_REF
495 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
496 {
497 if (TREE_CODE (ref) == IMAGPART_EXPR)
498 *delta += int_size_in_bytes (TREE_TYPE (ref));
499 ref = TREE_OPERAND (ref, 0);
500 }
b076a3fd 501
aac8b8ed
RS
502 *ref_p = ref;
503
b076a3fd
ZD
504 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
505 {
506 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
507 bit_offset = TREE_INT_CST_LOW (off);
508 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
b8698a0f 509
b076a3fd
ZD
510 *delta += bit_offset / BITS_PER_UNIT;
511 }
512
513 *base = unshare_expr (ref);
514 ar_data.loop = loop;
515 ar_data.stmt = stmt;
516 ar_data.step = step;
517 ar_data.delta = delta;
518 return for_each_index (base, idx_analyze_ref, &ar_data);
519}
520
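/* For instance, for the reference a[187*i + 50] from the example at the
   top of the file (with char-sized elements), this decomposition yields
   BASE = a[0], STEP = 187 and DELTA = 50, so that
   REF = &BASE + STEP * iter + DELTA.  */
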
/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is a write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */
b076a3fd 524
79f5e442 525static bool
b076a3fd 526gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
726a989a 527 tree ref, bool write_p, gimple stmt)
b076a3fd 528{
81f32326
CB
529 tree base, step;
530 HOST_WIDE_INT delta;
b076a3fd
ZD
531 struct mem_ref_group *agrp;
532
a80a2701
JJ
533 if (get_base_address (ref) == NULL)
534 return false;
535
aac8b8ed 536 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
79f5e442 537 return false;
81f32326
CB
538 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
539 if (step == NULL_TREE)
540 return false;
b076a3fd 541
756f50ce 542 /* Stop if the address of BASE could not be taken. */
bc068a23
CF
543 if (may_be_nonaddressable_p (base))
544 return false;
545
50814135
CF
546 /* Limit non-constant step prefetching only to the innermost loops. */
547 if (!cst_and_fits_in_hwi (step) && loop->inner != NULL)
548 return false;
549
b076a3fd
ZD
550 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
551 are integer constants. */
552 agrp = find_or_create_group (refs, base, step);
553 record_ref (agrp, stmt, ref, delta, write_p);
79f5e442
ZD
554
555 return true;
b076a3fd
ZD
556}
557
79f5e442
ZD
558/* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
559 true if there are no other memory references inside the loop. */
b076a3fd
ZD
560
561static struct mem_ref_group *
db34470d 562gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
b076a3fd
ZD
563{
564 basic_block *body = get_loop_body_in_dom_order (loop);
565 basic_block bb;
566 unsigned i;
726a989a
RB
567 gimple_stmt_iterator bsi;
568 gimple stmt;
569 tree lhs, rhs;
b076a3fd
ZD
570 struct mem_ref_group *refs = NULL;
571
79f5e442 572 *no_other_refs = true;
db34470d 573 *ref_count = 0;
79f5e442 574
b076a3fd
ZD
  /* Scan the loop body in order, so that earlier references precede the
     later ones.  */
577 for (i = 0; i < loop->num_nodes; i++)
578 {
579 bb = body[i];
580 if (bb->loop_father != loop)
581 continue;
582
726a989a 583 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
b076a3fd 584 {
726a989a 585 stmt = gsi_stmt (bsi);
79f5e442 586
726a989a 587 if (gimple_code (stmt) != GIMPLE_ASSIGN)
79f5e442 588 {
5006671f 589 if (gimple_vuse (stmt)
726a989a
RB
590 || (is_gimple_call (stmt)
591 && !(gimple_call_flags (stmt) & ECF_CONST)))
79f5e442
ZD
592 *no_other_refs = false;
593 continue;
594 }
b076a3fd 595
726a989a
RB
596 lhs = gimple_assign_lhs (stmt);
597 rhs = gimple_assign_rhs1 (stmt);
b076a3fd
ZD
598
599 if (REFERENCE_CLASS_P (rhs))
db34470d 600 {
79f5e442
ZD
601 *no_other_refs &= gather_memory_references_ref (loop, &refs,
602 rhs, false, stmt);
db34470d
GS
603 *ref_count += 1;
604 }
b076a3fd 605 if (REFERENCE_CLASS_P (lhs))
db34470d 606 {
79f5e442
ZD
607 *no_other_refs &= gather_memory_references_ref (loop, &refs,
608 lhs, true, stmt);
db34470d
GS
609 *ref_count += 1;
610 }
b076a3fd
ZD
611 }
612 }
613 free (body);
614
615 return refs;
616}
617
618/* Prune the prefetch candidate REF using the self-reuse. */
619
620static void
621prune_ref_by_self_reuse (struct mem_ref *ref)
622{
81f32326
CB
623 HOST_WIDE_INT step;
624 bool backward;
625
  /* If the step size is not constant, we cannot calculate prefetch_mod.  */
627 if (!cst_and_fits_in_hwi (ref->group->step))
628 return;
629
630 step = int_cst_value (ref->group->step);
631
632 backward = step < 0;
b076a3fd
ZD
633
634 if (step == 0)
635 {
636 /* Prefetch references to invariant address just once. */
637 ref->prefetch_before = 1;
638 return;
639 }
640
641 if (backward)
642 step = -step;
643
644 if (step > PREFETCH_BLOCK)
645 return;
646
647 if ((backward && HAVE_BACKWARD_PREFETCH)
648 || (!backward && HAVE_FORWARD_PREFETCH))
649 {
650 ref->prefetch_before = 1;
651 return;
652 }
653
654 ref->prefetch_mod = PREFETCH_BLOCK / step;
655}
656
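/* Worked example (illustrative numbers only; PREFETCH_BLOCK is
   target-dependent): with a 64-byte PREFETCH_BLOCK and no hardware
   prefetch, a reference with step 16 gets prefetch_mod = 64 / 16 = 4,
   while a reference with step 96 (greater than the block size) keeps the
   default prefetch_mod of 1.  */
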
657/* Divides X by BY, rounding down. */
658
659static HOST_WIDE_INT
660ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
661{
662 gcc_assert (by > 0);
663
664 if (x >= 0)
665 return x / by;
666 else
667 return (x + by - 1) / by;
668}
669
/* Given a CACHE_LINE_SIZE and two inductive memory references
   with a common STEP greater than CACHE_LINE_SIZE and an address
   difference DELTA, compute the probability that they will fall
   in different cache lines.  Return true if the computed miss rate
   is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
   number of distinct iterations after which the pattern repeats itself.
   ALIGN_UNIT is the unit of alignment in bytes.  */
677
14e444c3
CF
678static bool
679is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
2c6dd136
GS
680 HOST_WIDE_INT step, HOST_WIDE_INT delta,
681 unsigned HOST_WIDE_INT distinct_iters,
682 int align_unit)
683{
684 unsigned align, iter;
14e444c3 685 int total_positions, miss_positions, max_allowed_miss_positions;
2c6dd136
GS
686 int address1, address2, cache_line1, cache_line2;
687
a245c04b
CF
688 /* It always misses if delta is greater than or equal to the cache
689 line size. */
14e444c3
CF
690 if (delta >= (HOST_WIDE_INT) cache_line_size)
691 return false;
a245c04b 692
2c6dd136 693 miss_positions = 0;
14e444c3
CF
694 total_positions = (cache_line_size / align_unit) * distinct_iters;
695 max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
b8698a0f 696
2c6dd136
GS
697 /* Iterate through all possible alignments of the first
698 memory reference within its cache line. */
699 for (align = 0; align < cache_line_size; align += align_unit)
700
701 /* Iterate through all distinct iterations. */
702 for (iter = 0; iter < distinct_iters; iter++)
703 {
704 address1 = align + step * iter;
705 address2 = address1 + delta;
706 cache_line1 = address1 / cache_line_size;
707 cache_line2 = address2 / cache_line_size;
2c6dd136 708 if (cache_line1 != cache_line2)
14e444c3
CF
709 {
710 miss_positions += 1;
711 if (miss_positions > max_allowed_miss_positions)
712 return false;
713 }
2c6dd136 714 }
14e444c3 715 return true;
2c6dd136
GS
716}
717
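/* Worked example (using references (4) and (5) from the example at the top
   of the file, so cache_line_size = 64, step = 187, delta = 50,
   distinct_iters = 64 and align_unit = 1): total_positions = 64 * 64 = 4096
   and max_allowed_miss_positions = 50 * 4096 / 1000 = 204.  A miss occurs
   whenever the first address falls in the last 50 bytes of its cache line,
   i.e. in 50 out of every 64 positions (a hit probability of 14/64 = 7/32,
   as quoted above), so the limit is exceeded and the function returns
   false.  */
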
b076a3fd
ZD
718/* Prune the prefetch candidate REF using the reuse with BY.
719 If BY_IS_BEFORE is true, BY is before REF in the loop. */
720
721static void
722prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
723 bool by_is_before)
724{
81f32326
CB
725 HOST_WIDE_INT step;
726 bool backward;
b076a3fd
ZD
727 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
728 HOST_WIDE_INT delta = delta_b - delta_r;
729 HOST_WIDE_INT hit_from;
730 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
2c6dd136
GS
731 HOST_WIDE_INT reduced_step;
732 unsigned HOST_WIDE_INT reduced_prefetch_block;
733 tree ref_type;
734 int align_unit;
b076a3fd 735
  /* If the step is not constant, we cannot calculate prefetch_before.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

746 if (delta == 0)
747 {
      /* If the references have the same address, prefetch only the
         former.  */
750 if (by_is_before)
751 ref->prefetch_before = 0;
b8698a0f 752
b076a3fd
ZD
753 return;
754 }
755
756 if (!step)
757 {
758 /* If the reference addresses are invariant and fall into the
759 same cache line, prefetch just the first one. */
760 if (!by_is_before)
761 return;
762
763 if (ddown (ref->delta, PREFETCH_BLOCK)
764 != ddown (by->delta, PREFETCH_BLOCK))
765 return;
766
767 ref->prefetch_before = 0;
768 return;
769 }
770
771 /* Only prune the reference that is behind in the array. */
772 if (backward)
773 {
774 if (delta > 0)
775 return;
776
777 /* Transform the data so that we may assume that the accesses
778 are forward. */
779 delta = - delta;
780 step = -step;
781 delta_r = PREFETCH_BLOCK - 1 - delta_r;
782 delta_b = PREFETCH_BLOCK - 1 - delta_b;
783 }
784 else
785 {
786 if (delta < 0)
787 return;
788 }
789
  /* Check whether the two references are likely to hit the same cache
     line, and how far apart the iterations in which that occurs are from
     each other.  */
793
794 if (step <= PREFETCH_BLOCK)
795 {
796 /* The accesses are sure to meet. Let us check when. */
797 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
798 prefetch_before = (hit_from - delta_r + step - 1) / step;
799
57762e97 800 /* Do not reduce prefetch_before if we meet beyond cache size. */
e972cc7e 801 if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
57762e97 802 prefetch_before = PREFETCH_ALL;
b076a3fd
ZD
803 if (prefetch_before < ref->prefetch_before)
804 ref->prefetch_before = prefetch_before;
805
806 return;
807 }
808
b8698a0f 809 /* A more complicated case with step > prefetch_block. First reduce
2c6dd136 810 the ratio between the step and the cache line size to its simplest
b8698a0f
L
811 terms. The resulting denominator will then represent the number of
812 distinct iterations after which each address will go back to its
813 initial location within the cache line. This computation assumes
2c6dd136 814 that PREFETCH_BLOCK is a power of two. */
b076a3fd 815 prefetch_block = PREFETCH_BLOCK;
2c6dd136
GS
816 reduced_prefetch_block = prefetch_block;
817 reduced_step = step;
818 while ((reduced_step & 1) == 0
819 && reduced_prefetch_block > 1)
b076a3fd 820 {
2c6dd136
GS
821 reduced_step >>= 1;
822 reduced_prefetch_block >>= 1;
b076a3fd
ZD
823 }
824
b076a3fd
ZD
825 prefetch_before = delta / step;
826 delta %= step;
2c6dd136
GS
827 ref_type = TREE_TYPE (ref->mem);
828 align_unit = TYPE_ALIGN (ref_type) / 8;
14e444c3
CF
829 if (is_miss_rate_acceptable (prefetch_block, step, delta,
830 reduced_prefetch_block, align_unit))
b076a3fd 831 {
57762e97
CB
832 /* Do not reduce prefetch_before if we meet beyond cache size. */
833 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
834 prefetch_before = PREFETCH_ALL;
b076a3fd
ZD
835 if (prefetch_before < ref->prefetch_before)
836 ref->prefetch_before = prefetch_before;
837
838 return;
839 }
840
841 /* Try also the following iteration. */
842 prefetch_before++;
843 delta = step - delta;
14e444c3
CF
844 if (is_miss_rate_acceptable (prefetch_block, step, delta,
845 reduced_prefetch_block, align_unit))
b076a3fd
ZD
846 {
847 if (prefetch_before < ref->prefetch_before)
848 ref->prefetch_before = prefetch_before;
849
850 return;
851 }
852
  /* The ref probably does not reuse BY.  */
854 return;
855}
856
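/* Worked example (illustrative, assuming the 64-byte PREFETCH_BLOCK from
   the example at the top of the file): for REF = a[i] and BY = a[i + 64]
   we have step = 1, delta_r = 0 and delta_b = 64, so delta = 64.  Since
   step <= PREFETCH_BLOCK, hit_from = ddown (64, 64) * 64 = 64 and
   prefetch_before = (64 - 0 + 1 - 1) / 1 = 64, which matches the
   PREFETCH_BEFORE 64 quoted for reference (1) above.  */
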
857/* Prune the prefetch candidate REF using the reuses with other references
858 in REFS. */
859
860static void
861prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
862{
863 struct mem_ref *prune_by;
864 bool before = true;
865
866 prune_ref_by_self_reuse (ref);
867
868 for (prune_by = refs; prune_by; prune_by = prune_by->next)
869 {
870 if (prune_by == ref)
871 {
872 before = false;
873 continue;
874 }
875
876 if (!WRITE_CAN_USE_READ_PREFETCH
877 && ref->write_p
878 && !prune_by->write_p)
879 continue;
880 if (!READ_CAN_USE_WRITE_PREFETCH
881 && !ref->write_p
882 && prune_by->write_p)
883 continue;
884
885 prune_ref_by_group_reuse (ref, prune_by, before);
886 }
887}
888
889/* Prune the prefetch candidates in GROUP using the reuse analysis. */
890
891static void
892prune_group_by_reuse (struct mem_ref_group *group)
893{
894 struct mem_ref *ref_pruned;
895
896 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
897 {
898 prune_ref_by_reuse (ref_pruned, group->refs);
899
900 if (dump_file && (dump_flags & TDF_DETAILS))
901 {
902 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
903
904 if (ref_pruned->prefetch_before == PREFETCH_ALL
905 && ref_pruned->prefetch_mod == 1)
906 fprintf (dump_file, " no restrictions");
907 else if (ref_pruned->prefetch_before == 0)
908 fprintf (dump_file, " do not prefetch");
909 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
910 fprintf (dump_file, " prefetch once");
911 else
912 {
913 if (ref_pruned->prefetch_before != PREFETCH_ALL)
914 {
915 fprintf (dump_file, " prefetch before ");
916 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
917 ref_pruned->prefetch_before);
918 }
919 if (ref_pruned->prefetch_mod != 1)
920 {
921 fprintf (dump_file, " prefetch mod ");
922 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
923 ref_pruned->prefetch_mod);
924 }
925 }
926 fprintf (dump_file, "\n");
927 }
928 }
929}
930
931/* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
932
933static void
934prune_by_reuse (struct mem_ref_group *groups)
935{
936 for (; groups; groups = groups->next)
937 prune_group_by_reuse (groups);
938}
939
940/* Returns true if we should issue prefetch for REF. */
941
942static bool
943should_issue_prefetch_p (struct mem_ref *ref)
944{
  /* For now do not issue prefetches that would be useful only in the
     first few iterations of the loop.  */
947 if (ref->prefetch_before != PREFETCH_ALL)
a8beb3a7
CB
948 {
949 if (dump_file && (dump_flags & TDF_DETAILS))
950 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
951 (void *) ref);
952 return false;
953 }
b076a3fd 954
79f5e442
ZD
955 /* Do not prefetch nontemporal stores. */
956 if (ref->storent_p)
a8beb3a7
CB
957 {
958 if (dump_file && (dump_flags & TDF_DETAILS))
959 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
960 return false;
961 }
79f5e442 962
b076a3fd
ZD
963 return true;
964}
965
/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */
971
972static bool
973schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
974 unsigned ahead)
975{
911b3fdb
ZD
976 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
977 unsigned slots_per_prefetch;
b076a3fd
ZD
978 struct mem_ref *ref;
979 bool any = false;
980
911b3fdb
ZD
981 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
982 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
b076a3fd 983
911b3fdb
ZD
984 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
985 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
986 it will need a prefetch slot. */
987 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
b076a3fd 988 if (dump_file && (dump_flags & TDF_DETAILS))
911b3fdb
ZD
989 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
990 slots_per_prefetch);
b076a3fd
ZD
991
992 /* For now we just take memory references one by one and issue
993 prefetches for as many as possible. The groups are sorted
994 starting with the largest step, since the references with
c0220ea4 995 large step are more likely to cause many cache misses. */
b076a3fd
ZD
996
997 for (; groups; groups = groups->next)
998 for (ref = groups->refs; ref; ref = ref->next)
999 {
1000 if (!should_issue_prefetch_p (ref))
1001 continue;
1002
        /* The loop is far from being sufficiently unrolled for this
           prefetch.  Do not generate the prefetch to avoid many redundant
           prefetches.  */
1006 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1007 continue;
1008
911b3fdb
ZD
1009 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
1010 and we unroll the loop UNROLL_FACTOR times, we need to insert
1011 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1012 iteration. */
b076a3fd
ZD
1013 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1014 / ref->prefetch_mod);
911b3fdb
ZD
1015 prefetch_slots = n_prefetches * slots_per_prefetch;
1016
1017 /* If more than half of the prefetches would be lost anyway, do not
1018 issue the prefetch. */
1019 if (2 * remaining_prefetch_slots < prefetch_slots)
1020 continue;
1021
1022 ref->issue_prefetch_p = true;
b076a3fd 1023
911b3fdb
ZD
1024 if (remaining_prefetch_slots <= prefetch_slots)
1025 return true;
1026 remaining_prefetch_slots -= prefetch_slots;
b076a3fd
ZD
1027 any = true;
1028 }
1029
1030 return any;
1031}
1032
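/* Illustration (assuming a target with SIMULTANEOUS_PREFETCHES == 6):
   with ahead = 8 and unroll_factor = 4, each prefetch instruction takes
   slots_per_prefetch = (8 + 2) / 4 = 2 slots.  A reference with
   prefetch_mod = 1 then needs n_prefetches = 4, i.e. prefetch_slots = 8;
   since 2 * 6 >= 8 it is still issued, and because 6 <= 8 it uses up all
   remaining slots, so no further candidates are scheduled.  */
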
d5058523
CF
1033/* Return TRUE if no prefetch is going to be generated in the given
1034 GROUPS. */
1035
1036static bool
1037nothing_to_prefetch_p (struct mem_ref_group *groups)
1038{
1039 struct mem_ref *ref;
1040
1041 for (; groups; groups = groups->next)
1042 for (ref = groups->refs; ref; ref = ref->next)
1043 if (should_issue_prefetch_p (ref))
1044 return false;
1045
1046 return true;
1047}
1048
1049/* Estimate the number of prefetches in the given GROUPS.
1050 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
b076a3fd 1051
db34470d 1052static int
d5058523 1053estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
b076a3fd
ZD
1054{
1055 struct mem_ref *ref;
d5058523 1056 unsigned n_prefetches;
db34470d 1057 int prefetch_count = 0;
b076a3fd
ZD
1058
1059 for (; groups; groups = groups->next)
1060 for (ref = groups->refs; ref; ref = ref->next)
1061 if (should_issue_prefetch_p (ref))
d5058523
CF
1062 {
1063 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1064 / ref->prefetch_mod);
1065 prefetch_count += n_prefetches;
1066 }
b076a3fd 1067
db34470d 1068 return prefetch_count;
b076a3fd
ZD
1069}
1070
/* Issue prefetches for the reference REF into the loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */
b076a3fd
ZD
1074
1075static void
1076issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1077{
1078 HOST_WIDE_INT delta;
81f32326 1079 tree addr, addr_base, write_p, local, forward;
726a989a
RB
1080 gimple prefetch;
1081 gimple_stmt_iterator bsi;
b076a3fd 1082 unsigned n_prefetches, ap;
5417e022 1083 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
b076a3fd
ZD
1084
1085 if (dump_file && (dump_flags & TDF_DETAILS))
5417e022
ZD
1086 fprintf (dump_file, "Issued%s prefetch for %p.\n",
1087 nontemporal ? " nontemporal" : "",
1088 (void *) ref);
b076a3fd 1089
726a989a 1090 bsi = gsi_for_stmt (ref->stmt);
b076a3fd
ZD
1091
1092 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1093 / ref->prefetch_mod);
1094 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
726a989a
RB
1095 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1096 true, NULL, true, GSI_SAME_STMT);
911b3fdb 1097 write_p = ref->write_p ? integer_one_node : integer_zero_node;
9a9d280e 1098 local = nontemporal ? integer_zero_node : integer_three_node;
b076a3fd
ZD
1099
1100 for (ap = 0; ap < n_prefetches; ap++)
1101 {
81f32326
CB
1102 if (cst_and_fits_in_hwi (ref->group->step))
1103 {
1104 /* Determine the address to prefetch. */
1105 delta = (ahead + ap * ref->prefetch_mod) *
1106 int_cst_value (ref->group->step);
1107 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
1108 addr_base, size_int (delta));
1109 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1110 true, GSI_SAME_STMT);
1111 }
1112 else
1113 {
          /* The step size is non-constant but loop-invariant.  We use the
             heuristic of simply prefetching AHEAD iterations ahead.  */
1116 forward = fold_build2 (MULT_EXPR, sizetype,
1117 fold_convert (sizetype, ref->group->step),
1118 fold_convert (sizetype, size_int (ahead)));
1119 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
1120 forward);
1121 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1122 NULL, true, GSI_SAME_STMT);
1123 }
b076a3fd 1124 /* Create the prefetch instruction. */
726a989a
RB
1125 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
1126 3, addr, write_p, local);
1127 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
b076a3fd
ZD
1128 }
1129}
1130
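/* Sketch of the emitted code (illustrative only): for a read a[i] with a
   constant step of 4 bytes, prefetch_mod = 1, unroll_factor = 4 and
   ahead = 8, four prefetch calls are inserted before the reference,
   roughly

     __builtin_prefetch ((char *) &a[i] + (8 + ap) * 4, 0, 3);   ap = 0, 1, 2, 3

   i.e. delta = (ahead + ap * prefetch_mod) * step, write_p = 0 for a read,
   and locality 3 (0 if the reference was classified as nontemporal).  */
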
/* Issue prefetches for the references in GROUPS into the loop as decided
   before.  AHEAD is the number of iterations to prefetch ahead.
   UNROLL_FACTOR is the factor by which LOOP was unrolled.  */
1134
1135static void
1136issue_prefetches (struct mem_ref_group *groups,
1137 unsigned unroll_factor, unsigned ahead)
1138{
1139 struct mem_ref *ref;
1140
1141 for (; groups; groups = groups->next)
1142 for (ref = groups->refs; ref; ref = ref->next)
1143 if (ref->issue_prefetch_p)
1144 issue_prefetch_ref (ref, unroll_factor, ahead);
1145}
1146
/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */
1149
1150static bool
1151nontemporal_store_p (struct mem_ref *ref)
1152{
1153 enum machine_mode mode;
1154 enum insn_code code;
1155
  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
1159 if (!ref->write_p
1160 || !ref->independent_p
1161 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1162 return false;
1163
1164 /* Check that we have the storent instruction for the mode. */
1165 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1166 if (mode == BLKmode)
1167 return false;
1168
947131ba 1169 code = optab_handler (storent_optab, mode);
79f5e442
ZD
1170 return code != CODE_FOR_nothing;
1171}
1172
1173/* If REF is a nontemporal store, we mark the corresponding modify statement
1174 and return true. Otherwise, we return false. */
1175
1176static bool
1177mark_nontemporal_store (struct mem_ref *ref)
1178{
1179 if (!nontemporal_store_p (ref))
1180 return false;
1181
1182 if (dump_file && (dump_flags & TDF_DETAILS))
1183 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1184 (void *) ref);
1185
726a989a 1186 gimple_assign_set_nontemporal_move (ref->stmt, true);
79f5e442
ZD
1187 ref->storent_p = true;
1188
1189 return true;
1190}
1191
1192/* Issue a memory fence instruction after LOOP. */
1193
1194static void
1195emit_mfence_after_loop (struct loop *loop)
1196{
1197 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1198 edge exit;
726a989a
RB
1199 gimple call;
1200 gimple_stmt_iterator bsi;
79f5e442
ZD
1201 unsigned i;
1202
ac47786e 1203 FOR_EACH_VEC_ELT (edge, exits, i, exit)
79f5e442 1204 {
726a989a 1205 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
79f5e442
ZD
1206
1207 if (!single_pred_p (exit->dest)
1208 /* If possible, we prefer not to insert the fence on other paths
1209 in cfg. */
1210 && !(exit->flags & EDGE_ABNORMAL))
1211 split_loop_exit_edge (exit);
726a989a 1212 bsi = gsi_after_labels (exit->dest);
79f5e442 1213
726a989a 1214 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
79f5e442
ZD
1215 mark_virtual_ops_for_renaming (call);
1216 }
1217
1218 VEC_free (edge, heap, exits);
1219 update_ssa (TODO_update_ssa_only_virtuals);
1220}
1221
1222/* Returns true if we can use storent in loop, false otherwise. */
1223
1224static bool
1225may_use_storent_in_loop_p (struct loop *loop)
1226{
1227 bool ret = true;
1228
1229 if (loop->inner != NULL)
1230 return false;
1231
1232 /* If we must issue a mfence insn after using storent, check that there
1233 is a suitable place for it at each of the loop exits. */
1234 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1235 {
1236 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1237 unsigned i;
1238 edge exit;
1239
ac47786e 1240 FOR_EACH_VEC_ELT (edge, exits, i, exit)
79f5e442
ZD
1241 if ((exit->flags & EDGE_ABNORMAL)
1242 && exit->dest == EXIT_BLOCK_PTR)
1243 ret = false;
1244
1245 VEC_free (edge, heap, exits);
1246 }
1247
1248 return ret;
1249}
1250
1251/* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1252 references in the loop. */
1253
1254static void
1255mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1256{
1257 struct mem_ref *ref;
1258 bool any = false;
1259
1260 if (!may_use_storent_in_loop_p (loop))
1261 return;
1262
1263 for (; groups; groups = groups->next)
1264 for (ref = groups->refs; ref; ref = ref->next)
1265 any |= mark_nontemporal_store (ref);
1266
1267 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1268 emit_mfence_after_loop (loop);
1269}
1270
/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fill in DESC with the description of the number of
   iterations.  */
1274
1275static bool
1276should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1277 unsigned factor)
1278{
1279 if (!can_unroll_loop_p (loop, factor, desc))
1280 return false;
1281
1282 /* We only consider loops without control flow for unrolling. This is not
1283 a hard restriction -- tree_unroll_loop works with arbitrary loops
1284 as well; but the unrolling/prefetching is usually more profitable for
1285 loops consisting of a single basic block, and we want to limit the
1286 code growth. */
1287 if (loop->num_nodes > 2)
1288 return false;
1289
1290 return true;
1291}
1292
/* Determine the factor by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  The description of the
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */
b076a3fd
ZD
1298
1299static unsigned
1300determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
2711355f
ZD
1301 unsigned ninsns, struct tree_niter_desc *desc,
1302 HOST_WIDE_INT est_niter)
b076a3fd 1303{
911b3fdb
ZD
1304 unsigned upper_bound;
1305 unsigned nfactor, factor, mod_constraint;
b076a3fd
ZD
1306 struct mem_ref_group *agp;
1307 struct mem_ref *ref;
1308
911b3fdb
ZD
1309 /* First check whether the loop is not too large to unroll. We ignore
1310 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1311 from unrolling them enough to make exactly one cache line covered by each
1312 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1313 us from unrolling the loops too many times in cases where we only expect
1314 gains from better scheduling and decreasing loop overhead, which is not
1315 the case here. */
1316 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
2711355f
ZD
1317
1318 /* If we unrolled the loop more times than it iterates, the unrolled version
1319 of the loop would be never entered. */
1320 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1321 upper_bound = est_niter;
1322
911b3fdb 1323 if (upper_bound <= 1)
b076a3fd
ZD
1324 return 1;
1325
  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
1328 factor = 1;
b076a3fd
ZD
1329 for (agp = refs; agp; agp = agp->next)
1330 for (ref = agp->refs; ref; ref = ref->next)
911b3fdb
ZD
1331 if (should_issue_prefetch_p (ref))
1332 {
1333 mod_constraint = ref->prefetch_mod;
1334 nfactor = least_common_multiple (mod_constraint, factor);
1335 if (nfactor <= upper_bound)
1336 factor = nfactor;
1337 }
b076a3fd
ZD
1338
1339 if (!should_unroll_loop_p (loop, desc, factor))
1340 return 1;
1341
1342 return factor;
1343}
1344
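/* For example (illustrative): if the candidate references have
   prefetch_mod 2 and 8 and upper_bound is 16, the factor grows as
   lcm (1, 2) = 2 and then lcm (2, 8) = 8, so the loop is unrolled 8 times
   and each cache line is prefetched once; with upper_bound 4 the second
   constraint would be rejected and the factor would stay at 2.  */
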
5417e022
ZD
1345/* Returns the total volume of the memory references REFS, taking into account
1346 reuses in the innermost loop and cache line size. TODO -- we should also
1347 take into account reuses across the iterations of the loops in the loop
1348 nest. */
1349
1350static unsigned
1351volume_of_references (struct mem_ref_group *refs)
1352{
1353 unsigned volume = 0;
1354 struct mem_ref_group *gr;
1355 struct mem_ref *ref;
1356
1357 for (gr = refs; gr; gr = gr->next)
1358 for (ref = gr->refs; ref; ref = ref->next)
1359 {
1360 /* Almost always reuses another value? */
1361 if (ref->prefetch_before != PREFETCH_ALL)
1362 continue;
1363
1364 /* If several iterations access the same cache line, use the size of
1365 the line divided by this number. Otherwise, a cache line is
1366 accessed in each iteration. TODO -- in the latter case, we should
1367 take the size of the reference into account, rounding it up on cache
1368 line size multiple. */
1369 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1370 }
1371 return volume;
1372}
1373
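/* For instance (assuming a 64-byte L1 cache line): two references with
   prefetch_mod 1 and 4, neither restricted by PREFETCH_BEFORE, contribute
   64 + 16 = 80 bytes to the volume per iteration.  */
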
1374/* Returns the volume of memory references accessed across VEC iterations of
1375 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1376 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1377
1378static unsigned
1379volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1380{
1381 unsigned i;
1382
1383 for (i = 0; i < n; i++)
1384 if (vec[i] != 0)
1385 break;
1386
1387 if (i == n)
1388 return 0;
1389
1390 gcc_assert (vec[i] > 0);
1391
1392 /* We ignore the parts of the distance vector in subloops, since usually
1393 the numbers of iterations are much smaller. */
1394 return loop_sizes[i] * vec[i];
1395}
1396
/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and LOOP is its innermost loop.  */
1400
1401static void
1402add_subscript_strides (tree access_fn, unsigned stride,
1403 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1404{
1405 struct loop *aloop;
1406 tree step;
1407 HOST_WIDE_INT astep;
1408 unsigned min_depth = loop_depth (loop) - n;
1409
1410 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1411 {
1412 aloop = get_chrec_loop (access_fn);
1413 step = CHREC_RIGHT (access_fn);
1414 access_fn = CHREC_LEFT (access_fn);
1415
1416 if ((unsigned) loop_depth (aloop) <= min_depth)
1417 continue;
1418
1419 if (host_integerp (step, 0))
1420 astep = tree_low_cst (step, 0);
1421 else
1422 astep = L1_CACHE_LINE_SIZE;
1423
1424 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1425
1426 }
1427}
1428
1429/* Returns the volume of memory references accessed between two consecutive
1430 self-reuses of the reference DR. We consider the subscripts of DR in N
1431 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1432 loops. LOOP is the innermost loop of the current loop nest. */
1433
1434static unsigned
1435self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1436 struct loop *loop)
1437{
1438 tree stride, access_fn;
1439 HOST_WIDE_INT *strides, astride;
1440 VEC (tree, heap) *access_fns;
1441 tree ref = DR_REF (dr);
1442 unsigned i, ret = ~0u;
1443
1444 /* In the following example:
1445
1446 for (i = 0; i < N; i++)
1447 for (j = 0; j < N; j++)
1448 use (a[j][i]);
1449 the same cache line is accessed each N steps (except if the change from
1450 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1451 we cannot rely purely on the results of the data dependence analysis.
1452
     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than the cache size.  */
1455
1456 strides = XCNEWVEC (HOST_WIDE_INT, n);
1457 access_fns = DR_ACCESS_FNS (dr);
1458
ac47786e 1459 FOR_EACH_VEC_ELT (tree, access_fns, i, access_fn)
5417e022
ZD
1460 {
1461 /* Keep track of the reference corresponding to the subscript, so that we
1462 know its stride. */
1463 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1464 ref = TREE_OPERAND (ref, 0);
b8698a0f 1465
5417e022
ZD
1466 if (TREE_CODE (ref) == ARRAY_REF)
1467 {
1468 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1469 if (host_integerp (stride, 1))
1470 astride = tree_low_cst (stride, 1);
1471 else
1472 astride = L1_CACHE_LINE_SIZE;
1473
1474 ref = TREE_OPERAND (ref, 0);
1475 }
1476 else
1477 astride = 1;
1478
1479 add_subscript_strides (access_fn, astride, strides, n, loop);
1480 }
1481
1482 for (i = n; i-- > 0; )
1483 {
1484 unsigned HOST_WIDE_INT s;
1485
1486 s = strides[i] < 0 ? -strides[i] : strides[i];
1487
1488 if (s < (unsigned) L1_CACHE_LINE_SIZE
1489 && (loop_sizes[i]
1490 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1491 {
1492 ret = loop_sizes[i];
1493 break;
1494 }
1495 }
1496
1497 free (strides);
1498 return ret;
1499}
1500
/* Determines the distance until the first reuse of each reference in REFS
   in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
   memory references in the loop.  */
5417e022
ZD
1504
1505static void
79f5e442
ZD
1506determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1507 bool no_other_refs)
5417e022
ZD
1508{
1509 struct loop *nest, *aloop;
1510 VEC (data_reference_p, heap) *datarefs = NULL;
1511 VEC (ddr_p, heap) *dependences = NULL;
1512 struct mem_ref_group *gr;
79f5e442 1513 struct mem_ref *ref, *refb;
5417e022
ZD
1514 VEC (loop_p, heap) *vloops = NULL;
1515 unsigned *loop_data_size;
1516 unsigned i, j, n;
1517 unsigned volume, dist, adist;
1518 HOST_WIDE_INT vol;
1519 data_reference_p dr;
1520 ddr_p dep;
1521
1522 if (loop->inner)
1523 return;
1524
1525 /* Find the outermost loop of the loop nest of loop (we require that
1526 there are no sibling loops inside the nest). */
1527 nest = loop;
1528 while (1)
1529 {
1530 aloop = loop_outer (nest);
1531
1532 if (aloop == current_loops->tree_root
1533 || aloop->inner->next)
1534 break;
1535
1536 nest = aloop;
1537 }
1538
1539 /* For each loop, determine the amount of data accessed in each iteration.
1540 We use this to estimate whether the reference is evicted from the
1541 cache before its reuse. */
1542 find_loop_nest (nest, &vloops);
1543 n = VEC_length (loop_p, vloops);
1544 loop_data_size = XNEWVEC (unsigned, n);
1545 volume = volume_of_references (refs);
1546 i = n;
1547 while (i-- != 0)
1548 {
1549 loop_data_size[i] = volume;
1550 /* Bound the volume by the L2 cache size, since above this bound,
1551 all dependence distances are equivalent. */
1552 if (volume > L2_CACHE_SIZE_BYTES)
1553 continue;
1554
1555 aloop = VEC_index (loop_p, vloops, i);
1556 vol = estimated_loop_iterations_int (aloop, false);
1557 if (vol < 0)
1558 vol = expected_loop_iterations (aloop);
1559 volume *= vol;
1560 }
1561
  /* Prepare the references in the form suitable for data dependence
     analysis.  We ignore unanalyzable data references (the results
     are used just as a heuristic to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
1566 for (gr = refs; gr; gr = gr->next)
1567 for (ref = gr->refs; ref; ref = ref->next)
1568 {
1569 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1570
1571 if (dr)
1572 {
1573 ref->reuse_distance = volume;
1574 dr->aux = ref;
1575 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1576 }
79f5e442
ZD
1577 else
1578 no_other_refs = false;
5417e022
ZD
1579 }
1580
ac47786e 1581 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
5417e022
ZD
1582 {
1583 dist = self_reuse_distance (dr, loop_data_size, n, loop);
3d9a9f94 1584 ref = (struct mem_ref *) dr->aux;
5417e022
ZD
1585 if (ref->reuse_distance > dist)
1586 ref->reuse_distance = dist;
79f5e442
ZD
1587
1588 if (no_other_refs)
1589 ref->independent_p = true;
5417e022
ZD
1590 }
1591
1592 compute_all_dependences (datarefs, &dependences, vloops, true);
1593
ac47786e 1594 FOR_EACH_VEC_ELT (ddr_p, dependences, i, dep)
5417e022
ZD
1595 {
1596 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1597 continue;
1598
3d9a9f94
KG
1599 ref = (struct mem_ref *) DDR_A (dep)->aux;
1600 refb = (struct mem_ref *) DDR_B (dep)->aux;
79f5e442 1601
5417e022
ZD
1602 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1603 || DDR_NUM_DIST_VECTS (dep) == 0)
1604 {
0d52bcc1 1605 /* If the dependence cannot be analyzed, assume that there might be
5417e022
ZD
1606 a reuse. */
1607 dist = 0;
b8698a0f 1608
79f5e442
ZD
1609 ref->independent_p = false;
1610 refb->independent_p = false;
5417e022
ZD
1611 }
1612 else
1613 {
0d52bcc1 1614 /* The distance vectors are normalized to be always lexicographically
5417e022
ZD
1615 positive, hence we cannot tell just from them whether DDR_A comes
1616 before DDR_B or vice versa. However, it is not important,
1617 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1618 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1619 in cache (and marking it as nontemporal would not affect
1620 anything). */
1621
1622 dist = volume;
1623 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1624 {
1625 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1626 loop_data_size, n);
1627
79f5e442
ZD
1628 /* If this is a dependence in the innermost loop (i.e., the
1629 distances in all superloops are zero) and it is not
1630 the trivial self-dependence with distance zero, record that
1631 the references are not completely independent. */
1632 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1633 && (ref != refb
1634 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1635 {
1636 ref->independent_p = false;
1637 refb->independent_p = false;
1638 }
1639
5417e022
ZD
1640 /* Ignore accesses closer than
1641 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1642 so that we use nontemporal prefetches e.g. if single memory
1643 location is accessed several times in a single iteration of
1644 the loop. */
1645 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1646 continue;
1647
1648 if (adist < dist)
1649 dist = adist;
1650 }
1651 }
1652
5417e022
ZD
1653 if (ref->reuse_distance > dist)
1654 ref->reuse_distance = dist;
1655 if (refb->reuse_distance > dist)
1656 refb->reuse_distance = dist;
1657 }
1658
1659 free_dependence_relations (dependences);
1660 free_data_refs (datarefs);
1661 free (loop_data_size);
1662
1663 if (dump_file && (dump_flags & TDF_DETAILS))
1664 {
1665 fprintf (dump_file, "Reuse distances:\n");
1666 for (gr = refs; gr; gr = gr->next)
1667 for (ref = gr->refs; ref; ref = ref->next)
1668 fprintf (dump_file, " ref %p distance %u\n",
1669 (void *) ref, ref->reuse_distance);
1670 }
1671}
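/* Illustrative sketch, not part of the pass: a reference's reuse distance
   starts at the volume of data touched by the loop nest and is only ever
   lowered, first by the self-reuse estimate and then by the smallest
   dependence-based distance.  The helper below is a hypothetical,
   simplified model of that "take the minimum" bookkeeping; the parameter
   names are assumptions made for the example only.  */

static unsigned
example_tighten_reuse_distance (unsigned volume, unsigned self_dist,
                                unsigned min_dep_dist)
{
  unsigned dist = volume;       /* Worst case: no reuse observed.  */

  if (self_dist < dist)         /* Reuse within the reference itself.  */
    dist = self_dist;
  if (min_dep_dist < dist)      /* Reuse through another reference.  */
    dist = min_dep_dist;

  return dist;
}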
1672
1673 /* Determine whether or not the trip count to ahead ratio is too small based
1674 on profitability considerations.
db34470d 1675 AHEAD: the iteration ahead distance,
1676 EST_NITER: the estimated trip count. */
1677
1678static bool
1679trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1680{
1681 /* Assume trip count to ahead ratio is big enough if the trip count could not
1682 be estimated at compile time. */
1683 if (est_niter < 0)
1684 return false;
1685
1686 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1687 {
1688 if (dump_file && (dump_flags & TDF_DETAILS))
1689 fprintf (dump_file,
1690 "Not prefetching -- loop estimated to roll only %d times\n",
1691 (int) est_niter);
1692 return true;
1693 }
1694
1695 return false;
1696}
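/* A worked example of the check above, using made-up numbers: if the loop
   body costs time 25 and PREFETCH_LATENCY is 200, then
   ahead = (200 + 25 - 1) / 25 = 8.  With a hypothetical ratio of 4, any
   loop estimated to roll fewer than 32 times would be rejected, since most
   prefetches would then target iterations the loop never executes.  The
   helper below only restates that comparison; RATIO stands in for
   TRIP_COUNT_TO_AHEAD_RATIO, whose value is a tuning parameter.  */

static int
example_trip_count_too_small_p (long est_niter, unsigned ahead, unsigned ratio)
{
  /* An unknown trip count (negative estimate) is optimistically accepted,
     mirroring trip_count_to_ahead_ratio_too_small_p above.  */
  if (est_niter < 0)
    return 0;

  return est_niter < (long) (ratio * ahead);
}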
1697
1698 /* Determine whether or not the number of memory references in the loop is
1699 reasonable based on the profitability and compilation time considerations.
db34470d 1700 NINSNS: estimated number of instructions in the loop,
1701 MEM_REF_COUNT: total number of memory references in the loop. */
1702
b8698a0f 1703static bool
0bbe50f6 1704mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
db34470d 1705{
0bbe50f6 1706 int insn_to_mem_ratio;
1707
1708 if (mem_ref_count == 0)
1709 return false;
1710
1711 /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1712 (compute_all_dependences) have costs quadratic in the number of references.
1713 To avoid huge compilation time, we give up prefetching if mem_ref_count
1714 is too large. */
1715 if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1716 return false;
1717
1718 /* Prefetching improves performance by overlapping cache-missing
1719 memory accesses with CPU operations. If the loop does not have
1720 enough CPU operations to overlap with memory operations, prefetching
1721 won't give a significant benefit. One approximate way of checking
1722 this is to require the ratio of instructions to memory references to
1723 be above a certain limit. This approximation works well in practice.
1724 TODO: Implement a more precise computation by estimating the time
1725 for each CPU or memory op in the loop. Time estimates for memory ops
1726 should account for cache misses. */
b8698a0f 1727 insn_to_mem_ratio = ninsns / mem_ref_count;
1728
1729 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1730 {
1731 if (dump_file && (dump_flags & TDF_DETAILS))
1732 fprintf (dump_file,
1733 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1734 insn_to_mem_ratio);
1735 return false;
1736 }
db34470d 1737
1738 return true;
1739}
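/* Worked example with hypothetical numbers: a loop with ninsns = 40 and
   mem_ref_count = 10 gives insn_to_mem_ratio = 40 / 10 = 4.  Whether that
   is enough depends on PREFETCH_MIN_INSN_TO_MEM_RATIO, which is a tunable
   parameter; with a threshold of 3 this loop would be judged to have
   enough CPU work to overlap with its memory accesses.  The helper below
   is a simplified restatement of the test, not the pass's own code.  */

static int
example_enough_cpu_work_p (unsigned ninsns, unsigned mem_ref_count,
                           unsigned min_insn_to_mem_ratio)
{
  if (mem_ref_count == 0)
    return 0;                   /* Nothing to prefetch at all.  */

  return ninsns / mem_ref_count >= min_insn_to_mem_ratio;
}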
1740
1741 /* Determine whether or not the instruction to prefetch ratio in the loop is
1742 too small based on profitability considerations.
1743 NINSNS: estimated number of instructions in the loop,
1744 PREFETCH_COUNT: an estimate of the number of prefetches,
1745 UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
1746
1747static bool
1748insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1749 unsigned unroll_factor)
1750{
1751 int insn_to_prefetch_ratio;
1752
1753 /* Prefetching most likely causes performance degradation when the instruction
1754 to prefetch ratio is too small. Too many prefetch instructions in a loop
1755 may reduce the I-cache performance.
1756 (unroll_factor * ninsns) is used to estimate the number of instructions in
1757 the unrolled loop. This implementation is a bit simplistic -- the number
1758 of issued prefetch instructions is also affected by unrolling. So,
1759 prefetch_mod and the unroll factor should be taken into account when
1760 determining prefetch_count. Also, the number of insns of the unrolled
1761 loop will usually be significantly smaller than the number of insns of the
1762 original loop * unroll_factor (at least the induction variable increases
1763 and the exit branches will get eliminated), so it might be better to use
1764 tree_estimate_loop_size + estimated_unrolled_size. */
1765 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1766 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
db34470d 1767 {
1768 if (dump_file && (dump_flags & TDF_DETAILS))
1769 fprintf (dump_file,
1770 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1771 insn_to_prefetch_ratio);
0bbe50f6 1772 return true;
db34470d 1773 }
b8698a0f 1774
0bbe50f6 1775 return false;
1776}
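/* Worked example with made-up numbers: ninsns = 30, unroll_factor = 4 and
   prefetch_count = 16 give an estimated unrolled size of 120 insns and an
   insn-to-prefetch ratio of 120 / 16 = 7.  If MIN_INSN_TO_PREFETCH_RATIO
   were 9, prefetching would be rejected as likely to crowd the loop with
   prefetch instructions.  The helper below is just that arithmetic in
   isolation, with the threshold passed in explicitly.  */

static int
example_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
                                    unsigned unroll_factor, unsigned min_ratio)
{
  unsigned insn_to_prefetch_ratio;

  if (prefetch_count == 0)      /* No prefetches: nothing worth emitting.  */
    return 1;

  /* Approximate the unrolled loop by unroll_factor copies of the body.  */
  insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;

  return insn_to_prefetch_ratio < min_ratio;
}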
1777
1778
b076a3fd 1779/* Issue prefetch instructions for array references in LOOP. Returns
d73be268 1780 true if the LOOP was unrolled. */
1781
1782static bool
d73be268 1783loop_prefetch_arrays (struct loop *loop)
1784{
1785 struct mem_ref_group *refs;
1786 unsigned ahead, ninsns, time, unroll_factor;
1787 HOST_WIDE_INT est_niter;
b076a3fd 1788 struct tree_niter_desc desc;
79f5e442 1789 bool unrolled = false, no_other_refs;
1790 unsigned prefetch_count;
1791 unsigned mem_ref_count;
b076a3fd 1792
efd8f750 1793 if (optimize_loop_nest_for_size_p (loop))
1794 {
1795 if (dump_file && (dump_flags & TDF_DETAILS))
1796 fprintf (dump_file, " ignored (cold area)\n");
1797 return false;
1798 }
1799
1800 /* FIXME: the time should be weighted by the probabilities of the blocks in
1801 the loop body. */
1802 time = tree_num_loop_insns (loop, &eni_time_weights);
1803 if (time == 0)
1804 return false;
1805
1806 ahead = (PREFETCH_LATENCY + time - 1) / time;
1807 est_niter = estimated_loop_iterations_int (loop, false);
1808
1809 /* Prefetching is not likely to be profitable if the trip count to ahead
1810 ratio is too small. */
1811 if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1812 return false;
1813
1814 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1815
b076a3fd 1816 /* Step 1: gather the memory references. */
db34470d 1817 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
b076a3fd 1818
1819 /* Give up prefetching if the number of memory references in the
1820 loop is not reasonable based on profitability and compilation time
1821 considerations. */
1822 if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1823 goto fail;
1824
1825 /* Step 2: estimate the reuse effects. */
1826 prune_by_reuse (refs);
1827
d5058523 1828 if (nothing_to_prefetch_p (refs))
1829 goto fail;
1830
79f5e442 1831 determine_loop_nest_reuse (loop, refs, no_other_refs);
5417e022 1832
0bbe50f6 1833 /* Step 3: determine unroll factor. */
1834 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1835 est_niter);
1836
1837 /* Estimate prefetch count for the unrolled loop. */
1838 prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1839 if (prefetch_count == 0)
1840 goto fail;
1841
2711355f 1842 if (dump_file && (dump_flags & TDF_DETAILS))
b8698a0f 1843 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
d81f5387 1844 HOST_WIDE_INT_PRINT_DEC "\n"
1845 "insn count %d, mem ref count %d, prefetch count %d\n",
1846 ahead, unroll_factor, est_niter,
1847 ninsns, mem_ref_count, prefetch_count);
db34470d 1848
1849 /* Prefetching is not likely to be profitable if the instruction to prefetch
1850 ratio is too small. */
1851 if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1852 unroll_factor))
1853 goto fail;
1854
1855 mark_nontemporal_stores (loop, refs);
2711355f 1856
1857 /* Step 4: what to prefetch? */
1858 if (!schedule_prefetches (refs, unroll_factor, ahead))
1859 goto fail;
1860
1861 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1862 iterations so that we do not issue superfluous prefetches. */
1863 if (unroll_factor != 1)
1864 {
d73be268 1865 tree_unroll_loop (loop, unroll_factor,
1866 single_dom_exit (loop), &desc);
1867 unrolled = true;
1868 }
1869
1870 /* Step 6: issue the prefetches. */
1871 issue_prefetches (refs, unroll_factor, ahead);
1872
1873fail:
1874 release_mem_refs (refs);
1875 return unrolled;
1876}
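/* Hedged sketch of the "ahead" computation used near the top of
   loop_prefetch_arrays: a prefetch must be issued enough iterations in
   advance that the memory latency is hidden behind the loop body.  With a
   hypothetical PREFETCH_LATENCY of 300 and a body costing time 45,
   ahead = (300 + 45 - 1) / 45 = 7 iterations.  */

static unsigned
example_compute_ahead (unsigned prefetch_latency, unsigned loop_body_time)
{
  /* Round up so the prefetched data arrives no later than it is needed.  */
  return (prefetch_latency + loop_body_time - 1) / loop_body_time;
}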
1877
d73be268 1878/* Issue prefetch instructions for array references in loops. */
b076a3fd 1879
c7f965b6 1880unsigned int
d73be268 1881tree_ssa_prefetch_arrays (void)
b076a3fd 1882{
42fd6772 1883 loop_iterator li;
1884 struct loop *loop;
1885 bool unrolled = false;
c7f965b6 1886 int todo_flags = 0;
1887
1888 if (!HAVE_prefetch
1889 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1890 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1891 of the processor costs and i486 does not have prefetch, but
1892 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1893 || PREFETCH_BLOCK == 0)
c7f965b6 1894 return 0;
b076a3fd 1895
1896 if (dump_file && (dump_flags & TDF_DETAILS))
1897 {
1898 fprintf (dump_file, "Prefetching parameters:\n");
1899 fprintf (dump_file, " simultaneous prefetches: %d\n",
1900 SIMULTANEOUS_PREFETCHES);
1901 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
47eb5b32 1902 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1903 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1904 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
5417e022 1905 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1906 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1907 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
db34470d 1908 MIN_INSN_TO_PREFETCH_RATIO);
b8698a0f 1909 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
db34470d 1910 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1911 fprintf (dump_file, "\n");
1912 }
1913
1914 initialize_original_copy_tables ();
1915
1916 if (!built_in_decls[BUILT_IN_PREFETCH])
1917 {
1918 tree type = build_function_type_list (void_type_node,
1919 const_ptr_type_node, NULL_TREE);
1920 tree decl = add_builtin_function ("__builtin_prefetch", type,
1921 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1922 NULL, NULL_TREE);
1923 DECL_IS_NOVOPS (decl) = true;
1924 built_in_decls[BUILT_IN_PREFETCH] = decl;
1925 }
1926
1927 /* We assume that the size of a cache line is a power of two, so verify this
1928 here. */
1929 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1930
42fd6772 1931 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
b076a3fd 1932 {
1933 if (dump_file && (dump_flags & TDF_DETAILS))
1934 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1935
d73be268 1936 unrolled |= loop_prefetch_arrays (loop);
1937
1938 if (dump_file && (dump_flags & TDF_DETAILS))
1939 fprintf (dump_file, "\n\n");
1940 }
1941
1942 if (unrolled)
1943 {
1944 scev_reset ();
c7f965b6 1945 todo_flags |= TODO_cleanup_cfg;
1946 }
1947
1948 free_original_copy_tables ();
c7f965b6 1949 return todo_flags;
b076a3fd 1950}