/* Array prefetching.
   Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "tm.h"
24#include "tree.h"
b076a3fd 25#include "tm_p.h"
b076a3fd
ZD
26#include "basic-block.h"
27#include "output.h"
cf835838 28#include "tree-pretty-print.h"
b076a3fd
ZD
29#include "tree-flow.h"
30#include "tree-dump.h"
31#include "timevar.h"
32#include "cfgloop.h"
b076a3fd 33#include "tree-pass.h"
b076a3fd
ZD
34#include "insn-config.h"
35#include "recog.h"
36#include "hashtab.h"
37#include "tree-chrec.h"
38#include "tree-scalar-evolution.h"
39#include "toplev.h"
40#include "params.h"
41#include "langhooks.h"
7f9bc51b 42#include "tree-inline.h"
5417e022 43#include "tree-data-ref.h"
2eb79bbb
SB
44
45
46/* FIXME: Needed for optabs, but this should all be moved to a TBD interface
47 between the GIMPLE and RTL worlds. */
48#include "expr.h"
79f5e442 49#include "optabs.h"
b076a3fd
ZD
50
/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the single loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do so, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
        {
          a[255] = ...;         (0)
          a[i] = ...;           (1)
          a[i + 64] = ...;      (2)
          a[16*i] = ...;        (3)
          a[187*i] = ...;       (4)
          a[187*i + 50] = ...;  (5)
        }

      (0) obviously has PREFETCH_BEFORE 1
      (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
          location 64 iterations before it, and PREFETCH_MOD 64 (since
          it hits the same cache line otherwise).
      (2) has PREFETCH_MOD 64
      (3) has PREFETCH_MOD 4
      (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
          the cache line accessed by (4) is the same with probability only
          7/32.
      (5) has PREFETCH_MOD 1 as well.

      Additionally, we use data dependence analysis to determine for each
      reference the distance till the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.

   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop.

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We prefetch as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?

   The function is_loop_prefetching_profitable() implements a cost model
   to determine if prefetching is profitable for a given loop.  The cost
   model has two heuristics:
   1. A heuristic that determines whether the given loop has enough CPU
      ops that can be overlapped with cache missing memory ops.  If not,
      the loop won't benefit from prefetching.  This is implemented by
      requiring the ratio between the instruction count and the mem ref
      count to be above a certain minimum.
   2. A heuristic that disables prefetching in a loop with an unknown trip
      count if the prefetching cost is above a certain limit.  The relative
      prefetching cost is estimated by taking the ratio between the
      prefetch count and the total instruction count (this models the
      I-cache cost).
   The limits used in these heuristics are defined as parameters with
   reasonable default values.  Machine-specific default values will be
   added later.

   Some other TODO:
      -- write and use more general reuse analysis (that could be also used
         in other cache aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by user
         (now we just ignore them; at the very least we should avoid
         optimizing loops in which the user put his own prefetches)
      -- we assume cache line size alignment of arrays; this could be
         improved.  */

/* Magic constants follow.  These should be replaced by machine specific
   numbers.  */

/* True if write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have a forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have a backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif
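
/* A worked illustration of how this threshold plays out (the numbers follow
   the a[187*i] / a[187*i + 50] example in the header comment, not any
   particular target): those two references fall into the same 64-byte cache
   line with probability only about 7/32, so the estimated miss rate is
   roughly 1000 - 219 = 781 per thousand.  Since 781 exceeds
   ACCEPTABLE_MISS_RATE, the possible reuse is not trusted and prefetches are
   issued for both references.  */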

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if a single memory location
   is accessed several times in a single iteration of the loop.  */
#define NONTEMPORAL_FRACTION 16
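
/* Illustrative numbers only (assuming, say, a 32K L1 and 512K L2 cache, i.e.
   L1_CACHE_SIZE == 32 and L2_CACHE_SIZE == 512): a reference is classified
   as nontemporal only if its reuse distance is at least 512K, while reuses
   closer than 32K / 16 == 2K are ignored altogether, so a location touched
   several times within one iteration can still be prefetched
   nontemporally.  */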

/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif

/* It is not profitable to prefetch when the trip count is not at
   least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
   For example, in a loop with a prefetch ahead distance of 10,
   supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
   profitable to prefetch when the trip count is greater than or equal to
   40.  In that case, 30 out of the 40 iterations will benefit from
   prefetching.  */

#ifndef TRIP_COUNT_TO_AHEAD_RATIO
#define TRIP_COUNT_TO_AHEAD_RATIO 4
#endif

/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;                    /* Base of the reference.  */
  tree step;                    /* Step of the reference.  */
  struct mem_ref *refs;         /* References in the group.  */
  struct mem_ref_group *next;   /* Next group of references.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL            (~(unsigned HOST_WIDE_INT) 0)

/* Do not generate a prefetch if the unroll factor is significantly less
   than what is required by the prefetch.  This is to avoid redundant
   prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
   2, prefetching requires unrolling the loop 16 times, but
   the loop is actually unrolled twice.  In this case (ratio = 8),
   prefetching is not likely to be beneficial.  */

#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif

/* The memory reference.  */

struct mem_ref
{
  gimple stmt;                  /* Statement in which the reference appears.  */
  tree mem;                     /* The reference.  */
  HOST_WIDE_INT delta;          /* Constant offset of the reference.  */
  struct mem_ref_group *group;  /* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
                                /* Prefetch only each PREFETCH_MOD-th
                                   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
                                /* Prefetch only first PREFETCH_BEFORE
                                   iterations.  */
  unsigned reuse_distance;      /* The amount of data accessed before the first
                                   reuse of this value.  */
  struct mem_ref *next;         /* The next reference in the group.  */
  unsigned write_p : 1;         /* Is it a write?  */
  unsigned independent_p : 1;   /* True if the reference is independent of
                                   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;        /* Should we really issue the prefetch?  */
  unsigned storent_p : 1;       /* True if we changed the store to a
                                   nontemporal one.  */
};
264
75c40d56 265/* Dumps information about reference REF to FILE. */
b076a3fd
ZD
266
267static void
268dump_mem_ref (FILE *file, struct mem_ref *ref)
269{
270 fprintf (file, "Reference %p:\n", (void *) ref);
271
272 fprintf (file, " group %p (base ", (void *) ref->group);
273 print_generic_expr (file, ref->group->base, TDF_SLIM);
274 fprintf (file, ", step ");
81f32326
CB
275 if (cst_and_fits_in_hwi (ref->group->step))
276 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (ref->group->step));
277 else
278 print_generic_expr (file, ref->group->step, TDF_TREE);
b076a3fd
ZD
279 fprintf (file, ")\n");
280
e324a72f 281 fprintf (file, " delta ");
b076a3fd
ZD
282 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
283 fprintf (file, "\n");
284
285 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
286
287 fprintf (file, "\n");
288}
289
290/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
291 exist. */
292
293static struct mem_ref_group *
81f32326 294find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
b076a3fd
ZD
295{
296 struct mem_ref_group *group;
297
298 for (; *groups; groups = &(*groups)->next)
299 {
81f32326 300 if (operand_equal_p ((*groups)->step, step, 0)
b076a3fd
ZD
301 && operand_equal_p ((*groups)->base, base, 0))
302 return *groups;
303
81f32326
CB
304 /* If step is an integer constant, keep the list of groups sorted
305 by decreasing step. */
306 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
307 && int_cst_value ((*groups)->step) < int_cst_value (step))
b076a3fd
ZD
308 break;
309 }
310
5417e022 311 group = XNEW (struct mem_ref_group);
b076a3fd
ZD
312 group->base = base;
313 group->step = step;
314 group->refs = NULL;
315 group->next = *groups;
316 *groups = group;
317
318 return group;
319}
320
321/* Records a memory reference MEM in GROUP with offset DELTA and write status
322 WRITE_P. The reference occurs in statement STMT. */
323
324static void
726a989a 325record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
b076a3fd
ZD
326 HOST_WIDE_INT delta, bool write_p)
327{
328 struct mem_ref **aref;
329
330 /* Do not record the same address twice. */
331 for (aref = &group->refs; *aref; aref = &(*aref)->next)
332 {
333 /* It does not have to be possible for write reference to reuse the read
334 prefetch, or vice versa. */
335 if (!WRITE_CAN_USE_READ_PREFETCH
336 && write_p
337 && !(*aref)->write_p)
338 continue;
339 if (!READ_CAN_USE_WRITE_PREFETCH
340 && !write_p
341 && (*aref)->write_p)
342 continue;
343
344 if ((*aref)->delta == delta)
345 return;
346 }
347
5417e022 348 (*aref) = XNEW (struct mem_ref);
b076a3fd
ZD
349 (*aref)->stmt = stmt;
350 (*aref)->mem = mem;
351 (*aref)->delta = delta;
352 (*aref)->write_p = write_p;
353 (*aref)->prefetch_before = PREFETCH_ALL;
354 (*aref)->prefetch_mod = 1;
5417e022 355 (*aref)->reuse_distance = 0;
b076a3fd
ZD
356 (*aref)->issue_prefetch_p = false;
357 (*aref)->group = group;
358 (*aref)->next = NULL;
79f5e442
ZD
359 (*aref)->independent_p = false;
360 (*aref)->storent_p = false;
b076a3fd
ZD
361
362 if (dump_file && (dump_flags & TDF_DETAILS))
363 dump_mem_ref (dump_file, *aref);
364}
365
366/* Release memory references in GROUPS. */
367
368static void
369release_mem_refs (struct mem_ref_group *groups)
370{
371 struct mem_ref_group *next_g;
372 struct mem_ref *ref, *next_r;
373
374 for (; groups; groups = next_g)
375 {
376 next_g = groups->next;
377 for (ref = groups->refs; ref; ref = next_r)
378 {
379 next_r = ref->next;
380 free (ref);
381 }
382 free (groups);
383 }
384}
385
386/* A structure used to pass arguments to idx_analyze_ref. */
387
388struct ar_data
389{
390 struct loop *loop; /* Loop of the reference. */
726a989a 391 gimple stmt; /* Statement of the reference. */
81f32326 392 tree *step; /* Step of the memory reference. */
b076a3fd
ZD
393 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
394};
395
396/* Analyzes a single INDEX of a memory reference to obtain information
397 described at analyze_ref. Callback for for_each_index. */
398
399static bool
400idx_analyze_ref (tree base, tree *index, void *data)
401{
c22940cd 402 struct ar_data *ar_data = (struct ar_data *) data;
b076a3fd 403 tree ibase, step, stepsize;
81f32326 404 HOST_WIDE_INT idelta = 0, imult = 1;
b076a3fd
ZD
405 affine_iv iv;
406
407 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
408 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
409 return false;
410
f017bf5e 411 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
81f32326 412 *index, &iv, true))
b076a3fd
ZD
413 return false;
414 ibase = iv.base;
415 step = iv.step;
416
5be014d5 417 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
b076a3fd
ZD
418 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
419 {
420 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
421 ibase = TREE_OPERAND (ibase, 0);
422 }
423 if (cst_and_fits_in_hwi (ibase))
424 {
425 idelta += int_cst_value (ibase);
ff5e9a94 426 ibase = build_int_cst (TREE_TYPE (ibase), 0);
b076a3fd
ZD
427 }
428
429 if (TREE_CODE (base) == ARRAY_REF)
430 {
431 stepsize = array_ref_element_size (base);
432 if (!cst_and_fits_in_hwi (stepsize))
433 return false;
434 imult = int_cst_value (stepsize);
8fde8b40
CB
435 step = fold_build2 (MULT_EXPR, sizetype,
436 fold_convert (sizetype, step),
437 fold_convert (sizetype, stepsize));
b076a3fd
ZD
438 idelta *= imult;
439 }
440
8fde8b40
CB
441 if (*ar_data->step == NULL_TREE)
442 *ar_data->step = step;
443 else
444 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
445 fold_convert (sizetype, *ar_data->step),
446 fold_convert (sizetype, step));
b076a3fd
ZD
447 *ar_data->delta += idelta;
448 *index = ibase;
449
450 return true;
451}
452
aac8b8ed 453/* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
b076a3fd 454 STEP are integer constants and iter is number of iterations of LOOP. The
aac8b8ed
RS
455 reference occurs in statement STMT. Strips nonaddressable component
456 references from REF_P. */
b076a3fd
ZD
457
458static bool
aac8b8ed 459analyze_ref (struct loop *loop, tree *ref_p, tree *base,
81f32326 460 tree *step, HOST_WIDE_INT *delta,
726a989a 461 gimple stmt)
b076a3fd
ZD
462{
463 struct ar_data ar_data;
464 tree off;
465 HOST_WIDE_INT bit_offset;
aac8b8ed 466 tree ref = *ref_p;
b076a3fd 467
81f32326 468 *step = NULL_TREE;
b076a3fd
ZD
469 *delta = 0;
470
471 /* First strip off the component references. Ignore bitfields. */
472 if (TREE_CODE (ref) == COMPONENT_REF
473 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
474 ref = TREE_OPERAND (ref, 0);
475
aac8b8ed
RS
476 *ref_p = ref;
477
b076a3fd
ZD
478 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
479 {
480 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
481 bit_offset = TREE_INT_CST_LOW (off);
482 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
b8698a0f 483
b076a3fd
ZD
484 *delta += bit_offset / BITS_PER_UNIT;
485 }
486
487 *base = unshare_expr (ref);
488 ar_data.loop = loop;
489 ar_data.stmt = stmt;
490 ar_data.step = step;
491 ar_data.delta = delta;
492 return for_each_index (base, idx_analyze_ref, &ar_data);
493}
494
/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is a write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */
b076a3fd 498
79f5e442 499static bool
b076a3fd 500gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
726a989a 501 tree ref, bool write_p, gimple stmt)
b076a3fd 502{
81f32326
CB
503 tree base, step;
504 HOST_WIDE_INT delta;
b076a3fd
ZD
505 struct mem_ref_group *agrp;
506
a80a2701
JJ
507 if (get_base_address (ref) == NULL)
508 return false;
509
aac8b8ed 510 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
79f5e442 511 return false;
81f32326
CB
512 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
513 if (step == NULL_TREE)
514 return false;
b076a3fd 515
50814135
CF
516 /* Limit non-constant step prefetching only to the innermost loops. */
517 if (!cst_and_fits_in_hwi (step) && loop->inner != NULL)
518 return false;
519
b076a3fd
ZD
520 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
521 are integer constants. */
522 agrp = find_or_create_group (refs, base, step);
523 record_ref (agrp, stmt, ref, delta, write_p);
79f5e442
ZD
524
525 return true;
b076a3fd
ZD
526}
527
79f5e442
ZD
528/* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
529 true if there are no other memory references inside the loop. */
b076a3fd
ZD
530
531static struct mem_ref_group *
db34470d 532gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
b076a3fd
ZD
533{
534 basic_block *body = get_loop_body_in_dom_order (loop);
535 basic_block bb;
536 unsigned i;
726a989a
RB
537 gimple_stmt_iterator bsi;
538 gimple stmt;
539 tree lhs, rhs;
b076a3fd
ZD
540 struct mem_ref_group *refs = NULL;
541
79f5e442 542 *no_other_refs = true;
db34470d 543 *ref_count = 0;
79f5e442 544
b076a3fd
ZD
545 /* Scan the loop body in order, so that the former references precede the
546 later ones. */
547 for (i = 0; i < loop->num_nodes; i++)
548 {
549 bb = body[i];
550 if (bb->loop_father != loop)
551 continue;
552
726a989a 553 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
b076a3fd 554 {
726a989a 555 stmt = gsi_stmt (bsi);
79f5e442 556
726a989a 557 if (gimple_code (stmt) != GIMPLE_ASSIGN)
79f5e442 558 {
5006671f 559 if (gimple_vuse (stmt)
726a989a
RB
560 || (is_gimple_call (stmt)
561 && !(gimple_call_flags (stmt) & ECF_CONST)))
79f5e442
ZD
562 *no_other_refs = false;
563 continue;
564 }
b076a3fd 565
726a989a
RB
566 lhs = gimple_assign_lhs (stmt);
567 rhs = gimple_assign_rhs1 (stmt);
b076a3fd
ZD
568
569 if (REFERENCE_CLASS_P (rhs))
db34470d 570 {
79f5e442
ZD
571 *no_other_refs &= gather_memory_references_ref (loop, &refs,
572 rhs, false, stmt);
db34470d
GS
573 *ref_count += 1;
574 }
b076a3fd 575 if (REFERENCE_CLASS_P (lhs))
db34470d 576 {
79f5e442
ZD
577 *no_other_refs &= gather_memory_references_ref (loop, &refs,
578 lhs, true, stmt);
db34470d
GS
579 *ref_count += 1;
580 }
b076a3fd
ZD
581 }
582 }
583 free (body);
584
585 return refs;
586}
587
588/* Prune the prefetch candidate REF using the self-reuse. */
589
590static void
591prune_ref_by_self_reuse (struct mem_ref *ref)
592{
81f32326
CB
593 HOST_WIDE_INT step;
594 bool backward;
595
596 /* If the step size is non constant, we cannot calculate prefetch_mod. */
597 if (!cst_and_fits_in_hwi (ref->group->step))
598 return;
599
600 step = int_cst_value (ref->group->step);
601
602 backward = step < 0;
b076a3fd
ZD
603
604 if (step == 0)
605 {
606 /* Prefetch references to invariant address just once. */
607 ref->prefetch_before = 1;
608 return;
609 }
610
611 if (backward)
612 step = -step;
613
614 if (step > PREFETCH_BLOCK)
615 return;
616
617 if ((backward && HAVE_BACKWARD_PREFETCH)
618 || (!backward && HAVE_FORWARD_PREFETCH))
619 {
620 ref->prefetch_before = 1;
621 return;
622 }
623
624 ref->prefetch_mod = PREFETCH_BLOCK / step;
625}
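
/* For instance (a sketch matching example (3) in the header comment, and
   assuming a 64-byte PREFETCH_BLOCK with no hardware prefetch): a[16*i] over
   1-byte elements has step 16, so prefetch_mod becomes 64 / 16 == 4 -- one
   prefetch covers four consecutive iterations.  */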
626
627/* Divides X by BY, rounding down. */
628
629static HOST_WIDE_INT
630ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
631{
632 gcc_assert (by > 0);
633
634 if (x >= 0)
635 return x / by;
636 else
637 return (x + by - 1) / by;
638}
639
b8698a0f
L
640/* Given a CACHE_LINE_SIZE and two inductive memory references
641 with a common STEP greater than CACHE_LINE_SIZE and an address
642 difference DELTA, compute the probability that they will fall
643 in different cache lines. DISTINCT_ITERS is the number of
644 distinct iterations after which the pattern repeats itself.
2c6dd136
GS
645 ALIGN_UNIT is the unit of alignment in bytes. */
646
647static int
b8698a0f 648compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
2c6dd136
GS
649 HOST_WIDE_INT step, HOST_WIDE_INT delta,
650 unsigned HOST_WIDE_INT distinct_iters,
651 int align_unit)
652{
653 unsigned align, iter;
654 int total_positions, miss_positions, miss_rate;
655 int address1, address2, cache_line1, cache_line2;
656
657 total_positions = 0;
658 miss_positions = 0;
b8698a0f 659
2c6dd136
GS
660 /* Iterate through all possible alignments of the first
661 memory reference within its cache line. */
662 for (align = 0; align < cache_line_size; align += align_unit)
663
664 /* Iterate through all distinct iterations. */
665 for (iter = 0; iter < distinct_iters; iter++)
666 {
667 address1 = align + step * iter;
668 address2 = address1 + delta;
669 cache_line1 = address1 / cache_line_size;
670 cache_line2 = address2 / cache_line_size;
671 total_positions += 1;
672 if (cache_line1 != cache_line2)
673 miss_positions += 1;
674 }
675 miss_rate = 1000 * miss_positions / total_positions;
676 return miss_rate;
677}
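
/* A small worked example of the computation above (illustrative numbers, not
   tied to any particular target): with cache_line_size == 64, step == 128,
   delta == 32 and align_unit == 4, the step is a multiple of the line size,
   so the iteration number does not change the position within the line; the
   two addresses land in different cache lines exactly when the alignment of
   the first one within its line is 32 or more, i.e. for half of the sampled
   alignments, giving a miss rate of 500 per thousand.  */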
678
b076a3fd
ZD
679/* Prune the prefetch candidate REF using the reuse with BY.
680 If BY_IS_BEFORE is true, BY is before REF in the loop. */
681
682static void
683prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
684 bool by_is_before)
685{
81f32326
CB
686 HOST_WIDE_INT step;
687 bool backward;
b076a3fd
ZD
688 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
689 HOST_WIDE_INT delta = delta_b - delta_r;
690 HOST_WIDE_INT hit_from;
691 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
2c6dd136
GS
692 int miss_rate;
693 HOST_WIDE_INT reduced_step;
694 unsigned HOST_WIDE_INT reduced_prefetch_block;
695 tree ref_type;
696 int align_unit;
b076a3fd 697
81f32326
CB
698 /* If the step is non constant we cannot calculate prefetch_before. */
699 if (!cst_and_fits_in_hwi (ref->group->step)) {
700 return;
701 }
702
703 step = int_cst_value (ref->group->step);
704
705 backward = step < 0;
706
707
b076a3fd
ZD
708 if (delta == 0)
709 {
      /* If the references have the same address, only prefetch the
         former.  */
712 if (by_is_before)
713 ref->prefetch_before = 0;
b8698a0f 714
b076a3fd
ZD
715 return;
716 }
717
718 if (!step)
719 {
720 /* If the reference addresses are invariant and fall into the
721 same cache line, prefetch just the first one. */
722 if (!by_is_before)
723 return;
724
725 if (ddown (ref->delta, PREFETCH_BLOCK)
726 != ddown (by->delta, PREFETCH_BLOCK))
727 return;
728
729 ref->prefetch_before = 0;
730 return;
731 }
732
733 /* Only prune the reference that is behind in the array. */
734 if (backward)
735 {
736 if (delta > 0)
737 return;
738
739 /* Transform the data so that we may assume that the accesses
740 are forward. */
741 delta = - delta;
742 step = -step;
743 delta_r = PREFETCH_BLOCK - 1 - delta_r;
744 delta_b = PREFETCH_BLOCK - 1 - delta_b;
745 }
746 else
747 {
748 if (delta < 0)
749 return;
750 }
751
  /* Check whether the two references are likely to hit the same cache
     line, and how far apart the iterations in which that occurs are from
     each other.  */
755
756 if (step <= PREFETCH_BLOCK)
757 {
758 /* The accesses are sure to meet. Let us check when. */
759 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
760 prefetch_before = (hit_from - delta_r + step - 1) / step;
761
57762e97 762 /* Do not reduce prefetch_before if we meet beyond cache size. */
e972cc7e 763 if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
57762e97 764 prefetch_before = PREFETCH_ALL;
b076a3fd
ZD
765 if (prefetch_before < ref->prefetch_before)
766 ref->prefetch_before = prefetch_before;
767
768 return;
769 }
770
b8698a0f 771 /* A more complicated case with step > prefetch_block. First reduce
2c6dd136 772 the ratio between the step and the cache line size to its simplest
b8698a0f
L
773 terms. The resulting denominator will then represent the number of
774 distinct iterations after which each address will go back to its
775 initial location within the cache line. This computation assumes
2c6dd136 776 that PREFETCH_BLOCK is a power of two. */
b076a3fd 777 prefetch_block = PREFETCH_BLOCK;
2c6dd136
GS
778 reduced_prefetch_block = prefetch_block;
779 reduced_step = step;
780 while ((reduced_step & 1) == 0
781 && reduced_prefetch_block > 1)
b076a3fd 782 {
2c6dd136
GS
783 reduced_step >>= 1;
784 reduced_prefetch_block >>= 1;
b076a3fd
ZD
785 }
786
b076a3fd
ZD
787 prefetch_before = delta / step;
788 delta %= step;
2c6dd136
GS
789 ref_type = TREE_TYPE (ref->mem);
790 align_unit = TYPE_ALIGN (ref_type) / 8;
b8698a0f 791 miss_rate = compute_miss_rate(prefetch_block, step, delta,
2c6dd136
GS
792 reduced_prefetch_block, align_unit);
793 if (miss_rate <= ACCEPTABLE_MISS_RATE)
b076a3fd 794 {
57762e97
CB
795 /* Do not reduce prefetch_before if we meet beyond cache size. */
796 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
797 prefetch_before = PREFETCH_ALL;
b076a3fd
ZD
798 if (prefetch_before < ref->prefetch_before)
799 ref->prefetch_before = prefetch_before;
800
801 return;
802 }
803
804 /* Try also the following iteration. */
805 prefetch_before++;
806 delta = step - delta;
b8698a0f 807 miss_rate = compute_miss_rate(prefetch_block, step, delta,
2c6dd136 808 reduced_prefetch_block, align_unit);
b8698a0f 809 if (miss_rate <= ACCEPTABLE_MISS_RATE)
b076a3fd
ZD
810 {
811 if (prefetch_before < ref->prefetch_before)
812 ref->prefetch_before = prefetch_before;
813
814 return;
815 }
816
817 /* The ref probably does not reuse by. */
818 return;
819}
820
821/* Prune the prefetch candidate REF using the reuses with other references
822 in REFS. */
823
824static void
825prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
826{
827 struct mem_ref *prune_by;
828 bool before = true;
829
830 prune_ref_by_self_reuse (ref);
831
832 for (prune_by = refs; prune_by; prune_by = prune_by->next)
833 {
834 if (prune_by == ref)
835 {
836 before = false;
837 continue;
838 }
839
840 if (!WRITE_CAN_USE_READ_PREFETCH
841 && ref->write_p
842 && !prune_by->write_p)
843 continue;
844 if (!READ_CAN_USE_WRITE_PREFETCH
845 && !ref->write_p
846 && prune_by->write_p)
847 continue;
848
849 prune_ref_by_group_reuse (ref, prune_by, before);
850 }
851}
852
853/* Prune the prefetch candidates in GROUP using the reuse analysis. */
854
855static void
856prune_group_by_reuse (struct mem_ref_group *group)
857{
858 struct mem_ref *ref_pruned;
859
860 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
861 {
862 prune_ref_by_reuse (ref_pruned, group->refs);
863
864 if (dump_file && (dump_flags & TDF_DETAILS))
865 {
866 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
867
868 if (ref_pruned->prefetch_before == PREFETCH_ALL
869 && ref_pruned->prefetch_mod == 1)
870 fprintf (dump_file, " no restrictions");
871 else if (ref_pruned->prefetch_before == 0)
872 fprintf (dump_file, " do not prefetch");
873 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
874 fprintf (dump_file, " prefetch once");
875 else
876 {
877 if (ref_pruned->prefetch_before != PREFETCH_ALL)
878 {
879 fprintf (dump_file, " prefetch before ");
880 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
881 ref_pruned->prefetch_before);
882 }
883 if (ref_pruned->prefetch_mod != 1)
884 {
885 fprintf (dump_file, " prefetch mod ");
886 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
887 ref_pruned->prefetch_mod);
888 }
889 }
890 fprintf (dump_file, "\n");
891 }
892 }
893}
894
895/* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
896
897static void
898prune_by_reuse (struct mem_ref_group *groups)
899{
900 for (; groups; groups = groups->next)
901 prune_group_by_reuse (groups);
902}
903
904/* Returns true if we should issue prefetch for REF. */
905
906static bool
907should_issue_prefetch_p (struct mem_ref *ref)
908{
  /* For now do not issue prefetches for references that would be prefetched
     in only the first few iterations.  */
911 if (ref->prefetch_before != PREFETCH_ALL)
a8beb3a7
CB
912 {
913 if (dump_file && (dump_flags & TDF_DETAILS))
914 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
915 (void *) ref);
916 return false;
917 }
b076a3fd 918
79f5e442
ZD
919 /* Do not prefetch nontemporal stores. */
920 if (ref->storent_p)
a8beb3a7
CB
921 {
922 if (dump_file && (dump_flags & TDF_DETAILS))
923 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
924 return false;
925 }
79f5e442 926
b076a3fd
ZD
927 return true;
928}
929
930/* Decide which of the prefetch candidates in GROUPS to prefetch.
931 AHEAD is the number of iterations to prefetch ahead (which corresponds
932 to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */
935
936static bool
937schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
938 unsigned ahead)
939{
911b3fdb
ZD
940 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
941 unsigned slots_per_prefetch;
b076a3fd
ZD
942 struct mem_ref *ref;
943 bool any = false;
944
911b3fdb
ZD
945 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
946 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
b076a3fd 947
911b3fdb
ZD
948 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
949 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
950 it will need a prefetch slot. */
951 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
b076a3fd 952 if (dump_file && (dump_flags & TDF_DETAILS))
911b3fdb
ZD
953 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
954 slots_per_prefetch);
b076a3fd
ZD
955
956 /* For now we just take memory references one by one and issue
957 prefetches for as many as possible. The groups are sorted
958 starting with the largest step, since the references with
c0220ea4 959 large step are more likely to cause many cache misses. */
b076a3fd
ZD
960
961 for (; groups; groups = groups->next)
962 for (ref = groups->refs; ref; ref = ref->next)
963 {
964 if (!should_issue_prefetch_p (ref))
965 continue;
966
8532678c
CF
      /* The loop is far from being sufficiently unrolled for this
         prefetch.  Do not generate prefetch to avoid many redundant
         prefetches.  */
970 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
971 continue;
972
911b3fdb
ZD
973 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
974 and we unroll the loop UNROLL_FACTOR times, we need to insert
975 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
976 iteration. */
b076a3fd
ZD
977 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
978 / ref->prefetch_mod);
911b3fdb
ZD
979 prefetch_slots = n_prefetches * slots_per_prefetch;
980
981 /* If more than half of the prefetches would be lost anyway, do not
982 issue the prefetch. */
983 if (2 * remaining_prefetch_slots < prefetch_slots)
984 continue;
985
986 ref->issue_prefetch_p = true;
b076a3fd 987
911b3fdb
ZD
988 if (remaining_prefetch_slots <= prefetch_slots)
989 return true;
990 remaining_prefetch_slots -= prefetch_slots;
b076a3fd
ZD
991 any = true;
992 }
993
994 return any;
995}
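
/* A sketch of how the slot accounting above plays out (illustrative numbers
   only): with AHEAD == 8 and UNROLL_FACTOR == 4, each prefetch instruction
   occupies (8 + 2) / 4 == 2 slots.  A reference with prefetch_mod == 2 then
   needs ceil (4 / 2) == 2 prefetch instructions per unrolled iteration, i.e.
   4 slots, and is scheduled only while at least 2 of the
   SIMULTANEOUS_PREFETCHES slots remain free.  */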
996
d5058523
CF
997/* Return TRUE if no prefetch is going to be generated in the given
998 GROUPS. */
999
1000static bool
1001nothing_to_prefetch_p (struct mem_ref_group *groups)
1002{
1003 struct mem_ref *ref;
1004
1005 for (; groups; groups = groups->next)
1006 for (ref = groups->refs; ref; ref = ref->next)
1007 if (should_issue_prefetch_p (ref))
1008 return false;
1009
1010 return true;
1011}
1012
1013/* Estimate the number of prefetches in the given GROUPS.
1014 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
b076a3fd 1015
db34470d 1016static int
d5058523 1017estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
b076a3fd
ZD
1018{
1019 struct mem_ref *ref;
d5058523 1020 unsigned n_prefetches;
db34470d 1021 int prefetch_count = 0;
b076a3fd
ZD
1022
1023 for (; groups; groups = groups->next)
1024 for (ref = groups->refs; ref; ref = ref->next)
1025 if (should_issue_prefetch_p (ref))
d5058523
CF
1026 {
1027 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1028 / ref->prefetch_mod);
1029 prefetch_count += n_prefetches;
1030 }
b076a3fd 1031
db34470d 1032 return prefetch_count;
b076a3fd
ZD
1033}
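
/* For example (an illustrative count, not target data): with an unroll factor
   of 8, a reference with prefetch_mod == 4 contributes ceil (8 / 4) == 2
   prefetches and a reference with prefetch_mod == 1 contributes 8, for an
   estimated total of 10.  */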
1034
/* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */
b076a3fd
ZD
1038
1039static void
1040issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1041{
1042 HOST_WIDE_INT delta;
81f32326 1043 tree addr, addr_base, write_p, local, forward;
726a989a
RB
1044 gimple prefetch;
1045 gimple_stmt_iterator bsi;
b076a3fd 1046 unsigned n_prefetches, ap;
5417e022 1047 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
b076a3fd
ZD
1048
1049 if (dump_file && (dump_flags & TDF_DETAILS))
5417e022
ZD
1050 fprintf (dump_file, "Issued%s prefetch for %p.\n",
1051 nontemporal ? " nontemporal" : "",
1052 (void *) ref);
b076a3fd 1053
726a989a 1054 bsi = gsi_for_stmt (ref->stmt);
b076a3fd
ZD
1055
1056 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1057 / ref->prefetch_mod);
1058 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
726a989a
RB
1059 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1060 true, NULL, true, GSI_SAME_STMT);
911b3fdb 1061 write_p = ref->write_p ? integer_one_node : integer_zero_node;
5417e022 1062 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
b076a3fd
ZD
1063
1064 for (ap = 0; ap < n_prefetches; ap++)
1065 {
81f32326
CB
1066 if (cst_and_fits_in_hwi (ref->group->step))
1067 {
1068 /* Determine the address to prefetch. */
1069 delta = (ahead + ap * ref->prefetch_mod) *
1070 int_cst_value (ref->group->step);
1071 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
1072 addr_base, size_int (delta));
1073 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1074 true, GSI_SAME_STMT);
1075 }
1076 else
1077 {
          /* The step size is non-constant but loop-invariant.  We use the
             heuristic of simply prefetching AHEAD iterations ahead.  */
1080 forward = fold_build2 (MULT_EXPR, sizetype,
1081 fold_convert (sizetype, ref->group->step),
1082 fold_convert (sizetype, size_int (ahead)));
1083 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr_base,
1084 forward);
1085 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1086 NULL, true, GSI_SAME_STMT);
1087 }
b076a3fd 1088 /* Create the prefetch instruction. */
726a989a
RB
1089 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
1090 3, addr, write_p, local);
1091 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
b076a3fd
ZD
1092 }
1093}
1094
/* Issue prefetches for the references in GROUPS into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
   factor by which LOOP was unrolled.  */
1098
1099static void
1100issue_prefetches (struct mem_ref_group *groups,
1101 unsigned unroll_factor, unsigned ahead)
1102{
1103 struct mem_ref *ref;
1104
1105 for (; groups; groups = groups->next)
1106 for (ref = groups->refs; ref; ref = ref->next)
1107 if (ref->issue_prefetch_p)
1108 issue_prefetch_ref (ref, unroll_factor, ahead);
1109}
1110
79f5e442
ZD
/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */
1113
1114static bool
1115nontemporal_store_p (struct mem_ref *ref)
1116{
1117 enum machine_mode mode;
1118 enum insn_code code;
1119
  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
1123 if (!ref->write_p
1124 || !ref->independent_p
1125 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1126 return false;
1127
1128 /* Check that we have the storent instruction for the mode. */
1129 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1130 if (mode == BLKmode)
1131 return false;
1132
166cdb08 1133 code = optab_handler (storent_optab, mode)->insn_code;
79f5e442
ZD
1134 return code != CODE_FOR_nothing;
1135}
1136
1137/* If REF is a nontemporal store, we mark the corresponding modify statement
1138 and return true. Otherwise, we return false. */
1139
1140static bool
1141mark_nontemporal_store (struct mem_ref *ref)
1142{
1143 if (!nontemporal_store_p (ref))
1144 return false;
1145
1146 if (dump_file && (dump_flags & TDF_DETAILS))
1147 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1148 (void *) ref);
1149
726a989a 1150 gimple_assign_set_nontemporal_move (ref->stmt, true);
79f5e442
ZD
1151 ref->storent_p = true;
1152
1153 return true;
1154}
1155
1156/* Issue a memory fence instruction after LOOP. */
1157
1158static void
1159emit_mfence_after_loop (struct loop *loop)
1160{
1161 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1162 edge exit;
726a989a
RB
1163 gimple call;
1164 gimple_stmt_iterator bsi;
79f5e442
ZD
1165 unsigned i;
1166
1167 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1168 {
726a989a 1169 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
79f5e442
ZD
1170
1171 if (!single_pred_p (exit->dest)
1172 /* If possible, we prefer not to insert the fence on other paths
1173 in cfg. */
1174 && !(exit->flags & EDGE_ABNORMAL))
1175 split_loop_exit_edge (exit);
726a989a 1176 bsi = gsi_after_labels (exit->dest);
79f5e442 1177
726a989a 1178 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
79f5e442
ZD
1179 mark_virtual_ops_for_renaming (call);
1180 }
1181
1182 VEC_free (edge, heap, exits);
1183 update_ssa (TODO_update_ssa_only_virtuals);
1184}
1185
1186/* Returns true if we can use storent in loop, false otherwise. */
1187
1188static bool
1189may_use_storent_in_loop_p (struct loop *loop)
1190{
1191 bool ret = true;
1192
1193 if (loop->inner != NULL)
1194 return false;
1195
1196 /* If we must issue a mfence insn after using storent, check that there
1197 is a suitable place for it at each of the loop exits. */
1198 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1199 {
1200 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1201 unsigned i;
1202 edge exit;
1203
1204 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1205 if ((exit->flags & EDGE_ABNORMAL)
1206 && exit->dest == EXIT_BLOCK_PTR)
1207 ret = false;
1208
1209 VEC_free (edge, heap, exits);
1210 }
1211
1212 return ret;
1213}
1214
1215/* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1216 references in the loop. */
1217
1218static void
1219mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1220{
1221 struct mem_ref *ref;
1222 bool any = false;
1223
1224 if (!may_use_storent_in_loop_p (loop))
1225 return;
1226
1227 for (; groups; groups = groups->next)
1228 for (ref = groups->refs; ref; ref = ref->next)
1229 any |= mark_nontemporal_store (ref);
1230
1231 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1232 emit_mfence_after_loop (loop);
1233}
1234
b076a3fd
ZD
/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fills in DESC with the description of the number of
   iterations.  */
1238
1239static bool
1240should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1241 unsigned factor)
1242{
1243 if (!can_unroll_loop_p (loop, factor, desc))
1244 return false;
1245
1246 /* We only consider loops without control flow for unrolling. This is not
1247 a hard restriction -- tree_unroll_loop works with arbitrary loops
1248 as well; but the unrolling/prefetching is usually more profitable for
1249 loops consisting of a single basic block, and we want to limit the
1250 code growth. */
1251 if (loop->num_nodes > 2)
1252 return false;
1253
1254 return true;
1255}
1256
/* Determine the factor by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  The description of the
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */
b076a3fd
ZD
1262
1263static unsigned
1264determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
2711355f
ZD
1265 unsigned ninsns, struct tree_niter_desc *desc,
1266 HOST_WIDE_INT est_niter)
b076a3fd 1267{
911b3fdb
ZD
1268 unsigned upper_bound;
1269 unsigned nfactor, factor, mod_constraint;
b076a3fd
ZD
1270 struct mem_ref_group *agp;
1271 struct mem_ref *ref;
1272
911b3fdb
ZD
1273 /* First check whether the loop is not too large to unroll. We ignore
1274 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1275 from unrolling them enough to make exactly one cache line covered by each
1276 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1277 us from unrolling the loops too many times in cases where we only expect
1278 gains from better scheduling and decreasing loop overhead, which is not
1279 the case here. */
1280 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
2711355f
ZD
1281
1282 /* If we unrolled the loop more times than it iterates, the unrolled version
1283 of the loop would be never entered. */
1284 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1285 upper_bound = est_niter;
1286
911b3fdb 1287 if (upper_bound <= 1)
b076a3fd
ZD
1288 return 1;
1289
911b3fdb
ZD
  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
1292 factor = 1;
b076a3fd
ZD
1293 for (agp = refs; agp; agp = agp->next)
1294 for (ref = agp->refs; ref; ref = ref->next)
911b3fdb
ZD
1295 if (should_issue_prefetch_p (ref))
1296 {
1297 mod_constraint = ref->prefetch_mod;
1298 nfactor = least_common_multiple (mod_constraint, factor);
1299 if (nfactor <= upper_bound)
1300 factor = nfactor;
1301 }
b076a3fd
ZD
1302
1303 if (!should_unroll_loop_p (loop, desc, factor))
1304 return 1;
1305
1306 return factor;
1307}
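
/* A sketch of the factor selection above (illustrative only): if the
   candidate references have prefetch_mod 4 and 6, the factor grows to
   lcm (4, 1) == 4 and then to lcm (6, 4) == 12, provided 12 does not exceed
   UPPER_BOUND; otherwise the last feasible factor (here 4) is kept.  */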
1308
5417e022
ZD
1309/* Returns the total volume of the memory references REFS, taking into account
1310 reuses in the innermost loop and cache line size. TODO -- we should also
1311 take into account reuses across the iterations of the loops in the loop
1312 nest. */
1313
1314static unsigned
1315volume_of_references (struct mem_ref_group *refs)
1316{
1317 unsigned volume = 0;
1318 struct mem_ref_group *gr;
1319 struct mem_ref *ref;
1320
1321 for (gr = refs; gr; gr = gr->next)
1322 for (ref = gr->refs; ref; ref = ref->next)
1323 {
1324 /* Almost always reuses another value? */
1325 if (ref->prefetch_before != PREFETCH_ALL)
1326 continue;
1327
1328 /* If several iterations access the same cache line, use the size of
1329 the line divided by this number. Otherwise, a cache line is
1330 accessed in each iteration. TODO -- in the latter case, we should
1331 take the size of the reference into account, rounding it up on cache
1332 line size multiple. */
1333 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1334 }
1335 return volume;
1336}
1337
1338/* Returns the volume of memory references accessed across VEC iterations of
1339 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1340 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1341
1342static unsigned
1343volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1344{
1345 unsigned i;
1346
1347 for (i = 0; i < n; i++)
1348 if (vec[i] != 0)
1349 break;
1350
1351 if (i == n)
1352 return 0;
1353
1354 gcc_assert (vec[i] > 0);
1355
1356 /* We ignore the parts of the distance vector in subloops, since usually
1357 the numbers of iterations are much smaller. */
1358 return loop_sizes[i] * vec[i];
1359}
1360
1361/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDE
1362 at the position corresponding to the loop of the step. N is the depth
1363 of the considered loop nest, and, LOOP is its innermost loop. */
1364
1365static void
1366add_subscript_strides (tree access_fn, unsigned stride,
1367 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1368{
1369 struct loop *aloop;
1370 tree step;
1371 HOST_WIDE_INT astep;
1372 unsigned min_depth = loop_depth (loop) - n;
1373
1374 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1375 {
1376 aloop = get_chrec_loop (access_fn);
1377 step = CHREC_RIGHT (access_fn);
1378 access_fn = CHREC_LEFT (access_fn);
1379
1380 if ((unsigned) loop_depth (aloop) <= min_depth)
1381 continue;
1382
1383 if (host_integerp (step, 0))
1384 astep = tree_low_cst (step, 0);
1385 else
1386 astep = L1_CACHE_LINE_SIZE;
1387
1388 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1389
1390 }
1391}
1392
1393/* Returns the volume of memory references accessed between two consecutive
1394 self-reuses of the reference DR. We consider the subscripts of DR in N
1395 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1396 loops. LOOP is the innermost loop of the current loop nest. */
1397
1398static unsigned
1399self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1400 struct loop *loop)
1401{
1402 tree stride, access_fn;
1403 HOST_WIDE_INT *strides, astride;
1404 VEC (tree, heap) *access_fns;
1405 tree ref = DR_REF (dr);
1406 unsigned i, ret = ~0u;
1407
1408 /* In the following example:
1409
1410 for (i = 0; i < N; i++)
1411 for (j = 0; j < N; j++)
1412 use (a[j][i]);
1413 the same cache line is accessed each N steps (except if the change from
1414 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1415 we cannot rely purely on the results of the data dependence analysis.
1416
   Instead, we compute the stride of the reference in each loop, and consider
   the innermost loop in which the stride is less than cache size.  */
1419
1420 strides = XCNEWVEC (HOST_WIDE_INT, n);
1421 access_fns = DR_ACCESS_FNS (dr);
1422
1423 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
1424 {
1425 /* Keep track of the reference corresponding to the subscript, so that we
1426 know its stride. */
1427 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1428 ref = TREE_OPERAND (ref, 0);
b8698a0f 1429
5417e022
ZD
1430 if (TREE_CODE (ref) == ARRAY_REF)
1431 {
1432 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1433 if (host_integerp (stride, 1))
1434 astride = tree_low_cst (stride, 1);
1435 else
1436 astride = L1_CACHE_LINE_SIZE;
1437
1438 ref = TREE_OPERAND (ref, 0);
1439 }
1440 else
1441 astride = 1;
1442
1443 add_subscript_strides (access_fn, astride, strides, n, loop);
1444 }
1445
1446 for (i = n; i-- > 0; )
1447 {
1448 unsigned HOST_WIDE_INT s;
1449
1450 s = strides[i] < 0 ? -strides[i] : strides[i];
1451
1452 if (s < (unsigned) L1_CACHE_LINE_SIZE
1453 && (loop_sizes[i]
1454 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1455 {
1456 ret = loop_sizes[i];
1457 break;
1458 }
1459 }
1460
1461 free (strides);
1462 return ret;
1463}
1464
1465/* Determines the distance till the first reuse of each reference in REFS
79f5e442
ZD
1466 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1467 memory references in the loop. */
5417e022
ZD
1468
1469static void
79f5e442
ZD
1470determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1471 bool no_other_refs)
5417e022
ZD
1472{
1473 struct loop *nest, *aloop;
1474 VEC (data_reference_p, heap) *datarefs = NULL;
1475 VEC (ddr_p, heap) *dependences = NULL;
1476 struct mem_ref_group *gr;
79f5e442 1477 struct mem_ref *ref, *refb;
5417e022
ZD
1478 VEC (loop_p, heap) *vloops = NULL;
1479 unsigned *loop_data_size;
1480 unsigned i, j, n;
1481 unsigned volume, dist, adist;
1482 HOST_WIDE_INT vol;
1483 data_reference_p dr;
1484 ddr_p dep;
1485
1486 if (loop->inner)
1487 return;
1488
1489 /* Find the outermost loop of the loop nest of loop (we require that
1490 there are no sibling loops inside the nest). */
1491 nest = loop;
1492 while (1)
1493 {
1494 aloop = loop_outer (nest);
1495
1496 if (aloop == current_loops->tree_root
1497 || aloop->inner->next)
1498 break;
1499
1500 nest = aloop;
1501 }
1502
1503 /* For each loop, determine the amount of data accessed in each iteration.
1504 We use this to estimate whether the reference is evicted from the
1505 cache before its reuse. */
1506 find_loop_nest (nest, &vloops);
1507 n = VEC_length (loop_p, vloops);
1508 loop_data_size = XNEWVEC (unsigned, n);
1509 volume = volume_of_references (refs);
1510 i = n;
1511 while (i-- != 0)
1512 {
1513 loop_data_size[i] = volume;
1514 /* Bound the volume by the L2 cache size, since above this bound,
1515 all dependence distances are equivalent. */
1516 if (volume > L2_CACHE_SIZE_BYTES)
1517 continue;
1518
1519 aloop = VEC_index (loop_p, vloops, i);
1520 vol = estimated_loop_iterations_int (aloop, false);
1521 if (vol < 0)
1522 vol = expected_loop_iterations (aloop);
1523 volume *= vol;
1524 }
1525
1526 /* Prepare the references in the form suitable for data dependence
0d52bcc1 1527 analysis. We ignore unanalyzable data references (the results
5417e022
ZD
1528 are used just as a heuristics to estimate temporality of the
1529 references, hence we do not need to worry about correctness). */
1530 for (gr = refs; gr; gr = gr->next)
1531 for (ref = gr->refs; ref; ref = ref->next)
1532 {
1533 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1534
1535 if (dr)
1536 {
1537 ref->reuse_distance = volume;
1538 dr->aux = ref;
1539 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1540 }
79f5e442
ZD
1541 else
1542 no_other_refs = false;
5417e022
ZD
1543 }
1544
1545 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
1546 {
1547 dist = self_reuse_distance (dr, loop_data_size, n, loop);
3d9a9f94 1548 ref = (struct mem_ref *) dr->aux;
5417e022
ZD
1549 if (ref->reuse_distance > dist)
1550 ref->reuse_distance = dist;
79f5e442
ZD
1551
1552 if (no_other_refs)
1553 ref->independent_p = true;
5417e022
ZD
1554 }
1555
1556 compute_all_dependences (datarefs, &dependences, vloops, true);
1557
1558 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
1559 {
1560 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1561 continue;
1562
3d9a9f94
KG
1563 ref = (struct mem_ref *) DDR_A (dep)->aux;
1564 refb = (struct mem_ref *) DDR_B (dep)->aux;
79f5e442 1565
5417e022
ZD
1566 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1567 || DDR_NUM_DIST_VECTS (dep) == 0)
1568 {
0d52bcc1 1569 /* If the dependence cannot be analyzed, assume that there might be
5417e022
ZD
1570 a reuse. */
1571 dist = 0;
b8698a0f 1572
79f5e442
ZD
1573 ref->independent_p = false;
1574 refb->independent_p = false;
5417e022
ZD
1575 }
1576 else
1577 {
0d52bcc1 1578 /* The distance vectors are normalized to be always lexicographically
5417e022
ZD
1579 positive, hence we cannot tell just from them whether DDR_A comes
1580 before DDR_B or vice versa. However, it is not important,
1581 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1582 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1583 in cache (and marking it as nontemporal would not affect
1584 anything). */
1585
1586 dist = volume;
1587 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1588 {
1589 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1590 loop_data_size, n);
1591
79f5e442
ZD
1592 /* If this is a dependence in the innermost loop (i.e., the
1593 distances in all superloops are zero) and it is not
1594 the trivial self-dependence with distance zero, record that
1595 the references are not completely independent. */
1596 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1597 && (ref != refb
1598 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1599 {
1600 ref->independent_p = false;
1601 refb->independent_p = false;
1602 }
1603
5417e022
ZD
1604 /* Ignore accesses closer than
1605 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1606 so that we use nontemporal prefetches e.g. if single memory
1607 location is accessed several times in a single iteration of
1608 the loop. */
1609 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1610 continue;
1611
1612 if (adist < dist)
1613 dist = adist;
1614 }
1615 }
1616
5417e022
ZD
1617 if (ref->reuse_distance > dist)
1618 ref->reuse_distance = dist;
79f5e442
ZD
1619 if (refb->reuse_distance > dist)
1620 refb->reuse_distance = dist;
5417e022
ZD
1621 }
1622
1623 free_dependence_relations (dependences);
1624 free_data_refs (datarefs);
1625 free (loop_data_size);
1626
1627 if (dump_file && (dump_flags & TDF_DETAILS))
1628 {
1629 fprintf (dump_file, "Reuse distances:\n");
1630 for (gr = refs; gr; gr = gr->next)
1631 for (ref = gr->refs; ref; ref = ref->next)
1632 fprintf (dump_file, " ref %p distance %u\n",
1633 (void *) ref, ref->reuse_distance);
1634 }
1635}

/* Do a cost-benefit analysis to determine if prefetching is profitable
   for the current loop given the following parameters:
   AHEAD: the iteration ahead distance,
   EST_NITER: the estimated trip count,
   NINSNS: estimated number of instructions in the loop,
   PREFETCH_COUNT: an estimate of the number of prefetches,
   MEM_REF_COUNT: total number of memory references in the loop,
   UNROLL_FACTOR: the factor by which the loop is considered for unrolling.  */

static bool
is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
                                unsigned ninsns, unsigned prefetch_count,
                                unsigned mem_ref_count, unsigned unroll_factor)
{
  int insn_to_mem_ratio, insn_to_prefetch_ratio;

  if (mem_ref_count == 0)
    return false;

  /* Prefetching improves performance by overlapping cache-missing memory
     accesses with CPU operations.  If the loop does not have enough CPU
     operations to overlap with memory operations, prefetching won't give
     a significant benefit.  One approximate way of checking this is to
     require the ratio of instructions to memory references to be above
     a certain limit.  This approximation works well in practice.
     TODO: Implement a more precise computation by estimating the time
     for each CPU or memory op in the loop.  Time estimates for memory ops
     should account for cache misses.  */
  insn_to_mem_ratio = ninsns / mem_ref_count;

  if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
                 insn_to_mem_ratio);
      return false;
    }
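  /* Illustrative only (the actual threshold is the
     PREFETCH_MIN_INSN_TO_MEM_RATIO parameter): a loop body with 8 insns and
     4 memory references has insn_to_mem_ratio = 2; with a hypothetical
     threshold of 3 it would be rejected above, since there is too little
     computation to hide the miss latency behind.  */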

  /* Prefetching most likely causes performance degradation when the
     instruction to prefetch ratio is too small.  Too many prefetch
     instructions in a loop may reduce the I-cache performance.
     (unroll_factor * ninsns) is used to estimate the number of instructions in
     the unrolled loop.  This implementation is a bit simplistic -- the number
     of issued prefetch instructions is also affected by unrolling.  So,
     prefetch_mod and the unroll factor should be taken into account when
     determining prefetch_count.  Also, the number of insns of the unrolled
     loop will usually be significantly smaller than the number of insns of the
     original loop * unroll_factor (at least the induction variable increments
     and the exit branches will get eliminated), so it might be better to use
     tree_estimate_loop_size + estimated_unrolled_size.  */
  insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
  if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
                 insn_to_prefetch_ratio);
      return false;
    }
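  /* Again illustrative: with ninsns = 10, unroll_factor = 4 and
     prefetch_count = 8, insn_to_prefetch_ratio = (4 * 10) / 8 = 5, i.e.
     roughly one prefetch for every five insns of the estimated unrolled
     body; whether that passes depends on MIN_INSN_TO_PREFETCH_RATIO.  */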

  /* We cannot estimate the profitability any further if the trip count is
     unknown.  Just assume prefetching is profitable, although this may be
     too aggressive.  */
  if (est_niter < 0)
    return true;

  if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- loop estimated to roll only %d times\n",
                 (int) est_niter);
      return false;
    }
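  /* For instance, with ahead = 4 and a hypothetical TRIP_COUNT_TO_AHEAD_RATIO
     of 4, loops expected to run fewer than 16 iterations are rejected above:
     most of their prefetches would either fetch data that is never used or
     arrive too late to hide any latency.  */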
  return true;
}


/* Issue prefetch instructions for array references in LOOP.  Returns
   true if the LOOP was unrolled.  */

static bool
loop_prefetch_arrays (struct loop *loop)
{
  struct mem_ref_group *refs;
  unsigned ahead, ninsns, time, unroll_factor;
  HOST_WIDE_INT est_niter;
  struct tree_niter_desc desc;
  bool unrolled = false, no_other_refs;
  unsigned prefetch_count;
  unsigned mem_ref_count;

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, " ignored (cold area)\n");
      return false;
    }

  /* Step 1: gather the memory references.  */
  refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);

  /* Step 2: estimate the reuse effects.  */
  prune_by_reuse (refs);

  if (nothing_to_prefetch_p (refs))
    goto fail;

  determine_loop_nest_reuse (loop, refs, no_other_refs);

  /* Step 3: determine the ahead and unroll factor.  */

  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
  time = tree_num_loop_insns (loop, &eni_time_weights);
  ahead = (PREFETCH_LATENCY + time - 1) / time;
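  /* AHEAD is thus a ceiling division: the number of iterations needed to
     cover the prefetch latency.  As a purely illustrative example (both
     values are target/--param dependent), with PREFETCH_LATENCY = 200 and an
     estimated per-iteration time of 50, ahead = (200 + 50 - 1) / 50 = 4, so
     each prefetch targets data used four iterations later.  */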
  est_niter = estimated_loop_iterations_int (loop, false);

  ninsns = tree_num_loop_insns (loop, &eni_size_weights);
  unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
                                           est_niter);

  /* Estimate prefetch count for the unrolled loop.  */
  prefetch_count = estimate_prefetch_count (refs, unroll_factor);
  if (prefetch_count == 0)
    goto fail;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
             HOST_WIDE_INT_PRINT_DEC "\n"
             "insn count %d, mem ref count %d, prefetch count %d\n",
             ahead, unroll_factor, est_niter,
             ninsns, mem_ref_count, prefetch_count);

  if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns, prefetch_count,
                                       mem_ref_count, unroll_factor))
    goto fail;

  mark_nontemporal_stores (loop, refs);

  /* Step 4: what to prefetch?  */
  if (!schedule_prefetches (refs, unroll_factor, ahead))
    goto fail;

  /* Step 5: unroll the loop.  TODO -- peeling of first and last few
     iterations so that we do not issue superfluous prefetches.  */
  if (unroll_factor != 1)
    {
      tree_unroll_loop (loop, unroll_factor,
                        single_dom_exit (loop), &desc);
      unrolled = true;
    }

  /* Step 6: issue the prefetches.  */
  issue_prefetches (refs, unroll_factor, ahead);

fail:
  release_mem_refs (refs);
  return unrolled;
}

/* Issue prefetch instructions for array references in loops.  */

unsigned int
tree_ssa_prefetch_arrays (void)
{
  loop_iterator li;
  struct loop *loop;
  bool unrolled = false;
  int todo_flags = 0;

  if (!HAVE_prefetch
      /* It is possible to ask the compiler for, say, -mtune=i486
         -march=pentium4.  -mtune=i486 causes us to have PREFETCH_BLOCK 0,
         since this is part of the processor costs and i486 does not have
         prefetch, but -march=pentium4 causes HAVE_prefetch to be true.
         Ugh.  */
      || PREFETCH_BLOCK == 0)
    return 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Prefetching parameters:\n");
      fprintf (dump_file, " simultaneous prefetches: %d\n",
               SIMULTANEOUS_PREFETCHES);
      fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
      fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
      fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
               L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
      fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
      fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
      fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
               MIN_INSN_TO_PREFETCH_RATIO);
      fprintf (dump_file, " min insn-to-mem ratio: %d \n",
               PREFETCH_MIN_INSN_TO_MEM_RATIO);
      fprintf (dump_file, "\n");
    }

  initialize_original_copy_tables ();

  if (!built_in_decls[BUILT_IN_PREFETCH])
    {
      tree type = build_function_type (void_type_node,
                                       tree_cons (NULL_TREE,
                                                  const_ptr_type_node,
                                                  NULL_TREE));
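      /* Note that the argument list above is not terminated by
         void_list_node, so the type corresponds to
         "void __builtin_prefetch (const void *, ...)"; the builtin's optional
         rw and locality arguments are covered by the ellipsis.  */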
      tree decl = add_builtin_function ("__builtin_prefetch", type,
                                        BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
                                        NULL, NULL_TREE);
      DECL_IS_NOVOPS (decl) = true;
      built_in_decls[BUILT_IN_PREFETCH] = decl;
    }

  /* We assume that the cache line size is a power of two, so verify this
     here.  */
  gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
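  /* The test works because a power of two has exactly one bit set, so
     clearing its lowest set bit with x & (x - 1) yields zero; e.g.
     64 & 63 == 0, whereas a non-power such as 48 gives 48 & 47 == 32 and
     would trip the assertion.  */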

  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Processing loop %d:\n", loop->num);

      unrolled |= loop_prefetch_arrays (loop);

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "\n\n");
    }

  if (unrolled)
    {
      scev_reset ();
      todo_flags |= TODO_cleanup_cfg;
    }

  free_original_copy_tables ();
  return todo_flags;
}
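
/* Purely illustrative sketch -- not code from this pass, which operates on
   GIMPLE with the unroll factor, ahead distance, PREFETCH_MOD and the
   read/write and temporal-locality hints chosen by the analysis above.  At
   the source level, the overall effect on a loop such as

     for (i = 0; i < n; i++)
       sum += a[i];

   roughly corresponds, for an assumed unroll factor of 4 and ahead distance
   AHEAD, to

     for (i = 0; i < n; i += 4)
       {
         __builtin_prefetch (&a[i + 4 * AHEAD], 0, 3);
         sum += a[i];
         sum += a[i + 1];
         sum += a[i + 2];
         sum += a[i + 3];
       }

   plus whatever code tree_unroll_loop emits to handle trip counts that are
   not a multiple of the unroll factor.  */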