1 /* Array prefetching.
2 Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "basic-block.h"
29 #include "output.h"
30 #include "diagnostic.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "cfgloop.h"
35 #include "expr.h"
36 #include "tree-pass.h"
37 #include "ggc.h"
38 #include "insn-config.h"
39 #include "recog.h"
40 #include "hashtab.h"
41 #include "tree-chrec.h"
42 #include "tree-scalar-evolution.h"
43 #include "toplev.h"
44 #include "params.h"
45 #include "langhooks.h"
46 #include "tree-inline.h"
47 #include "tree-data-ref.h"
48 #include "optabs.h"
49
50 /* This pass inserts prefetch instructions to optimize cache usage during
51 accesses to arrays in loops. It processes loops sequentially and:
52
53 1) Gathers all memory references in the single loop.
54 2) For each of the references it decides when it is profitable to prefetch
55 it. To do this, we evaluate the reuse among the accesses and determine
56 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
57 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
58 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
59 iterations of the loop that are zero modulo PREFETCH_MOD). For example
60 (assuming cache line size is 64 bytes, char has size 1 byte and there
61 is no hardware sequential prefetch):
62
63 char *a;
64 for (i = 0; i < max; i++)
65 {
66 a[255] = ...; (0)
67 a[i] = ...; (1)
68 a[i + 64] = ...; (2)
69 a[16*i] = ...; (3)
70 a[187*i] = ...; (4)
71 a[187*i + 50] = ...; (5)
72 }
73
74 (0) obviously has PREFETCH_BEFORE 1
75 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
76 location 64 iterations before it, and PREFETCH_MOD 64 (since
77 it hits the same cache line otherwise).
78 (2) has PREFETCH_MOD 64
79 (3) has PREFETCH_MOD 4
80 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
81 the cache line accessed by (4) is the same with probability only
82 7/32.
83 (5) has PREFETCH_MOD 1 as well.
84
85 Additionally, we use data dependence analysis to determine for each
86 reference the distance until the first reuse; this information is used
87 to determine the temporality of the issued prefetch instruction.
88
89 3) We determine how far ahead we need to prefetch. The number of
90 iterations needed is time to fetch / time spent in one iteration of
91 the loop. The problem is that we do not know either of these values,
92 so we just make a heuristic guess based on a magic (possibly)
93 target-specific constant and size of the loop.
94
95 4) Determine which of the references we prefetch. We take into account
96 that there is a maximum number of simultaneous prefetches (provided
97 by the machine description). We issue as many prefetches as possible
98 while still within this bound (starting with those with lowest
99 prefetch_mod, since they are responsible for most of the cache
100 misses).
101
102 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
103 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
104 prefetching nonaccessed memory.
105 TODO -- actually implement peeling.
106
107 6) We actually emit the prefetch instructions. ??? Perhaps emit the
108 prefetch instructions with guards in cases where 5) was not sufficient
109 to satisfy the constraints?
110
111 The function is_loop_prefetching_profitable() implements a cost model
112 to determine if prefetching is profitable for a given loop. The cost
113 model has two heuristics:
114 1. A heuristic that determines whether the given loop has enough CPU
115 ops that can be overlapped with cache missing memory ops.
116 If not, the loop won't benefit from prefetching. This is implemented
117 by requiring the ratio between the instruction count and the mem ref
118 count to be above a certain minimum.
119 2. A heuristic that disables prefetching in a loop if the prefetching
120 cost is above a certain limit, whether or not the trip count is
121 known. The relative prefetching cost is estimated by taking the ratio
122 between the prefetch count and the total instruction count (this
123 models the I-cache cost).
124 The limits used in these heuristics are defined as parameters with
125 reasonable default values. Machine-specific default values will be
126 added later. (A small worked example of these two ratios follows this comment.)
127
128 Some other TODO:
129 -- write and use more general reuse analysis (that could be also used
130 in other cache aimed loop optimizations)
131 -- make it behave sanely together with the prefetches given by the user
132 (now we just ignore them; at the very least we should avoid
133 optimizing loops in which the user put their own prefetches)
134 -- we assume cache line size alignment of arrays; this could be
135 improved. */
136
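/* Illustrative example of the two cost-model heuristics above (all
   numbers are assumed here for exposition only, not taken from any
   particular target): a loop with ninsns = 12 and mem_ref_count = 4
   has an instruction-to-memory-reference ratio of 12 / 4 = 3, so it
   passes heuristic 1 if PREFETCH_MIN_INSN_TO_MEM_RATIO is 3 or less.
   If that loop is unrolled 4 times and prefetch_count = 6 prefetches
   are planned, the instruction-to-prefetch ratio is (4 * 12) / 6 = 8,
   so heuristic 2 rejects prefetching whenever
   MIN_INSN_TO_PREFETCH_RATIO is larger than 8.  */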
137 /* Magic constants follow. These should be replaced by machine-specific
138 numbers. */
139
140 /* True if write can be prefetched by a read prefetch. */
141
142 #ifndef WRITE_CAN_USE_READ_PREFETCH
143 #define WRITE_CAN_USE_READ_PREFETCH 1
144 #endif
145
146 /* True if read can be prefetched by a write prefetch. */
147
148 #ifndef READ_CAN_USE_WRITE_PREFETCH
149 #define READ_CAN_USE_WRITE_PREFETCH 0
150 #endif
151
152 /* The size of the block loaded by a single prefetch. Usually, this is
153 the same as cache line size (at the moment, we only consider one level
154 of cache hierarchy). */
155
156 #ifndef PREFETCH_BLOCK
157 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
158 #endif
159
160 /* Do we have forward hardware sequential prefetching? */
161
162 #ifndef HAVE_FORWARD_PREFETCH
163 #define HAVE_FORWARD_PREFETCH 0
164 #endif
165
166 /* Do we have backward hardware sequential prefetching? */
167
168 #ifndef HAVE_BACKWARD_PREFETCH
169 #define HAVE_BACKWARD_PREFETCH 0
170 #endif
171
172 /* In some cases we are only able to determine that there is a certain
173 probability that the two accesses hit the same cache line. In this
174 case, we issue the prefetches for both of them if this probability
175 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
176
177 #ifndef ACCEPTABLE_MISS_RATE
178 #define ACCEPTABLE_MISS_RATE 50
179 #endif
180
181 #ifndef HAVE_prefetch
182 #define HAVE_prefetch 0
183 #endif
184
185 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
186 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
187
188 /* We consider a memory access nontemporal if it is not reused sooner than
189 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
190 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
191 so that we use nontemporal prefetches e.g. if a single memory location
192 is accessed several times in a single iteration of the loop. */
193 #define NONTEMPORAL_FRACTION 16
194
195 /* In case we have to emit a memory fence instruction after the loop that
196 uses nontemporal stores, this defines the builtin to use. */
197
198 #ifndef FENCE_FOLLOWING_MOVNT
199 #define FENCE_FOLLOWING_MOVNT NULL_TREE
200 #endif
201
202 /* It is not profitable to prefetch when the trip count is not at
203 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
204 For example, in a loop with a prefetch ahead distance of 10,
205 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
206 profitable to prefetch when the trip count is greater than or equal to
207 40. In that case, 30 out of the 40 iterations will benefit from
208 prefetching. */
209
210 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
211 #define TRIP_COUNT_TO_AHEAD_RATIO 4
212 #endif
213
214 /* The group of references between which reuse may occur. */
215
216 struct mem_ref_group
217 {
218 tree base; /* Base of the reference. */
219 HOST_WIDE_INT step; /* Step of the reference. */
220 struct mem_ref *refs; /* References in the group. */
221 struct mem_ref_group *next; /* Next group of references. */
222 };
223
224 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
225
226 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
227
228 /* Do not generate a prefetch if the unroll factor is significantly less
229 than what is required by the prefetch. This is to avoid redundant
230 prefetches. For example, if prefetch_mod is 16 and unroll_factor is
231 1, this means prefetching requires unrolling the loop 16 times, but
232 the loop is not going to be unrolled. In this case (ratio = 16),
233 prefetching is not likely to be beneficial. */
234
235 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
236 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 8
237 #endif
238
239 /* The memory reference. */
240
241 struct mem_ref
242 {
243 gimple stmt; /* Statement in which the reference appears. */
244 tree mem; /* The reference. */
245 HOST_WIDE_INT delta; /* Constant offset of the reference. */
246 struct mem_ref_group *group; /* The group of references it belongs to. */
247 unsigned HOST_WIDE_INT prefetch_mod;
248 /* Prefetch only each PREFETCH_MOD-th
249 iteration. */
250 unsigned HOST_WIDE_INT prefetch_before;
251 /* Prefetch only first PREFETCH_BEFORE
252 iterations. */
253 unsigned reuse_distance; /* The amount of data accessed before the first
254 reuse of this value. */
255 struct mem_ref *next; /* The next reference in the group. */
256 unsigned write_p : 1; /* Is it a write? */
257 unsigned independent_p : 1; /* True if the reference is independent of
258 all other references inside the loop. */
259 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
260 unsigned storent_p : 1; /* True if we changed the store to a
261 nontemporal one. */
262 };
263
264 /* Dumps information about reference REF to FILE. */
265
266 static void
267 dump_mem_ref (FILE *file, struct mem_ref *ref)
268 {
269 fprintf (file, "Reference %p:\n", (void *) ref);
270
271 fprintf (file, " group %p (base ", (void *) ref->group);
272 print_generic_expr (file, ref->group->base, TDF_SLIM);
273 fprintf (file, ", step ");
274 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
275 fprintf (file, ")\n");
276
277 fprintf (file, " delta ");
278 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
279 fprintf (file, "\n");
280
281 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
282
283 fprintf (file, "\n");
284 }
285
286 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
287 exist. */
288
289 static struct mem_ref_group *
290 find_or_create_group (struct mem_ref_group **groups, tree base,
291 HOST_WIDE_INT step)
292 {
293 struct mem_ref_group *group;
294
295 for (; *groups; groups = &(*groups)->next)
296 {
297 if ((*groups)->step == step
298 && operand_equal_p ((*groups)->base, base, 0))
299 return *groups;
300
301 /* Keep the list of groups sorted by decreasing step. */
302 if ((*groups)->step < step)
303 break;
304 }
305
306 group = XNEW (struct mem_ref_group);
307 group->base = base;
308 group->step = step;
309 group->refs = NULL;
310 group->next = *groups;
311 *groups = group;
312
313 return group;
314 }
315
316 /* Records a memory reference MEM in GROUP with offset DELTA and write status
317 WRITE_P. The reference occurs in statement STMT. */
318
319 static void
320 record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
321 HOST_WIDE_INT delta, bool write_p)
322 {
323 struct mem_ref **aref;
324
325 /* Do not record the same address twice. */
326 for (aref = &group->refs; *aref; aref = &(*aref)->next)
327 {
328 /* It may not be possible for a write reference to reuse the read
329 prefetch, or vice versa. */
330 if (!WRITE_CAN_USE_READ_PREFETCH
331 && write_p
332 && !(*aref)->write_p)
333 continue;
334 if (!READ_CAN_USE_WRITE_PREFETCH
335 && !write_p
336 && (*aref)->write_p)
337 continue;
338
339 if ((*aref)->delta == delta)
340 return;
341 }
342
343 (*aref) = XNEW (struct mem_ref);
344 (*aref)->stmt = stmt;
345 (*aref)->mem = mem;
346 (*aref)->delta = delta;
347 (*aref)->write_p = write_p;
348 (*aref)->prefetch_before = PREFETCH_ALL;
349 (*aref)->prefetch_mod = 1;
350 (*aref)->reuse_distance = 0;
351 (*aref)->issue_prefetch_p = false;
352 (*aref)->group = group;
353 (*aref)->next = NULL;
354 (*aref)->independent_p = false;
355 (*aref)->storent_p = false;
356
357 if (dump_file && (dump_flags & TDF_DETAILS))
358 dump_mem_ref (dump_file, *aref);
359 }
360
361 /* Release memory references in GROUPS. */
362
363 static void
364 release_mem_refs (struct mem_ref_group *groups)
365 {
366 struct mem_ref_group *next_g;
367 struct mem_ref *ref, *next_r;
368
369 for (; groups; groups = next_g)
370 {
371 next_g = groups->next;
372 for (ref = groups->refs; ref; ref = next_r)
373 {
374 next_r = ref->next;
375 free (ref);
376 }
377 free (groups);
378 }
379 }
380
381 /* A structure used to pass arguments to idx_analyze_ref. */
382
383 struct ar_data
384 {
385 struct loop *loop; /* Loop of the reference. */
386 gimple stmt; /* Statement of the reference. */
387 HOST_WIDE_INT *step; /* Step of the memory reference. */
388 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
389 };
390
391 /* Analyzes a single INDEX of a memory reference to obtain information
392 described in analyze_ref. Callback for for_each_index. */
393
394 static bool
395 idx_analyze_ref (tree base, tree *index, void *data)
396 {
397 struct ar_data *ar_data = (struct ar_data *) data;
398 tree ibase, step, stepsize;
399 HOST_WIDE_INT istep, idelta = 0, imult = 1;
400 affine_iv iv;
401
402 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
403 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
404 return false;
405
406 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
407 *index, &iv, false))
408 return false;
409 ibase = iv.base;
410 step = iv.step;
411
412 if (!cst_and_fits_in_hwi (step))
413 return false;
414 istep = int_cst_value (step);
415
416 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
417 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
418 {
419 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
420 ibase = TREE_OPERAND (ibase, 0);
421 }
422 if (cst_and_fits_in_hwi (ibase))
423 {
424 idelta += int_cst_value (ibase);
425 ibase = build_int_cst (TREE_TYPE (ibase), 0);
426 }
427
428 if (TREE_CODE (base) == ARRAY_REF)
429 {
430 stepsize = array_ref_element_size (base);
431 if (!cst_and_fits_in_hwi (stepsize))
432 return false;
433 imult = int_cst_value (stepsize);
434
435 istep *= imult;
436 idelta *= imult;
437 }
438
439 *ar_data->step += istep;
440 *ar_data->delta += idelta;
441 *index = ibase;
442
443 return true;
444 }
445
446 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
447 STEP are integer constants and iter is the number of iterations of LOOP. The
448 reference occurs in statement STMT. Strips nonaddressable component
449 references from REF_P. */
450
451 static bool
452 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
453 HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
454 gimple stmt)
455 {
456 struct ar_data ar_data;
457 tree off;
458 HOST_WIDE_INT bit_offset;
459 tree ref = *ref_p;
460
461 *step = 0;
462 *delta = 0;
463
464 /* First strip off the component references. Ignore bitfields. */
465 if (TREE_CODE (ref) == COMPONENT_REF
466 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
467 ref = TREE_OPERAND (ref, 0);
468
469 *ref_p = ref;
470
471 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
472 {
473 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
474 bit_offset = TREE_INT_CST_LOW (off);
475 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
476
477 *delta += bit_offset / BITS_PER_UNIT;
478 }
479
480 *base = unshare_expr (ref);
481 ar_data.loop = loop;
482 ar_data.stmt = stmt;
483 ar_data.step = step;
484 ar_data.delta = delta;
485 return for_each_index (base, idx_analyze_ref, &ar_data);
486 }
487
488 /* Record a memory reference REF to the list REFS. The reference occurs in
489 LOOP in statement STMT and it is a write if WRITE_P is true. Returns true if the
490 reference was recorded, false otherwise. */
491
492 static bool
493 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
494 tree ref, bool write_p, gimple stmt)
495 {
496 tree base;
497 HOST_WIDE_INT step, delta;
498 struct mem_ref_group *agrp;
499
500 if (get_base_address (ref) == NULL)
501 return false;
502
503 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
504 return false;
505
506 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
507 are integer constants. */
508 agrp = find_or_create_group (refs, base, step);
509 record_ref (agrp, stmt, ref, delta, write_p);
510
511 return true;
512 }
513
514 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
515 true if there are no other memory references inside the loop. REF_COUNT is set to the number of memory references found. */
516
517 static struct mem_ref_group *
518 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
519 {
520 basic_block *body = get_loop_body_in_dom_order (loop);
521 basic_block bb;
522 unsigned i;
523 gimple_stmt_iterator bsi;
524 gimple stmt;
525 tree lhs, rhs;
526 struct mem_ref_group *refs = NULL;
527
528 *no_other_refs = true;
529 *ref_count = 0;
530
531 /* Scan the loop body in order, so that earlier references precede
532 later ones. */
533 for (i = 0; i < loop->num_nodes; i++)
534 {
535 bb = body[i];
536 if (bb->loop_father != loop)
537 continue;
538
539 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
540 {
541 stmt = gsi_stmt (bsi);
542
543 if (gimple_code (stmt) != GIMPLE_ASSIGN)
544 {
545 if (gimple_vuse (stmt)
546 || (is_gimple_call (stmt)
547 && !(gimple_call_flags (stmt) & ECF_CONST)))
548 *no_other_refs = false;
549 continue;
550 }
551
552 lhs = gimple_assign_lhs (stmt);
553 rhs = gimple_assign_rhs1 (stmt);
554
555 if (REFERENCE_CLASS_P (rhs))
556 {
557 *no_other_refs &= gather_memory_references_ref (loop, &refs,
558 rhs, false, stmt);
559 *ref_count += 1;
560 }
561 if (REFERENCE_CLASS_P (lhs))
562 {
563 *no_other_refs &= gather_memory_references_ref (loop, &refs,
564 lhs, true, stmt);
565 *ref_count += 1;
566 }
567 }
568 }
569 free (body);
570
571 return refs;
572 }
573
574 /* Prune the prefetch candidate REF using the self-reuse. */
575
576 static void
577 prune_ref_by_self_reuse (struct mem_ref *ref)
578 {
579 HOST_WIDE_INT step = ref->group->step;
580 bool backward = step < 0;
581
582 if (step == 0)
583 {
584 /* Prefetch references to invariant address just once. */
585 ref->prefetch_before = 1;
586 return;
587 }
588
589 if (backward)
590 step = -step;
591
592 if (step > PREFETCH_BLOCK)
593 return;
594
595 if ((backward && HAVE_BACKWARD_PREFETCH)
596 || (!backward && HAVE_FORWARD_PREFETCH))
597 {
598 ref->prefetch_before = 1;
599 return;
600 }
601
602 ref->prefetch_mod = PREFETCH_BLOCK / step;
603 }
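/* For instance, the reference a[16*i] from the example in the header
   comment (group step 16, PREFETCH_BLOCK 64, no hardware sequential
   prefetch) reaches the last statement above and gets
   prefetch_mod = 64 / 16 = 4, matching "(3) has PREFETCH_MOD 4".  */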
604
605 /* Divides X by BY, rounding down. */
606
607 static HOST_WIDE_INT
608 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
609 {
610 gcc_assert (by > 0);
611
612 if (x >= 0)
613 return x / by;
614 else
615 return (x + by - 1) / by;
616 }
617
618 /* Given a CACHE_LINE_SIZE and two inductive memory references
619 with a common STEP greater than CACHE_LINE_SIZE and an address
620 difference DELTA, compute the probability that they will fall
621 in different cache lines. DISTINCT_ITERS is the number of
622 distinct iterations after which the pattern repeats itself.
623 ALIGN_UNIT is the unit of alignment in bytes. */
624
625 static int
626 compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
627 HOST_WIDE_INT step, HOST_WIDE_INT delta,
628 unsigned HOST_WIDE_INT distinct_iters,
629 int align_unit)
630 {
631 unsigned align, iter;
632 int total_positions, miss_positions, miss_rate;
633 int address1, address2, cache_line1, cache_line2;
634
635 total_positions = 0;
636 miss_positions = 0;
637
638 /* Iterate through all possible alignments of the first
639 memory reference within its cache line. */
640 for (align = 0; align < cache_line_size; align += align_unit)
641
642 /* Iterate through all distinct iterations. */
643 for (iter = 0; iter < distinct_iters; iter++)
644 {
645 address1 = align + step * iter;
646 address2 = address1 + delta;
647 cache_line1 = address1 / cache_line_size;
648 cache_line2 = address2 / cache_line_size;
649 total_positions += 1;
650 if (cache_line1 != cache_line2)
651 miss_positions += 1;
652 }
653 miss_rate = 1000 * miss_positions / total_positions;
654 return miss_rate;
655 }
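/* Worked example for compute_miss_rate (the concrete values are chosen
   for illustration only): with cache_line_size = 64, step = 96,
   delta = 32, distinct_iters = 2 and align_unit = 16, the loops above
   visit alignments 0, 16, 32 and 48 for 2 iterations each, i.e. 8
   positions; the two addresses land in different cache lines in 4 of
   them, so the returned miss rate is 1000 * 4 / 8 = 500 per thousand.
   (distinct_iters is 2 here because a step of 96 repeats its alignment
   within a 64-byte line every 2 iterations; this is what the reduction
   loop in prune_ref_by_group_reuse below computes.)  */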
656
657 /* Prune the prefetch candidate REF using the reuse with BY.
658 If BY_IS_BEFORE is true, BY is before REF in the loop. */
659
660 static void
661 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
662 bool by_is_before)
663 {
664 HOST_WIDE_INT step = ref->group->step;
665 bool backward = step < 0;
666 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
667 HOST_WIDE_INT delta = delta_b - delta_r;
668 HOST_WIDE_INT hit_from;
669 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
670 int miss_rate;
671 HOST_WIDE_INT reduced_step;
672 unsigned HOST_WIDE_INT reduced_prefetch_block;
673 tree ref_type;
674 int align_unit;
675
676 if (delta == 0)
677 {
678 /* If the references have the same address, only prefetch the
679 former. */
680 if (by_is_before)
681 ref->prefetch_before = 0;
682
683 return;
684 }
685
686 if (!step)
687 {
688 /* If the reference addresses are invariant and fall into the
689 same cache line, prefetch just the first one. */
690 if (!by_is_before)
691 return;
692
693 if (ddown (ref->delta, PREFETCH_BLOCK)
694 != ddown (by->delta, PREFETCH_BLOCK))
695 return;
696
697 ref->prefetch_before = 0;
698 return;
699 }
700
701 /* Only prune the reference that is behind in the array. */
702 if (backward)
703 {
704 if (delta > 0)
705 return;
706
707 /* Transform the data so that we may assume that the accesses
708 are forward. */
709 delta = - delta;
710 step = -step;
711 delta_r = PREFETCH_BLOCK - 1 - delta_r;
712 delta_b = PREFETCH_BLOCK - 1 - delta_b;
713 }
714 else
715 {
716 if (delta < 0)
717 return;
718 }
719
720 /* Check whether the two references are likely to hit the same cache
721 line, and how far apart the iterations in which that occurs are from
722 each other. */
723
724 if (step <= PREFETCH_BLOCK)
725 {
726 /* The accesses are sure to meet. Let us check when. */
727 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
728 prefetch_before = (hit_from - delta_r + step - 1) / step;
729
730 /* Do not reduce prefetch_before if we meet beyond cache size. */
731 if (prefetch_before > (unsigned) abs (L2_CACHE_SIZE_BYTES / step))
732 prefetch_before = PREFETCH_ALL;
733 if (prefetch_before < ref->prefetch_before)
734 ref->prefetch_before = prefetch_before;
735
736 return;
737 }
738
739 /* A more complicated case with step > prefetch_block. First reduce
740 the ratio between the step and the cache line size to its simplest
741 terms. The resulting denominator will then represent the number of
742 distinct iterations after which each address will go back to its
743 initial location within the cache line. This computation assumes
744 that PREFETCH_BLOCK is a power of two. */
745 prefetch_block = PREFETCH_BLOCK;
746 reduced_prefetch_block = prefetch_block;
747 reduced_step = step;
748 while ((reduced_step & 1) == 0
749 && reduced_prefetch_block > 1)
750 {
751 reduced_step >>= 1;
752 reduced_prefetch_block >>= 1;
753 }
754
755 prefetch_before = delta / step;
756 delta %= step;
757 ref_type = TREE_TYPE (ref->mem);
758 align_unit = TYPE_ALIGN (ref_type) / 8;
759 miss_rate = compute_miss_rate(prefetch_block, step, delta,
760 reduced_prefetch_block, align_unit);
761 if (miss_rate <= ACCEPTABLE_MISS_RATE)
762 {
763 /* Do not reduce prefetch_before if we meet beyond cache size. */
764 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
765 prefetch_before = PREFETCH_ALL;
766 if (prefetch_before < ref->prefetch_before)
767 ref->prefetch_before = prefetch_before;
768
769 return;
770 }
771
772 /* Try also the following iteration. */
773 prefetch_before++;
774 delta = step - delta;
775 miss_rate = compute_miss_rate(prefetch_block, step, delta,
776 reduced_prefetch_block, align_unit);
777 if (miss_rate <= ACCEPTABLE_MISS_RATE)
778 {
779 if (prefetch_before < ref->prefetch_before)
780 ref->prefetch_before = prefetch_before;
781
782 return;
783 }
784
785 /* REF probably does not reuse BY. */
786 return;
787 }
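/* As a cross-check against the example in the header comment: pruning
   (1) a[i] by (2) a[i + 64] (group step 1, PREFETCH_BLOCK 64) takes
   the step <= PREFETCH_BLOCK branch with delta_r = 0 and delta_b = 64,
   so hit_from = 64 and prefetch_before = (64 - 0 + 1 - 1) / 1 = 64,
   i.e. (1) indeed ends up with PREFETCH_BEFORE 64.  */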
788
789 /* Prune the prefetch candidate REF using the reuses with other references
790 in REFS. */
791
792 static void
793 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
794 {
795 struct mem_ref *prune_by;
796 bool before = true;
797
798 prune_ref_by_self_reuse (ref);
799
800 for (prune_by = refs; prune_by; prune_by = prune_by->next)
801 {
802 if (prune_by == ref)
803 {
804 before = false;
805 continue;
806 }
807
808 if (!WRITE_CAN_USE_READ_PREFETCH
809 && ref->write_p
810 && !prune_by->write_p)
811 continue;
812 if (!READ_CAN_USE_WRITE_PREFETCH
813 && !ref->write_p
814 && prune_by->write_p)
815 continue;
816
817 prune_ref_by_group_reuse (ref, prune_by, before);
818 }
819 }
820
821 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
822
823 static void
824 prune_group_by_reuse (struct mem_ref_group *group)
825 {
826 struct mem_ref *ref_pruned;
827
828 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
829 {
830 prune_ref_by_reuse (ref_pruned, group->refs);
831
832 if (dump_file && (dump_flags & TDF_DETAILS))
833 {
834 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
835
836 if (ref_pruned->prefetch_before == PREFETCH_ALL
837 && ref_pruned->prefetch_mod == 1)
838 fprintf (dump_file, " no restrictions");
839 else if (ref_pruned->prefetch_before == 0)
840 fprintf (dump_file, " do not prefetch");
841 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
842 fprintf (dump_file, " prefetch once");
843 else
844 {
845 if (ref_pruned->prefetch_before != PREFETCH_ALL)
846 {
847 fprintf (dump_file, " prefetch before ");
848 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
849 ref_pruned->prefetch_before);
850 }
851 if (ref_pruned->prefetch_mod != 1)
852 {
853 fprintf (dump_file, " prefetch mod ");
854 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
855 ref_pruned->prefetch_mod);
856 }
857 }
858 fprintf (dump_file, "\n");
859 }
860 }
861 }
862
863 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
864
865 static void
866 prune_by_reuse (struct mem_ref_group *groups)
867 {
868 for (; groups; groups = groups->next)
869 prune_group_by_reuse (groups);
870 }
871
872 /* Returns true if we should issue prefetch for REF. */
873
874 static bool
875 should_issue_prefetch_p (struct mem_ref *ref)
876 {
877 /* For now do not issue prefetches that would be useful only for the
878 first few iterations. */
879 if (ref->prefetch_before != PREFETCH_ALL)
880 {
881 if (dump_file && (dump_flags & TDF_DETAILS))
882 fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
883 (void *) ref);
884 return false;
885 }
886
887 /* Do not prefetch nontemporal stores. */
888 if (ref->storent_p)
889 {
890 if (dump_file && (dump_flags & TDF_DETAILS))
891 fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
892 return false;
893 }
894
895 return true;
896 }
897
898 /* Decide which of the prefetch candidates in GROUPS to prefetch.
899 AHEAD is the number of iterations to prefetch ahead (which corresponds
900 to the number of simultaneous instances of one prefetch running at a
901 time). UNROLL_FACTOR is the factor by which the loop is going to be
902 unrolled. Returns true if there is anything to prefetch. */
903
904 static bool
905 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
906 unsigned ahead)
907 {
908 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
909 unsigned slots_per_prefetch;
910 struct mem_ref *ref;
911 bool any = false;
912
913 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
914 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
915
916 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
917 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
918 it will need a prefetch slot. */
919 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
920 if (dump_file && (dump_flags & TDF_DETAILS))
921 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
922 slots_per_prefetch);
923
924 /* For now we just take memory references one by one and issue
925 prefetches for as many as possible. The groups are sorted
926 starting with the largest step, since the references with
927 large step are more likely to cause many cache misses. */
928
929 for (; groups; groups = groups->next)
930 for (ref = groups->refs; ref; ref = ref->next)
931 {
932 if (!should_issue_prefetch_p (ref))
933 continue;
934
935 /* The loop is far from being sufficiently unrolled for this
936 prefetch. Do not generate the prefetch to avoid many redundant
937 prefetches. */
938 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
939 continue;
940
941 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
942 and we unroll the loop UNROLL_FACTOR times, we need to insert
943 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
944 iteration. */
945 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
946 / ref->prefetch_mod);
947 prefetch_slots = n_prefetches * slots_per_prefetch;
948
949 /* If more than half of the prefetches would be lost anyway, do not
950 issue the prefetch. */
951 if (2 * remaining_prefetch_slots < prefetch_slots)
952 continue;
953
954 ref->issue_prefetch_p = true;
955
956 if (remaining_prefetch_slots <= prefetch_slots)
957 return true;
958 remaining_prefetch_slots -= prefetch_slots;
959 any = true;
960 }
961
962 return any;
963 }
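/* Illustrative example (parameter values assumed for exposition): with
   ahead = 4 and unroll_factor = 2, slots_per_prefetch = (4 + 1) / 2 = 2.
   A reference with prefetch_mod = 1 then needs
   n_prefetches = (2 + 1 - 1) / 1 = 2, i.e. 2 * 2 = 4 prefetch slots;
   if SIMULTANEOUS_PREFETCHES were 6, the prefetch would be scheduled
   and 2 slots would remain for the references that follow.  */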
964
965 /* Estimate the number of prefetches in the given GROUPS. */
966
967 static int
968 estimate_prefetch_count (struct mem_ref_group *groups)
969 {
970 struct mem_ref *ref;
971 int prefetch_count = 0;
972
973 for (; groups; groups = groups->next)
974 for (ref = groups->refs; ref; ref = ref->next)
975 if (should_issue_prefetch_p (ref))
976 prefetch_count++;
977
978 return prefetch_count;
979 }
980
981 /* Issue prefetches for the reference REF into the loop as decided before.
982 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
983 is the factor by which LOOP was unrolled. */
984
985 static void
986 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
987 {
988 HOST_WIDE_INT delta;
989 tree addr, addr_base, write_p, local;
990 gimple prefetch;
991 gimple_stmt_iterator bsi;
992 unsigned n_prefetches, ap;
993 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
994
995 if (dump_file && (dump_flags & TDF_DETAILS))
996 fprintf (dump_file, "Issued%s prefetch for %p.\n",
997 nontemporal ? " nontemporal" : "",
998 (void *) ref);
999
1000 bsi = gsi_for_stmt (ref->stmt);
1001
1002 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1003 / ref->prefetch_mod);
1004 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1005 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1006 true, NULL, true, GSI_SAME_STMT);
1007 write_p = ref->write_p ? integer_one_node : integer_zero_node;
1008 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
1009
1010 for (ap = 0; ap < n_prefetches; ap++)
1011 {
1012 /* Determine the address to prefetch. */
1013 delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
1014 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
1015 addr_base, size_int (delta));
1016 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1017 true, GSI_SAME_STMT);
1018
1019 /* Create the prefetch instruction. */
1020 prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
1021 3, addr, write_p, local);
1022 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1023 }
1024 }
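/* Illustrative example (values assumed for exposition): for a group
   with step 8, ahead = 10, unroll_factor = 4 and prefetch_mod = 2, we
   get n_prefetches = (4 + 2 - 1) / 2 = 2, and the loop above emits
   __builtin_prefetch calls for addr_base + 80 (ap = 0:
   (10 + 0 * 2) * 8) and addr_base + 96 (ap = 1: (10 + 1 * 2) * 8).  */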
1025
1026 /* Issue prefetches for the references in GROUPS into the loop as decided before.
1027 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the
1028 factor by which LOOP was unrolled. */
1029
1030 static void
1031 issue_prefetches (struct mem_ref_group *groups,
1032 unsigned unroll_factor, unsigned ahead)
1033 {
1034 struct mem_ref *ref;
1035
1036 for (; groups; groups = groups->next)
1037 for (ref = groups->refs; ref; ref = ref->next)
1038 if (ref->issue_prefetch_p)
1039 issue_prefetch_ref (ref, unroll_factor, ahead);
1040 }
1041
1042 /* Returns true if REF is a memory write for which a nontemporal store insn
1043 can be used. */
1044
1045 static bool
1046 nontemporal_store_p (struct mem_ref *ref)
1047 {
1048 enum machine_mode mode;
1049 enum insn_code code;
1050
1051 /* REF must be a write that is not reused. We require it to be independent
1052 of all other memory references in the loop, as the nontemporal stores may
1053 be reordered with respect to other memory references. */
1054 if (!ref->write_p
1055 || !ref->independent_p
1056 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1057 return false;
1058
1059 /* Check that we have the storent instruction for the mode. */
1060 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1061 if (mode == BLKmode)
1062 return false;
1063
1064 code = optab_handler (storent_optab, mode)->insn_code;
1065 return code != CODE_FOR_nothing;
1066 }
1067
1068 /* If REF is a nontemporal store, we mark the corresponding modify statement
1069 and return true. Otherwise, we return false. */
1070
1071 static bool
1072 mark_nontemporal_store (struct mem_ref *ref)
1073 {
1074 if (!nontemporal_store_p (ref))
1075 return false;
1076
1077 if (dump_file && (dump_flags & TDF_DETAILS))
1078 fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1079 (void *) ref);
1080
1081 gimple_assign_set_nontemporal_move (ref->stmt, true);
1082 ref->storent_p = true;
1083
1084 return true;
1085 }
1086
1087 /* Issue a memory fence instruction after LOOP. */
1088
1089 static void
1090 emit_mfence_after_loop (struct loop *loop)
1091 {
1092 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1093 edge exit;
1094 gimple call;
1095 gimple_stmt_iterator bsi;
1096 unsigned i;
1097
1098 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1099 {
1100 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1101
1102 if (!single_pred_p (exit->dest)
1103 /* If possible, we prefer not to insert the fence on other paths
1104 in the CFG. */
1105 && !(exit->flags & EDGE_ABNORMAL))
1106 split_loop_exit_edge (exit);
1107 bsi = gsi_after_labels (exit->dest);
1108
1109 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1110 mark_virtual_ops_for_renaming (call);
1111 }
1112
1113 VEC_free (edge, heap, exits);
1114 update_ssa (TODO_update_ssa_only_virtuals);
1115 }
1116
1117 /* Returns true if we can use storent in LOOP, false otherwise. */
1118
1119 static bool
1120 may_use_storent_in_loop_p (struct loop *loop)
1121 {
1122 bool ret = true;
1123
1124 if (loop->inner != NULL)
1125 return false;
1126
1127 /* If we must issue a mfence insn after using storent, check that there
1128 is a suitable place for it at each of the loop exits. */
1129 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1130 {
1131 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1132 unsigned i;
1133 edge exit;
1134
1135 for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
1136 if ((exit->flags & EDGE_ABNORMAL)
1137 && exit->dest == EXIT_BLOCK_PTR)
1138 ret = false;
1139
1140 VEC_free (edge, heap, exits);
1141 }
1142
1143 return ret;
1144 }
1145
1146 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1147 references in the loop. */
1148
1149 static void
1150 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1151 {
1152 struct mem_ref *ref;
1153 bool any = false;
1154
1155 if (!may_use_storent_in_loop_p (loop))
1156 return;
1157
1158 for (; groups; groups = groups->next)
1159 for (ref = groups->refs; ref; ref = ref->next)
1160 any |= mark_nontemporal_store (ref);
1161
1162 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1163 emit_mfence_after_loop (loop);
1164 }
1165
1166 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1167 this is the case, fill in DESC with the description of the number of
1168 iterations. */
1169
1170 static bool
1171 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1172 unsigned factor)
1173 {
1174 if (!can_unroll_loop_p (loop, factor, desc))
1175 return false;
1176
1177 /* We only consider loops without control flow for unrolling. This is not
1178 a hard restriction -- tree_unroll_loop works with arbitrary loops
1179 as well; but the unrolling/prefetching is usually more profitable for
1180 loops consisting of a single basic block, and we want to limit the
1181 code growth. */
1182 if (loop->num_nodes > 2)
1183 return false;
1184
1185 return true;
1186 }
1187
1188 /* Determine the factor by which to unroll LOOP, from the information
1189 contained in the list of memory references REFS. The description of the
1190 number of iterations of LOOP is stored in DESC. NINSNS is the number of
1191 insns of the LOOP. EST_NITER is the estimated number of iterations of
1192 the loop, or -1 if no estimate is available. */
1193
1194 static unsigned
1195 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1196 unsigned ninsns, struct tree_niter_desc *desc,
1197 HOST_WIDE_INT est_niter)
1198 {
1199 unsigned upper_bound;
1200 unsigned nfactor, factor, mod_constraint;
1201 struct mem_ref_group *agp;
1202 struct mem_ref *ref;
1203
1204 /* First check whether the loop is not too large to unroll. We ignore
1205 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1206 from unrolling them enough to make exactly one cache line covered by each
1207 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1208 us from unrolling the loops too many times in cases where we only expect
1209 gains from better scheduling and decreasing loop overhead, which is not
1210 the case here. */
1211 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1212
1213 /* If we unrolled the loop more times than it iterates, the unrolled version
1214 of the loop would never be entered. */
1215 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1216 upper_bound = est_niter;
1217
1218 if (upper_bound <= 1)
1219 return 1;
1220
1221 /* Choose the factor so that we may prefetch each cache line just once,
1222 but bound the unrolling by UPPER_BOUND. */
1223 factor = 1;
1224 for (agp = refs; agp; agp = agp->next)
1225 for (ref = agp->refs; ref; ref = ref->next)
1226 if (should_issue_prefetch_p (ref))
1227 {
1228 mod_constraint = ref->prefetch_mod;
1229 nfactor = least_common_multiple (mod_constraint, factor);
1230 if (nfactor <= upper_bound)
1231 factor = nfactor;
1232 }
1233
1234 if (!should_unroll_loop_p (loop, desc, factor))
1235 return 1;
1236
1237 return factor;
1238 }
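/* Illustrative example (parameter values assumed for exposition): with
   ninsns = 20 and PARAM_MAX_UNROLLED_INSNS = 200, upper_bound is 10.
   If the prefetched references have prefetch_mod 4 and 2, the factor
   grows as 1 -> least_common_multiple (4, 1) = 4 ->
   least_common_multiple (2, 4) = 4, so the loop is unrolled 4 times,
   provided should_unroll_loop_p agrees and the estimated trip count
   does not lower UPPER_BOUND below 4.  */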
1239
1240 /* Returns the total volume of the memory references REFS, taking into account
1241 reuses in the innermost loop and cache line size. TODO -- we should also
1242 take into account reuses across the iterations of the loops in the loop
1243 nest. */
1244
1245 static unsigned
1246 volume_of_references (struct mem_ref_group *refs)
1247 {
1248 unsigned volume = 0;
1249 struct mem_ref_group *gr;
1250 struct mem_ref *ref;
1251
1252 for (gr = refs; gr; gr = gr->next)
1253 for (ref = gr->refs; ref; ref = ref->next)
1254 {
1255 /* Almost always reuses another value? */
1256 if (ref->prefetch_before != PREFETCH_ALL)
1257 continue;
1258
1259 /* If several iterations access the same cache line, use the size of
1260 the line divided by this number. Otherwise, a cache line is
1261 accessed in each iteration. TODO -- in the latter case, we should
1262 take the size of the reference into account, rounding it up to a
1263 multiple of the cache line size. */
1264 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1265 }
1266 return volume;
1267 }
1268
1269 /* Returns the volume of memory references accessed across VEC iterations of
1270 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1271 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1272
1273 static unsigned
1274 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1275 {
1276 unsigned i;
1277
1278 for (i = 0; i < n; i++)
1279 if (vec[i] != 0)
1280 break;
1281
1282 if (i == n)
1283 return 0;
1284
1285 gcc_assert (vec[i] > 0);
1286
1287 /* We ignore the parts of the distance vector in subloops, since usually
1288 the numbers of iterations are much smaller. */
1289 return loop_sizes[i] * vec[i];
1290 }
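/* For instance (values assumed for exposition), with n = 2,
   loop_sizes = { 100000, 400 } and a distance vector of { 0, 3 }, the
   first nonzero entry is at the innermost position, so the function
   returns 400 * 3 = 1200 bytes accessed between the two uses.  */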
1291
1292 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1293 at the position corresponding to the loop of the step. N is the depth
1294 of the considered loop nest, and LOOP is its innermost loop. */
1295
1296 static void
1297 add_subscript_strides (tree access_fn, unsigned stride,
1298 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1299 {
1300 struct loop *aloop;
1301 tree step;
1302 HOST_WIDE_INT astep;
1303 unsigned min_depth = loop_depth (loop) - n;
1304
1305 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1306 {
1307 aloop = get_chrec_loop (access_fn);
1308 step = CHREC_RIGHT (access_fn);
1309 access_fn = CHREC_LEFT (access_fn);
1310
1311 if ((unsigned) loop_depth (aloop) <= min_depth)
1312 continue;
1313
1314 if (host_integerp (step, 0))
1315 astep = tree_low_cst (step, 0);
1316 else
1317 astep = L1_CACHE_LINE_SIZE;
1318
1319 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1320
1321 }
1322 }
1323
1324 /* Returns the volume of memory references accessed between two consecutive
1325 self-reuses of the reference DR. We consider the subscripts of DR in N
1326 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1327 loops. LOOP is the innermost loop of the current loop nest. */
1328
1329 static unsigned
1330 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1331 struct loop *loop)
1332 {
1333 tree stride, access_fn;
1334 HOST_WIDE_INT *strides, astride;
1335 VEC (tree, heap) *access_fns;
1336 tree ref = DR_REF (dr);
1337 unsigned i, ret = ~0u;
1338
1339 /* In the following example:
1340
1341 for (i = 0; i < N; i++)
1342 for (j = 0; j < N; j++)
1343 use (a[j][i]);
1344 the same cache line is accessed every N steps (except if the change from
1345 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1346 we cannot rely purely on the results of the data dependence analysis.
1347
1348 Instead, we compute the stride of the reference in each loop, and consider
1349 the innermost loop in which the stride is less than the cache size. */
1350
1351 strides = XCNEWVEC (HOST_WIDE_INT, n);
1352 access_fns = DR_ACCESS_FNS (dr);
1353
1354 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
1355 {
1356 /* Keep track of the reference corresponding to the subscript, so that we
1357 know its stride. */
1358 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1359 ref = TREE_OPERAND (ref, 0);
1360
1361 if (TREE_CODE (ref) == ARRAY_REF)
1362 {
1363 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1364 if (host_integerp (stride, 1))
1365 astride = tree_low_cst (stride, 1);
1366 else
1367 astride = L1_CACHE_LINE_SIZE;
1368
1369 ref = TREE_OPERAND (ref, 0);
1370 }
1371 else
1372 astride = 1;
1373
1374 add_subscript_strides (access_fn, astride, strides, n, loop);
1375 }
1376
1377 for (i = n; i-- > 0; )
1378 {
1379 unsigned HOST_WIDE_INT s;
1380
1381 s = strides[i] < 0 ? -strides[i] : strides[i];
1382
1383 if (s < (unsigned) L1_CACHE_LINE_SIZE
1384 && (loop_sizes[i]
1385 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1386 {
1387 ret = loop_sizes[i];
1388 break;
1389 }
1390 }
1391
1392 free (strides);
1393 return ret;
1394 }
1395
1396 /* Determines the distance until the first reuse of each reference in REFS
1397 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1398 memory references in the loop. */
1399
1400 static void
1401 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1402 bool no_other_refs)
1403 {
1404 struct loop *nest, *aloop;
1405 VEC (data_reference_p, heap) *datarefs = NULL;
1406 VEC (ddr_p, heap) *dependences = NULL;
1407 struct mem_ref_group *gr;
1408 struct mem_ref *ref, *refb;
1409 VEC (loop_p, heap) *vloops = NULL;
1410 unsigned *loop_data_size;
1411 unsigned i, j, n;
1412 unsigned volume, dist, adist;
1413 HOST_WIDE_INT vol;
1414 data_reference_p dr;
1415 ddr_p dep;
1416
1417 if (loop->inner)
1418 return;
1419
1420 /* Find the outermost loop of the loop nest of LOOP (we require that
1421 there are no sibling loops inside the nest). */
1422 nest = loop;
1423 while (1)
1424 {
1425 aloop = loop_outer (nest);
1426
1427 if (aloop == current_loops->tree_root
1428 || aloop->inner->next)
1429 break;
1430
1431 nest = aloop;
1432 }
1433
1434 /* For each loop, determine the amount of data accessed in each iteration.
1435 We use this to estimate whether the reference is evicted from the
1436 cache before its reuse. */
1437 find_loop_nest (nest, &vloops);
1438 n = VEC_length (loop_p, vloops);
1439 loop_data_size = XNEWVEC (unsigned, n);
1440 volume = volume_of_references (refs);
1441 i = n;
1442 while (i-- != 0)
1443 {
1444 loop_data_size[i] = volume;
1445 /* Bound the volume by the L2 cache size, since above this bound,
1446 all dependence distances are equivalent. */
1447 if (volume > L2_CACHE_SIZE_BYTES)
1448 continue;
1449
1450 aloop = VEC_index (loop_p, vloops, i);
1451 vol = estimated_loop_iterations_int (aloop, false);
1452 if (vol < 0)
1453 vol = expected_loop_iterations (aloop);
1454 volume *= vol;
1455 }
1456
1457 /* Prepare the references in the form suitable for data dependence
1458 analysis. We ignore unanalyzable data references (the results
1459 are used just as a heuristic to estimate the temporality of the
1460 references, hence we do not need to worry about correctness). */
1461 for (gr = refs; gr; gr = gr->next)
1462 for (ref = gr->refs; ref; ref = ref->next)
1463 {
1464 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1465
1466 if (dr)
1467 {
1468 ref->reuse_distance = volume;
1469 dr->aux = ref;
1470 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1471 }
1472 else
1473 no_other_refs = false;
1474 }
1475
1476 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
1477 {
1478 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1479 ref = (struct mem_ref *) dr->aux;
1480 if (ref->reuse_distance > dist)
1481 ref->reuse_distance = dist;
1482
1483 if (no_other_refs)
1484 ref->independent_p = true;
1485 }
1486
1487 compute_all_dependences (datarefs, &dependences, vloops, true);
1488
1489 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
1490 {
1491 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1492 continue;
1493
1494 ref = (struct mem_ref *) DDR_A (dep)->aux;
1495 refb = (struct mem_ref *) DDR_B (dep)->aux;
1496
1497 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1498 || DDR_NUM_DIST_VECTS (dep) == 0)
1499 {
1500 /* If the dependence cannot be analyzed, assume that there might be
1501 a reuse. */
1502 dist = 0;
1503
1504 ref->independent_p = false;
1505 refb->independent_p = false;
1506 }
1507 else
1508 {
1509 /* The distance vectors are normalized to be always lexicographically
1510 positive, hence we cannot tell just from them whether DDR_A comes
1511 before DDR_B or vice versa. However, it is not important,
1512 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1513 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1514 in cache (and marking it as nontemporal would not affect
1515 anything). */
1516
1517 dist = volume;
1518 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1519 {
1520 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1521 loop_data_size, n);
1522
1523 /* If this is a dependence in the innermost loop (i.e., the
1524 distances in all superloops are zero) and it is not
1525 the trivial self-dependence with distance zero, record that
1526 the references are not completely independent. */
1527 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1528 && (ref != refb
1529 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1530 {
1531 ref->independent_p = false;
1532 refb->independent_p = false;
1533 }
1534
1535 /* Ignore accesses closer than
1536 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1537 so that we use nontemporal prefetches e.g. if single memory
1538 location is accessed several times in a single iteration of
1539 the loop. */
1540 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1541 continue;
1542
1543 if (adist < dist)
1544 dist = adist;
1545 }
1546 }
1547
1548 if (ref->reuse_distance > dist)
1549 ref->reuse_distance = dist;
1550 if (refb->reuse_distance > dist)
1551 refb->reuse_distance = dist;
1552 }
1553
1554 free_dependence_relations (dependences);
1555 free_data_refs (datarefs);
1556 free (loop_data_size);
1557
1558 if (dump_file && (dump_flags & TDF_DETAILS))
1559 {
1560 fprintf (dump_file, "Reuse distances:\n");
1561 for (gr = refs; gr; gr = gr->next)
1562 for (ref = gr->refs; ref; ref = ref->next)
1563 fprintf (dump_file, " ref %p distance %u\n",
1564 (void *) ref, ref->reuse_distance);
1565 }
1566 }
1567
1568 /* Do a cost-benefit analysis to determine if prefetching is profitable
1569 for the current loop given the following parameters:
1570 AHEAD: the iteration ahead distance,
1571 EST_NITER: the estimated trip count,
1572 NINSNS: estimated number of instructions in the loop,
1573 PREFETCH_COUNT: an estimate of the number of prefetches,
1574 MEM_REF_COUNT: total number of memory references in the loop,
UNROLL_FACTOR: the factor by which the loop will be unrolled. */
1575
1576 static bool
1577 is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
1578 unsigned ninsns, unsigned prefetch_count,
1579 unsigned mem_ref_count, unsigned unroll_factor)
1580 {
1581 int insn_to_mem_ratio, insn_to_prefetch_ratio;
1582
1583 if (mem_ref_count == 0)
1584 return false;
1585
1586 /* Prefetching improves performance by overlapping cache missing
1587 memory accesses with CPU operations. If the loop does not have
1588 enough CPU operations to overlap with memory operations, prefetching
1589 won't give a significant benefit. One approximate way of checking
1590 this is to require the ratio of instructions to memory references to
1591 be above a certain limit. This approximation works well in practice.
1592 TODO: Implement a more precise computation by estimating the time
1593 for each CPU or memory op in the loop. Time estimates for memory ops
1594 should account for cache misses. */
1595 insn_to_mem_ratio = ninsns / mem_ref_count;
1596
1597 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1598 {
1599 if (dump_file && (dump_flags & TDF_DETAILS))
1600 fprintf (dump_file,
1601 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1602 insn_to_mem_ratio);
1603 return false;
1604 }
1605
1606 /* Prefetching most likely causes performance degradation when the instruction
1607 to prefetch ratio is too small. Too many prefetch instructions in a loop
1608 may reduce the I-cache performance.
1609 (unroll_factor * ninsns) is used to estimate the number of instructions in
1610 the unrolled loop. This implementation is a bit simplistic -- the number
1611 of issued prefetch instructions is also affected by unrolling. So,
1612 prefetch_mod and the unroll factor should be taken into account when
1613 determining prefetch_count. Also, the number of insns of the unrolled
1614 loop will usually be significantly smaller than the number of insns of the
1615 original loop * unroll_factor (at least the induction variable increases
1616 and the exit branches will get eliminated), so it might be better to use
1617 tree_estimate_loop_size + estimated_unrolled_size. */
1618 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1619 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1620 {
1621 if (dump_file && (dump_flags & TDF_DETAILS))
1622 fprintf (dump_file,
1623 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1624 insn_to_prefetch_ratio);
1625 return false;
1626 }
1627
1628 /* We cannot estimate further if the trip count is unknown. Just assume
1629 prefetching is profitable. Too aggressive??? */
1630 if (est_niter < 0)
1631 return true;
1632
1633 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1634 {
1635 if (dump_file && (dump_flags & TDF_DETAILS))
1636 fprintf (dump_file,
1637 "Not prefetching -- loop estimated to roll only %d times\n",
1638 (int) est_niter);
1639 return false;
1640 }
1641 return true;
1642 }
1643
1644
1645 /* Issue prefetch instructions for array references in LOOP. Returns
1646 true if the LOOP was unrolled. */
1647
1648 static bool
1649 loop_prefetch_arrays (struct loop *loop)
1650 {
1651 struct mem_ref_group *refs;
1652 unsigned ahead, ninsns, time, unroll_factor;
1653 HOST_WIDE_INT est_niter;
1654 struct tree_niter_desc desc;
1655 bool unrolled = false, no_other_refs;
1656 unsigned prefetch_count;
1657 unsigned mem_ref_count;
1658
1659 if (optimize_loop_nest_for_size_p (loop))
1660 {
1661 if (dump_file && (dump_flags & TDF_DETAILS))
1662 fprintf (dump_file, " ignored (cold area)\n");
1663 return false;
1664 }
1665
1666 /* Step 1: gather the memory references. */
1667 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1668
1669 /* Step 2: estimate the reuse effects. */
1670 prune_by_reuse (refs);
1671
1672 prefetch_count = estimate_prefetch_count (refs);
1673 if (prefetch_count == 0)
1674 goto fail;
1675
1676 determine_loop_nest_reuse (loop, refs, no_other_refs);
1677
1678 /* Step 3: determine the ahead and unroll factor. */
1679
1680 /* FIXME: the time should be weighted by the probabilities of the blocks in
1681 the loop body. */
1682 time = tree_num_loop_insns (loop, &eni_time_weights);
1683 ahead = (PREFETCH_LATENCY + time - 1) / time;
1684 est_niter = estimated_loop_iterations_int (loop, false);
1685
1686 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1687 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1688 est_niter);
1689 if (dump_file && (dump_flags & TDF_DETAILS))
1690 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1691 HOST_WIDE_INT_PRINT_DEC "\n"
1692 "insn count %d, mem ref count %d, prefetch count %d\n",
1693 ahead, unroll_factor, est_niter,
1694 ninsns, mem_ref_count, prefetch_count);
1695
1696 if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns, prefetch_count,
1697 mem_ref_count, unroll_factor))
1698 goto fail;
1699
1700 mark_nontemporal_stores (loop, refs);
1701
1702 /* Step 4: what to prefetch? */
1703 if (!schedule_prefetches (refs, unroll_factor, ahead))
1704 goto fail;
1705
1706 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1707 iterations so that we do not issue superfluous prefetches. */
1708 if (unroll_factor != 1)
1709 {
1710 tree_unroll_loop (loop, unroll_factor,
1711 single_dom_exit (loop), &desc);
1712 unrolled = true;
1713 }
1714
1715 /* Step 6: issue the prefetches. */
1716 issue_prefetches (refs, unroll_factor, ahead);
1717
1718 fail:
1719 release_mem_refs (refs);
1720 return unrolled;
1721 }
1722
1723 /* Issue prefetch instructions for array references in loops. */
1724
1725 unsigned int
1726 tree_ssa_prefetch_arrays (void)
1727 {
1728 loop_iterator li;
1729 struct loop *loop;
1730 bool unrolled = false;
1731 int todo_flags = 0;
1732
1733 if (!HAVE_prefetch
1734 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1735 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1736 of the processor costs and i486 does not have prefetch, but
1737 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1738 || PREFETCH_BLOCK == 0)
1739 return 0;
1740
1741 if (dump_file && (dump_flags & TDF_DETAILS))
1742 {
1743 fprintf (dump_file, "Prefetching parameters:\n");
1744 fprintf (dump_file, " simultaneous prefetches: %d\n",
1745 SIMULTANEOUS_PREFETCHES);
1746 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1747 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1748 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
1749 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1750 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1751 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
1752 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
1753 MIN_INSN_TO_PREFETCH_RATIO);
1754 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
1755 PREFETCH_MIN_INSN_TO_MEM_RATIO);
1756 fprintf (dump_file, "\n");
1757 }
1758
1759 initialize_original_copy_tables ();
1760
1761 if (!built_in_decls[BUILT_IN_PREFETCH])
1762 {
1763 tree type = build_function_type (void_type_node,
1764 tree_cons (NULL_TREE,
1765 const_ptr_type_node,
1766 NULL_TREE));
1767 tree decl = add_builtin_function ("__builtin_prefetch", type,
1768 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1769 NULL, NULL_TREE);
1770 DECL_IS_NOVOPS (decl) = true;
1771 built_in_decls[BUILT_IN_PREFETCH] = decl;
1772 }
1773
1774 /* We assume that the cache line size is a power of two, so verify this
1775 here. */
1776 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1777
1778 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1779 {
1780 if (dump_file && (dump_flags & TDF_DETAILS))
1781 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1782
1783 unrolled |= loop_prefetch_arrays (loop);
1784
1785 if (dump_file && (dump_flags & TDF_DETAILS))
1786 fprintf (dump_file, "\n\n");
1787 }
1788
1789 if (unrolled)
1790 {
1791 scev_reset ();
1792 todo_flags |= TODO_cleanup_cfg;
1793 }
1794
1795 free_original_copy_tables ();
1796 return todo_flags;
1797 }