/* Copyright (C) 2005-2013 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-ssa.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "diagnostic-core.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-data-ref.h"
/* FIXME: Needed for optabs, but this should all be moved to a TBD interface
   between the GIMPLE and RTL worlds.  */
/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the single loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
        {
          a[255] = ...;         (0)
          a[i] = ...;           (1)
          a[i + 64] = ...;      (2)
          a[16*i] = ...;        (3)
          a[187*i] = ...;       (4)
          a[187*i + 50] = ...;  (5)
        }

       (0) obviously has PREFETCH_BEFORE 1
       (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
           location 64 iterations before it, and PREFETCH_MOD 64 (since
           it hits the same cache line otherwise).
       (2) has PREFETCH_MOD 64
       (3) has PREFETCH_MOD 4
       (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
           the cache line accessed by (5) is the same with probability only
           7/32.
       (5) has PREFETCH_MOD 1 as well.
      Additionally, we use data dependence analysis to determine for each
      reference the distance till the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.

   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop.
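
      As a purely illustrative example (the numbers below are assumptions
      for exposition, not values of any particular target): if the prefetch
      latency were 200 cycles and one loop iteration were estimated to take
      10 cycles, the pass would aim to prefetch roughly
      (200 + 10 - 1) / 10 = 20 iterations ahead.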
   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We prefetch as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?
   A cost model is implemented to determine whether or not prefetching is
   profitable for a given loop.  The cost model has three heuristics:

   1. Function trip_count_to_ahead_ratio_too_small_p implements a
      heuristic that determines whether or not the loop has too few
      iterations (compared to ahead).  Prefetching is not likely to be
      beneficial if the trip count to ahead ratio is below a certain
      minimum.

   2. Function mem_ref_count_reasonable_p implements a heuristic that
      determines whether the given loop has enough CPU ops that can be
      overlapped with cache missing memory ops.  If not, the loop
      won't benefit from prefetching.  In the implementation,
      prefetching is not considered beneficial if the ratio between
      the instruction count and the mem ref count is below a certain
      minimum.

   3. Function insn_to_prefetch_ratio_too_small_p implements a
      heuristic that disables prefetching in a loop if the prefetching
      cost is above a certain limit.  The relative prefetching cost is
      estimated by taking the ratio between the prefetch count and the
      total instruction count (this models the I-cache cost).

   The limits used in these heuristics are defined as parameters with
   reasonable default values.  Machine-specific default values will be
   added later.
   Some other TODO:
      -- write and use more general reuse analysis (that could be also used
         in other cache aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by user
         (now we just ignore them; at the very least we should avoid
         optimizing loops in which the user put their own prefetches)
      -- we assume cache line size alignment of arrays; this could be
         improved.  */
/* Magic constants follow.  These should be replaced by machine specific
   numbers whenever possible.  */

/* True if write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if a single memory location
   is accessed several times in a single iteration of the loop.  */

#define NONTEMPORAL_FRACTION 16
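
/* Illustrative example (cache sizes assumed for exposition only): on a
   hypothetical target with a 32 kB L1 cache and a 512 kB L2 cache, an
   access would be treated as nontemporal only if at least
   L2_CACHE_SIZE_BYTES = 512 * 1024 bytes of data are accessed before its
   next reuse, while reuses within
   L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION = 32 * 1024 / 16 = 2048 bytes
   would be ignored when computing that reuse distance.  */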
/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif
/* It is not profitable to prefetch when the trip count is not at
   least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
   For example, in a loop with a prefetch ahead distance of 10,
   supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
   profitable to prefetch when the trip count is greater than or equal to
   40.  In that case, 30 out of the 40 iterations will benefit from
   prefetching.  */

#ifndef TRIP_COUNT_TO_AHEAD_RATIO
#define TRIP_COUNT_TO_AHEAD_RATIO 4
#endif
/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;                    /* Base of the reference.  */
  tree step;                    /* Step of the reference.  */
  struct mem_ref *refs;         /* References in the group.  */
  struct mem_ref_group *next;   /* Next group of references.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
/* Do not generate a prefetch if the unroll factor is significantly less
   than what is required by the prefetch.  This is to avoid redundant
   prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
   2, prefetching requires unrolling the loop 16 times, but
   the loop is actually unrolled twice.  In this case (ratio = 8),
   prefetching is not likely to be beneficial.  */

#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif
/* Some of the prefetch computations have quadratic complexity.  We want to
   avoid huge compile times and, therefore, want to limit the amount of
   memory references per loop where we consider prefetching.  */

#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
#endif
/* The memory reference.  */

struct mem_ref
{
  gimple stmt;                  /* Statement in which the reference appears.  */
  tree mem;                     /* The reference.  */
  HOST_WIDE_INT delta;          /* Constant offset of the reference.  */
  struct mem_ref_group *group;  /* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
                                /* Prefetch only each PREFETCH_MOD-th
                                   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
                                /* Prefetch only first PREFETCH_BEFORE
                                   iterations.  */
  unsigned reuse_distance;      /* The amount of data accessed before the first
                                   reuse of this value.  */
  struct mem_ref *next;         /* The next reference in the group.  */
  unsigned write_p : 1;         /* Is it a write?  */
  unsigned independent_p : 1;   /* True if the reference is independent of
                                   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;        /* Should we really issue the prefetch?  */
  unsigned storent_p : 1;       /* True if we changed the store to a
                                   nontemporal one.  */
};
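
/* Illustrative example (tying the fields to the header comment above, with
   an assumed 64-byte cache line): for the access a[16*i], the pass would
   record prefetch_mod = 64 / 16 = 4 (a new cache line is entered only every
   fourth iteration) and leave prefetch_before at PREFETCH_ALL, so a prefetch
   is emitted only for every fourth copy of the statement once the loop has
   been unrolled.  */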
/* Dumps information about a memory reference to FILE.  */

static void
dump_mem_details (FILE *file, tree base, tree step,
                  HOST_WIDE_INT delta, bool write_p)
{
  fprintf (file, "(base ");
  print_generic_expr (file, base, TDF_SLIM);
  fprintf (file, ", step ");
  if (cst_and_fits_in_hwi (step))
    fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
  else
    print_generic_expr (file, step, TDF_TREE);
  fprintf (file, ")\n");
  fprintf (file, "  delta ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
  fprintf (file, "\n");
  fprintf (file, "  %s\n", write_p ? "write" : "read");
  fprintf (file, "\n");
}
/* Dumps information about reference REF to FILE.  */

static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
{
  fprintf (file, "Reference %p:\n", (void *) ref);

  fprintf (file, "  group %p ", (void *) ref->group);

  dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
                    ref->write_p);
}
/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */

static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if (operand_equal_p ((*groups)->step, step, 0)
          && operand_equal_p ((*groups)->base, base, 0))
        return *groups;

      /* If step is an integer constant, keep the list of groups sorted
         by decreasing step.  */
      if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
          && int_cst_value ((*groups)->step) < int_cst_value (step))
        break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->next = *groups;
  *groups = group;

  return group;
}
/* Records a memory reference MEM in GROUP with offset DELTA and write status
   WRITE_P.  The reference occurs in statement STMT.  */

static void
record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
            HOST_WIDE_INT delta, bool write_p)
{
  struct mem_ref **aref;

  /* Do not record the same address twice.  */
  for (aref = &group->refs; *aref; aref = &(*aref)->next)
    {
      /* It need not be possible for a write reference to reuse a read
         prefetch, or vice versa.  */
      if (!WRITE_CAN_USE_READ_PREFETCH
          && write_p
          && !(*aref)->write_p)
        continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
          && !write_p
          && (*aref)->write_p)
        continue;

      if ((*aref)->delta == delta)
        return;
    }

  (*aref) = XNEW (struct mem_ref);
  (*aref)->stmt = stmt;
  (*aref)->mem = mem;
  (*aref)->delta = delta;
  (*aref)->write_p = write_p;
  (*aref)->prefetch_before = PREFETCH_ALL;
  (*aref)->prefetch_mod = 1;
  (*aref)->reuse_distance = 0;
  (*aref)->issue_prefetch_p = false;
  (*aref)->group = group;
  (*aref)->next = NULL;
  (*aref)->independent_p = false;
  (*aref)->storent_p = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_mem_ref (dump_file, *aref);
}
/* Release memory references in GROUPS.  */

static void
release_mem_refs (struct mem_ref_group *groups)
{
  struct mem_ref_group *next_g;
  struct mem_ref *ref, *next_r;

  for (; groups; groups = next_g)
    {
      next_g = groups->next;
      for (ref = groups->refs; ref; ref = next_r)
        {
          next_r = ref->next;
          free (ref);
        }
      free (groups);
    }
}
/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;            /* Loop of the reference.  */
  gimple stmt;                  /* Statement of the reference.  */
  tree *step;                   /* Step of the memory reference.  */
  HOST_WIDE_INT *delta;         /* Offset of the memory reference.  */
};
/* Analyzes a single INDEX of a memory reference to obtain information
   described at analyze_ref.  Callback for for_each_index.  */

static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
  struct ar_data *ar_data = (struct ar_data *) data;
  tree ibase, step, stepsize;
  HOST_WIDE_INT idelta = 0, imult = 1;
433 if (!simple_iv (ar_data
->loop
, loop_containing_stmt (ar_data
->stmt
),
439 if (TREE_CODE (ibase
) == POINTER_PLUS_EXPR
440 && cst_and_fits_in_hwi (TREE_OPERAND (ibase
, 1)))
442 idelta
= int_cst_value (TREE_OPERAND (ibase
, 1));
443 ibase
= TREE_OPERAND (ibase
, 0);
445 if (cst_and_fits_in_hwi (ibase
))
447 idelta
+= int_cst_value (ibase
);
448 ibase
= build_int_cst (TREE_TYPE (ibase
), 0);
451 if (TREE_CODE (base
) == ARRAY_REF
)
453 stepsize
= array_ref_element_size (base
);
454 if (!cst_and_fits_in_hwi (stepsize
))
456 imult
= int_cst_value (stepsize
);
457 step
= fold_build2 (MULT_EXPR
, sizetype
,
458 fold_convert (sizetype
, step
),
459 fold_convert (sizetype
, stepsize
));
463 if (*ar_data
->step
== NULL_TREE
)
464 *ar_data
->step
= step
;
466 *ar_data
->step
= fold_build2 (PLUS_EXPR
, sizetype
,
467 fold_convert (sizetype
, *ar_data
->step
),
468 fold_convert (sizetype
, step
));
469 *ar_data
->delta
+= idelta
;
/* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
   STEP are integer constants and iter is number of iterations of LOOP.  The
   reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */
481 analyze_ref (struct loop
*loop
, tree
*ref_p
, tree
*base
,
482 tree
*step
, HOST_WIDE_INT
*delta
,
485 struct ar_data ar_data
;
487 HOST_WIDE_INT bit_offset
;
  /* First strip off the component references.  Ignore bitfields.
     Also strip off the real and imaginary parts of a complex, so that
     they can have the same base.  */
496 if (TREE_CODE (ref
) == REALPART_EXPR
497 || TREE_CODE (ref
) == IMAGPART_EXPR
498 || (TREE_CODE (ref
) == COMPONENT_REF
499 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref
, 1))))
501 if (TREE_CODE (ref
) == IMAGPART_EXPR
)
502 *delta
+= int_size_in_bytes (TREE_TYPE (ref
));
503 ref
= TREE_OPERAND (ref
, 0);
508 for (; TREE_CODE (ref
) == COMPONENT_REF
; ref
= TREE_OPERAND (ref
, 0))
510 off
= DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref
, 1));
511 bit_offset
= TREE_INT_CST_LOW (off
);
512 gcc_assert (bit_offset
% BITS_PER_UNIT
== 0);
514 *delta
+= bit_offset
/ BITS_PER_UNIT
;
517 *base
= unshare_expr (ref
);
521 ar_data
.delta
= delta
;
522 return for_each_index (base
, idx_analyze_ref
, &ar_data
);
/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is a write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */
530 gather_memory_references_ref (struct loop
*loop
, struct mem_ref_group
**refs
,
531 tree ref
, bool write_p
, gimple stmt
)
535 struct mem_ref_group
*agrp
;
537 if (get_base_address (ref
) == NULL
)
540 if (!analyze_ref (loop
, &ref
, &base
, &step
, &delta
, stmt
))
542 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
543 if (step
== NULL_TREE
)
546 /* Stop if the address of BASE could not be taken. */
547 if (may_be_nonaddressable_p (base
))
550 /* Limit non-constant step prefetching only to the innermost loops and
551 only when the step is loop invariant in the entire loop nest. */
552 if (!cst_and_fits_in_hwi (step
))
554 if (loop
->inner
!= NULL
)
556 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
558 fprintf (dump_file
, "Memory expression %p\n",(void *) ref
);
559 print_generic_expr (dump_file
, ref
, TDF_TREE
);
560 fprintf (dump_file
,":");
561 dump_mem_details (dump_file
, base
, step
, delta
, write_p
);
563 "Ignoring %p, non-constant step prefetching is "
564 "limited to inner most loops \n",
571 if (!expr_invariant_in_loop_p (loop_outermost (loop
), step
))
573 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
575 fprintf (dump_file
, "Memory expression %p\n",(void *) ref
);
576 print_generic_expr (dump_file
, ref
, TDF_TREE
);
577 fprintf (dump_file
,":");
578 dump_mem_details (dump_file
, base
, step
, delta
, write_p
);
580 "Not prefetching, ignoring %p due to "
581 "loop variant step\n",
589 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
590 are integer constants. */
591 agrp
= find_or_create_group (refs
, base
, step
);
592 record_ref (agrp
, stmt
, ref
, delta
, write_p
);
597 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
598 true if there are no other memory references inside the loop. */
600 static struct mem_ref_group
*
601 gather_memory_references (struct loop
*loop
, bool *no_other_refs
, unsigned *ref_count
)
603 basic_block
*body
= get_loop_body_in_dom_order (loop
);
606 gimple_stmt_iterator bsi
;
609 struct mem_ref_group
*refs
= NULL
;
611 *no_other_refs
= true;
  /* Scan the loop body in order, so that the former references precede the
     later ones.  */
616 for (i
= 0; i
< loop
->num_nodes
; i
++)
619 if (bb
->loop_father
!= loop
)
622 for (bsi
= gsi_start_bb (bb
); !gsi_end_p (bsi
); gsi_next (&bsi
))
624 stmt
= gsi_stmt (bsi
);
626 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
628 if (gimple_vuse (stmt
)
629 || (is_gimple_call (stmt
)
630 && !(gimple_call_flags (stmt
) & ECF_CONST
)))
631 *no_other_refs
= false;
635 lhs
= gimple_assign_lhs (stmt
);
636 rhs
= gimple_assign_rhs1 (stmt
);
638 if (REFERENCE_CLASS_P (rhs
))
640 *no_other_refs
&= gather_memory_references_ref (loop
, &refs
,
644 if (REFERENCE_CLASS_P (lhs
))
646 *no_other_refs
&= gather_memory_references_ref (loop
, &refs
,
657 /* Prune the prefetch candidate REF using the self-reuse. */
660 prune_ref_by_self_reuse (struct mem_ref
*ref
)
665 /* If the step size is non constant, we cannot calculate prefetch_mod. */
666 if (!cst_and_fits_in_hwi (ref
->group
->step
))
669 step
= int_cst_value (ref
->group
->step
);
675 /* Prefetch references to invariant address just once. */
676 ref
->prefetch_before
= 1;
683 if (step
> PREFETCH_BLOCK
)
686 if ((backward
&& HAVE_BACKWARD_PREFETCH
)
687 || (!backward
&& HAVE_FORWARD_PREFETCH
))
689 ref
->prefetch_before
= 1;
693 ref
->prefetch_mod
= PREFETCH_BLOCK
/ step
;
696 /* Divides X by BY, rounding down. */
699 ddown (HOST_WIDE_INT x
, unsigned HOST_WIDE_INT by
)
706 return (x
+ by
- 1) / by
;
/* Given a CACHE_LINE_SIZE and two inductive memory references
   with a common STEP greater than CACHE_LINE_SIZE and an address
   difference DELTA, compute the probability that they will fall
   in different cache lines.  Return true if the computed miss rate
   is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
   number of distinct iterations after which the pattern repeats itself.
   ALIGN_UNIT is the unit of alignment in bytes.  */
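
/* Worked example (all numbers assumed for exposition): with
   cache_line_size = 64, step = 96, delta = 32, distinct_iters = 2 and
   align_unit = 4, there are (64 / 4) * 2 = 32 (alignment, iteration)
   positions and at most (50 * 32) / 1000 = 1 miss position is tolerated;
   the enumeration below finds 16 positions where the two addresses fall
   into different cache lines, so this pair would be rejected.  */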
718 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size
,
719 HOST_WIDE_INT step
, HOST_WIDE_INT delta
,
720 unsigned HOST_WIDE_INT distinct_iters
,
723 unsigned align
, iter
;
724 int total_positions
, miss_positions
, max_allowed_miss_positions
;
725 int address1
, address2
, cache_line1
, cache_line2
;
  /* It always misses if delta is greater than or equal to the cache
     line size.  */
729 if (delta
>= (HOST_WIDE_INT
) cache_line_size
)
733 total_positions
= (cache_line_size
/ align_unit
) * distinct_iters
;
734 max_allowed_miss_positions
= (ACCEPTABLE_MISS_RATE
* total_positions
) / 1000;
736 /* Iterate through all possible alignments of the first
737 memory reference within its cache line. */
738 for (align
= 0; align
< cache_line_size
; align
+= align_unit
)
740 /* Iterate through all distinct iterations. */
741 for (iter
= 0; iter
< distinct_iters
; iter
++)
743 address1
= align
+ step
* iter
;
744 address2
= address1
+ delta
;
745 cache_line1
= address1
/ cache_line_size
;
746 cache_line2
= address2
/ cache_line_size
;
747 if (cache_line1
!= cache_line2
)
750 if (miss_positions
> max_allowed_miss_positions
)
/* Prune the prefetch candidate REF using the reuse with BY.
   If BY_IS_BEFORE is true, BY is before REF in the loop.  */
761 prune_ref_by_group_reuse (struct mem_ref
*ref
, struct mem_ref
*by
,
766 HOST_WIDE_INT delta_r
= ref
->delta
, delta_b
= by
->delta
;
767 HOST_WIDE_INT delta
= delta_b
- delta_r
;
768 HOST_WIDE_INT hit_from
;
769 unsigned HOST_WIDE_INT prefetch_before
, prefetch_block
;
770 HOST_WIDE_INT reduced_step
;
771 unsigned HOST_WIDE_INT reduced_prefetch_block
;
  /* If the step is non-constant, we cannot calculate prefetch_before.  */
776 if (!cst_and_fits_in_hwi (ref
->group
->step
)) {
780 step
= int_cst_value (ref
->group
->step
);
      /* If the references have the same address, prefetch only one of
         them.  */
790 ref
->prefetch_before
= 0;
797 /* If the reference addresses are invariant and fall into the
798 same cache line, prefetch just the first one. */
802 if (ddown (ref
->delta
, PREFETCH_BLOCK
)
803 != ddown (by
->delta
, PREFETCH_BLOCK
))
806 ref
->prefetch_before
= 0;
810 /* Only prune the reference that is behind in the array. */
      /* Transform the data so that we may assume that the accesses
         are forward.  */
820 delta_r
= PREFETCH_BLOCK
- 1 - delta_r
;
821 delta_b
= PREFETCH_BLOCK
- 1 - delta_b
;
  /* Check whether the two references are likely to hit the same cache
     line, and how distant the iterations in which it occurs are from
     each other.  */
833 if (step
<= PREFETCH_BLOCK
)
835 /* The accesses are sure to meet. Let us check when. */
836 hit_from
= ddown (delta_b
, PREFETCH_BLOCK
) * PREFETCH_BLOCK
;
837 prefetch_before
= (hit_from
- delta_r
+ step
- 1) / step
;
839 /* Do not reduce prefetch_before if we meet beyond cache size. */
840 if (prefetch_before
> absu_hwi (L2_CACHE_SIZE_BYTES
/ step
))
841 prefetch_before
= PREFETCH_ALL
;
842 if (prefetch_before
< ref
->prefetch_before
)
843 ref
->prefetch_before
= prefetch_before
;
  /* A more complicated case with step > prefetch_block.  First reduce
     the ratio between the step and the cache line size to its simplest
     terms.  The resulting denominator will then represent the number of
     distinct iterations after which each address will go back to its
     initial location within the cache line.  This computation assumes
     that PREFETCH_BLOCK is a power of two.  */
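  /* For instance (illustrative numbers): with step = 96 and
     PREFETCH_BLOCK = 64, both are halved repeatedly until they reach 3
     and 2, so after 2 iterations the address has advanced 192 bytes --
     exactly 3 cache lines -- and the within-line offset pattern starts
     repeating.  */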
854 prefetch_block
= PREFETCH_BLOCK
;
855 reduced_prefetch_block
= prefetch_block
;
857 while ((reduced_step
& 1) == 0
858 && reduced_prefetch_block
> 1)
861 reduced_prefetch_block
>>= 1;
864 prefetch_before
= delta
/ step
;
866 ref_type
= TREE_TYPE (ref
->mem
);
867 align_unit
= TYPE_ALIGN (ref_type
) / 8;
868 if (is_miss_rate_acceptable (prefetch_block
, step
, delta
,
869 reduced_prefetch_block
, align_unit
))
871 /* Do not reduce prefetch_before if we meet beyond cache size. */
872 if (prefetch_before
> L2_CACHE_SIZE_BYTES
/ PREFETCH_BLOCK
)
873 prefetch_before
= PREFETCH_ALL
;
874 if (prefetch_before
< ref
->prefetch_before
)
875 ref
->prefetch_before
= prefetch_before
;
880 /* Try also the following iteration. */
882 delta
= step
- delta
;
883 if (is_miss_rate_acceptable (prefetch_block
, step
, delta
,
884 reduced_prefetch_block
, align_unit
))
886 if (prefetch_before
< ref
->prefetch_before
)
887 ref
->prefetch_before
= prefetch_before
;
892 /* The ref probably does not reuse by. */
/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */
900 prune_ref_by_reuse (struct mem_ref
*ref
, struct mem_ref
*refs
)
902 struct mem_ref
*prune_by
;
905 prune_ref_by_self_reuse (ref
);
907 for (prune_by
= refs
; prune_by
; prune_by
= prune_by
->next
)
915 if (!WRITE_CAN_USE_READ_PREFETCH
917 && !prune_by
->write_p
)
919 if (!READ_CAN_USE_WRITE_PREFETCH
921 && prune_by
->write_p
)
924 prune_ref_by_group_reuse (ref
, prune_by
, before
);
928 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
931 prune_group_by_reuse (struct mem_ref_group
*group
)
933 struct mem_ref
*ref_pruned
;
935 for (ref_pruned
= group
->refs
; ref_pruned
; ref_pruned
= ref_pruned
->next
)
937 prune_ref_by_reuse (ref_pruned
, group
->refs
);
939 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
941 fprintf (dump_file
, "Reference %p:", (void *) ref_pruned
);
943 if (ref_pruned
->prefetch_before
== PREFETCH_ALL
944 && ref_pruned
->prefetch_mod
== 1)
945 fprintf (dump_file
, " no restrictions");
946 else if (ref_pruned
->prefetch_before
== 0)
947 fprintf (dump_file
, " do not prefetch");
948 else if (ref_pruned
->prefetch_before
<= ref_pruned
->prefetch_mod
)
949 fprintf (dump_file
, " prefetch once");
952 if (ref_pruned
->prefetch_before
!= PREFETCH_ALL
)
954 fprintf (dump_file
, " prefetch before ");
955 fprintf (dump_file
, HOST_WIDE_INT_PRINT_DEC
,
956 ref_pruned
->prefetch_before
);
958 if (ref_pruned
->prefetch_mod
!= 1)
960 fprintf (dump_file
, " prefetch mod ");
961 fprintf (dump_file
, HOST_WIDE_INT_PRINT_DEC
,
962 ref_pruned
->prefetch_mod
);
965 fprintf (dump_file
, "\n");
970 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
973 prune_by_reuse (struct mem_ref_group
*groups
)
975 for (; groups
; groups
= groups
->next
)
976 prune_group_by_reuse (groups
);
979 /* Returns true if we should issue prefetch for REF. */
982 should_issue_prefetch_p (struct mem_ref
*ref
)
  /* For now do not issue prefetches for only first few of the
     iterations.  */
986 if (ref
->prefetch_before
!= PREFETCH_ALL
)
988 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
989 fprintf (dump_file
, "Ignoring %p due to prefetch_before\n",
994 /* Do not prefetch nontemporal stores. */
997 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
998 fprintf (dump_file
, "Ignoring nontemporal store %p\n", (void *) ref
);
/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */
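
/* Illustrative example (parameter values assumed, not taken from a real
   target): with ahead = 8 and unroll_factor = 4, each scheduled prefetch
   occupies (8 + 4 / 2) / 4 = 2 of the available slots, so if the target
   advertised 6 simultaneous prefetches, roughly 6 / 2 = 3 prefetch
   instructions could be scheduled before the slots run out.  */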
1012 schedule_prefetches (struct mem_ref_group
*groups
, unsigned unroll_factor
,
1015 unsigned remaining_prefetch_slots
, n_prefetches
, prefetch_slots
;
1016 unsigned slots_per_prefetch
;
1017 struct mem_ref
*ref
;
1020 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
1021 remaining_prefetch_slots
= SIMULTANEOUS_PREFETCHES
;
1023 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
1024 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
1025 it will need a prefetch slot. */
1026 slots_per_prefetch
= (ahead
+ unroll_factor
/ 2) / unroll_factor
;
1027 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1028 fprintf (dump_file
, "Each prefetch instruction takes %u prefetch slots.\n",
1029 slots_per_prefetch
);
1031 /* For now we just take memory references one by one and issue
1032 prefetches for as many as possible. The groups are sorted
1033 starting with the largest step, since the references with
1034 large step are more likely to cause many cache misses. */
1036 for (; groups
; groups
= groups
->next
)
1037 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1039 if (!should_issue_prefetch_p (ref
))
      /* The loop is far from being sufficiently unrolled for this
         prefetch.  Do not generate prefetch to avoid many redundant
         prefetches.  */
1045 if (ref
->prefetch_mod
/ unroll_factor
> PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
)
      /* If we need to prefetch the reference each PREFETCH_MOD iterations,
         and we unroll the loop UNROLL_FACTOR times, we need to insert
         ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
         iteration.  */
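      /* For example (numbers assumed for illustration): unroll_factor = 4
         with prefetch_mod = 16 needs (4 + 16 - 1) / 16 = 1 prefetch per
         unrolled iteration, while unroll_factor = 16 with prefetch_mod = 4
         needs (16 + 4 - 1) / 4 = 4.  */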
1052 n_prefetches
= ((unroll_factor
+ ref
->prefetch_mod
- 1)
1053 / ref
->prefetch_mod
);
1054 prefetch_slots
= n_prefetches
* slots_per_prefetch
;
1056 /* If more than half of the prefetches would be lost anyway, do not
1057 issue the prefetch. */
1058 if (2 * remaining_prefetch_slots
< prefetch_slots
)
1061 ref
->issue_prefetch_p
= true;
1063 if (remaining_prefetch_slots
<= prefetch_slots
)
1065 remaining_prefetch_slots
-= prefetch_slots
;
/* Return TRUE if no prefetch is going to be generated in the given
   GROUPS.  */
1076 nothing_to_prefetch_p (struct mem_ref_group
*groups
)
1078 struct mem_ref
*ref
;
1080 for (; groups
; groups
= groups
->next
)
1081 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1082 if (should_issue_prefetch_p (ref
))
1088 /* Estimate the number of prefetches in the given GROUPS.
1089 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
1092 estimate_prefetch_count (struct mem_ref_group
*groups
, unsigned unroll_factor
)
1094 struct mem_ref
*ref
;
1095 unsigned n_prefetches
;
1096 int prefetch_count
= 0;
1098 for (; groups
; groups
= groups
->next
)
1099 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1100 if (should_issue_prefetch_p (ref
))
1102 n_prefetches
= ((unroll_factor
+ ref
->prefetch_mod
- 1)
1103 / ref
->prefetch_mod
);
1104 prefetch_count
+= n_prefetches
;
1107 return prefetch_count
;
/* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */
1115 issue_prefetch_ref (struct mem_ref
*ref
, unsigned unroll_factor
, unsigned ahead
)
1117 HOST_WIDE_INT delta
;
1118 tree addr
, addr_base
, write_p
, local
, forward
;
1120 gimple_stmt_iterator bsi
;
1121 unsigned n_prefetches
, ap
;
1122 bool nontemporal
= ref
->reuse_distance
>= L2_CACHE_SIZE_BYTES
;
1124 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1125 fprintf (dump_file
, "Issued%s prefetch for %p.\n",
1126 nontemporal
? " nontemporal" : "",
1129 bsi
= gsi_for_stmt (ref
->stmt
);
1131 n_prefetches
= ((unroll_factor
+ ref
->prefetch_mod
- 1)
1132 / ref
->prefetch_mod
);
1133 addr_base
= build_fold_addr_expr_with_type (ref
->mem
, ptr_type_node
);
1134 addr_base
= force_gimple_operand_gsi (&bsi
, unshare_expr (addr_base
),
1135 true, NULL
, true, GSI_SAME_STMT
);
1136 write_p
= ref
->write_p
? integer_one_node
: integer_zero_node
;
1137 local
= nontemporal
? integer_zero_node
: integer_three_node
;
1139 for (ap
= 0; ap
< n_prefetches
; ap
++)
1141 if (cst_and_fits_in_hwi (ref
->group
->step
))
1143 /* Determine the address to prefetch. */
1144 delta
= (ahead
+ ap
* ref
->prefetch_mod
) *
1145 int_cst_value (ref
->group
->step
);
1146 addr
= fold_build_pointer_plus_hwi (addr_base
, delta
);
1147 addr
= force_gimple_operand_gsi (&bsi
, unshare_expr (addr
), true, NULL
,
1148 true, GSI_SAME_STMT
);
          /* The step size is non-constant but loop-invariant.  We use the
             heuristic of simply prefetching AHEAD iterations ahead.  */
1154 forward
= fold_build2 (MULT_EXPR
, sizetype
,
1155 fold_convert (sizetype
, ref
->group
->step
),
1156 fold_convert (sizetype
, size_int (ahead
)));
1157 addr
= fold_build_pointer_plus (addr_base
, forward
);
1158 addr
= force_gimple_operand_gsi (&bsi
, unshare_expr (addr
), true,
1159 NULL
, true, GSI_SAME_STMT
);
1161 /* Create the prefetch instruction. */
1162 prefetch
= gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH
),
1163 3, addr
, write_p
, local
);
1164 gsi_insert_before (&bsi
, prefetch
, GSI_SAME_STMT
);
/* Issue prefetches for the references in GROUPS into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR is the
   factor by which LOOP was unrolled.  */
1173 issue_prefetches (struct mem_ref_group
*groups
,
1174 unsigned unroll_factor
, unsigned ahead
)
1176 struct mem_ref
*ref
;
1178 for (; groups
; groups
= groups
->next
)
1179 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1180 if (ref
->issue_prefetch_p
)
1181 issue_prefetch_ref (ref
, unroll_factor
, ahead
);
/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */
1188 nontemporal_store_p (struct mem_ref
*ref
)
1190 enum machine_mode mode
;
1191 enum insn_code code
;
  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
1197 || !ref
->independent_p
1198 || ref
->reuse_distance
< L2_CACHE_SIZE_BYTES
)
1201 /* Check that we have the storent instruction for the mode. */
1202 mode
= TYPE_MODE (TREE_TYPE (ref
->mem
));
1203 if (mode
== BLKmode
)
1206 code
= optab_handler (storent_optab
, mode
);
1207 return code
!= CODE_FOR_nothing
;
1210 /* If REF is a nontemporal store, we mark the corresponding modify statement
1211 and return true. Otherwise, we return false. */
1214 mark_nontemporal_store (struct mem_ref
*ref
)
1216 if (!nontemporal_store_p (ref
))
1219 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1220 fprintf (dump_file
, "Marked reference %p as a nontemporal store.\n",
1223 gimple_assign_set_nontemporal_move (ref
->stmt
, true);
1224 ref
->storent_p
= true;
1229 /* Issue a memory fence instruction after LOOP. */
1232 emit_mfence_after_loop (struct loop
*loop
)
1234 vec
<edge
> exits
= get_loop_exit_edges (loop
);
1237 gimple_stmt_iterator bsi
;
1240 FOR_EACH_VEC_ELT (exits
, i
, exit
)
1242 call
= gimple_build_call (FENCE_FOLLOWING_MOVNT
, 0);
1244 if (!single_pred_p (exit
->dest
)
          /* If possible, we prefer not to insert the fence on other paths
             in cfg.  */
1247 && !(exit
->flags
& EDGE_ABNORMAL
))
1248 split_loop_exit_edge (exit
);
1249 bsi
= gsi_after_labels (exit
->dest
);
1251 gsi_insert_before (&bsi
, call
, GSI_NEW_STMT
);
1255 update_ssa (TODO_update_ssa_only_virtuals
);
1258 /* Returns true if we can use storent in loop, false otherwise. */
1261 may_use_storent_in_loop_p (struct loop
*loop
)
1265 if (loop
->inner
!= NULL
)
1268 /* If we must issue a mfence insn after using storent, check that there
1269 is a suitable place for it at each of the loop exits. */
1270 if (FENCE_FOLLOWING_MOVNT
!= NULL_TREE
)
1272 vec
<edge
> exits
= get_loop_exit_edges (loop
);
1276 FOR_EACH_VEC_ELT (exits
, i
, exit
)
1277 if ((exit
->flags
& EDGE_ABNORMAL
)
1278 && exit
->dest
== EXIT_BLOCK_PTR
)
1287 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1288 references in the loop. */
1291 mark_nontemporal_stores (struct loop
*loop
, struct mem_ref_group
*groups
)
1293 struct mem_ref
*ref
;
1296 if (!may_use_storent_in_loop_p (loop
))
1299 for (; groups
; groups
= groups
->next
)
1300 for (ref
= groups
->refs
; ref
; ref
= ref
->next
)
1301 any
|= mark_nontemporal_store (ref
);
1303 if (any
&& FENCE_FOLLOWING_MOVNT
!= NULL_TREE
)
1304 emit_mfence_after_loop (loop
);
/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fill in DESC by the description of number of
   iterations.  */
1312 should_unroll_loop_p (struct loop
*loop
, struct tree_niter_desc
*desc
,
1315 if (!can_unroll_loop_p (loop
, factor
, desc
))
  /* We only consider loops without control flow for unrolling.  This is not
     a hard restriction -- tree_unroll_loop works with arbitrary loops
     as well; but the unrolling/prefetching is usually more profitable for
     loops consisting of a single basic block, and we want to limit the
     code size growth.  */
1323 if (loop
->num_nodes
> 2)
/* Determine the coefficient by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  Description of
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */
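
/* Illustrative example (numbers assumed): if the prefetched references
   have prefetch_mod 4 and 16 and the upper bound on unrolling is 32, the
   factor grows from 1 to lcm (4, 1) = 4 and then to lcm (16, 4) = 16, so
   the loop is unrolled 16 times; had the upper bound been 8, the second
   constraint would not fit and the factor would stay at 4.  */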
1336 determine_unroll_factor (struct loop
*loop
, struct mem_ref_group
*refs
,
1337 unsigned ninsns
, struct tree_niter_desc
*desc
,
1338 HOST_WIDE_INT est_niter
)
1340 unsigned upper_bound
;
1341 unsigned nfactor
, factor
, mod_constraint
;
1342 struct mem_ref_group
*agp
;
1343 struct mem_ref
*ref
;
  /* First check whether the loop is not too large to unroll.  We ignore
     PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
     from unrolling them enough to make exactly one cache line covered by each
     iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
     us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
1352 upper_bound
= PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS
) / ninsns
;
  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would never be entered.  */
1356 if (est_niter
>= 0 && est_niter
< (HOST_WIDE_INT
) upper_bound
)
1357 upper_bound
= est_niter
;
1359 if (upper_bound
<= 1)
  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
1365 for (agp
= refs
; agp
; agp
= agp
->next
)
1366 for (ref
= agp
->refs
; ref
; ref
= ref
->next
)
1367 if (should_issue_prefetch_p (ref
))
1369 mod_constraint
= ref
->prefetch_mod
;
1370 nfactor
= least_common_multiple (mod_constraint
, factor
);
1371 if (nfactor
<= upper_bound
)
1375 if (!should_unroll_loop_p (loop
, desc
, factor
))
/* Returns the total volume of the memory references REFS, taking into account
   reuses in the innermost loop and cache line size.  TODO -- we should also
   take into account reuses across the iterations of the loops in the loop
   nest.  */
1387 volume_of_references (struct mem_ref_group
*refs
)
1389 unsigned volume
= 0;
1390 struct mem_ref_group
*gr
;
1391 struct mem_ref
*ref
;
1393 for (gr
= refs
; gr
; gr
= gr
->next
)
1394 for (ref
= gr
->refs
; ref
; ref
= ref
->next
)
1396 /* Almost always reuses another value? */
1397 if (ref
->prefetch_before
!= PREFETCH_ALL
)
      /* If several iterations access the same cache line, use the size of
         the line divided by this number.  Otherwise, a cache line is
         accessed in each iteration.  TODO -- in the latter case, we should
         take the size of the reference into account, rounding it up to a
         multiple of the cache line size.  */
1405 volume
+= L1_CACHE_LINE_SIZE
/ ref
->prefetch_mod
;
/* Returns the volume of memory references accessed across VEC iterations of
   loops, whose sizes are described in the LOOP_SIZES array.  N is the number
   of the loops in the nest (length of VEC and LOOP_SIZES vectors).  */
1415 volume_of_dist_vector (lambda_vector vec
, unsigned *loop_sizes
, unsigned n
)
1419 for (i
= 0; i
< n
; i
++)
1426 gcc_assert (vec
[i
] > 0);
1428 /* We ignore the parts of the distance vector in subloops, since usually
1429 the numbers of iterations are much smaller. */
1430 return loop_sizes
[i
] * vec
[i
];
/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and LOOP is its innermost loop.  */
1438 add_subscript_strides (tree access_fn
, unsigned stride
,
1439 HOST_WIDE_INT
*strides
, unsigned n
, struct loop
*loop
)
1443 HOST_WIDE_INT astep
;
1444 unsigned min_depth
= loop_depth (loop
) - n
;
1446 while (TREE_CODE (access_fn
) == POLYNOMIAL_CHREC
)
1448 aloop
= get_chrec_loop (access_fn
);
1449 step
= CHREC_RIGHT (access_fn
);
1450 access_fn
= CHREC_LEFT (access_fn
);
1452 if ((unsigned) loop_depth (aloop
) <= min_depth
)
1455 if (host_integerp (step
, 0))
1456 astep
= tree_low_cst (step
, 0);
1458 astep
= L1_CACHE_LINE_SIZE
;
1460 strides
[n
- 1 - loop_depth (loop
) + loop_depth (aloop
)] += astep
* stride
;
/* Returns the volume of memory references accessed between two consecutive
   self-reuses of the reference DR.  We consider the subscripts of DR in N
   loops, and LOOP_SIZES contains the volumes of accesses in each of the
   loops.  LOOP is the innermost loop of the current loop nest.  */
1471 self_reuse_distance (data_reference_p dr
, unsigned *loop_sizes
, unsigned n
,
1474 tree stride
, access_fn
;
1475 HOST_WIDE_INT
*strides
, astride
;
1476 vec
<tree
> access_fns
;
1477 tree ref
= DR_REF (dr
);
1478 unsigned i
, ret
= ~0u;
  /* In the following example:

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
         use (a[j][i]);

     the same cache line is accessed each N steps (except if the change from
     i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
     we cannot rely purely on the results of the data dependence analysis.

     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than the cache size.  */
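
  /* For instance (assumed numbers): with 4-byte elements and N = 100, the
     access a[j][i] advances by 400 bytes per j iteration but only by 4
     bytes per i iteration; since only the i stride is below the cache line
     size, the self-reuse distance becomes the data volume of one full
     iteration of the i loop, i.e. of a whole sweep of the inner j loop.  */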
1492 strides
= XCNEWVEC (HOST_WIDE_INT
, n
);
1493 access_fns
= DR_ACCESS_FNS (dr
);
1495 FOR_EACH_VEC_ELT (access_fns
, i
, access_fn
)
      /* Keep track of the reference corresponding to the subscript, so that we
         know its stride.  */
) && TREE_CODE (ref
) != ARRAY_REF
)
1500 ref
= TREE_OPERAND (ref
, 0);
1502 if (TREE_CODE (ref
) == ARRAY_REF
)
1504 stride
= TYPE_SIZE_UNIT (TREE_TYPE (ref
));
1505 if (host_integerp (stride
, 1))
1506 astride
= tree_low_cst (stride
, 1);
1508 astride
= L1_CACHE_LINE_SIZE
;
1510 ref
= TREE_OPERAND (ref
, 0);
1515 add_subscript_strides (access_fn
, astride
, strides
, n
, loop
);
1518 for (i
= n
; i
-- > 0; )
1520 unsigned HOST_WIDE_INT s
;
1522 s
= strides
[i
] < 0 ? -strides
[i
] : strides
[i
];
1524 if (s
< (unsigned) L1_CACHE_LINE_SIZE
1526 > (unsigned) (L1_CACHE_SIZE_BYTES
/ NONTEMPORAL_FRACTION
)))
1528 ret
= loop_sizes
[i
];
/* Determines the distance till the first reuse of each reference in REFS
   in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
   memory references in the loop.  Return false if the analysis fails.  */
1542 determine_loop_nest_reuse (struct loop
*loop
, struct mem_ref_group
*refs
,
1545 struct loop
*nest
, *aloop
;
1546 vec
<data_reference_p
> datarefs
= vNULL
;
1547 vec
<ddr_p
> dependences
= vNULL
;
1548 struct mem_ref_group
*gr
;
1549 struct mem_ref
*ref
, *refb
;
1550 vec
<loop_p
> vloops
= vNULL
;
1551 unsigned *loop_data_size
;
1553 unsigned volume
, dist
, adist
;
1555 data_reference_p dr
;
1561 /* Find the outermost loop of the loop nest of loop (we require that
1562 there are no sibling loops inside the nest). */
1566 aloop
= loop_outer (nest
);
1568 if (aloop
== current_loops
->tree_root
1569 || aloop
->inner
->next
)
1575 /* For each loop, determine the amount of data accessed in each iteration.
1576 We use this to estimate whether the reference is evicted from the
1577 cache before its reuse. */
1578 find_loop_nest (nest
, &vloops
);
1579 n
= vloops
.length ();
1580 loop_data_size
= XNEWVEC (unsigned, n
);
1581 volume
= volume_of_references (refs
);
1585 loop_data_size
[i
] = volume
;
1586 /* Bound the volume by the L2 cache size, since above this bound,
1587 all dependence distances are equivalent. */
1588 if (volume
> L2_CACHE_SIZE_BYTES
)
1592 vol
= estimated_stmt_executions_int (aloop
);
1594 vol
= expected_loop_iterations (aloop
);
  /* Prepare the references in the form suitable for data dependence
     analysis.  We ignore unanalyzable data references (the results
     are used just as a heuristic to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
1602 for (gr
= refs
; gr
; gr
= gr
->next
)
1603 for (ref
= gr
->refs
; ref
; ref
= ref
->next
)
1605 dr
= create_data_ref (nest
, loop_containing_stmt (ref
->stmt
),
1606 ref
->mem
, ref
->stmt
, !ref
->write_p
);
1610 ref
->reuse_distance
= volume
;
1612 datarefs
.safe_push (dr
);
1615 no_other_refs
= false;
1618 FOR_EACH_VEC_ELT (datarefs
, i
, dr
)
1620 dist
= self_reuse_distance (dr
, loop_data_size
, n
, loop
);
1621 ref
= (struct mem_ref
*) dr
->aux
;
1622 if (ref
->reuse_distance
> dist
)
1623 ref
->reuse_distance
= dist
;
1626 ref
->independent_p
= true;
1629 if (!compute_all_dependences (datarefs
, &dependences
, vloops
, true))
1632 FOR_EACH_VEC_ELT (dependences
, i
, dep
)
1634 if (DDR_ARE_DEPENDENT (dep
) == chrec_known
)
1637 ref
= (struct mem_ref
*) DDR_A (dep
)->aux
;
1638 refb
= (struct mem_ref
*) DDR_B (dep
)->aux
;
1640 if (DDR_ARE_DEPENDENT (dep
) == chrec_dont_know
1641 || DDR_NUM_DIST_VECTS (dep
) == 0)
          /* If the dependence cannot be analyzed, assume that there might be
             a reuse.  */
1647 ref
->independent_p
= false;
1648 refb
->independent_p
= false;
          /* The distance vectors are normalized to be always lexicographically
             positive, hence we cannot tell just from them whether DDR_A comes
             before DDR_B or vice versa.  However, it is not important,
             anyway -- if DDR_A is close to DDR_B, then it is either reused in
             DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
             in cache (and marking it as nontemporal would not affect
             anything).  */
1661 for (j
= 0; j
< DDR_NUM_DIST_VECTS (dep
); j
++)
1663 adist
= volume_of_dist_vector (DDR_DIST_VECT (dep
, j
),
1666 /* If this is a dependence in the innermost loop (i.e., the
1667 distances in all superloops are zero) and it is not
1668 the trivial self-dependence with distance zero, record that
1669 the references are not completely independent. */
1670 if (lambda_vector_zerop (DDR_DIST_VECT (dep
, j
), n
- 1)
1672 || DDR_DIST_VECT (dep
, j
)[n
-1] != 0))
1674 ref
->independent_p
= false;
1675 refb
->independent_p
= false;
              /* Ignore accesses closer than
                 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
                 so that we use nontemporal prefetches e.g. if a single memory
                 location is accessed several times in a single iteration of
                 the loop.  */
1683 if (adist
< L1_CACHE_SIZE_BYTES
/ NONTEMPORAL_FRACTION
)
1691 if (ref
->reuse_distance
> dist
)
1692 ref
->reuse_distance
= dist
;
1693 if (refb
->reuse_distance
> dist
)
1694 refb
->reuse_distance
= dist
;
1697 free_dependence_relations (dependences
);
1698 free_data_refs (datarefs
);
1699 free (loop_data_size
);
1701 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1703 fprintf (dump_file
, "Reuse distances:\n");
1704 for (gr
= refs
; gr
; gr
= gr
->next
)
1705 for (ref
= gr
->refs
; ref
; ref
= ref
->next
)
1706 fprintf (dump_file
, " ref %p distance %u\n",
1707 (void *) ref
, ref
->reuse_distance
);
/* Determine whether or not the trip count to ahead ratio is too small based
   on profitability considerations.
   AHEAD: the iteration ahead distance,
   EST_NITER: the estimated trip count.  */
1719 trip_count_to_ahead_ratio_too_small_p (unsigned ahead
, HOST_WIDE_INT est_niter
)
1721 /* Assume trip count to ahead ratio is big enough if the trip count could not
1722 be estimated at compile time. */
1726 if (est_niter
< (HOST_WIDE_INT
) (TRIP_COUNT_TO_AHEAD_RATIO
* ahead
))
1728 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1730 "Not prefetching -- loop estimated to roll only %d times\n",
/* Determine whether or not the number of memory references in the loop is
   reasonable based on the profitability and compilation time considerations.
   NINSNS: estimated number of instructions in the loop,
   MEM_REF_COUNT: total number of memory references in the loop.  */
1744 mem_ref_count_reasonable_p (unsigned ninsns
, unsigned mem_ref_count
)
1746 int insn_to_mem_ratio
;
1748 if (mem_ref_count
== 0)
  /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
     (compute_all_dependences) have high costs based on quadratic complexity.
     To avoid huge compilation time, we give up prefetching if mem_ref_count
     is too large.  */
1755 if (mem_ref_count
> PREFETCH_MAX_MEM_REFS_PER_LOOP
)
1758 /* Prefetching improves performance by overlapping cache missing
1759 memory accesses with CPU operations. If the loop does not have
1760 enough CPU operations to overlap with memory operations, prefetching
1761 won't give a significant benefit. One approximate way of checking
1762 this is to require the ratio of instructions to memory references to
1763 be above a certain limit. This approximation works well in practice.
1764 TODO: Implement a more precise computation by estimating the time
1765 for each CPU or memory op in the loop. Time estimates for memory ops
1766 should account for cache misses. */
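  /* For example (values assumed for illustration only): a loop with
     ninsns = 20 and mem_ref_count = 8 has an instruction-to-memory ratio
     of 20 / 8 = 2; if the PREFETCH_MIN_INSN_TO_MEM_RATIO parameter were 3,
     such a loop would be rejected as having too little work to overlap
     with the misses.  */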
1767 insn_to_mem_ratio
= ninsns
/ mem_ref_count
;
1769 if (insn_to_mem_ratio
< PREFETCH_MIN_INSN_TO_MEM_RATIO
)
1771 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1773 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
/* Determine whether or not the instruction to prefetch ratio in the loop is
   too small based on profitability considerations.
   NINSNS: estimated number of instructions in the loop,
   PREFETCH_COUNT: an estimate of the number of prefetches,
   UNROLL_FACTOR: the factor to unroll the loop if prefetching.  */
1788 insn_to_prefetch_ratio_too_small_p (unsigned ninsns
, unsigned prefetch_count
,
1789 unsigned unroll_factor
)
1791 int insn_to_prefetch_ratio
;
1793 /* Prefetching most likely causes performance degradation when the instruction
1794 to prefetch ratio is too small. Too many prefetch instructions in a loop
1795 may reduce the I-cache performance.
1796 (unroll_factor * ninsns) is used to estimate the number of instructions in
1797 the unrolled loop. This implementation is a bit simplistic -- the number
1798 of issued prefetch instructions is also affected by unrolling. So,
1799 prefetch_mod and the unroll factor should be taken into account when
1800 determining prefetch_count. Also, the number of insns of the unrolled
1801 loop will usually be significantly smaller than the number of insns of the
1802 original loop * unroll_factor (at least the induction variable increases
1803 and the exit branches will get eliminated), so it might be better to use
1804 tree_estimate_loop_size + estimated_unrolled_size. */
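  /* For example (illustrative values): with unroll_factor = 4, ninsns = 12
     and prefetch_count = 8, the estimated ratio is (4 * 12) / 8 = 6; if
     the MIN_INSN_TO_PREFETCH_RATIO parameter were 9, prefetching this loop
     would be rejected as too prefetch-heavy for the I-cache.  */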
1805 insn_to_prefetch_ratio
= (unroll_factor
* ninsns
) / prefetch_count
;
1806 if (insn_to_prefetch_ratio
< MIN_INSN_TO_PREFETCH_RATIO
)
1808 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1810 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1811 insn_to_prefetch_ratio
);
1819 /* Issue prefetch instructions for array references in LOOP. Returns
1820 true if the LOOP was unrolled. */
1823 loop_prefetch_arrays (struct loop
*loop
)
1825 struct mem_ref_group
*refs
;
1826 unsigned ahead
, ninsns
, time
, unroll_factor
;
1827 HOST_WIDE_INT est_niter
;
1828 struct tree_niter_desc desc
;
1829 bool unrolled
= false, no_other_refs
;
1830 unsigned prefetch_count
;
1831 unsigned mem_ref_count
;
1833 if (optimize_loop_nest_for_size_p (loop
))
1835 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1836 fprintf (dump_file
, " ignored (cold area)\n");
  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
1842 time
= tree_num_loop_insns (loop
, &eni_time_weights
);
1846 ahead
= (PREFETCH_LATENCY
+ time
- 1) / time
;
1847 est_niter
= estimated_stmt_executions_int (loop
);
1848 if (est_niter
== -1)
1849 est_niter
= max_stmt_executions_int (loop
);
1851 /* Prefetching is not likely to be profitable if the trip count to ahead
1852 ratio is too small. */
1853 if (trip_count_to_ahead_ratio_too_small_p (ahead
, est_niter
))
1856 ninsns
= tree_num_loop_insns (loop
, &eni_size_weights
);
1858 /* Step 1: gather the memory references. */
1859 refs
= gather_memory_references (loop
, &no_other_refs
, &mem_ref_count
);
  /* Give up prefetching if the number of memory references in the
     loop is not reasonable based on profitability and compilation time
     considerations.  */
1864 if (!mem_ref_count_reasonable_p (ninsns
, mem_ref_count
))
1867 /* Step 2: estimate the reuse effects. */
1868 prune_by_reuse (refs
);
1870 if (nothing_to_prefetch_p (refs
))
1873 if (!determine_loop_nest_reuse (loop
, refs
, no_other_refs
))
1876 /* Step 3: determine unroll factor. */
1877 unroll_factor
= determine_unroll_factor (loop
, refs
, ninsns
, &desc
,
1880 /* Estimate prefetch count for the unrolled loop. */
1881 prefetch_count
= estimate_prefetch_count (refs
, unroll_factor
);
1882 if (prefetch_count
== 0)
1885 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1886 fprintf (dump_file
, "Ahead %d, unroll factor %d, trip count "
1887 HOST_WIDE_INT_PRINT_DEC
"\n"
1888 "insn count %d, mem ref count %d, prefetch count %d\n",
1889 ahead
, unroll_factor
, est_niter
,
1890 ninsns
, mem_ref_count
, prefetch_count
);
1892 /* Prefetching is not likely to be profitable if the instruction to prefetch
1893 ratio is too small. */
1894 if (insn_to_prefetch_ratio_too_small_p (ninsns
, prefetch_count
,
1898 mark_nontemporal_stores (loop
, refs
);
1900 /* Step 4: what to prefetch? */
1901 if (!schedule_prefetches (refs
, unroll_factor
, ahead
))
  /* Step 5: unroll the loop.  TODO -- peeling of first and last few
     iterations so that we do not issue superfluous prefetches.  */
1906 if (unroll_factor
!= 1)
1908 tree_unroll_loop (loop
, unroll_factor
,
1909 single_dom_exit (loop
), &desc
);
1913 /* Step 6: issue the prefetches. */
1914 issue_prefetches (refs
, unroll_factor
, ahead
);
1917 release_mem_refs (refs
);
1921 /* Issue prefetch instructions for array references in loops. */
1924 tree_ssa_prefetch_arrays (void)
1928 bool unrolled
= false;
      /* It is possible to ask compiler for say -mtune=i486 -march=pentium4.
         -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
         of processor costs and i486 does not have prefetch, but
         -march=pentium4 causes HAVE_prefetch to be true.  Ugh.  */
1936 || PREFETCH_BLOCK
== 0)
1939 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1941 fprintf (dump_file
, "Prefetching parameters:\n");
1942 fprintf (dump_file
, " simultaneous prefetches: %d\n",
1943 SIMULTANEOUS_PREFETCHES
);
1944 fprintf (dump_file
, " prefetch latency: %d\n", PREFETCH_LATENCY
);
1945 fprintf (dump_file
, " prefetch block size: %d\n", PREFETCH_BLOCK
);
1946 fprintf (dump_file
, " L1 cache size: %d lines, %d kB\n",
1947 L1_CACHE_SIZE_BYTES
/ L1_CACHE_LINE_SIZE
, L1_CACHE_SIZE
);
1948 fprintf (dump_file
, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE
);
1949 fprintf (dump_file
, " L2 cache size: %d kB\n", L2_CACHE_SIZE
);
1950 fprintf (dump_file
, " min insn-to-prefetch ratio: %d \n",
1951 MIN_INSN_TO_PREFETCH_RATIO
);
1952 fprintf (dump_file
, " min insn-to-mem ratio: %d \n",
1953 PREFETCH_MIN_INSN_TO_MEM_RATIO
);
1954 fprintf (dump_file
, "\n");
1957 initialize_original_copy_tables ();
1959 if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH
))
1961 tree type
= build_function_type_list (void_type_node
,
1962 const_ptr_type_node
, NULL_TREE
);
1963 tree decl
= add_builtin_function ("__builtin_prefetch", type
,
1964 BUILT_IN_PREFETCH
, BUILT_IN_NORMAL
,
1966 DECL_IS_NOVOPS (decl
) = true;
1967 set_builtin_decl (BUILT_IN_PREFETCH
, decl
, false);
  /* We assume that the size of a cache line is a power of two, so verify this
     here.  */
1972 gcc_assert ((PREFETCH_BLOCK
& (PREFETCH_BLOCK
- 1)) == 0);
1974 FOR_EACH_LOOP (li
, loop
, LI_FROM_INNERMOST
)
1976 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1977 fprintf (dump_file
, "Processing loop %d:\n", loop
->num
);
1979 unrolled
|= loop_prefetch_arrays (loop
);
1981 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1982 fprintf (dump_file
, "\n\n");
1988 todo_flags
|= TODO_cleanup_cfg
;
1991 free_original_copy_tables ();
1998 tree_ssa_loop_prefetch (void)
2000 if (number_of_loops (cfun
) <= 1)
2003 return tree_ssa_prefetch_arrays ();
2007 gate_tree_ssa_loop_prefetch (void)
2009 return flag_prefetch_loop_arrays
> 0;
2014 const pass_data pass_data_loop_prefetch
=
2016 GIMPLE_PASS
, /* type */
2017 "aprefetch", /* name */
2018 OPTGROUP_LOOP
, /* optinfo_flags */
2019 true, /* has_gate */
2020 true, /* has_execute */
2021 TV_TREE_PREFETCH
, /* tv_id */
2022 ( PROP_cfg
| PROP_ssa
), /* properties_required */
2023 0, /* properties_provided */
2024 0, /* properties_destroyed */
2025 0, /* todo_flags_start */
2026 0, /* todo_flags_finish */
2029 class pass_loop_prefetch
: public gimple_opt_pass
2032 pass_loop_prefetch (gcc::context
*ctxt
)
2033 : gimple_opt_pass (pass_data_loop_prefetch
, ctxt
)
2036 /* opt_pass methods: */
2037 bool gate () { return gate_tree_ssa_loop_prefetch (); }
2038 unsigned int execute () { return tree_ssa_loop_prefetch (); }
2040 }; // class pass_loop_prefetch
2045 make_pass_loop_prefetch (gcc::context
*ctxt
)
2047 return new pass_loop_prefetch (ctxt
);