/* Array prefetching.
   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "tm_p.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "diagnostic-core.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-data-ref.h"


/* FIXME: Needed for optabs, but this should all be moved to a TBD interface
   between the GIMPLE and RTL worlds.  */
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "recog.h"

/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in the single loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do it, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
        {
          a[255] = ...;         (0)
          a[i] = ...;           (1)
          a[i + 64] = ...;      (2)
          a[16*i] = ...;        (3)
          a[187*i] = ...;       (4)
          a[187*i + 50] = ...;  (5)
        }

      (0) obviously has PREFETCH_BEFORE 1
      (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
          location 64 iterations before it, and PREFETCH_MOD 64 (since
          it hits the same cache line otherwise).
      (2) has PREFETCH_MOD 64
      (3) has PREFETCH_MOD 4
      (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
          the cache line accessed by (5) is the same with probability only
          7/32.
      (5) has PREFETCH_MOD 1 as well.

      Additionally, we use data dependence analysis to determine for each
      reference the distance till the first reuse; this information is used
      to determine the temporality of the issued prefetch instruction.

   3) We determine how much ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop (see the worked example
      after this list).

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We issue as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?

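   As a worked instance of 3) (the numbers are made up, not target data):
   if a prefetch takes roughly 200 cycles to complete and one iteration of
   the loop takes roughly 20 cycles, each prefetch has to be issued about
   200 / 20 = 10 iterations before the corresponding access.
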
   A cost model is implemented to determine whether or not prefetching is
   profitable for a given loop.  The cost model has three heuristics:

   1. Function trip_count_to_ahead_ratio_too_small_p implements a
      heuristic that determines whether or not the loop has too few
      iterations (compared to ahead).  Prefetching is not likely to be
      beneficial if the trip count to ahead ratio is below a certain
      minimum.

   2. Function mem_ref_count_reasonable_p implements a heuristic that
      determines whether the given loop has enough CPU ops that can be
      overlapped with cache missing memory ops.  If not, the loop
      won't benefit from prefetching.  In the implementation,
      prefetching is not considered beneficial if the ratio between
      the instruction count and the mem ref count is below a certain
      minimum.

   3. Function insn_to_prefetch_ratio_too_small_p implements a
      heuristic that disables prefetching in a loop if the prefetching
      cost is above a certain limit.  The relative prefetching cost is
      estimated by taking the ratio between the prefetch count and the
      total instruction count (this models the I-cache cost).

   The limits used in these heuristics are defined as parameters with
   reasonable default values.  Machine-specific default values will be
   added later.

   Some other TODO:
      -- write and use more general reuse analysis (that could be also used
         in other cache aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by user
         (now we just ignore them; at the very least we should avoid
         optimizing loops in which the user put his own prefetches)
      -- we assume cache line size alignment of arrays; this could be
         improved.  */

/* Magic constants follow.  These should be replaced by machine specific
   numbers.  */

/* True if write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
#define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if a single memory location
   is accessed several times in a single iteration of the loop.  */
#define NONTEMPORAL_FRACTION 16
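
/* For instance (a hypothetical configuration, not a target default): with a
   32 kB L1 cache, accesses reused within 32768 / NONTEMPORAL_FRACTION
   == 2048 bytes of intervening memory traffic are still treated as
   temporal.  */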

/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif

/* It is not profitable to prefetch when the trip count is not at
   least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
   For example, in a loop with a prefetch ahead distance of 10,
   supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
   profitable to prefetch when the trip count is greater or equal to
   40.  In that case, 30 out of the 40 iterations will benefit from
   prefetching.  */

#ifndef TRIP_COUNT_TO_AHEAD_RATIO
#define TRIP_COUNT_TO_AHEAD_RATIO 4
#endif

/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;                    /* Base of the reference.  */
  tree step;                    /* Step of the reference.  */
  struct mem_ref *refs;         /* References in the group.  */
  struct mem_ref_group *next;   /* Next group of references.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)

/* Do not generate a prefetch if the unroll factor is significantly less
   than what is required by the prefetch.  This is to avoid redundant
   prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
   2, prefetching requires unrolling the loop 16 times, but
   the loop is actually unrolled twice.  In this case (ratio = 8),
   prefetching is not likely to be beneficial.  */

#ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
#define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
#endif

/* Some of the prefetch computations have quadratic complexity.  We want to
   avoid huge compile times and, therefore, want to limit the number of
   memory references per loop where we consider prefetching.  */

#ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
#define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
#endif

/* The memory reference.  */

struct mem_ref
{
  gimple stmt;                  /* Statement in which the reference appears.  */
  tree mem;                     /* The reference.  */
  HOST_WIDE_INT delta;          /* Constant offset of the reference.  */
  struct mem_ref_group *group;  /* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
                                /* Prefetch only each PREFETCH_MOD-th
                                   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
                                /* Prefetch only first PREFETCH_BEFORE
                                   iterations.  */
  unsigned reuse_distance;      /* The amount of data accessed before the first
                                   reuse of this value.  */
  struct mem_ref *next;         /* The next reference in the group.  */
  unsigned write_p : 1;         /* Is it a write?  */
  unsigned independent_p : 1;   /* True if the reference is independent of
                                   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;/* Should we really issue the prefetch?  */
  unsigned storent_p : 1;       /* True if we changed the store to a
                                   nontemporal one.  */
};

/* Dumps information about a memory reference to FILE.  */

static void
dump_mem_details (FILE *file, tree base, tree step,
		  HOST_WIDE_INT delta, bool write_p)
{
  fprintf (file, "(base ");
  print_generic_expr (file, base, TDF_SLIM);
  fprintf (file, ", step ");
  if (cst_and_fits_in_hwi (step))
    fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
  else
    print_generic_expr (file, step, TDF_TREE);
  fprintf (file, ")\n");
  fprintf (file, "  delta ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
  fprintf (file, "\n");
  fprintf (file, "  %s\n", write_p ? "write" : "read");
  fprintf (file, "\n");
}

/* Dumps information about reference REF to FILE.  */

static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
{
  fprintf (file, "Reference %p:\n", (void *) ref);

  fprintf (file, "  group %p ", (void *) ref->group);

  dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
		    ref->write_p);
}

/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */

static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
{
  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if (operand_equal_p ((*groups)->step, step, 0)
	  && operand_equal_p ((*groups)->base, base, 0))
	return *groups;

      /* If step is an integer constant, keep the list of groups sorted
	 by decreasing step.  */
      if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
	  && int_cst_value ((*groups)->step) < int_cst_value (step))
	break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->next = *groups;
  *groups = group;

  return group;
}

/* Records a memory reference MEM in GROUP with offset DELTA and write status
   WRITE_P.  The reference occurs in statement STMT.  */

static void
record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
	    HOST_WIDE_INT delta, bool write_p)
{
  struct mem_ref **aref;

  /* Do not record the same address twice.  */
  for (aref = &group->refs; *aref; aref = &(*aref)->next)
    {
      /* A write reference need not be able to reuse a read prefetch, or
	 vice versa.  */
      if (!WRITE_CAN_USE_READ_PREFETCH
	  && write_p
	  && !(*aref)->write_p)
	continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
	  && !write_p
	  && (*aref)->write_p)
	continue;

      if ((*aref)->delta == delta)
	return;
    }

  (*aref) = XNEW (struct mem_ref);
  (*aref)->stmt = stmt;
  (*aref)->mem = mem;
  (*aref)->delta = delta;
  (*aref)->write_p = write_p;
  (*aref)->prefetch_before = PREFETCH_ALL;
  (*aref)->prefetch_mod = 1;
  (*aref)->reuse_distance = 0;
  (*aref)->issue_prefetch_p = false;
  (*aref)->group = group;
  (*aref)->next = NULL;
  (*aref)->independent_p = false;
  (*aref)->storent_p = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_mem_ref (dump_file, *aref);
}

/* Release memory references in GROUPS.  */

static void
release_mem_refs (struct mem_ref_group *groups)
{
  struct mem_ref_group *next_g;
  struct mem_ref *ref, *next_r;

  for (; groups; groups = next_g)
    {
      next_g = groups->next;
      for (ref = groups->refs; ref; ref = next_r)
	{
	  next_r = ref->next;
	  free (ref);
	}
      free (groups);
    }
}

/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;            /* Loop of the reference.  */
  gimple stmt;                  /* Statement of the reference.  */
  tree *step;                   /* Step of the memory reference.  */
  HOST_WIDE_INT *delta;         /* Offset of the memory reference.  */
};

/* Analyzes a single INDEX of a memory reference to obtain information
   described in analyze_ref.  Callback for for_each_index.  */

static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
  struct ar_data *ar_data = (struct ar_data *) data;
  tree ibase, step, stepsize;
  HOST_WIDE_INT idelta = 0, imult = 1;
  affine_iv iv;

  if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
		  *index, &iv, true))
    return false;
  ibase = iv.base;
  step = iv.step;

  if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
      && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
    {
      idelta = int_cst_value (TREE_OPERAND (ibase, 1));
      ibase = TREE_OPERAND (ibase, 0);
    }
  if (cst_and_fits_in_hwi (ibase))
    {
      idelta += int_cst_value (ibase);
      ibase = build_int_cst (TREE_TYPE (ibase), 0);
    }

  if (TREE_CODE (base) == ARRAY_REF)
    {
      stepsize = array_ref_element_size (base);
      if (!cst_and_fits_in_hwi (stepsize))
	return false;
      imult = int_cst_value (stepsize);
      step = fold_build2 (MULT_EXPR, sizetype,
			  fold_convert (sizetype, step),
			  fold_convert (sizetype, stepsize));
      idelta *= imult;
    }

  if (*ar_data->step == NULL_TREE)
    *ar_data->step = step;
  else
    *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
				  fold_convert (sizetype, *ar_data->step),
				  fold_convert (sizetype, step));
  *ar_data->delta += idelta;
  *index = ibase;

  return true;
}

/* Tries to express REF_P in the shape &BASE + STEP * iter + DELTA, where
   DELTA and STEP are integer constants and iter is the number of iterations
   of LOOP.  The reference occurs in statement STMT.  Strips nonaddressable
   component references from REF_P.  */

static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
	     tree *step, HOST_WIDE_INT *delta,
	     gimple stmt)
{
  struct ar_data ar_data;
  tree off;
  HOST_WIDE_INT bit_offset;
  tree ref = *ref_p;

  *step = NULL_TREE;
  *delta = 0;

  /* First strip off the component references.  Ignore bitfields.
     Also strip off the real and imaginary parts of a complex, so that
     they can have the same base.  */
  if (TREE_CODE (ref) == REALPART_EXPR
      || TREE_CODE (ref) == IMAGPART_EXPR
      || (TREE_CODE (ref) == COMPONENT_REF
	  && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
    {
      if (TREE_CODE (ref) == IMAGPART_EXPR)
	*delta += int_size_in_bytes (TREE_TYPE (ref));
      ref = TREE_OPERAND (ref, 0);
    }

  *ref_p = ref;

  for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
    {
      off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
      bit_offset = TREE_INT_CST_LOW (off);
      gcc_assert (bit_offset % BITS_PER_UNIT == 0);

      *delta += bit_offset / BITS_PER_UNIT;
    }

  *base = unshare_expr (ref);
  ar_data.loop = loop;
  ar_data.stmt = stmt;
  ar_data.step = step;
  ar_data.delta = delta;
  return for_each_index (base, idx_analyze_ref, &ar_data);
}

/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is a write if WRITE_P.  Returns true if the
   reference was recorded, false otherwise.  */

static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
			      tree ref, bool write_p, gimple stmt)
{
  tree base, step;
  HOST_WIDE_INT delta;
  struct mem_ref_group *agrp;

  if (get_base_address (ref) == NULL)
    return false;

  if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
    return false;
  /* If analyze_ref fails the default is a NULL_TREE.  We can stop here.  */
  if (step == NULL_TREE)
    return false;

  /* Stop if the address of BASE could not be taken.  */
  if (may_be_nonaddressable_p (base))
    return false;

  /* Limit non-constant step prefetching only to the innermost loops and
     only when the step is loop invariant in the entire loop nest.  */
  if (!cst_and_fits_in_hwi (step))
    {
      if (loop->inner != NULL)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Memory expression %p\n", (void *) ref);
	      print_generic_expr (dump_file, ref, TDF_TREE);
	      fprintf (dump_file, ":");
	      dump_mem_details (dump_file, base, step, delta, write_p);
	      fprintf (dump_file,
		       "Ignoring %p, non-constant step prefetching is "
		       "limited to innermost loops\n",
		       (void *) ref);
	    }
	  return false;
	}
      else
	{
	  if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Memory expression %p\n", (void *) ref);
		  print_generic_expr (dump_file, ref, TDF_TREE);
		  fprintf (dump_file, ":");
		  dump_mem_details (dump_file, base, step, delta, write_p);
		  fprintf (dump_file,
			   "Not prefetching, ignoring %p due to "
			   "loop variant step\n",
			   (void *) ref);
		}
	      return false;
	    }
	}
    }

  /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
     are integer constants.  */
  agrp = find_or_create_group (refs, base, step);
  record_ref (agrp, stmt, ref, delta, write_p);

  return true;
}

/* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
   true if there are no other memory references inside the loop.  */

static struct mem_ref_group *
gather_memory_references (struct loop *loop, bool *no_other_refs,
			  unsigned *ref_count)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  gimple_stmt_iterator bsi;
  gimple stmt;
  tree lhs, rhs;
  struct mem_ref_group *refs = NULL;

  *no_other_refs = true;
  *ref_count = 0;

  /* Scan the loop body in order, so that the earlier references precede
     the later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
	continue;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  stmt = gsi_stmt (bsi);

	  if (gimple_code (stmt) != GIMPLE_ASSIGN)
	    {
	      if (gimple_vuse (stmt)
		  || (is_gimple_call (stmt)
		      && !(gimple_call_flags (stmt) & ECF_CONST)))
		*no_other_refs = false;
	      continue;
	    }

	  lhs = gimple_assign_lhs (stmt);
	  rhs = gimple_assign_rhs1 (stmt);

	  if (REFERENCE_CLASS_P (rhs))
	    {
	      *no_other_refs &= gather_memory_references_ref (loop, &refs,
							      rhs, false, stmt);
	      *ref_count += 1;
	    }
	  if (REFERENCE_CLASS_P (lhs))
	    {
	      *no_other_refs &= gather_memory_references_ref (loop, &refs,
							      lhs, true, stmt);
	      *ref_count += 1;
	    }
	}
    }
  free (body);

  return refs;
}

/* Prune the prefetch candidate REF using the self-reuse.  */

static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
  HOST_WIDE_INT step;
  bool backward;

  /* If the step size is non-constant, we cannot calculate prefetch_mod.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (step == 0)
    {
      /* Prefetch references to invariant address just once.  */
      ref->prefetch_before = 1;
      return;
    }

  if (backward)
    step = -step;

  if (step > PREFETCH_BLOCK)
    return;

  if ((backward && HAVE_BACKWARD_PREFETCH)
      || (!backward && HAVE_FORWARD_PREFETCH))
    {
      ref->prefetch_before = 1;
      return;
    }

  ref->prefetch_mod = PREFETCH_BLOCK / step;
}
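
/* For instance, for the accesses in the example in the comment at the top of
   the file: a char access with step 1 and a 64-byte PREFETCH_BLOCK ends up
   with prefetch_mod 64 (cases (1) and (2)), and a step of 16 yields
   prefetch_mod 4 (case (3)).  */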

/* Divides X by BY, rounding down.  */

static HOST_WIDE_INT
ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
{
  gcc_assert (by > 0);

  if (x >= 0)
    return x / by;
  else
    return (x + by - 1) / by;
}

/* Given a CACHE_LINE_SIZE and two inductive memory references
   with a common STEP greater than CACHE_LINE_SIZE and an address
   difference DELTA, compute the probability that they will fall
   in different cache lines.  Return true if the computed miss rate
   is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
   number of distinct iterations after which the pattern repeats itself.
   ALIGN_UNIT is the unit of alignment in bytes.  */

static bool
is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
			 HOST_WIDE_INT step, HOST_WIDE_INT delta,
			 unsigned HOST_WIDE_INT distinct_iters,
			 int align_unit)
{
  unsigned align, iter;
  int total_positions, miss_positions, max_allowed_miss_positions;
  int address1, address2, cache_line1, cache_line2;

  /* It always misses if delta is greater than or equal to the cache
     line size.  */
  if (delta >= (HOST_WIDE_INT) cache_line_size)
    return false;

  miss_positions = 0;
  total_positions = (cache_line_size / align_unit) * distinct_iters;
  max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;

  /* Iterate through all possible alignments of the first
     memory reference within its cache line.  */
  for (align = 0; align < cache_line_size; align += align_unit)

    /* Iterate through all distinct iterations.  */
    for (iter = 0; iter < distinct_iters; iter++)
      {
	address1 = align + step * iter;
	address2 = address1 + delta;
	cache_line1 = address1 / cache_line_size;
	cache_line2 = address2 / cache_line_size;
	if (cache_line1 != cache_line2)
	  {
	    miss_positions += 1;
	    if (miss_positions > max_allowed_miss_positions)
	      return false;
	  }
      }
  return true;
}
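
/* This is the computation behind the 7/32 figure in the comment at the top
   of the file: for the accesses a[187*i] and a[187*i + 50] with a 64-byte
   cache line, only about 7/32 of the sampled alignment/iteration positions
   put both addresses on the same line, so the miss rate is far above the
   default ACCEPTABLE_MISS_RATE of 50 per thousand and both references keep
   their own prefetches.  */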

/* Prune the prefetch candidate REF using the reuse with BY.
   If BY_IS_BEFORE is true, BY is before REF in the loop.  */

static void
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
			  bool by_is_before)
{
  HOST_WIDE_INT step;
  bool backward;
  HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
  HOST_WIDE_INT delta = delta_b - delta_r;
  HOST_WIDE_INT hit_from;
  unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
  HOST_WIDE_INT reduced_step;
  unsigned HOST_WIDE_INT reduced_prefetch_block;
  tree ref_type;
  int align_unit;

  /* If the step is non-constant we cannot calculate prefetch_before.  */
  if (!cst_and_fits_in_hwi (ref->group->step))
    return;

  step = int_cst_value (ref->group->step);

  backward = step < 0;

  if (delta == 0)
    {
      /* If the references have the same address, only prefetch the
	 former.  */
      if (by_is_before)
	ref->prefetch_before = 0;

      return;
    }

  if (!step)
    {
      /* If the reference addresses are invariant and fall into the
	 same cache line, prefetch just the first one.  */
      if (!by_is_before)
	return;

      if (ddown (ref->delta, PREFETCH_BLOCK)
	  != ddown (by->delta, PREFETCH_BLOCK))
	return;

      ref->prefetch_before = 0;
      return;
    }

  /* Only prune the reference that is behind in the array.  */
  if (backward)
    {
      if (delta > 0)
	return;

      /* Transform the data so that we may assume that the accesses
	 are forward.  */
      delta = - delta;
      step = -step;
      delta_r = PREFETCH_BLOCK - 1 - delta_r;
      delta_b = PREFETCH_BLOCK - 1 - delta_b;
    }
  else
    {
      if (delta < 0)
	return;
    }

  /* Check whether the two references are likely to hit the same cache
     line, and how distant the iterations in which that occurs are from
     each other.  */

  if (step <= PREFETCH_BLOCK)
    {
      /* The accesses are sure to meet.  Let us check when.  */
      hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
      prefetch_before = (hit_from - delta_r + step - 1) / step;

      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
	prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* A more complicated case with step > prefetch_block.  First reduce
     the ratio between the step and the cache line size to its simplest
     terms.  The resulting denominator will then represent the number of
     distinct iterations after which each address will go back to its
     initial location within the cache line.  This computation assumes
     that PREFETCH_BLOCK is a power of two.  */
  prefetch_block = PREFETCH_BLOCK;
  reduced_prefetch_block = prefetch_block;
  reduced_step = step;
  while ((reduced_step & 1) == 0
	 && reduced_prefetch_block > 1)
    {
      reduced_step >>= 1;
      reduced_prefetch_block >>= 1;
    }

  prefetch_before = delta / step;
  delta %= step;
  ref_type = TREE_TYPE (ref->mem);
  align_unit = TYPE_ALIGN (ref_type) / 8;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
			       reduced_prefetch_block, align_unit))
    {
      /* Do not reduce prefetch_before if we meet beyond cache size.  */
      if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
	prefetch_before = PREFETCH_ALL;
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* Try also the following iteration.  */
  prefetch_before++;
  delta = step - delta;
  if (is_miss_rate_acceptable (prefetch_block, step, delta,
			       reduced_prefetch_block, align_unit))
    {
      if (prefetch_before < ref->prefetch_before)
	ref->prefetch_before = prefetch_before;

      return;
    }

  /* REF probably does not reuse BY.  */
  return;
}

/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */

static void
prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
{
  struct mem_ref *prune_by;
  bool before = true;

  prune_ref_by_self_reuse (ref);

  for (prune_by = refs; prune_by; prune_by = prune_by->next)
    {
      if (prune_by == ref)
	{
	  before = false;
	  continue;
	}

      if (!WRITE_CAN_USE_READ_PREFETCH
	  && ref->write_p
	  && !prune_by->write_p)
	continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
	  && !ref->write_p
	  && prune_by->write_p)
	continue;

      prune_ref_by_group_reuse (ref, prune_by, before);
    }
}

/* Prune the prefetch candidates in GROUP using the reuse analysis.  */

static void
prune_group_by_reuse (struct mem_ref_group *group)
{
  struct mem_ref *ref_pruned;

  for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
    {
      prune_ref_by_reuse (ref_pruned, group->refs);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Reference %p:", (void *) ref_pruned);

	  if (ref_pruned->prefetch_before == PREFETCH_ALL
	      && ref_pruned->prefetch_mod == 1)
	    fprintf (dump_file, " no restrictions");
	  else if (ref_pruned->prefetch_before == 0)
	    fprintf (dump_file, " do not prefetch");
	  else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
	    fprintf (dump_file, " prefetch once");
	  else
	    {
	      if (ref_pruned->prefetch_before != PREFETCH_ALL)
		{
		  fprintf (dump_file, " prefetch before ");
		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
			   ref_pruned->prefetch_before);
		}
	      if (ref_pruned->prefetch_mod != 1)
		{
		  fprintf (dump_file, " prefetch mod ");
		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
			   ref_pruned->prefetch_mod);
		}
	    }
	  fprintf (dump_file, "\n");
	}
    }
}

/* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */

static void
prune_by_reuse (struct mem_ref_group *groups)
{
  for (; groups; groups = groups->next)
    prune_group_by_reuse (groups);
}

/* Returns true if we should issue prefetch for REF.  */

static bool
should_issue_prefetch_p (struct mem_ref *ref)
{
  /* For now do not issue prefetches for only the first few iterations.  */
  if (ref->prefetch_before != PREFETCH_ALL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
		 (void *) ref);
      return false;
    }

  /* Do not prefetch nontemporal stores.  */
  if (ref->storent_p)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
      return false;
    }

  return true;
}

/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */

static bool
schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
		     unsigned ahead)
{
  unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
  unsigned slots_per_prefetch;
  struct mem_ref *ref;
  bool any = false;

  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;

  /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
     AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
     it will need a prefetch slot.  */
  slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
	     slots_per_prefetch);

  /* For now we just take memory references one by one and issue
     prefetches for as many as possible.  The groups are sorted
     starting with the largest step, since the references with
     large step are more likely to cause many cache misses.  */

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      {
	if (!should_issue_prefetch_p (ref))
	  continue;

	/* The loop is far from being sufficiently unrolled for this
	   prefetch.  Do not generate prefetch to avoid many redundant
	   prefetches.  */
	if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
	  continue;

	/* If we need to prefetch the reference each PREFETCH_MOD iterations,
	   and we unroll the loop UNROLL_FACTOR times, we need to insert
	   ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
	   iteration.  */
	n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
			/ ref->prefetch_mod);
	prefetch_slots = n_prefetches * slots_per_prefetch;

	/* If more than half of the prefetches would be lost anyway, do not
	   issue the prefetch.  */
	if (2 * remaining_prefetch_slots < prefetch_slots)
	  continue;

	ref->issue_prefetch_p = true;

	if (remaining_prefetch_slots <= prefetch_slots)
	  return true;
	remaining_prefetch_slots -= prefetch_slots;
	any = true;
      }

  return any;
}
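
/* A worked instance of the slot accounting above (the numbers are made up,
   not target defaults): with SIMULTANEOUS_PREFETCHES == 6, AHEAD == 4 and
   UNROLL_FACTOR == 2, each prefetch instruction takes (4 + 2 / 2) / 2 == 2
   slots.  A reference with PREFETCH_MOD 1 then needs (2 + 1 - 1) / 1 == 2
   prefetch instructions per unrolled iteration, i.e. 4 slots, leaving
   6 - 4 == 2 slots for the remaining references.  */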

/* Return TRUE if no prefetch is going to be generated in the given
   GROUPS.  */

static bool
nothing_to_prefetch_p (struct mem_ref_group *groups)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	return false;

  return true;
}

/* Estimate the number of prefetches in the given GROUPS.
   UNROLL_FACTOR is the factor by which LOOP was unrolled.  */

static int
estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
{
  struct mem_ref *ref;
  unsigned n_prefetches;
  int prefetch_count = 0;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	{
	  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
			  / ref->prefetch_mod);
	  prefetch_count += n_prefetches;
	}

  return prefetch_count;
}

/* Issue prefetches for the reference REF into loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */

static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
  HOST_WIDE_INT delta;
  tree addr, addr_base, write_p, local, forward;
  gcall *prefetch;
  gimple_stmt_iterator bsi;
  unsigned n_prefetches, ap;
  bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Issued%s prefetch for %p.\n",
	     nontemporal ? " nontemporal" : "",
	     (void *) ref);

  bsi = gsi_for_stmt (ref->stmt);

  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
		  / ref->prefetch_mod);
  addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
  addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
					true, NULL, true, GSI_SAME_STMT);
  write_p = ref->write_p ? integer_one_node : integer_zero_node;
  local = nontemporal ? integer_zero_node : integer_three_node;

  for (ap = 0; ap < n_prefetches; ap++)
    {
      if (cst_and_fits_in_hwi (ref->group->step))
	{
	  /* Determine the address to prefetch.  */
	  delta = (ahead + ap * ref->prefetch_mod) *
		  int_cst_value (ref->group->step);
	  addr = fold_build_pointer_plus_hwi (addr_base, delta);
	  addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
					   true, GSI_SAME_STMT);
	}
      else
	{
	  /* The step size is non-constant but loop-invariant.  We use the
	     heuristic of simply prefetching AHEAD iterations ahead.  */
	  forward = fold_build2 (MULT_EXPR, sizetype,
				 fold_convert (sizetype, ref->group->step),
				 fold_convert (sizetype, size_int (ahead)));
	  addr = fold_build_pointer_plus (addr_base, forward);
	  addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
					   NULL, true, GSI_SAME_STMT);
	}
      /* Create the prefetch instruction.  */
      prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
				    3, addr, write_p, local);
      gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
    }
}
1189 | /* Issue prefetches for the references in GROUPS into loop as decided before. | |
1190 | HEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR is the | |
1191 | factor by that LOOP was unrolled. */ | |
1192 | ||
1193 | static void | |
1194 | issue_prefetches (struct mem_ref_group *groups, | |
1195 | unsigned unroll_factor, unsigned ahead) | |
1196 | { | |
1197 | struct mem_ref *ref; | |
1198 | ||
1199 | for (; groups; groups = groups->next) | |
1200 | for (ref = groups->refs; ref; ref = ref->next) | |
1201 | if (ref->issue_prefetch_p) | |
1202 | issue_prefetch_ref (ref, unroll_factor, ahead); | |
1203 | } | |
1204 | ||
5b5037b3 | 1205 | /* Returns true if REF is a memory write for that a nontemporal store insn |
1206 | can be used. */ | |
1207 | ||
1208 | static bool | |
1209 | nontemporal_store_p (struct mem_ref *ref) | |
1210 | { | |
3754d046 | 1211 | machine_mode mode; |
5b5037b3 | 1212 | enum insn_code code; |
1213 | ||
1214 | /* REF must be a write that is not reused. We require it to be independent | |
1215 | on all other memory references in the loop, as the nontemporal stores may | |
1216 | be reordered with respect to other memory references. */ | |
1217 | if (!ref->write_p | |
1218 | || !ref->independent_p | |
1219 | || ref->reuse_distance < L2_CACHE_SIZE_BYTES) | |
1220 | return false; | |
1221 | ||
1222 | /* Check that we have the storent instruction for the mode. */ | |
1223 | mode = TYPE_MODE (TREE_TYPE (ref->mem)); | |
1224 | if (mode == BLKmode) | |
1225 | return false; | |
1226 | ||
d6bf3b14 | 1227 | code = optab_handler (storent_optab, mode); |
5b5037b3 | 1228 | return code != CODE_FOR_nothing; |
1229 | } | |
1230 | ||
1231 | /* If REF is a nontemporal store, we mark the corresponding modify statement | |
1232 | and return true. Otherwise, we return false. */ | |
1233 | ||
1234 | static bool | |
1235 | mark_nontemporal_store (struct mem_ref *ref) | |
1236 | { | |
1237 | if (!nontemporal_store_p (ref)) | |
1238 | return false; | |
1239 | ||
1240 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1241 | fprintf (dump_file, "Marked reference %p as a nontemporal store.\n", | |
1242 | (void *) ref); | |
1243 | ||
75a70cf9 | 1244 | gimple_assign_set_nontemporal_move (ref->stmt, true); |
5b5037b3 | 1245 | ref->storent_p = true; |
1246 | ||
1247 | return true; | |
1248 | } | |
1249 | ||
1250 | /* Issue a memory fence instruction after LOOP. */ | |
1251 | ||
1252 | static void | |
1253 | emit_mfence_after_loop (struct loop *loop) | |
1254 | { | |
f1f41a6c | 1255 | vec<edge> exits = get_loop_exit_edges (loop); |
5b5037b3 | 1256 | edge exit; |
1a91d914 | 1257 | gcall *call; |
75a70cf9 | 1258 | gimple_stmt_iterator bsi; |
5b5037b3 | 1259 | unsigned i; |
1260 | ||
f1f41a6c | 1261 | FOR_EACH_VEC_ELT (exits, i, exit) |
5b5037b3 | 1262 | { |
75a70cf9 | 1263 | call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0); |
5b5037b3 | 1264 | |
1265 | if (!single_pred_p (exit->dest) | |
1266 | /* If possible, we prefer not to insert the fence on other paths | |
1267 | in cfg. */ | |
1268 | && !(exit->flags & EDGE_ABNORMAL)) | |
1269 | split_loop_exit_edge (exit); | |
75a70cf9 | 1270 | bsi = gsi_after_labels (exit->dest); |
5b5037b3 | 1271 | |
75a70cf9 | 1272 | gsi_insert_before (&bsi, call, GSI_NEW_STMT); |
5b5037b3 | 1273 | } |
1274 | ||
f1f41a6c | 1275 | exits.release (); |
5b5037b3 | 1276 | update_ssa (TODO_update_ssa_only_virtuals); |
1277 | } | |
1278 | ||
1279 | /* Returns true if we can use storent in loop, false otherwise. */ | |
1280 | ||
1281 | static bool | |
1282 | may_use_storent_in_loop_p (struct loop *loop) | |
1283 | { | |
1284 | bool ret = true; | |
1285 | ||
1286 | if (loop->inner != NULL) | |
1287 | return false; | |
1288 | ||
1289 | /* If we must issue a mfence insn after using storent, check that there | |
1290 | is a suitable place for it at each of the loop exits. */ | |
1291 | if (FENCE_FOLLOWING_MOVNT != NULL_TREE) | |
1292 | { | |
f1f41a6c | 1293 | vec<edge> exits = get_loop_exit_edges (loop); |
5b5037b3 | 1294 | unsigned i; |
1295 | edge exit; | |
1296 | ||
f1f41a6c | 1297 | FOR_EACH_VEC_ELT (exits, i, exit) |
5b5037b3 | 1298 | if ((exit->flags & EDGE_ABNORMAL) |
34154e27 | 1299 | && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
5b5037b3 | 1300 | ret = false; |
1301 | ||
f1f41a6c | 1302 | exits.release (); |
5b5037b3 | 1303 | } |
1304 | ||
1305 | return ret; | |
1306 | } | |
1307 | ||
1308 | /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory | |
1309 | references in the loop. */ | |
1310 | ||
1311 | static void | |
1312 | mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups) | |
1313 | { | |
1314 | struct mem_ref *ref; | |
1315 | bool any = false; | |
1316 | ||
1317 | if (!may_use_storent_in_loop_p (loop)) | |
1318 | return; | |
1319 | ||
1320 | for (; groups; groups = groups->next) | |
1321 | for (ref = groups->refs; ref; ref = ref->next) | |
1322 | any |= mark_nontemporal_store (ref); | |
1323 | ||
1324 | if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE) | |
1325 | emit_mfence_after_loop (loop); | |
1326 | } | |

/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fill in DESC by the description of number of
   iterations.  */

static bool
should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
		      unsigned factor)
{
  if (!can_unroll_loop_p (loop, factor, desc))
    return false;

  /* We only consider loops without control flow for unrolling.  This is not
     a hard restriction -- tree_unroll_loop works with arbitrary loops
     as well; but the unrolling/prefetching is usually more profitable for
     loops consisting of a single basic block, and we want to limit the
     code growth.  */
  if (loop->num_nodes > 2)
    return false;

  return true;
}

/* Determine the coefficient by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  Description of the
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */

static unsigned
determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
			 unsigned ninsns, struct tree_niter_desc *desc,
			 HOST_WIDE_INT est_niter)
{
  unsigned upper_bound;
  unsigned nfactor, factor, mod_constraint;
  struct mem_ref_group *agp;
  struct mem_ref *ref;

  /* First check whether the loop is not too large to unroll.  We ignore
     PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
     from unrolling them enough to make exactly one cache line covered by each
     iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
     us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;

  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would never be entered.  */
  if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
    upper_bound = est_niter;

  if (upper_bound <= 1)
    return 1;

  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
  factor = 1;
  for (agp = refs; agp; agp = agp->next)
    for (ref = agp->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
	{
	  mod_constraint = ref->prefetch_mod;
	  nfactor = least_common_multiple (mod_constraint, factor);
	  if (nfactor <= upper_bound)
	    factor = nfactor;
	}

  if (!should_unroll_loop_p (loop, desc, factor))
    return 1;

  return factor;
}
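
/* For example (hypothetical values): two references with PREFETCH_MOD 4
   and 6 give candidate factors lcm (4, 1) == 4 and then lcm (6, 4) == 12,
   so the loop is unrolled 12 times provided 12 does not exceed UPPER_BOUND;
   each unrolled body then covers each cache line of both references exactly
   once.  */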
1401 | ||
/* Returns the total volume of the memory references REFS, taking into account
   reuses in the innermost loop and cache line size.  TODO -- we should also
   take into account reuses across the iterations of the loops in the loop
   nest.  */

static unsigned
volume_of_references (struct mem_ref_group *refs)
{
  unsigned volume = 0;
  struct mem_ref_group *gr;
  struct mem_ref *ref;

  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
	/* Almost always reuses another value?  */
	if (ref->prefetch_before != PREFETCH_ALL)
	  continue;

	/* If several iterations access the same cache line, use the size of
	   the line divided by this number.  Otherwise, a cache line is
	   accessed in each iteration.  TODO -- in the latter case, we should
	   take the size of the reference into account, rounding it up to a
	   multiple of the cache line size.  */
	volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
      }
  return volume;
}

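/* Worked example (illustrative values): with a 64-byte L1 line, a reference
   with PREFETCH_MOD 1 touches a new line every iteration and contributes
   64 bytes to the per-iteration volume, while one with PREFETCH_MOD 4
   (four iterations per line) contributes only 64 / 4 = 16 bytes.  */
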
/* Returns the volume of memory references accessed across VEC iterations of
   loops, whose sizes are described in the LOOP_SIZES array.  N is the number
   of loops in the nest (length of VEC and LOOP_SIZES vectors).  */

static unsigned
volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
{
  unsigned i;

  for (i = 0; i < n; i++)
    if (vec[i] != 0)
      break;

  if (i == n)
    return 0;

  gcc_assert (vec[i] > 0);

  /* We ignore the parts of the distance vector in subloops, since usually
     the numbers of iterations are much smaller.  */
  return loop_sizes[i] * vec[i];
}

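/* For illustration (hypothetical numbers): in a two-loop nest with
   LOOP_SIZES = {4096, 64} (data accessed per iteration of the outer and
   inner loop, respectively), the distance vector (0, 2) -- a reuse two
   inner iterations apart -- yields 64 * 2 = 128 bytes, while (1, 0)
   yields 4096 bytes; only the outermost nonzero distance matters.  */
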
/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and LOOP is its innermost loop.  */

static void
add_subscript_strides (tree access_fn, unsigned stride,
		       HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
{
  struct loop *aloop;
  tree step;
  HOST_WIDE_INT astep;
  unsigned min_depth = loop_depth (loop) - n;

  while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
    {
      aloop = get_chrec_loop (access_fn);
      step = CHREC_RIGHT (access_fn);
      access_fn = CHREC_LEFT (access_fn);

      if ((unsigned) loop_depth (aloop) <= min_depth)
	continue;

      if (tree_fits_shwi_p (step))
	astep = tree_to_shwi (step);
      else
	astep = L1_CACHE_LINE_SIZE;

      strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
    }
}

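/* For illustration (editor's sketch): in

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
	 use (a[j][i]);

   the first subscript of a[j][i] has the access function {0, +, 1} in the
   j-loop and a byte STRIDE equal to the row size (say 8 * N for 8-byte
   elements), while the second subscript has {0, +, 1} in the i-loop with
   STRIDE 8.  The calls above therefore accumulate about 8 * N into the
   slot of the j-loop and 8 into the slot of the i-loop.  */
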
/* Returns the volume of memory references accessed between two consecutive
   self-reuses of the reference DR.  We consider the subscripts of DR in N
   loops, and LOOP_SIZES contains the volumes of accesses in each of the
   loops.  LOOP is the innermost loop of the current loop nest.  */

static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
		     struct loop *loop)
{
  tree stride, access_fn;
  HOST_WIDE_INT *strides, astride;
  vec<tree> access_fns;
  tree ref = DR_REF (dr);
  unsigned i, ret = ~0u;

  /* In the following example:

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
	 use (a[j][i]);
     the same cache line is accessed every N steps (except if the change from
     i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
     we cannot rely purely on the results of the data dependence analysis.

     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than the cache size.  */

  strides = XCNEWVEC (HOST_WIDE_INT, n);
  access_fns = DR_ACCESS_FNS (dr);

  FOR_EACH_VEC_ELT (access_fns, i, access_fn)
    {
      /* Keep track of the reference corresponding to the subscript, so that we
	 know its stride.  */
      while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
	ref = TREE_OPERAND (ref, 0);

      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
	  if (tree_fits_uhwi_p (stride))
	    astride = tree_to_uhwi (stride);
	  else
	    astride = L1_CACHE_LINE_SIZE;

	  ref = TREE_OPERAND (ref, 0);
	}
      else
	astride = 1;

      add_subscript_strides (access_fn, astride, strides, n, loop);
    }

  for (i = n; i-- > 0; )
    {
      unsigned HOST_WIDE_INT s;

      s = strides[i] < 0 ? -strides[i] : strides[i];

      if (s < (unsigned) L1_CACHE_LINE_SIZE
	  && (loop_sizes[i]
	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
	{
	  ret = loop_sizes[i];
	  break;
	}
    }

  free (strides);
  return ret;
}

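/* Worked example (illustrative values): for the a[j][i] access above with
   8-byte elements and N = 1024, the stride in the inner j-loop is the row
   size (8192 bytes -- no line reuse from one j to the next), while the
   stride in the outer i-loop is 8 bytes.  The scan above therefore picks
   the i-loop, and the self-reuse distance is the data volume of one full
   sweep of the j-loop.  */
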
/* Determines the distance until the first reuse of each reference in REFS
   in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
   memory references in the loop.  Return false if the analysis fails.  */

static bool
determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
			   bool no_other_refs)
{
  struct loop *nest, *aloop;
  vec<data_reference_p> datarefs = vNULL;
  vec<ddr_p> dependences = vNULL;
  struct mem_ref_group *gr;
  struct mem_ref *ref, *refb;
  vec<loop_p> vloops = vNULL;
  unsigned *loop_data_size;
  unsigned i, j, n;
  unsigned volume, dist, adist;
  HOST_WIDE_INT vol;
  data_reference_p dr;
  ddr_p dep;

  if (loop->inner)
    return true;

  /* Find the outermost loop of the loop nest of LOOP (we require that
     there are no sibling loops inside the nest).  */
  nest = loop;
  while (1)
    {
      aloop = loop_outer (nest);

      if (aloop == current_loops->tree_root
	  || aloop->inner->next)
	break;

      nest = aloop;
    }

  /* For each loop, determine the amount of data accessed in each iteration.
     We use this to estimate whether the reference is evicted from the
     cache before its reuse.  */
  find_loop_nest (nest, &vloops);
  n = vloops.length ();
  loop_data_size = XNEWVEC (unsigned, n);
  volume = volume_of_references (refs);
  i = n;
  while (i-- != 0)
    {
      loop_data_size[i] = volume;
      /* Bound the volume by the L2 cache size, since above this bound,
	 all dependence distances are equivalent.  */
      if (volume > L2_CACHE_SIZE_BYTES)
	continue;

      aloop = vloops[i];
      vol = estimated_stmt_executions_int (aloop);
      if (vol == -1)
	vol = expected_loop_iterations (aloop);
      volume *= vol;
    }

  /* Prepare the references in the form suitable for data dependence
     analysis.  We ignore unanalyzable data references (the results
     are used just as a heuristic to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
	dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
			      ref->mem, ref->stmt, !ref->write_p);

	if (dr)
	  {
	    ref->reuse_distance = volume;
	    dr->aux = ref;
	    datarefs.safe_push (dr);
	  }
	else
	  no_other_refs = false;
      }

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      dist = self_reuse_distance (dr, loop_data_size, n, loop);
      ref = (struct mem_ref *) dr->aux;
      if (ref->reuse_distance > dist)
	ref->reuse_distance = dist;

      if (no_other_refs)
	ref->independent_p = true;
    }

  if (!compute_all_dependences (datarefs, &dependences, vloops, true))
    return false;

  FOR_EACH_VEC_ELT (dependences, i, dep)
    {
      if (DDR_ARE_DEPENDENT (dep) == chrec_known)
	continue;

      ref = (struct mem_ref *) DDR_A (dep)->aux;
      refb = (struct mem_ref *) DDR_B (dep)->aux;

      if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
	  || DDR_NUM_DIST_VECTS (dep) == 0)
	{
	  /* If the dependence cannot be analyzed, assume that there might be
	     a reuse.  */
	  dist = 0;

	  ref->independent_p = false;
	  refb->independent_p = false;
	}
      else
	{
	  /* The distance vectors are normalized to be always lexicographically
	     positive, hence we cannot tell just from them whether DDR_A comes
	     before DDR_B or vice versa.  However, this is not important
	     anyway -- if DDR_A is close to DDR_B, then it is either reused in
	     DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
	     in cache (and marking it as nontemporal would not affect
	     anything).  */

	  dist = volume;
	  for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
	    {
	      adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
					     loop_data_size, n);

	      /* If this is a dependence in the innermost loop (i.e., the
		 distances in all superloops are zero) and it is not
		 the trivial self-dependence with distance zero, record that
		 the references are not completely independent.  */
	      if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
		  && (ref != refb
		      || DDR_DIST_VECT (dep, j)[n-1] != 0))
		{
		  ref->independent_p = false;
		  refb->independent_p = false;
		}

	      /* Ignore accesses closer than
		 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
		 so that we use nontemporal prefetches e.g. if a single memory
		 location is accessed several times in a single iteration of
		 the loop.  */
	      if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
		continue;

	      if (adist < dist)
		dist = adist;
	    }
	}

      if (ref->reuse_distance > dist)
	ref->reuse_distance = dist;
      if (refb->reuse_distance > dist)
	refb->reuse_distance = dist;
    }

  free_dependence_relations (dependences);
  free_data_refs (datarefs);
  free (loop_data_size);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reuse distances:\n");
      for (gr = refs; gr; gr = gr->next)
	for (ref = gr->refs; ref; ref = ref->next)
	  fprintf (dump_file, " ref %p distance %u\n",
		   (void *) ref, ref->reuse_distance);
    }

  return true;
}

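/* For illustration (hypothetical numbers): for references a[i] and
   a[i - 1000] in the same innermost loop, the dependence distance is 1000
   iterations; if one iteration accesses 64 bytes, that corresponds to a
   reuse distance of 64000 bytes.  For typical L1 sizes this is well above
   L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION, so the reuse is considered
   too distant to matter and both references keep their large reuse
   distances.  */
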
/* Determine whether or not the trip count to ahead ratio is too small, based
   on profitability considerations.
   AHEAD: the iteration ahead distance,
   EST_NITER: the estimated trip count.  */

static bool
trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
{
  /* Assume the trip count to ahead ratio is large enough if the trip count
     could not be estimated at compile time.  */
  if (est_niter < 0)
    return false;

  if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Not prefetching -- loop estimated to roll only %d times\n",
		 (int) est_niter);
      return true;
    }

  return false;
}

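/* Worked example (illustrative values): with TRIP_COUNT_TO_AHEAD_RATIO 4
   and AHEAD 8, a loop must be expected to iterate at least 4 * 8 = 32
   times for prefetching to be considered; a shorter loop would finish
   before most of the prefetched lines could be used.  */
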
/* Determine whether or not the number of memory references in the loop is
   reasonable, based on profitability and compilation-time considerations.
   NINSNS: estimated number of instructions in the loop,
   MEM_REF_COUNT: total number of memory references in the loop.  */

static bool
mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
{
  int insn_to_mem_ratio;

  if (mem_ref_count == 0)
    return false;

  /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
     (compute_all_dependences) have high costs based on quadratic complexity.
     To avoid huge compilation time, we give up prefetching if mem_ref_count
     is too large.  */
  if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
    return false;

  /* Prefetching improves performance by overlapping cache-missing
     memory accesses with CPU operations.  If the loop does not have
     enough CPU operations to overlap with memory operations, prefetching
     won't give a significant benefit.  One approximate way of checking
     this is to require the ratio of instructions to memory references to
     be above a certain limit.  This approximation works well in practice.
     TODO: Implement a more precise computation by estimating the time
     for each CPU or memory op in the loop.  Time estimates for memory ops
     should account for cache misses.  */
  insn_to_mem_ratio = ninsns / mem_ref_count;

  if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
		 insn_to_mem_ratio);
      return false;
    }

  return true;
}

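/* Worked example (illustrative values): a loop with NINSNS 40 and
   MEM_REF_COUNT 10 has an instruction to memory reference ratio of
   40 / 10 = 4; if PREFETCH_MIN_INSN_TO_MEM_RATIO were 3, such a loop
   would still qualify, whereas a pure copy loop with a ratio near 1
   would be rejected.  */
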
/* Determine whether or not the instruction to prefetch ratio in the loop is
   too small, based on profitability considerations.
   NINSNS: estimated number of instructions in the loop,
   PREFETCH_COUNT: an estimate of the number of prefetches,
   UNROLL_FACTOR: the factor to unroll the loop if prefetching.  */

static bool
insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
				    unsigned unroll_factor)
{
  int insn_to_prefetch_ratio;

  /* Prefetching most likely causes performance degradation when the
     instruction to prefetch ratio is too small.  Too many prefetch
     instructions in a loop may reduce the I-cache performance.
     (unroll_factor * ninsns) is used to estimate the number of instructions in
     the unrolled loop.  This implementation is a bit simplistic -- the number
     of issued prefetch instructions is also affected by unrolling.  So,
     prefetch_mod and the unroll factor should be taken into account when
     determining prefetch_count.  Also, the number of insns of the unrolled
     loop will usually be significantly smaller than the number of insns of the
     original loop * unroll_factor (at least the induction variable increases
     and the exit branches will get eliminated), so it might be better to use
     tree_estimate_loop_size + estimated_unrolled_size.  */
  insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
  if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
		 insn_to_prefetch_ratio);
      return true;
    }

  return false;
}


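/* Worked example (illustrative values): with UNROLL_FACTOR 4, NINSNS 20 and
   PREFETCH_COUNT 10, the unrolled loop is estimated at 4 * 20 = 80 insns,
   giving an instruction to prefetch ratio of 80 / 10 = 8; whether that
   passes depends on MIN_INSN_TO_PREFETCH_RATIO.  */
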
/* Issue prefetch instructions for array references in LOOP.  Returns
   true if the LOOP was unrolled.  */

static bool
loop_prefetch_arrays (struct loop *loop)
{
  struct mem_ref_group *refs;
  unsigned ahead, ninsns, time, unroll_factor;
  HOST_WIDE_INT est_niter;
  struct tree_niter_desc desc;
  bool unrolled = false, no_other_refs;
  unsigned prefetch_count;
  unsigned mem_ref_count;

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " ignored (cold area)\n");
      return false;
    }

  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
  time = tree_num_loop_insns (loop, &eni_time_weights);
  if (time == 0)
    return false;

  ahead = (PREFETCH_LATENCY + time - 1) / time;
  est_niter = estimated_stmt_executions_int (loop);
  if (est_niter == -1)
    est_niter = max_stmt_executions_int (loop);

  /* Prefetching is not likely to be profitable if the trip count to ahead
     ratio is too small.  */
  if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
    return false;

  ninsns = tree_num_loop_insns (loop, &eni_size_weights);

  /* Step 1: gather the memory references.  */
  refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);

  /* Give up prefetching if the number of memory references in the
     loop is not reasonable based on profitability and compilation time
     considerations.  */
  if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
    goto fail;

  /* Step 2: estimate the reuse effects.  */
  prune_by_reuse (refs);

  if (nothing_to_prefetch_p (refs))
    goto fail;

  if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
    goto fail;

  /* Step 3: determine the unroll factor.  */
  unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
					   est_niter);

  /* Estimate prefetch count for the unrolled loop.  */
  prefetch_count = estimate_prefetch_count (refs, unroll_factor);
  if (prefetch_count == 0)
    goto fail;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
	     HOST_WIDE_INT_PRINT_DEC "\n"
	     "insn count %d, mem ref count %d, prefetch count %d\n",
	     ahead, unroll_factor, est_niter,
	     ninsns, mem_ref_count, prefetch_count);

  /* Prefetching is not likely to be profitable if the instruction to prefetch
     ratio is too small.  */
  if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
					  unroll_factor))
    goto fail;

  mark_nontemporal_stores (loop, refs);

  /* Step 4: what to prefetch?  */
  if (!schedule_prefetches (refs, unroll_factor, ahead))
    goto fail;

  /* Step 5: unroll the loop.  TODO -- peeling of first and last few
     iterations so that we do not issue superfluous prefetches.  */
  if (unroll_factor != 1)
    {
      tree_unroll_loop (loop, unroll_factor,
			single_dom_exit (loop), &desc);
      unrolled = true;
    }

  /* Step 6: issue the prefetches.  */
  issue_prefetches (refs, unroll_factor, ahead);

fail:
  release_mem_refs (refs);
  return unrolled;
}

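/* For illustration: the AHEAD computation above is a ceiling division,
   ahead = ceil (PREFETCH_LATENCY / time).  E.g. with a prefetch latency of
   200 cycles and a loop body estimated at 30 time units, ahead is
   (200 + 30 - 1) / 30 = 7, so references are prefetched roughly seven
   iterations before their use.  */
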
/* Issue prefetch instructions for array references in loops.  */

unsigned int
tree_ssa_prefetch_arrays (void)
{
  struct loop *loop;
  bool unrolled = false;
  int todo_flags = 0;

  if (!HAVE_prefetch
      /* It is possible to ask compiler for say -mtune=i486 -march=pentium4.
	 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
	 of processor costs and i486 does not have prefetch, but
	 -march=pentium4 causes HAVE_prefetch to be true.  Ugh.  */
      || PREFETCH_BLOCK == 0)
    return 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Prefetching parameters:\n");
      fprintf (dump_file, " simultaneous prefetches: %d\n",
	       SIMULTANEOUS_PREFETCHES);
      fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
      fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
      fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
	       L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
      fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
      fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
      fprintf (dump_file, " min insn-to-prefetch ratio: %d\n",
	       MIN_INSN_TO_PREFETCH_RATIO);
      fprintf (dump_file, " min insn-to-mem ratio: %d\n",
	       PREFETCH_MIN_INSN_TO_MEM_RATIO);
      fprintf (dump_file, "\n");
    }

  initialize_original_copy_tables ();

  if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
    {
      tree type = build_function_type_list (void_type_node,
					    const_ptr_type_node, NULL_TREE);
      tree decl = add_builtin_function ("__builtin_prefetch", type,
					BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
					NULL, NULL_TREE);
      DECL_IS_NOVOPS (decl) = true;
      set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
    }

  /* We assume that the cache line size is a power of two, so verify that
     here.  */
  gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Processing loop %d:\n", loop->num);

      unrolled |= loop_prefetch_arrays (loop);

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "\n\n");
    }

  if (unrolled)
    {
      scev_reset ();
      todo_flags |= TODO_cleanup_cfg;
    }

  free_original_copy_tables ();
  return todo_flags;
}

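/* For illustration (editor's sketch, not literal pass output): with
   -fprefetch-loop-arrays, a loop such as

     for (i = 0; i < n; i++)
       sum += a[i];

   is conceptually rewritten as

     for (i = 0; i < n; i++)
       {
	 __builtin_prefetch (&a[i + ahead], 0, 3);
	 sum += a[i];
       }

   where ahead is computed in loop_prefetch_arrays so that the line arrives
   before its use; the optional second and third arguments of
   __builtin_prefetch select read vs. write and the temporal locality
   hint.  */
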
/* Prefetching.  */

namespace {

const pass_data pass_data_loop_prefetch =
{
  GIMPLE_PASS, /* type */
  "aprefetch", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_PREFETCH, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_loop_prefetch : public gimple_opt_pass
{
public:
  pass_loop_prefetch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_prefetch_loop_arrays > 0; }
  virtual unsigned int execute (function *);

}; // class pass_loop_prefetch

unsigned int
pass_loop_prefetch::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return tree_ssa_prefetch_arrays ();
}

} // anon namespace

gimple_opt_pass *
make_pass_loop_prefetch (gcc::context *ctxt)
{
  return new pass_loop_prefetch (ctxt);
}
