/* Array prefetching.
   Copyright (C) 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "varray.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "insn-config.h"
#include "recog.h"
#include "hashtab.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "toplev.h"
#include "params.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-data-ref.h"
#include "optabs.h"

/* This pass inserts prefetch instructions to optimize cache usage during
   accesses to arrays in loops.  It processes loops sequentially and:

   1) Gathers all memory references in a single loop.
   2) For each of the references it decides when it is profitable to prefetch
      it.  To do that, we evaluate the reuse among the accesses, and determine
      two values: PREFETCH_BEFORE (meaning that it only makes sense to do
      prefetching in the first PREFETCH_BEFORE iterations of the loop) and
      PREFETCH_MOD (meaning that it only makes sense to prefetch in the
      iterations of the loop that are zero modulo PREFETCH_MOD).  For example
      (assuming cache line size is 64 bytes, char has size 1 byte and there
      is no hardware sequential prefetch):

      char *a;
      for (i = 0; i < max; i++)
        {
          a[255] = ...;         (0)
          a[i] = ...;           (1)
          a[i + 64] = ...;      (2)
          a[16*i] = ...;        (3)
          a[187*i] = ...;       (4)
          a[187*i + 50] = ...;  (5)
        }

      (0) obviously has PREFETCH_BEFORE 1
      (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
          location 64 iterations before it, and PREFETCH_MOD 64 (since
          it hits the same cache line otherwise).
      (2) has PREFETCH_MOD 64
      (3) has PREFETCH_MOD 4
      (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
          the cache line accessed by (4) is the same with probability only
          7/32.
      (5) has PREFETCH_MOD 1 as well.
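
      (For reference, 7/32 matches the chance that two accesses 50 bytes
      apart -- the distance between (4) and (5) above -- fall into the same
      64-byte cache line: (64 - 50) / 64 = 14/64 = 7/32.)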

   Additionally, we use data dependence analysis to determine for each
   reference the distance to the first reuse; this information is used
   to determine the temporality of the issued prefetch instruction.

   3) We determine how far ahead we need to prefetch.  The number of
      iterations needed is time to fetch / time spent in one iteration of
      the loop.  The problem is that we do not know either of these values,
      so we just make a heuristic guess based on a magic (possibly)
      target-specific constant and size of the loop.

   4) Determine which of the references we prefetch.  We take into account
      that there is a maximum number of simultaneous prefetches (provided
      by machine description).  We issue as many prefetches as possible
      while still within this bound (starting with those with lowest
      prefetch_mod, since they are responsible for most of the cache
      misses).

   5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
      and PREFETCH_BEFORE requirements (within some bounds), and to avoid
      prefetching nonaccessed memory.
      TODO -- actually implement peeling.

   6) We actually emit the prefetch instructions.  ??? Perhaps emit the
      prefetch instructions with guards in cases where 5) was not sufficient
      to satisfy the constraints?

   Some other TODO:
      -- write and use more general reuse analysis (that could also be used
         in other cache-aimed loop optimizations)
      -- make it behave sanely together with the prefetches given by the user
         (now we just ignore them; at the very least we should avoid
         optimizing loops in which the user put his own prefetches)
      -- we assume cache line size alignment of arrays; this could be
         improved.  */

/* Magic constants follow.  These should be replaced by machine-specific
   numbers.  */

/* True if a write can be prefetched by a read prefetch.  */

#ifndef WRITE_CAN_USE_READ_PREFETCH
#define WRITE_CAN_USE_READ_PREFETCH 1
#endif

/* True if a read can be prefetched by a write prefetch.  */

#ifndef READ_CAN_USE_WRITE_PREFETCH
#define READ_CAN_USE_WRITE_PREFETCH 0
#endif

/* The size of the block loaded by a single prefetch.  Usually, this is
   the same as cache line size (at the moment, we only consider one level
   of cache hierarchy).  */

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
#endif

/* Do we have forward hardware sequential prefetching?  */

#ifndef HAVE_FORWARD_PREFETCH
#define HAVE_FORWARD_PREFETCH 0
#endif

/* Do we have backward hardware sequential prefetching?  */

#ifndef HAVE_BACKWARD_PREFETCH
#define HAVE_BACKWARD_PREFETCH 0
#endif

/* In some cases we are only able to determine that there is a certain
   probability that the two accesses hit the same cache line.  In this
   case, we issue the prefetches for both of them if this probability
   is less than (1000 - ACCEPTABLE_MISS_RATE) per mille.  */

#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
#endif
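
/* For instance, with the default ACCEPTABLE_MISS_RATE of 50 and a 64-byte
   prefetch block, prune_ref_by_group_reuse below accepts such a
   probabilistic reuse only when the residual distance between the accesses
   is at most 64 * 50 / 1000 = 3 bytes.  */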

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#endif

#define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * L1_CACHE_LINE_SIZE))
/* TODO: Add parameter to specify L2 cache size.  */
#define L2_CACHE_SIZE_BYTES (8 * L1_CACHE_SIZE_BYTES)

/* We consider a memory access nontemporal if it is not reused sooner than
   after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
   accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
   so that we use nontemporal prefetches e.g. if a single memory location
   is accessed several times in a single iteration of the loop.  */
#define NONTEMPORAL_FRACTION 16
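
/* As a purely illustrative example (assuming L1_CACHE_SIZE_BYTES works out
   to 32 kB, so that the L2 bound above is 256 kB): reuse distances of
   256 kB or more make a reference nontemporal, while reuses within
   32 kB / 16 = 2 kB are ignored when computing that distance.  */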

/* In case we have to emit a memory fence instruction after the loop that
   uses nontemporal stores, this defines the builtin to use.  */

#ifndef FENCE_FOLLOWING_MOVNT
#define FENCE_FOLLOWING_MOVNT NULL_TREE
#endif

/* The group of references between which reuse may occur.  */

struct mem_ref_group
{
  tree base;                    /* Base of the reference.  */
  HOST_WIDE_INT step;           /* Step of the reference.  */
  struct mem_ref *refs;         /* References in the group.  */
  struct mem_ref_group *next;   /* Next group of references.  */
};

/* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */

#define PREFETCH_ALL            (~(unsigned HOST_WIDE_INT) 0)

/* The memory reference.  */

struct mem_ref
{
  tree stmt;                    /* Statement in which the reference appears.  */
  tree mem;                     /* The reference.  */
  HOST_WIDE_INT delta;          /* Constant offset of the reference.  */
  struct mem_ref_group *group;  /* The group of references it belongs to.  */
  unsigned HOST_WIDE_INT prefetch_mod;
                                /* Prefetch only each PREFETCH_MOD-th
                                   iteration.  */
  unsigned HOST_WIDE_INT prefetch_before;
                                /* Prefetch only first PREFETCH_BEFORE
                                   iterations.  */
  unsigned reuse_distance;      /* The amount of data accessed before the first
                                   reuse of this value.  */
  struct mem_ref *next;         /* The next reference in the group.  */
  unsigned write_p : 1;         /* Is it a write?  */
  unsigned independent_p : 1;   /* True if the reference is independent of
                                   all other references inside the loop.  */
  unsigned issue_prefetch_p : 1;  /* Should we really issue the prefetch?  */
  unsigned storent_p : 1;       /* True if we changed the store to a
                                   nontemporal one.  */
};

/* Dumps information about reference REF to FILE.  */

static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
{
  fprintf (file, "Reference %p:\n", (void *) ref);

  fprintf (file, "  group %p (base ", (void *) ref->group);
  print_generic_expr (file, ref->group->base, TDF_SLIM);
  fprintf (file, ", step ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
  fprintf (file, ")\n");

  fprintf (file, "  delta ");
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
  fprintf (file, "\n");

  fprintf (file, "  %s\n", ref->write_p ? "write" : "read");

  fprintf (file, "\n");
}

/* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
   exist.  */

static struct mem_ref_group *
find_or_create_group (struct mem_ref_group **groups, tree base,
                      HOST_WIDE_INT step)
{
  struct mem_ref_group *group;

  for (; *groups; groups = &(*groups)->next)
    {
      if ((*groups)->step == step
          && operand_equal_p ((*groups)->base, base, 0))
        return *groups;

      /* Keep the list of groups sorted by decreasing step.  */
      if ((*groups)->step < step)
        break;
    }

  group = XNEW (struct mem_ref_group);
  group->base = base;
  group->step = step;
  group->refs = NULL;
  group->next = *groups;
  *groups = group;

  return group;
}

/* Records a memory reference MEM in GROUP with offset DELTA and write status
   WRITE_P.  The reference occurs in statement STMT.  */

static void
record_ref (struct mem_ref_group *group, tree stmt, tree mem,
            HOST_WIDE_INT delta, bool write_p)
{
  struct mem_ref **aref;

  /* Do not record the same address twice.  */
  for (aref = &group->refs; *aref; aref = &(*aref)->next)
    {
      /* It may not be possible for a write reference to reuse a read
         prefetch, or vice versa.  */
      if (!WRITE_CAN_USE_READ_PREFETCH
          && write_p
          && !(*aref)->write_p)
        continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
          && !write_p
          && (*aref)->write_p)
        continue;

      if ((*aref)->delta == delta)
        return;
    }

  (*aref) = XNEW (struct mem_ref);
  (*aref)->stmt = stmt;
  (*aref)->mem = mem;
  (*aref)->delta = delta;
  (*aref)->write_p = write_p;
  (*aref)->prefetch_before = PREFETCH_ALL;
  (*aref)->prefetch_mod = 1;
  (*aref)->reuse_distance = 0;
  (*aref)->issue_prefetch_p = false;
  (*aref)->group = group;
  (*aref)->next = NULL;
  (*aref)->independent_p = false;
  (*aref)->storent_p = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_mem_ref (dump_file, *aref);
}

/* Release memory references in GROUPS.  */

static void
release_mem_refs (struct mem_ref_group *groups)
{
  struct mem_ref_group *next_g;
  struct mem_ref *ref, *next_r;

  for (; groups; groups = next_g)
    {
      next_g = groups->next;
      for (ref = groups->refs; ref; ref = next_r)
        {
          next_r = ref->next;
          free (ref);
        }
      free (groups);
    }
}

/* A structure used to pass arguments to idx_analyze_ref.  */

struct ar_data
{
  struct loop *loop;            /* Loop of the reference.  */
  tree stmt;                    /* Statement of the reference.  */
  HOST_WIDE_INT *step;          /* Step of the memory reference.  */
  HOST_WIDE_INT *delta;         /* Offset of the memory reference.  */
};

/* Analyzes a single INDEX of a memory reference to obtain the information
   described in analyze_ref.  Callback for for_each_index.  */

static bool
idx_analyze_ref (tree base, tree *index, void *data)
{
  struct ar_data *ar_data = (struct ar_data *) data;
  tree ibase, step, stepsize;
  HOST_WIDE_INT istep, idelta = 0, imult = 1;
  affine_iv iv;

  if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
      || TREE_CODE (base) == ALIGN_INDIRECT_REF)
    return false;

  if (!simple_iv (ar_data->loop, ar_data->stmt, *index, &iv, false))
    return false;
  ibase = iv.base;
  step = iv.step;

  if (!cst_and_fits_in_hwi (step))
    return false;
  istep = int_cst_value (step);

  if (TREE_CODE (ibase) == PLUS_EXPR
      && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
    {
      idelta = int_cst_value (TREE_OPERAND (ibase, 1));
      ibase = TREE_OPERAND (ibase, 0);
    }
  if (cst_and_fits_in_hwi (ibase))
    {
      idelta += int_cst_value (ibase);
      ibase = build_int_cst (TREE_TYPE (ibase), 0);
    }

  if (TREE_CODE (base) == ARRAY_REF)
    {
      stepsize = array_ref_element_size (base);
      if (!cst_and_fits_in_hwi (stepsize))
        return false;
      imult = int_cst_value (stepsize);

      istep *= imult;
      idelta *= imult;
    }

  *ar_data->step += istep;
  *ar_data->delta += idelta;
  *index = ibase;

  return true;
}

/* Tries to express REF_P in the shape &BASE + STEP * iter + DELTA, where DELTA
   and STEP are integer constants and iter is the number of iterations of LOOP.
   The reference occurs in statement STMT.  Strips nonaddressable component
   references from REF_P.  */

static bool
analyze_ref (struct loop *loop, tree *ref_p, tree *base,
             HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
             tree stmt)
{
  struct ar_data ar_data;
  tree off;
  HOST_WIDE_INT bit_offset;
  tree ref = *ref_p;

  *step = 0;
  *delta = 0;

  /* First strip off the component references.  Ignore bitfields.  */
  if (TREE_CODE (ref) == COMPONENT_REF
      && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
    ref = TREE_OPERAND (ref, 0);

  *ref_p = ref;

  for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
    {
      off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
      bit_offset = TREE_INT_CST_LOW (off);
      gcc_assert (bit_offset % BITS_PER_UNIT == 0);

      *delta += bit_offset / BITS_PER_UNIT;
    }

  *base = unshare_expr (ref);
  ar_data.loop = loop;
  ar_data.stmt = stmt;
  ar_data.step = step;
  ar_data.delta = delta;
  return for_each_index (base, idx_analyze_ref, &ar_data);
}

/* Record a memory reference REF to the list REFS.  The reference occurs in
   LOOP in statement STMT and it is a write if WRITE_P is true.  Returns true
   if the reference was recorded, false otherwise.  */

static bool
gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
                              tree ref, bool write_p, tree stmt)
{
  tree base;
  HOST_WIDE_INT step, delta;
  struct mem_ref_group *agrp;

  if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
    return false;

  /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
     are integer constants.  */
  agrp = find_or_create_group (refs, base, step);
  record_ref (agrp, stmt, ref, delta, write_p);

  return true;
}

/* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
   true if there are no other memory references inside the loop.  */

static struct mem_ref_group *
gather_memory_references (struct loop *loop, bool *no_other_refs)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  block_stmt_iterator bsi;
  tree stmt, lhs, rhs, call;
  struct mem_ref_group *refs = NULL;

  *no_other_refs = true;

  /* Scan the loop body in order, so that earlier references precede
     later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
        continue;

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
        {
          stmt = bsi_stmt (bsi);
          call = get_call_expr_in (stmt);
          if (call && !(call_expr_flags (call) & ECF_CONST))
            *no_other_refs = false;

          if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
            {
              if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
                *no_other_refs = false;
              continue;
            }

          lhs = GIMPLE_STMT_OPERAND (stmt, 0);
          rhs = GIMPLE_STMT_OPERAND (stmt, 1);

          if (REFERENCE_CLASS_P (rhs))
            *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                            rhs, false, stmt);
          if (REFERENCE_CLASS_P (lhs))
            *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                            lhs, true, stmt);
        }
    }
  free (body);

  return refs;
}

/* Prune the prefetch candidate REF using self-reuse.  */

static void
prune_ref_by_self_reuse (struct mem_ref *ref)
{
  HOST_WIDE_INT step = ref->group->step;
  bool backward = step < 0;

  if (step == 0)
    {
      /* Prefetch references to an invariant address just once.  */
      ref->prefetch_before = 1;
      return;
    }

  if (backward)
    step = -step;

  if (step > PREFETCH_BLOCK)
    return;

  if ((backward && HAVE_BACKWARD_PREFETCH)
      || (!backward && HAVE_FORWARD_PREFETCH))
    {
      ref->prefetch_before = 1;
      return;
    }

  ref->prefetch_mod = PREFETCH_BLOCK / step;
}

/* Divides X by BY, rounding down.  */

static HOST_WIDE_INT
ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
{
  gcc_assert (by > 0);

  if (x >= 0)
    return x / by;
  else
    return (x + by - 1) / by;
}

/* Prune the prefetch candidate REF using the reuse with BY.
   If BY_IS_BEFORE is true, BY is before REF in the loop.  */

static void
prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
                          bool by_is_before)
{
  HOST_WIDE_INT step = ref->group->step;
  bool backward = step < 0;
  HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
  HOST_WIDE_INT delta = delta_b - delta_r;
  HOST_WIDE_INT hit_from;
  unsigned HOST_WIDE_INT prefetch_before, prefetch_block;

  if (delta == 0)
    {
      /* If the references have the same address, only prefetch the
         former.  */
      if (by_is_before)
        ref->prefetch_before = 0;

      return;
    }

  if (!step)
    {
      /* If the reference addresses are invariant and fall into the
         same cache line, prefetch just the first one.  */
      if (!by_is_before)
        return;

      if (ddown (ref->delta, PREFETCH_BLOCK)
          != ddown (by->delta, PREFETCH_BLOCK))
        return;

      ref->prefetch_before = 0;
      return;
    }

  /* Only prune the reference that is behind in the array.  */
  if (backward)
    {
      if (delta > 0)
        return;

      /* Transform the data so that we may assume that the accesses
         are forward.  */
      delta = - delta;
      step = -step;
      delta_r = PREFETCH_BLOCK - 1 - delta_r;
      delta_b = PREFETCH_BLOCK - 1 - delta_b;
    }
  else
    {
      if (delta < 0)
        return;
    }

  /* Check whether the two references are likely to hit the same cache
     line, and how far apart the iterations in which this occurs are.  */

  if (step <= PREFETCH_BLOCK)
    {
      /* The accesses are sure to meet.  Let us check when.  */
      hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
      prefetch_before = (hit_from - delta_r + step - 1) / step;

      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* A more complicated case.  First let us ensure that size of cache line
     and step are coprime (here we assume that PREFETCH_BLOCK is a power
     of two).  */
  prefetch_block = PREFETCH_BLOCK;
  while ((step & 1) == 0
         && prefetch_block > 1)
    {
      step >>= 1;
      prefetch_block >>= 1;
      delta >>= 1;
    }

  /* Now step > prefetch_block, and step and prefetch_block are coprime.
     Determine the probability that the accesses hit the same cache line.  */

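  /* As a worked example (matching references (4) and (5) at the top of this
     file, step 187, delta 50, 64-byte block): delta / step = 50 / 187 = 0,
     and the residual deltas 50 and then 187 - 50 = 137 both exceed the
     64 * ACCEPTABLE_MISS_RATE / 1000 = 3 byte threshold, so PREFETCH_BEFORE
     is left unset.  */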
  prefetch_before = delta / step;
  delta %= step;
  if ((unsigned HOST_WIDE_INT) delta
      <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
    {
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* Try also the following iteration.  */
  prefetch_before++;
  delta = step - delta;
  if ((unsigned HOST_WIDE_INT) delta
      <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
    {
      if (prefetch_before < ref->prefetch_before)
        ref->prefetch_before = prefetch_before;

      return;
    }

  /* The reference REF probably does not reuse BY.  */
  return;
}

/* Prune the prefetch candidate REF using the reuses with other references
   in REFS.  */

static void
prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
{
  struct mem_ref *prune_by;
  bool before = true;

  prune_ref_by_self_reuse (ref);

  for (prune_by = refs; prune_by; prune_by = prune_by->next)
    {
      if (prune_by == ref)
        {
          before = false;
          continue;
        }

      if (!WRITE_CAN_USE_READ_PREFETCH
          && ref->write_p
          && !prune_by->write_p)
        continue;
      if (!READ_CAN_USE_WRITE_PREFETCH
          && !ref->write_p
          && prune_by->write_p)
        continue;

      prune_ref_by_group_reuse (ref, prune_by, before);
    }
}

/* Prune the prefetch candidates in GROUP using the reuse analysis.  */

static void
prune_group_by_reuse (struct mem_ref_group *group)
{
  struct mem_ref *ref_pruned;

  for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
    {
      prune_ref_by_reuse (ref_pruned, group->refs);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Reference %p:", (void *) ref_pruned);

          if (ref_pruned->prefetch_before == PREFETCH_ALL
              && ref_pruned->prefetch_mod == 1)
            fprintf (dump_file, " no restrictions");
          else if (ref_pruned->prefetch_before == 0)
            fprintf (dump_file, " do not prefetch");
          else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
            fprintf (dump_file, " prefetch once");
          else
            {
              if (ref_pruned->prefetch_before != PREFETCH_ALL)
                {
                  fprintf (dump_file, " prefetch before ");
                  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
                           ref_pruned->prefetch_before);
                }
              if (ref_pruned->prefetch_mod != 1)
                {
                  fprintf (dump_file, " prefetch mod ");
                  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
                           ref_pruned->prefetch_mod);
                }
            }
          fprintf (dump_file, "\n");
        }
    }
}

/* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */

static void
prune_by_reuse (struct mem_ref_group *groups)
{
  for (; groups; groups = groups->next)
    prune_group_by_reuse (groups);
}

/* Returns true if we should issue a prefetch for REF.  */

static bool
should_issue_prefetch_p (struct mem_ref *ref)
{
  /* For now, do not issue prefetches that would only be useful in the
     first few iterations of the loop.  */
  if (ref->prefetch_before != PREFETCH_ALL)
    return false;

  /* Do not prefetch nontemporal stores.  */
  if (ref->storent_p)
    return false;

  return true;
}

/* Decide which of the prefetch candidates in GROUPS to prefetch.
   AHEAD is the number of iterations to prefetch ahead (which corresponds
   to the number of simultaneous instances of one prefetch running at a
   time).  UNROLL_FACTOR is the factor by which the loop is going to be
   unrolled.  Returns true if there is anything to prefetch.  */

static bool
schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
                     unsigned ahead)
{
  unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
  unsigned slots_per_prefetch;
  struct mem_ref *ref;
  bool any = false;

  /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
  remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;

  /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
     AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
     it will need a prefetch slot.  */
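  /* For instance (hypothetical numbers), AHEAD 10 and UNROLL_FACTOR 4 give
     (10 + 2) / 4 = 3 slots per prefetch, i.e. AHEAD / UNROLL_FACTOR rounded
     to the nearest integer.  */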
  slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
             slots_per_prefetch);

  /* For now we just take memory references one by one and issue
     prefetches for as many as possible.  The groups are sorted
     starting with the largest step, since the references with
     large step are more likely to cause many cache misses.  */

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      {
        if (!should_issue_prefetch_p (ref))
          continue;

        /* If we need to prefetch the reference each PREFETCH_MOD iterations,
           and we unroll the loop UNROLL_FACTOR times, we need to insert
           ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
           iteration.  */
        n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                        / ref->prefetch_mod);
        prefetch_slots = n_prefetches * slots_per_prefetch;

        /* If more than half of the prefetches would be lost anyway, do not
           issue the prefetch.  */
        if (2 * remaining_prefetch_slots < prefetch_slots)
          continue;

        ref->issue_prefetch_p = true;

        if (remaining_prefetch_slots <= prefetch_slots)
          return true;
        remaining_prefetch_slots -= prefetch_slots;
        any = true;
      }

  return any;
}

/* Determine whether there is any reference suitable for prefetching
   in GROUPS.  */

static bool
anything_to_prefetch_p (struct mem_ref_group *groups)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        return true;

  return false;
}

/* Issue prefetches for the reference REF into the loop as decided before.
   AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
   is the factor by which LOOP was unrolled.  */

static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
{
  HOST_WIDE_INT delta;
  tree addr, addr_base, prefetch, write_p, local;
  block_stmt_iterator bsi;
  unsigned n_prefetches, ap;
  bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Issued%s prefetch for %p.\n",
             nontemporal ? " nontemporal" : "",
             (void *) ref);

  bsi = bsi_for_stmt (ref->stmt);

  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                  / ref->prefetch_mod);
  addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
  addr_base = force_gimple_operand_bsi (&bsi, unshare_expr (addr_base), true, NULL);
  write_p = ref->write_p ? integer_one_node : integer_zero_node;
  local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);

  for (ap = 0; ap < n_prefetches; ap++)
    {
      /* Determine the address to prefetch.  */
      delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
      addr = fold_build2 (PLUS_EXPR, ptr_type_node,
                          addr_base, build_int_cst (ptr_type_node, delta));
      addr = force_gimple_operand_bsi (&bsi, unshare_expr (addr), true, NULL);

      /* Create the prefetch instruction.  */
      prefetch = build_call_expr (built_in_decls[BUILT_IN_PREFETCH],
                                  3, addr, write_p, local);
      bsi_insert_before (&bsi, prefetch, BSI_SAME_STMT);
    }
}

/* Issue prefetches for the references in GROUPS into the loop as decided
   before.  AHEAD is the number of iterations to prefetch ahead.
   UNROLL_FACTOR is the factor by which LOOP was unrolled.  */

static void
issue_prefetches (struct mem_ref_group *groups,
                  unsigned unroll_factor, unsigned ahead)
{
  struct mem_ref *ref;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      if (ref->issue_prefetch_p)
        issue_prefetch_ref (ref, unroll_factor, ahead);
}

/* Returns true if REF is a memory write for which a nontemporal store insn
   can be used.  */

static bool
nontemporal_store_p (struct mem_ref *ref)
{
  enum machine_mode mode;
  enum insn_code code;

  /* REF must be a write that is not reused.  We require it to be independent
     of all other memory references in the loop, as the nontemporal stores may
     be reordered with respect to other memory references.  */
  if (!ref->write_p
      || !ref->independent_p
      || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
    return false;

  /* Check that we have the storent instruction for the mode.  */
  mode = TYPE_MODE (TREE_TYPE (ref->mem));
  if (mode == BLKmode)
    return false;

  code = storent_optab->handlers[mode].insn_code;
  return code != CODE_FOR_nothing;
}

/* If REF is a nontemporal store, we mark the corresponding modify statement
   and return true.  Otherwise, we return false.  */

static bool
mark_nontemporal_store (struct mem_ref *ref)
{
  if (!nontemporal_store_p (ref))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
             (void *) ref);

  MOVE_NONTEMPORAL (ref->stmt) = true;
  ref->storent_p = true;

  return true;
}

/* Issue a memory fence instruction after LOOP.  */

static void
emit_mfence_after_loop (struct loop *loop)
{
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);
  edge exit;
  tree call;
  block_stmt_iterator bsi;
  unsigned i;

  for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
    {
      call = build_function_call_expr (FENCE_FOLLOWING_MOVNT, NULL_TREE);

      if (!single_pred_p (exit->dest)
          /* If possible, we prefer not to insert the fence on other paths
             in the cfg.  */
          && !(exit->flags & EDGE_ABNORMAL))
        split_loop_exit_edge (exit);
      bsi = bsi_after_labels (exit->dest);

      bsi_insert_before (&bsi, call, BSI_NEW_STMT);
      mark_virtual_ops_for_renaming (call);
    }

  VEC_free (edge, heap, exits);
  update_ssa (TODO_update_ssa_only_virtuals);
}

/* Returns true if we can use storent in LOOP, false otherwise.  */

static bool
may_use_storent_in_loop_p (struct loop *loop)
{
  bool ret = true;

  if (loop->inner != NULL)
    return false;

  /* If we must issue an mfence insn after using storent, check that there
     is a suitable place for it at each of the loop exits.  */
  if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
    {
      VEC (edge, heap) *exits = get_loop_exit_edges (loop);
      unsigned i;
      edge exit;

      for (i = 0; VEC_iterate (edge, exits, i, exit); i++)
        if ((exit->flags & EDGE_ABNORMAL)
            && exit->dest == EXIT_BLOCK_PTR)
          ret = false;

      VEC_free (edge, heap, exits);
    }

  return ret;
}

/* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
   references in the loop.  */

static void
mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
{
  struct mem_ref *ref;
  bool any = false;

  if (!may_use_storent_in_loop_p (loop))
    return;

  for (; groups; groups = groups->next)
    for (ref = groups->refs; ref; ref = ref->next)
      any |= mark_nontemporal_store (ref);

  if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
    emit_mfence_after_loop (loop);
}

/* Determines whether we can profitably unroll LOOP FACTOR times, and if
   this is the case, fills in DESC with the description of the number of
   iterations.  */

static bool
should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
                      unsigned factor)
{
  if (!can_unroll_loop_p (loop, factor, desc))
    return false;

  /* We only consider loops without control flow for unrolling.  This is not
     a hard restriction -- tree_unroll_loop works with arbitrary loops
     as well; but the unrolling/prefetching is usually more profitable for
     loops consisting of a single basic block, and we want to limit the
     code growth.  */
  if (loop->num_nodes > 2)
    return false;

  return true;
}

/* Determine the factor by which to unroll LOOP, from the information
   contained in the list of memory references REFS.  A description of the
   number of iterations of LOOP is stored to DESC.  NINSNS is the number of
   insns of the LOOP.  EST_NITER is the estimated number of iterations of
   the loop, or -1 if no estimate is available.  */

static unsigned
determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
                         unsigned ninsns, struct tree_niter_desc *desc,
                         HOST_WIDE_INT est_niter)
{
  unsigned upper_bound;
  unsigned nfactor, factor, mod_constraint;
  struct mem_ref_group *agp;
  struct mem_ref *ref;

  /* First check whether the loop is not too large to unroll.  We ignore
     PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
     from unrolling them enough to make exactly one cache line covered by each
     iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
     us from unrolling the loops too many times in cases where we only expect
     gains from better scheduling and decreasing loop overhead, which is not
     the case here.  */
  upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;

  /* If we unrolled the loop more times than it iterates, the unrolled version
     of the loop would never be entered.  */
  if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
    upper_bound = est_niter;

  if (upper_bound <= 1)
    return 1;

  /* Choose the factor so that we may prefetch each cache line just once,
     but bound the unrolling by UPPER_BOUND.  */
  factor = 1;
  for (agp = refs; agp; agp = agp->next)
    for (ref = agp->refs; ref; ref = ref->next)
      if (should_issue_prefetch_p (ref))
        {
          mod_constraint = ref->prefetch_mod;
          nfactor = least_common_multiple (mod_constraint, factor);
          if (nfactor <= upper_bound)
            factor = nfactor;
        }
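
  /* For example (purely illustrative), references with PREFETCH_MOD 4 and 64
     push FACTOR to their least common multiple, 64, provided that stays
     within UPPER_BOUND; each cache line is then prefetched exactly once per
     unrolled iteration.  */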

  if (!should_unroll_loop_p (loop, desc, factor))
    return 1;

  return factor;
}

/* Returns the total volume of the memory references REFS, taking into account
   reuses in the innermost loop and cache line size.  TODO -- we should also
   take into account reuses across the iterations of the loops in the loop
   nest.  */

static unsigned
volume_of_references (struct mem_ref_group *refs)
{
  unsigned volume = 0;
  struct mem_ref_group *gr;
  struct mem_ref *ref;

  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
        /* Almost always reuses another value?  */
        if (ref->prefetch_before != PREFETCH_ALL)
          continue;

        /* If several iterations access the same cache line, use the size of
           the line divided by this number.  Otherwise, a cache line is
           accessed in each iteration.  TODO -- in the latter case, we should
           take the size of the reference into account, rounding it up to a
           multiple of the cache line size.  */
        volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
      }
  return volume;
}

/* Returns the volume of memory references accessed across VEC iterations of
   loops, whose sizes are described in the LOOP_SIZES array.  N is the number
   of the loops in the nest (length of VEC and LOOP_SIZES vectors).  */

static unsigned
volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
{
  unsigned i;

  for (i = 0; i < n; i++)
    if (vec[i] != 0)
      break;

  if (i == n)
    return 0;

  gcc_assert (vec[i] > 0);

  /* We ignore the parts of the distance vector in subloops, since usually
     the numbers of iterations are much smaller.  */
  return loop_sizes[i] * vec[i];
}

/* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
   at the position corresponding to the loop of the step.  N is the depth
   of the considered loop nest, and LOOP is its innermost loop.  */

static void
add_subscript_strides (tree access_fn, unsigned stride,
                       HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
{
  struct loop *aloop;
  tree step;
  HOST_WIDE_INT astep;
  unsigned min_depth = loop_depth (loop) - n;

  while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
    {
      aloop = get_chrec_loop (access_fn);
      step = CHREC_RIGHT (access_fn);
      access_fn = CHREC_LEFT (access_fn);

      if ((unsigned) loop_depth (aloop) <= min_depth)
        continue;

      if (host_integerp (step, 0))
        astep = tree_low_cst (step, 0);
      else
        astep = L1_CACHE_LINE_SIZE;

      strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
    }
}

/* Returns the volume of memory references accessed between two consecutive
   self-reuses of the reference DR.  We consider the subscripts of DR in N
   loops, and LOOP_SIZES contains the volumes of accesses in each of the
   loops.  LOOP is the innermost loop of the current loop nest.  */

static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
                     struct loop *loop)
{
  tree stride, access_fn;
  HOST_WIDE_INT *strides, astride;
  VEC (tree, heap) *access_fns;
  tree ref = DR_REF (dr);
  unsigned i, ret = ~0u;

  /* In the following example:

     for (i = 0; i < N; i++)
       for (j = 0; j < N; j++)
         use (a[j][i]);
     the same cache line is accessed each N steps (except if the change from
     i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
     we cannot rely purely on the results of the data dependence analysis.

     Instead, we compute the stride of the reference in each loop, and consider
     the innermost loop in which the stride is less than the cache line
     size.  */

  strides = XCNEWVEC (HOST_WIDE_INT, n);
  access_fns = DR_ACCESS_FNS (dr);

  for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
    {
      /* Keep track of the reference corresponding to the subscript, so that we
         know its stride.  */
      while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
        ref = TREE_OPERAND (ref, 0);

      if (TREE_CODE (ref) == ARRAY_REF)
        {
          stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
          if (host_integerp (stride, 1))
            astride = tree_low_cst (stride, 1);
          else
            astride = L1_CACHE_LINE_SIZE;

          ref = TREE_OPERAND (ref, 0);
        }
      else
        astride = 1;

      add_subscript_strides (access_fn, astride, strides, n, loop);
    }

  for (i = n; i-- > 0; )
    {
      unsigned HOST_WIDE_INT s;

      s = strides[i] < 0 ? -strides[i] : strides[i];

      if (s < (unsigned) L1_CACHE_LINE_SIZE
          && (loop_sizes[i]
              > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
        {
          ret = loop_sizes[i];
          break;
        }
    }

  free (strides);
  return ret;
}

/* Determines the distance to the first reuse of each reference in REFS
   in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
   memory references in the loop.  */

static void
determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
                           bool no_other_refs)
{
  struct loop *nest, *aloop;
  VEC (data_reference_p, heap) *datarefs = NULL;
  VEC (ddr_p, heap) *dependences = NULL;
  struct mem_ref_group *gr;
  struct mem_ref *ref, *refb;
  VEC (loop_p, heap) *vloops = NULL;
  unsigned *loop_data_size;
  unsigned i, j, n;
  unsigned volume, dist, adist;
  HOST_WIDE_INT vol;
  data_reference_p dr;
  ddr_p dep;

  if (loop->inner)
    return;

  /* Find the outermost loop of the loop nest of loop (we require that
     there are no sibling loops inside the nest).  */
  nest = loop;
  while (1)
    {
      aloop = loop_outer (nest);

      if (aloop == current_loops->tree_root
          || aloop->inner->next)
        break;

      nest = aloop;
    }

  /* For each loop, determine the amount of data accessed in each iteration.
     We use this to estimate whether the reference is evicted from the
     cache before its reuse.  */
  find_loop_nest (nest, &vloops);
  n = VEC_length (loop_p, vloops);
  loop_data_size = XNEWVEC (unsigned, n);
  volume = volume_of_references (refs);
  i = n;
  while (i-- != 0)
    {
      loop_data_size[i] = volume;
      /* Bound the volume by the L2 cache size, since above this bound,
         all dependence distances are equivalent.  */
      if (volume > L2_CACHE_SIZE_BYTES)
        continue;

      aloop = VEC_index (loop_p, vloops, i);
      vol = estimated_loop_iterations_int (aloop, false);
      if (vol < 0)
        vol = expected_loop_iterations (aloop);
      volume *= vol;
    }

  /* Prepare the references in the form suitable for data dependence
     analysis.  We ignore unanalyzable data references (the results
     are used just as a heuristic to estimate temporality of the
     references, hence we do not need to worry about correctness).  */
  for (gr = refs; gr; gr = gr->next)
    for (ref = gr->refs; ref; ref = ref->next)
      {
        dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);

        if (dr)
          {
            ref->reuse_distance = volume;
            dr->aux = ref;
            VEC_safe_push (data_reference_p, heap, datarefs, dr);
          }
        else
          no_other_refs = false;
      }

  for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
    {
      dist = self_reuse_distance (dr, loop_data_size, n, loop);
      ref = dr->aux;
      if (ref->reuse_distance > dist)
        ref->reuse_distance = dist;

      if (no_other_refs)
        ref->independent_p = true;
    }

  compute_all_dependences (datarefs, &dependences, vloops, true);

  for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
    {
      if (DDR_ARE_DEPENDENT (dep) == chrec_known)
        continue;

      ref = DDR_A (dep)->aux;
      refb = DDR_B (dep)->aux;

      if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
          || DDR_NUM_DIST_VECTS (dep) == 0)
        {
          /* If the dependence cannot be analyzed, assume that there might be
             a reuse.  */
          dist = 0;

          ref->independent_p = false;
          refb->independent_p = false;
        }
      else
        {
          /* The distance vectors are normalized to be always lexicographically
             positive, hence we cannot tell just from them whether DDR_A comes
             before DDR_B or vice versa.  However, it is not important,
             anyway -- if DDR_A is close to DDR_B, then it is either reused in
             DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
             in cache (and marking it as nontemporal would not affect
             anything).  */

          dist = volume;
          for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
            {
              adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
                                             loop_data_size, n);

              /* If this is a dependence in the innermost loop (i.e., the
                 distances in all superloops are zero) and it is not
                 the trivial self-dependence with distance zero, record that
                 the references are not completely independent.  */
              if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
                  && (ref != refb
                      || DDR_DIST_VECT (dep, j)[n-1] != 0))
                {
                  ref->independent_p = false;
                  refb->independent_p = false;
                }

              /* Ignore accesses closer than
                 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
                 so that we use nontemporal prefetches e.g. if a single memory
                 location is accessed several times in a single iteration of
                 the loop.  */
              if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
                continue;

              if (adist < dist)
                dist = adist;
            }
        }

      if (ref->reuse_distance > dist)
        ref->reuse_distance = dist;
      if (refb->reuse_distance > dist)
        refb->reuse_distance = dist;
    }

  free_dependence_relations (dependences);
  free_data_refs (datarefs);
  free (loop_data_size);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Reuse distances:\n");
      for (gr = refs; gr; gr = gr->next)
        for (ref = gr->refs; ref; ref = ref->next)
          fprintf (dump_file, " ref %p distance %u\n",
                   (void *) ref, ref->reuse_distance);
    }
}

/* Issue prefetch instructions for array references in LOOP.  Returns
   true if the LOOP was unrolled.  */

static bool
loop_prefetch_arrays (struct loop *loop)
{
  struct mem_ref_group *refs;
  unsigned ahead, ninsns, time, unroll_factor;
  HOST_WIDE_INT est_niter;
  struct tree_niter_desc desc;
  bool unrolled = false, no_other_refs;

  if (!maybe_hot_bb_p (loop->header))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "  ignored (cold area)\n");
      return false;
    }

  /* Step 1: gather the memory references.  */
  refs = gather_memory_references (loop, &no_other_refs);

  /* Step 2: estimate the reuse effects.  */
  prune_by_reuse (refs);

  if (!anything_to_prefetch_p (refs))
    goto fail;

  determine_loop_nest_reuse (loop, refs, no_other_refs);

  /* Step 3: determine the ahead and unroll factor.  */

  /* FIXME: the time should be weighted by the probabilities of the blocks in
     the loop body.  */
  time = tree_num_loop_insns (loop, &eni_time_weights);
  ahead = (PREFETCH_LATENCY + time - 1) / time;
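  /* E.g. with a (hypothetical) PREFETCH_LATENCY of 200 and a loop body whose
     TIME estimate is 60, we prefetch ceil (200 / 60) = 4 iterations ahead.  */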
  est_niter = estimated_loop_iterations_int (loop, false);

  /* The prefetches will run for AHEAD iterations of the original loop.  Unless
     the loop rolls at least AHEAD times, prefetching the references does not
     make sense.  */
  if (est_niter >= 0 && est_niter <= (HOST_WIDE_INT) ahead)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Not prefetching -- loop estimated to roll only %d times\n",
                 (int) est_niter);
      goto fail;
    }

  mark_nontemporal_stores (loop, refs);

  ninsns = tree_num_loop_insns (loop, &eni_size_weights);
  unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
                                           est_niter);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor);

  /* Step 4: what to prefetch?  */
  if (!schedule_prefetches (refs, unroll_factor, ahead))
    goto fail;

  /* Step 5: unroll the loop.  TODO -- peeling of first and last few
     iterations so that we do not issue superfluous prefetches.  */
  if (unroll_factor != 1)
    {
      tree_unroll_loop (loop, unroll_factor,
                        single_dom_exit (loop), &desc);
      unrolled = true;
    }

  /* Step 6: issue the prefetches.  */
  issue_prefetches (refs, unroll_factor, ahead);

fail:
  release_mem_refs (refs);
  return unrolled;
}

/* Issue prefetch instructions for array references in loops.  */

unsigned int
tree_ssa_prefetch_arrays (void)
{
  loop_iterator li;
  struct loop *loop;
  bool unrolled = false;
  int todo_flags = 0;

  if (!HAVE_prefetch
      /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
         -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
         of processor costs and i486 does not have prefetch, but
         -march=pentium4 causes HAVE_prefetch to be true.  Ugh.  */
      || PREFETCH_BLOCK == 0)
    return 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Prefetching parameters:\n");
      fprintf (dump_file, "    simultaneous prefetches: %d\n",
               SIMULTANEOUS_PREFETCHES);
      fprintf (dump_file, "    prefetch latency: %d\n", PREFETCH_LATENCY);
      fprintf (dump_file, "    prefetch block size: %d\n", PREFETCH_BLOCK);
      fprintf (dump_file, "    L1 cache size: %d lines, %d bytes\n",
               L1_CACHE_SIZE, L1_CACHE_SIZE_BYTES);
      fprintf (dump_file, "    L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
      fprintf (dump_file, "    L2 cache size: %d bytes\n", L2_CACHE_SIZE_BYTES);
      fprintf (dump_file, "\n");
    }

  initialize_original_copy_tables ();

  if (!built_in_decls[BUILT_IN_PREFETCH])
    {
      tree type = build_function_type (void_type_node,
                                       tree_cons (NULL_TREE,
                                                  const_ptr_type_node,
                                                  NULL_TREE));
      tree decl = add_builtin_function ("__builtin_prefetch", type,
                                        BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
                                        NULL, NULL_TREE);
      DECL_IS_NOVOPS (decl) = true;
      built_in_decls[BUILT_IN_PREFETCH] = decl;
    }

  /* We assume that the size of a cache line is a power of two, so verify
     this here.  */
  gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);

  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Processing loop %d:\n", loop->num);

      unrolled |= loop_prefetch_arrays (loop);

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "\n\n");
    }

  if (unrolled)
    {
      scev_reset ();
      todo_flags |= TODO_cleanup_cfg;
    }

  free_original_copy_tables ();
  return todo_flags;
}