/* Loop distribution.
   Copyright (C) 2006-2019 Free Software Foundation, Inc.
   Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
   and Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass performs loop distribution: for example, the loop

   |DO I = 2, N
   |    A(I) = B(I) + C
   |    D(I) = A(I-1)*E
   |ENDDO

   is transformed to

   |DOALL I = 2, N
   |   A(I) = B(I) + C
   |ENDDO
   |
   |DOALL I = 2, N
   |   D(I) = A(I-1)*E
   |ENDDO

   Loop distribution is the dual of loop fusion.  It separates statements
   of a loop (or loop nest) into multiple loops (or loop nests) with the
   same loop header.  The major goal is to separate statements which may
   be vectorized from those that can't.  This pass implements distribution
   in the following steps:

     1) Seed partitions with specific types of statements.  For now we
	support two types of seed statements: statements defining a
	variable used outside of the loop, and statements storing to
	memory.
     2) Build a reduced dependence graph (RDG) for the loop to be
	distributed.  The vertices (RDG:V) model all statements in the
	loop and the edges (RDG:E) model flow and control dependencies
	between statements.
     3) Apart from the RDG, compute data dependencies between memory
	references.
     4) Starting from a seed statement, build up a partition by adding
	dependent statements according to the RDG's dependence information.
	A partition is classified as parallel if it can be executed in
	parallel, or as sequential if it can't.  A parallel partition is
	further classified as a builtin kind if it can be implemented as
	builtin function calls.
     5) Build a partition dependence graph (PG) based on data dependencies.
	The vertices (PG:V) model all partitions and the edges (PG:E) model
	all data dependencies between every pair of partitions.  In general,
	a data dependence is either known or unknown at compilation time.
	In C family languages, quite a few compilation-time-unknown
	dependencies exist because of possible aliasing between data
	references.  We categorize PG's edges into two types: "true" edges,
	which represent compilation-time-known data dependencies, and
	"alias" edges for all other data dependencies.
     6) Traverse the subgraph of PG as if all "alias" edges didn't exist.
	Merge the partitions in each strongly connected component (SCC)
	correspondingly.  Build a new PG for the merged partitions.
     7) Traverse PG again, this time with both "true" and "alias" edges
	included.  We try to break SCCs by removing some edges.  Because
	SCCs formed by "true" edges are all fused in step 6), we can break
	SCCs by removing some "alias" edges.  It's NP-hard to choose an
	optimal edge set; fortunately a simple approximation is good enough
	for us given the small problem scale.
     8) Collect all data dependencies of the removed "alias" edges.  Create
	runtime alias checks for the collected data dependencies.
     9) Version the loop under the condition of the runtime alias checks.
	Given that loop distribution generally introduces additional
	overhead, it is only useful if vectorization is achieved in a
	distributed loop.  We version the loop with the internal function
	call IFN_LOOP_DIST_ALIAS.  If no distributed loop can be vectorized,
	we simply remove the distributed loops and revert to the original
	loop.

   TODO:
     1) We only distribute innermost two-level loop nests now.  We should
	extend it for arbitrary loop nests in the future.
     2) We only fuse partitions in an SCC now.  A better fusion algorithm
	is desired to minimize loop overhead, maximize parallelism and
	maximize data reuse.  */
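
/* As a small C illustration of the steps above (arrays a, b, c and bound
   n are placeholder names), a loop like

     |for (i = 0; i < n; i++)
     |  {
     |    a[i] = 0;
     |    b[i] = c[i] + 1;
     |  }

   is seeded into two partitions from its two stores.  The ZERO-ing
   statement typically forms a parallel partition of builtin kind
   PKIND_MEMSET and is emitted as a single memset call, while the other
   statement remains a normal loop partition.  If a can't be disambiguated
   from b or c at compile time, the edges between the two partitions in PG
   are "alias" edges, and the distributed loops are guarded by a runtime
   alias check via IFN_LOOP_DIST_ALIAS.  */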

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "tree-vectorizer.h"
#include "tree-eh.h"
#include "gimple-fold.h"


#define MAX_DATAREFS_NUM \
	((unsigned) PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))

/* Threshold controlling the number of distributed partitions.  Since it
   may become unnecessary if a memory stream cost model is invented in the
   future, we define it as a temporary macro rather than a parameter.  */
#define NUM_PARTITION_THRESHOLD (4)

/* Hashtable helpers.  */

struct ddr_hasher : nofree_ptr_hash <struct data_dependence_relation>
{
  static inline hashval_t hash (const data_dependence_relation *);
  static inline bool equal (const data_dependence_relation *,
			    const data_dependence_relation *);
};

/* Hash function for data dependence.  */

inline hashval_t
ddr_hasher::hash (const data_dependence_relation *ddr)
{
  inchash::hash h;
  h.add_ptr (DDR_A (ddr));
  h.add_ptr (DDR_B (ddr));
  return h.end ();
}

/* Hash table equality function for data dependence.  */

inline bool
ddr_hasher::equal (const data_dependence_relation *ddr1,
		   const data_dependence_relation *ddr2)
{
  return (DDR_A (ddr1) == DDR_A (ddr2) && DDR_B (ddr1) == DDR_B (ddr2));
}

/* The loop (nest) to be distributed.  */
static vec<loop_p> loop_nest;

/* Vector of data references in the loop to be distributed.  */
static vec<data_reference_p> datarefs_vec;

/* True if there is a nonaddressable data reference in the above vector.  */
static bool has_nonaddressable_dataref_p;

/* Store index of data reference in aux field.  */
#define DR_INDEX(dr)      ((uintptr_t) (dr)->aux)

/* Hash table for data dependence relation in the loop to be distributed.  */
static hash_table<ddr_hasher> *ddrs_table;

/* A Reduced Dependence Graph (RDG) vertex representing a statement.  */
struct rdg_vertex
{
  /* The statement represented by this vertex.  */
  gimple *stmt;

  /* Vector of data-references in this statement.  */
  vec<data_reference_p> datarefs;

  /* True when the statement contains a write to memory.  */
  bool has_mem_write;

  /* True when the statement contains a read from memory.  */
  bool has_mem_reads;
};

#define RDGV_STMT(V)		     ((struct rdg_vertex *) ((V)->data))->stmt
#define RDGV_DATAREFS(V)	     ((struct rdg_vertex *) ((V)->data))->datarefs
#define RDGV_HAS_MEM_WRITE(V)	     ((struct rdg_vertex *) ((V)->data))->has_mem_write
#define RDGV_HAS_MEM_READS(V)	     ((struct rdg_vertex *) ((V)->data))->has_mem_reads
#define RDG_STMT(RDG, I)	     RDGV_STMT (&(RDG->vertices[I]))
#define RDG_DATAREFS(RDG, I)	     RDGV_DATAREFS (&(RDG->vertices[I]))
#define RDG_MEM_WRITE_STMT(RDG, I)   RDGV_HAS_MEM_WRITE (&(RDG->vertices[I]))
#define RDG_MEM_READS_STMT(RDG, I)   RDGV_HAS_MEM_READS (&(RDG->vertices[I]))

/* Data dependence type.  */

enum rdg_dep_type
{
  /* Read After Write (RAW).  */
  flow_dd = 'f',

  /* Control dependence (execute conditional on).  */
  control_dd = 'c'
};

/* Dependence information attached to an edge of the RDG.  */

struct rdg_edge
{
  /* Type of the dependence.  */
  enum rdg_dep_type type;
};

#define RDGE_TYPE(E)        ((struct rdg_edge *) ((E)->data))->type

/* Dump vertex I in RDG to FILE.  */

static void
dump_rdg_vertex (FILE *file, struct graph *rdg, int i)
{
  struct vertex *v = &(rdg->vertices[i]);
  struct graph_edge *e;

  fprintf (file, "(vertex %d: (%s%s) (in:", i,
	   RDG_MEM_WRITE_STMT (rdg, i) ? "w" : "",
	   RDG_MEM_READS_STMT (rdg, i) ? "r" : "");

  if (v->pred)
    for (e = v->pred; e; e = e->pred_next)
      fprintf (file, " %d", e->src);

  fprintf (file, ") (out:");

  if (v->succ)
    for (e = v->succ; e; e = e->succ_next)
      fprintf (file, " %d", e->dest);

  fprintf (file, ")\n");
  print_gimple_stmt (file, RDGV_STMT (v), 0, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, ")\n");
}

/* Call dump_rdg_vertex on stderr.  */

DEBUG_FUNCTION void
debug_rdg_vertex (struct graph *rdg, int i)
{
  dump_rdg_vertex (stderr, rdg, i);
}

/* Dump the reduced dependence graph RDG to FILE.  */

static void
dump_rdg (FILE *file, struct graph *rdg)
{
  fprintf (file, "(rdg\n");
  for (int i = 0; i < rdg->n_vertices; i++)
    dump_rdg_vertex (file, rdg, i);
  fprintf (file, ")\n");
}

/* Call dump_rdg on stderr.  */

DEBUG_FUNCTION void
debug_rdg (struct graph *rdg)
{
  dump_rdg (stderr, rdg);
}

static void
dot_rdg_1 (FILE *file, struct graph *rdg)
{
  int i;
  pretty_printer buffer;
  pp_needs_newline (&buffer) = false;
  buffer.buffer->stream = file;

  fprintf (file, "digraph RDG {\n");

  for (i = 0; i < rdg->n_vertices; i++)
    {
      struct vertex *v = &(rdg->vertices[i]);
      struct graph_edge *e;

      fprintf (file, "%d [label=\"[%d] ", i, i);
      pp_gimple_stmt_1 (&buffer, RDGV_STMT (v), 0, TDF_SLIM);
      pp_flush (&buffer);
      fprintf (file, "\"]\n");

      /* Highlight reads from memory.  */
      if (RDG_MEM_READS_STMT (rdg, i))
	fprintf (file, "%d [style=filled, fillcolor=green]\n", i);

      /* Highlight stores to memory.  */
      if (RDG_MEM_WRITE_STMT (rdg, i))
	fprintf (file, "%d [style=filled, fillcolor=red]\n", i);

      if (v->succ)
	for (e = v->succ; e; e = e->succ_next)
	  switch (RDGE_TYPE (e))
	    {
	    case flow_dd:
	      /* These are the most common dependences: don't print these.  */
	      fprintf (file, "%d -> %d \n", i, e->dest);
	      break;

	    case control_dd:
	      fprintf (file, "%d -> %d [label=control] \n", i, e->dest);
	      break;

	    default:
	      gcc_unreachable ();
	    }
    }

  fprintf (file, "}\n\n");
}

/* Display the Reduced Dependence Graph using dotty.  */

DEBUG_FUNCTION void
dot_rdg (struct graph *rdg)
{
  /* When debugging, you may want to enable the following code.  */
#ifdef HAVE_POPEN
  FILE *file = popen ("dot -Tx11", "w");
  if (!file)
    return;
  dot_rdg_1 (file, rdg);
  fflush (file);
  close (fileno (file));
  pclose (file);
#else
  dot_rdg_1 (stderr, rdg);
#endif
}

/* Returns the index of STMT in RDG.  */

static int
rdg_vertex_for_stmt (struct graph *rdg ATTRIBUTE_UNUSED, gimple *stmt)
{
  int index = gimple_uid (stmt);
  gcc_checking_assert (index == -1 || RDG_STMT (rdg, index) == stmt);
  return index;
}

/* Creates dependence edges in RDG for all the uses of DEF.  IDEF is
   the index of DEF in RDG.  */

static void
create_rdg_edges_for_scalar (struct graph *rdg, tree def, int idef)
{
  use_operand_p imm_use_p;
  imm_use_iterator iterator;

  FOR_EACH_IMM_USE_FAST (imm_use_p, iterator, def)
    {
      struct graph_edge *e;
      int use = rdg_vertex_for_stmt (rdg, USE_STMT (imm_use_p));

      if (use < 0)
	continue;

      e = add_edge (rdg, idef, use);
      e->data = XNEW (struct rdg_edge);
      RDGE_TYPE (e) = flow_dd;
    }
}

/* Creates an edge for the control dependences of BB to the vertex V.  */

static void
create_edge_for_control_dependence (struct graph *rdg, basic_block bb,
				    int v, control_dependences *cd)
{
  bitmap_iterator bi;
  unsigned edge_n;
  EXECUTE_IF_SET_IN_BITMAP (cd->get_edges_dependent_on (bb->index),
			    0, edge_n, bi)
    {
      basic_block cond_bb = cd->get_edge_src (edge_n);
      gimple *stmt = last_stmt (cond_bb);
      if (stmt && is_ctrl_stmt (stmt))
	{
	  struct graph_edge *e;
	  int c = rdg_vertex_for_stmt (rdg, stmt);
	  if (c < 0)
	    continue;

	  e = add_edge (rdg, c, v);
	  e->data = XNEW (struct rdg_edge);
	  RDGE_TYPE (e) = control_dd;
	}
    }
}

/* Creates the flow dependence edges of the reduced dependence graph RDG.  */

static void
create_rdg_flow_edges (struct graph *rdg)
{
  int i;
  def_operand_p def_p;
  ssa_op_iter iter;

  for (i = 0; i < rdg->n_vertices; i++)
    FOR_EACH_PHI_OR_STMT_DEF (def_p, RDG_STMT (rdg, i),
			      iter, SSA_OP_DEF)
      create_rdg_edges_for_scalar (rdg, DEF_FROM_PTR (def_p), i);
}

/* Creates the control dependence edges of the reduced dependence graph
   RDG for LOOP.  */

static void
create_rdg_cd_edges (struct graph *rdg, control_dependences *cd, loop_p loop)
{
  int i;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      gimple *stmt = RDG_STMT (rdg, i);
      if (gimple_code (stmt) == GIMPLE_PHI)
	{
	  edge_iterator ei;
	  edge e;
	  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
	    if (flow_bb_inside_loop_p (loop, e->src))
	      create_edge_for_control_dependence (rdg, e->src, i, cd);
	}
      else
	create_edge_for_control_dependence (rdg, gimple_bb (stmt), i, cd);
    }
}
/* Build the vertices of the reduced dependence graph RDG.  Return false
   if that failed.  */

static bool
create_rdg_vertices (struct graph *rdg, vec<gimple *> stmts, loop_p loop)
{
  int i;
  gimple *stmt;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      struct vertex *v = &(rdg->vertices[i]);

      /* Record statement to vertex mapping.  */
      gimple_set_uid (stmt, i);

      v->data = XNEW (struct rdg_vertex);
      RDGV_STMT (v) = stmt;
      RDGV_DATAREFS (v).create (0);
      RDGV_HAS_MEM_WRITE (v) = false;
      RDGV_HAS_MEM_READS (v) = false;
      if (gimple_code (stmt) == GIMPLE_PHI)
	continue;

      unsigned drp = datarefs_vec.length ();
      if (!find_data_references_in_stmt (loop, stmt, &datarefs_vec))
	return false;
      for (unsigned j = drp; j < datarefs_vec.length (); ++j)
	{
	  data_reference_p dr = datarefs_vec[j];
	  if (DR_IS_READ (dr))
	    RDGV_HAS_MEM_READS (v) = true;
	  else
	    RDGV_HAS_MEM_WRITE (v) = true;
	  RDGV_DATAREFS (v).safe_push (dr);
	  has_nonaddressable_dataref_p |= may_be_nonaddressable_p (dr->ref);
	}
    }
  return true;
}

/* Array mapping basic block's index to its topological order.  */
static int *bb_top_order_index;
/* And size of the array.  */
static int bb_top_order_index_size;

/* If X has a smaller topological sort number than Y, returns -1;
   if greater, returns 1.  */

static int
bb_top_order_cmp (const void *x, const void *y)
{
  basic_block bb1 = *(const basic_block *) x;
  basic_block bb2 = *(const basic_block *) y;

  gcc_assert (bb1->index < bb_top_order_index_size
	      && bb2->index < bb_top_order_index_size);
  gcc_assert (bb1 == bb2
	      || bb_top_order_index[bb1->index]
		 != bb_top_order_index[bb2->index]);

  return (bb_top_order_index[bb1->index] - bb_top_order_index[bb2->index]);
}

/* Initialize STMTS with all the statements of LOOP.  We use topological
   order to discover all statements.  The order is important because
   generate_loops_for_partition is using the same traversal for identifying
   statements in loop copies.  */

static void
stmts_from_loop (struct loop *loop, vec<gimple *> *stmts)
{
  unsigned int i;
  basic_block *bbs = get_loop_body_in_custom_order (loop, bb_top_order_cmp);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
	   gsi_next (&bsi))
	if (!virtual_operand_p (gimple_phi_result (bsi.phi ())))
	  stmts->safe_push (bsi.phi ());

      for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
	   gsi_next (&bsi))
	{
	  gimple *stmt = gsi_stmt (bsi);
	  if (gimple_code (stmt) != GIMPLE_LABEL && !is_gimple_debug (stmt))
	    stmts->safe_push (stmt);
	}
    }

  free (bbs);
}

/* Free the reduced dependence graph RDG.  */

static void
free_rdg (struct graph *rdg)
{
  int i;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      struct vertex *v = &(rdg->vertices[i]);
      struct graph_edge *e;

      for (e = v->succ; e; e = e->succ_next)
	free (e->data);

      if (v->data)
	{
	  gimple_set_uid (RDGV_STMT (v), -1);
	  (RDGV_DATAREFS (v)).release ();
	  free (v->data);
	}
    }

  free_graph (rdg);
}

/* Build the Reduced Dependence Graph (RDG) with one vertex per statement of
   LOOP, and one edge per flow dependence or control dependence from control
   dependence CD.  While visiting each statement, data references are also
   collected and recorded in the global vector DATAREFS_VEC.  */

static struct graph *
build_rdg (struct loop *loop, control_dependences *cd)
{
  struct graph *rdg;

  /* Create the RDG vertices from the stmts of the loop nest.  */
  auto_vec<gimple *, 10> stmts;
  stmts_from_loop (loop, &stmts);
  rdg = new_graph (stmts.length ());
  if (!create_rdg_vertices (rdg, stmts, loop))
    {
      free_rdg (rdg);
      return NULL;
    }
  stmts.release ();

  create_rdg_flow_edges (rdg);
  if (cd)
    create_rdg_cd_edges (rdg, cd, loop);

  return rdg;
}


/* Kind of distributed loop.  */
enum partition_kind {
    PKIND_NORMAL,
    /* Partial memset stands for a partition that can be distributed into
       a loop of memset calls, rather than a single memset call.  It's
       handled just like a normal partition, i.e., distributed as a
       separate loop; no memset call is generated.

       Note: This is a hackish fix trying to distribute a ZERO-ing stmt
       in a loop nest as deeply as possible.  As a result, parloop
       achieves better parallelization by parallelizing a deeper loop
       nest.  This hack should be unnecessary and removed once distributed
       memset can be understood and analyzed in data reference analysis.
       See PR82604 for more.  */
    PKIND_PARTIAL_MEMSET,
    PKIND_MEMSET, PKIND_MEMCPY, PKIND_MEMMOVE
};
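
/* For instance (array a and bounds n, m are placeholder names, and the
   row length of a is assumed to exceed m), in

     |for (i = 0; i < n; i++)
     |  for (j = 0; j < m; j++)
     |    a[i][j] = 0;

   the stores are consecutive within the innermost loop but not across the
   whole nest, so the ZERO-ing statement can't become one memset call for
   the nest.  It is classified as PKIND_PARTIAL_MEMSET instead: it could
   be a loop of memset calls, but it is simply distributed as a separate
   loop with no memset generated.  */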

/* Type of distributed loop.  */
enum partition_type {
    /* The distributed loop can be executed in parallel.  */
    PTYPE_PARALLEL = 0,
    /* The distributed loop has to be executed sequentially.  */
    PTYPE_SEQUENTIAL
};

/* Builtin info for loop distribution.  */
struct builtin_info
{
  /* data-references a kind != PKIND_NORMAL partition is about.  */
  data_reference_p dst_dr;
  data_reference_p src_dr;
  /* Base address and size of memory objects operated by the builtin.  Note
     both dest and source memory objects must have the same size.  */
  tree dst_base;
  tree src_base;
  tree size;
  /* Base and offset part of dst_base after stripping constant offset.  This
     is only used in memset builtin distribution for now.  */
  tree dst_base_base;
  unsigned HOST_WIDE_INT dst_base_offset;
};
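
/* As a sketch for the memset case (reusing the placeholder names from the
   illustration near the top of the file): for "a[i] = 0" with 0 <= i < n,
   dst_dr refers to the store, dst_base is roughly &a[0] and size is
   n * sizeof (*a), while src_dr and src_base stay NULL because memset has
   no source memory object.  */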

/* Partition for loop distribution.  */
struct partition
{
  /* Statements of the partition.  */
  bitmap stmts;
  /* True if the partition defines a variable which is used outside of
     the loop.  */
  bool reduction_p;
  enum partition_kind kind;
  enum partition_type type;
  /* Data references in the partition.  */
  bitmap datarefs;
  /* Information of builtin partition.  */
  struct builtin_info *builtin;
};


/* Allocate and initialize a partition.  */

static partition *
partition_alloc (void)
{
  partition *partition = XCNEW (struct partition);
  partition->stmts = BITMAP_ALLOC (NULL);
  partition->reduction_p = false;
  partition->kind = PKIND_NORMAL;
  partition->datarefs = BITMAP_ALLOC (NULL);
  return partition;
}

/* Free PARTITION.  */

static void
partition_free (partition *partition)
{
  BITMAP_FREE (partition->stmts);
  BITMAP_FREE (partition->datarefs);
  if (partition->builtin)
    free (partition->builtin);

  free (partition);
}

/* Returns true if the partition can be generated as a builtin.  */

static bool
partition_builtin_p (partition *partition)
{
  return partition->kind > PKIND_PARTIAL_MEMSET;
}

/* Returns true if the partition contains a reduction.  */

static bool
partition_reduction_p (partition *partition)
{
  return partition->reduction_p;
}

/* Partitions are fused for different reasons.  */
enum fuse_type
{
  FUSE_NON_BUILTIN = 0,
  FUSE_REDUCTION = 1,
  FUSE_SHARE_REF = 2,
  FUSE_SAME_SCC = 3,
  FUSE_FINALIZE = 4
};

/* Descriptions of the different fusing reasons.  */
static const char *fuse_message[] = {
  "they are non-builtins",
  "they have reductions",
  "they have shared memory refs",
  "they are in the same dependence scc",
  "there is no point to distribute loop"};

static void
update_type_for_merge (struct graph *, partition *, partition *);

/* Merge PARTITION into the partition DEST.  RDG is the reduced dependence
   graph, and we update the type of the result partition if RDG is
   non-NULL.  */

static void
partition_merge_into (struct graph *rdg, partition *dest,
		      partition *partition, enum fuse_type ft)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Fuse partitions because %s:\n", fuse_message[ft]);
      fprintf (dump_file, "  Part 1: ");
      dump_bitmap (dump_file, dest->stmts);
      fprintf (dump_file, "  Part 2: ");
      dump_bitmap (dump_file, partition->stmts);
    }

  dest->kind = PKIND_NORMAL;
  if (dest->type == PTYPE_PARALLEL)
    dest->type = partition->type;

  bitmap_ior_into (dest->stmts, partition->stmts);
  if (partition_reduction_p (partition))
    dest->reduction_p = true;

  /* Further check if any data dependence prevents us from executing the
     new partition in parallel.  */
  if (dest->type == PTYPE_PARALLEL && rdg != NULL)
    update_type_for_merge (rdg, dest, partition);

  bitmap_ior_into (dest->datarefs, partition->datarefs);
}

/* Returns true when DEF is an SSA_NAME defined in LOOP and used after
   the LOOP.  */

static bool
ssa_name_has_uses_outside_loop_p (tree def, loop_p loop)
{
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
    {
      if (is_gimple_debug (USE_STMT (use_p)))
	continue;

      basic_block use_bb = gimple_bb (USE_STMT (use_p));
      if (!flow_bb_inside_loop_p (loop, use_bb))
	return true;
    }

  return false;
}

/* Returns true when STMT defines a scalar variable used after the
   loop LOOP.  */

static bool
stmt_has_scalar_dependences_outside_loop (loop_p loop, gimple *stmt)
{
  def_operand_p def_p;
  ssa_op_iter op_iter;

  if (gimple_code (stmt) == GIMPLE_PHI)
    return ssa_name_has_uses_outside_loop_p (gimple_phi_result (stmt), loop);

  FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
    if (ssa_name_has_uses_outside_loop_p (DEF_FROM_PTR (def_p), loop))
      return true;

  return false;
}

/* Return a copy of LOOP placed before LOOP.  */

static struct loop *
copy_loop_before (struct loop *loop)
{
  struct loop *res;
  edge preheader = loop_preheader_edge (loop);

  initialize_original_copy_tables ();
  res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, NULL, preheader);
  gcc_assert (res != NULL);
  free_original_copy_tables ();
  delete_update_ssa ();

  return res;
}

/* Creates an empty basic block after LOOP.  */

static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}

/* Generate code for PARTITION from the code in LOOP.  The loop is
   copied when COPY_P is true.  All the statements not flagged in the
   PARTITION bitmap are removed from the loop or from its copy.  The
   statements are indexed in sequence inside a basic block, and the
   basic blocks of a loop are taken in dom order.  */

static void
generate_loops_for_partition (struct loop *loop, partition *partition,
			      bool copy_p)
{
  unsigned i;
  basic_block *bbs;

  if (copy_p)
    {
      int orig_loop_num = loop->orig_loop_num;
      loop = copy_loop_before (loop);
      gcc_assert (loop != NULL);
      loop->orig_loop_num = orig_loop_num;
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }
  else
    {
      /* Origin number is set to the new versioned loop's num.  */
      gcc_assert (loop->orig_loop_num != loop->num);
    }

  /* Remove stmts not in the PARTITION bitmap.  */
  bbs = get_loop_body_in_dom_order (loop);

  if (MAY_HAVE_DEBUG_BIND_STMTS)
    for (i = 0; i < loop->num_nodes; i++)
      {
	basic_block bb = bbs[i];

	for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
	     gsi_next (&bsi))
	  {
	    gphi *phi = bsi.phi ();
	    if (!virtual_operand_p (gimple_phi_result (phi))
		&& !bitmap_bit_p (partition->stmts, gimple_uid (phi)))
	      reset_debug_uses (phi);
	  }

	for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
	     gsi_next (&bsi))
	  {
	    gimple *stmt = gsi_stmt (bsi);
	    if (gimple_code (stmt) != GIMPLE_LABEL
		&& !is_gimple_debug (stmt)
		&& !bitmap_bit_p (partition->stmts, gimple_uid (stmt)))
	      reset_debug_uses (stmt);
	  }
      }

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      edge inner_exit = NULL;

      if (loop != bb->loop_father)
	inner_exit = single_exit (bb->loop_father);

      for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
	{
	  gphi *phi = bsi.phi ();
	  if (!virtual_operand_p (gimple_phi_result (phi))
	      && !bitmap_bit_p (partition->stmts, gimple_uid (phi)))
	    remove_phi_node (&bsi, true);
	  else
	    gsi_next (&bsi);
	}

      for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
	{
	  gimple *stmt = gsi_stmt (bsi);
	  if (gimple_code (stmt) != GIMPLE_LABEL
	      && !is_gimple_debug (stmt)
	      && !bitmap_bit_p (partition->stmts, gimple_uid (stmt)))
	    {
	      /* In distribution of loop nest, if bb is inner loop's exit_bb,
		 we choose its exit edge/path in order to avoid generating
		 infinite loop.  For all other cases, we choose an arbitrary
		 path through the empty CFG part that this unnecessary
		 control stmt controls.  */
	      if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
		{
		  if (inner_exit && inner_exit->flags & EDGE_TRUE_VALUE)
		    gimple_cond_make_true (cond_stmt);
		  else
		    gimple_cond_make_false (cond_stmt);
		  update_stmt (stmt);
		}
	      else if (gimple_code (stmt) == GIMPLE_SWITCH)
		{
		  gswitch *switch_stmt = as_a <gswitch *> (stmt);
		  gimple_switch_set_index
		    (switch_stmt,
		     CASE_LOW (gimple_switch_label (switch_stmt, 1)));
		  update_stmt (stmt);
		}
	      else
		{
		  unlink_stmt_vdef (stmt);
		  gsi_remove (&bsi, true);
		  release_defs (stmt);
		  continue;
		}
	    }
	  gsi_next (&bsi);
	}
    }

  free (bbs);
}

/* If VAL memory representation contains the same value in all bytes,
   return that value, otherwise return -1.
   E.g. for 0x24242424 return 0x24, for IEEE double
   747708026454360457216.0 return 0x44, etc.  */

static int
const_with_all_bytes_same (tree val)
{
  unsigned char buf[64];
  int i, len;

  if (integer_zerop (val)
      || (TREE_CODE (val) == CONSTRUCTOR
	  && !TREE_CLOBBER_P (val)
	  && CONSTRUCTOR_NELTS (val) == 0))
    return 0;

  if (real_zerop (val))
    {
      /* Only return 0 for +0.0, not for -0.0, which doesn't have
	 an all bytes same memory representation.  Don't transform
	 -0.0 stores into +0.0 even for !HONOR_SIGNED_ZEROS.  */
      switch (TREE_CODE (val))
	{
	case REAL_CST:
	  if (!real_isneg (TREE_REAL_CST_PTR (val)))
	    return 0;
	  break;
	case COMPLEX_CST:
	  if (!const_with_all_bytes_same (TREE_REALPART (val))
	      && !const_with_all_bytes_same (TREE_IMAGPART (val)))
	    return 0;
	  break;
	case VECTOR_CST:
	  {
	    unsigned int count = vector_cst_encoded_nelts (val);
	    unsigned int j;
	    for (j = 0; j < count; ++j)
	      if (const_with_all_bytes_same (VECTOR_CST_ENCODED_ELT (val, j)))
		break;
	    if (j == count)
	      return 0;
	    break;
	  }
	default:
	  break;
	}
    }

  if (CHAR_BIT != 8 || BITS_PER_UNIT != 8)
    return -1;

  len = native_encode_expr (val, buf, sizeof (buf));
  if (len == 0)
    return -1;
  for (i = 1; i < len; i++)
    if (buf[i] != buf[0])
      return -1;
  return buf[0];
}

/* Generate a call to memset for PARTITION in LOOP.  */

static void
generate_memset_builtin (struct loop *loop, partition *partition)
{
  gimple_stmt_iterator gsi;
  tree mem, fn, nb_bytes;
  tree val;
  struct builtin_info *builtin = partition->builtin;
  gimple *fn_call;

  /* The new statements will be placed before LOOP.  */
  gsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  nb_bytes = rewrite_to_non_trapping_overflow (builtin->size);
  nb_bytes = force_gimple_operand_gsi (&gsi, nb_bytes, true, NULL_TREE,
				       false, GSI_CONTINUE_LINKING);
  mem = builtin->dst_base;
  mem = force_gimple_operand_gsi (&gsi, mem, true, NULL_TREE,
				  false, GSI_CONTINUE_LINKING);

  /* This exactly matches the pattern recognition in classify_partition.  */
  val = gimple_assign_rhs1 (DR_STMT (builtin->dst_dr));
  /* Handle constants like 0x15151515 and similarly
     floating point constants etc. where all bytes are the same.  */
  int bytev = const_with_all_bytes_same (val);
  if (bytev != -1)
    val = build_int_cst (integer_type_node, bytev);
  else if (TREE_CODE (val) == INTEGER_CST)
    val = fold_convert (integer_type_node, val);
  else if (!useless_type_conversion_p (integer_type_node, TREE_TYPE (val)))
    {
      tree tem = make_ssa_name (integer_type_node);
      gimple *cstmt = gimple_build_assign (tem, NOP_EXPR, val);
      gsi_insert_after (&gsi, cstmt, GSI_CONTINUE_LINKING);
      val = tem;
    }

  fn = build_fold_addr_expr (builtin_decl_implicit (BUILT_IN_MEMSET));
  fn_call = gimple_build_call (fn, 3, mem, val, nb_bytes);
  gsi_insert_after (&gsi, fn_call, GSI_CONTINUE_LINKING);
  fold_stmt (&gsi);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "generated memset");
      if (bytev == 0)
	fprintf (dump_file, " zero\n");
      else
	fprintf (dump_file, "\n");
    }
}

/* Generate a call to memcpy for PARTITION in LOOP.  */

static void
generate_memcpy_builtin (struct loop *loop, partition *partition)
{
  gimple_stmt_iterator gsi;
  gimple *fn_call;
  tree dest, src, fn, nb_bytes;
  enum built_in_function kind;
  struct builtin_info *builtin = partition->builtin;

  /* The new statements will be placed before LOOP.  */
  gsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  nb_bytes = rewrite_to_non_trapping_overflow (builtin->size);
  nb_bytes = force_gimple_operand_gsi (&gsi, nb_bytes, true, NULL_TREE,
				       false, GSI_CONTINUE_LINKING);
  dest = builtin->dst_base;
  src = builtin->src_base;
  if (partition->kind == PKIND_MEMCPY
      || ! ptr_derefs_may_alias_p (dest, src))
    kind = BUILT_IN_MEMCPY;
  else
    kind = BUILT_IN_MEMMOVE;

  dest = force_gimple_operand_gsi (&gsi, dest, true, NULL_TREE,
				   false, GSI_CONTINUE_LINKING);
  src = force_gimple_operand_gsi (&gsi, src, true, NULL_TREE,
				  false, GSI_CONTINUE_LINKING);
  fn = build_fold_addr_expr (builtin_decl_implicit (kind));
  fn_call = gimple_build_call (fn, 3, dest, src, nb_bytes);
  gsi_insert_after (&gsi, fn_call, GSI_CONTINUE_LINKING);
  fold_stmt (&gsi);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (kind == BUILT_IN_MEMCPY)
	fprintf (dump_file, "generated memcpy\n");
      else
	fprintf (dump_file, "generated memmove\n");
    }
}

/* Remove and destroy the loop LOOP.  */

static void
destroy_loop (struct loop *loop)
{
  unsigned nbbs = loop->num_nodes;
  edge exit = single_exit (loop);
  basic_block src = loop_preheader_edge (loop)->src, dest = exit->dest;
  basic_block *bbs;
  unsigned i;

  bbs = get_loop_body_in_dom_order (loop);

  gimple_stmt_iterator dst_gsi = gsi_after_labels (exit->dest);
  bool safe_p = single_pred_p (exit->dest);
  i = nbbs;
  do
    {
      /* We have made sure to not leave any dangling uses of SSA
	 names defined in the loop.  With the exception of virtuals.
	 Make sure we replace all uses of virtual defs that will remain
	 outside of the loop with the bare symbol as delete_basic_block
	 will release them.  */
      --i;
      for (gphi_iterator gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    mark_virtual_phi_result_for_renaming (phi);
	}
      for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi);)
	{
	  gimple *stmt = gsi_stmt (gsi);
	  tree vdef = gimple_vdef (stmt);
	  if (vdef && TREE_CODE (vdef) == SSA_NAME)
	    mark_virtual_operand_for_renaming (vdef);
	  /* Also move and eventually reset debug stmts.  We can leave
	     constant values in place in case the stmt dominates the exit.
	     ??? Non-constant values from the last iteration can be
	     replaced with final values if we can compute them.  */
	  if (gimple_debug_bind_p (stmt))
	    {
	      tree val = gimple_debug_bind_get_value (stmt);
	      gsi_move_before (&gsi, &dst_gsi);
	      if (val
		  && (!safe_p
		      || !is_gimple_min_invariant (val)
		      || !dominated_by_p (CDI_DOMINATORS, exit->src, bbs[i])))
		{
		  gimple_debug_bind_reset_value (stmt);
		  update_stmt (stmt);
		}
	    }
	  else
	    gsi_next (&gsi);
	}
    }
  while (i != 0);

  redirect_edge_pred (exit, src);
  exit->flags &= ~(EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
  exit->flags |= EDGE_FALLTHRU;
  cancel_loop_tree (loop);
  rescan_loop_exit (exit, false, true);

  i = nbbs;
  do
    {
      --i;
      delete_basic_block (bbs[i]);
    }
  while (i != 0);

  free (bbs);

  set_immediate_dominator (CDI_DOMINATORS, dest,
			   recompute_dominator (CDI_DOMINATORS, dest));
}

/* Generates code for PARTITION.  Return whether LOOP needs to be destroyed.  */

static bool
generate_code_for_partition (struct loop *loop,
			     partition *partition, bool copy_p)
{
  switch (partition->kind)
    {
    case PKIND_NORMAL:
    case PKIND_PARTIAL_MEMSET:
      /* Reductions all have to be in the last partition.  */
      gcc_assert (!partition_reduction_p (partition)
		  || !copy_p);
      generate_loops_for_partition (loop, partition, copy_p);
      return false;

    case PKIND_MEMSET:
      generate_memset_builtin (loop, partition);
      break;

    case PKIND_MEMCPY:
    case PKIND_MEMMOVE:
      generate_memcpy_builtin (loop, partition);
      break;

    default:
      gcc_unreachable ();
    }

  /* Common tail for partitions we turn into a call.  If this was the last
     partition for which we generate code, we have to destroy the loop.  */
  if (!copy_p)
    return true;
  return false;
}

/* Return the data dependence relation for data references A and B.  The
   two data references must be in lexicographic order with respect to
   reduced dependence graph RDG.  We first try to find the ddr in the
   global ddr hash table.  If it doesn't exist, compute the ddr and cache
   it.  */

static data_dependence_relation *
get_data_dependence (struct graph *rdg, data_reference_p a, data_reference_p b)
{
  struct data_dependence_relation ent, **slot;
  struct data_dependence_relation *ddr;

  gcc_assert (DR_IS_WRITE (a) || DR_IS_WRITE (b));
  gcc_assert (rdg_vertex_for_stmt (rdg, DR_STMT (a))
	      <= rdg_vertex_for_stmt (rdg, DR_STMT (b)));
  ent.a = a;
  ent.b = b;
  slot = ddrs_table->find_slot (&ent, INSERT);
  if (*slot == NULL)
    {
      ddr = initialize_data_dependence_relation (a, b, loop_nest);
      compute_affine_dependence (ddr, loop_nest[0]);
      *slot = ddr;
    }

  return *slot;
}

/* In reduced dependence graph RDG for loop distribution, return true if
   the dependence between references DR1 and DR2 leads to a dependence
   cycle and such dependence cycle can't be resolved by a runtime alias
   check.  */

static bool
data_dep_in_cycle_p (struct graph *rdg,
		     data_reference_p dr1, data_reference_p dr2)
{
  struct data_dependence_relation *ddr;

  /* Re-shuffle data-refs to be in topological order.  */
  if (rdg_vertex_for_stmt (rdg, DR_STMT (dr1))
      > rdg_vertex_for_stmt (rdg, DR_STMT (dr2)))
    std::swap (dr1, dr2);

  ddr = get_data_dependence (rdg, dr1, dr2);

  /* In case of no data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;
  /* For an unknown data dependence, or a known data dependence which
     can't be expressed in a classic distance vector, we check if it can
     be resolved by a runtime alias check.  If so, we consider the
     dependence as one that won't introduce a data dependence cycle.  */
  else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know
	   || DDR_NUM_DIST_VECTS (ddr) == 0)
    return !runtime_alias_check_p (ddr, NULL, true);
  else if (DDR_NUM_DIST_VECTS (ddr) > 1)
    return true;
  else if (DDR_REVERSED_P (ddr)
	   || lambda_vector_zerop (DDR_DIST_VECT (ddr, 0), 1))
    return false;

  return true;
}
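
/* A sketch of the distance-vector checks above, with array a as a
   placeholder: the self dependence of "a[i] = a[i] + 1" has distance
   vector (0) and doesn't create a cycle, whereas "a[i] = a[i-1] + 1"
   has distance (1); a partition containing such a cycle is marked
   PTYPE_SEQUENTIAL by update_type_for_merge below.  */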

/* Given reduced dependence graph RDG, PARTITION1 and PARTITION2, update
   PARTITION1's type after merging PARTITION2 into PARTITION1.  */

static void
update_type_for_merge (struct graph *rdg,
		       partition *partition1, partition *partition2)
{
  unsigned i, j;
  bitmap_iterator bi, bj;
  data_reference_p dr1, dr2;

  EXECUTE_IF_SET_IN_BITMAP (partition1->datarefs, 0, i, bi)
    {
      unsigned start = (partition1 == partition2) ? i + 1 : 0;

      dr1 = datarefs_vec[i];
      EXECUTE_IF_SET_IN_BITMAP (partition2->datarefs, start, j, bj)
	{
	  dr2 = datarefs_vec[j];
	  if (DR_IS_READ (dr1) && DR_IS_READ (dr2))
	    continue;

	  /* Partition can only be executed sequentially if there is any
	     data dependence cycle.  */
	  if (data_dep_in_cycle_p (rdg, dr1, dr2))
	    {
	      partition1->type = PTYPE_SEQUENTIAL;
	      return;
	    }
	}
    }
}

/* Returns a partition with all the statements needed for computing
   the vertex V of the RDG, also including the loop exit conditions.  */

static partition *
build_rdg_partition_for_vertex (struct graph *rdg, int v)
{
  partition *partition = partition_alloc ();
  auto_vec<int, 3> nodes;
  unsigned i, j;
  int x;
  data_reference_p dr;

  graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

  FOR_EACH_VEC_ELT (nodes, i, x)
    {
      bitmap_set_bit (partition->stmts, x);

      for (j = 0; RDG_DATAREFS (rdg, x).iterate (j, &dr); ++j)
	{
	  unsigned idx = (unsigned) DR_INDEX (dr);
	  gcc_assert (idx < datarefs_vec.length ());

	  /* Partition can only be executed sequentially if there is any
	     unknown data reference.  */
	  if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr)
	      || !DR_INIT (dr) || !DR_STEP (dr))
	    partition->type = PTYPE_SEQUENTIAL;

	  bitmap_set_bit (partition->datarefs, idx);
	}
    }

  if (partition->type == PTYPE_SEQUENTIAL)
    return partition;

  /* Further check if any data dependence prevents us from executing the
     partition in parallel.  */
  update_type_for_merge (rdg, partition, partition);

  return partition;
}
85aa9ed6 BC |
1342 | /* Given PARTITION of LOOP and RDG, record single load/store data references |
1343 | for builtin partition in SRC_DR/DST_DR, return false if there is no such | |
939cf90f | 1344 | data references. */ |
cfee318d | 1345 | |
939cf90f | 1346 | static bool |
85aa9ed6 | 1347 | find_single_drs (struct loop *loop, struct graph *rdg, partition *partition, |
939cf90f | 1348 | data_reference_p *dst_dr, data_reference_p *src_dr) |
cfee318d | 1349 | { |
30d55936 | 1350 | unsigned i; |
939cf90f BC |
1351 | data_reference_p single_ld = NULL, single_st = NULL; |
1352 | bitmap_iterator bi; | |
b9fc0497 | 1353 | |
30d55936 RG |
1354 | EXECUTE_IF_SET_IN_BITMAP (partition->stmts, 0, i, bi) |
1355 | { | |
355fe088 | 1356 | gimple *stmt = RDG_STMT (rdg, i); |
d0582dc1 | 1357 | data_reference_p dr; |
30d55936 RG |
1358 | |
1359 | if (gimple_code (stmt) == GIMPLE_PHI) | |
1360 | continue; | |
1361 | ||
1362 | /* Any scalar stmts are ok. */ | |
1363 | if (!gimple_vuse (stmt)) | |
1364 | continue; | |
1365 | ||
d0582dc1 RG |
1366 | /* Otherwise just regular loads/stores. */ |
1367 | if (!gimple_assign_single_p (stmt)) | |
939cf90f | 1368 | return false; |
d0582dc1 RG |
1369 | |
1370 | /* But exactly one store and/or load. */ | |
939cf90f | 1371 | for (unsigned j = 0; RDG_DATAREFS (rdg, i).iterate (j, &dr); ++j) |
30d55936 | 1372 | { |
d002d099 JJ |
1373 | tree type = TREE_TYPE (DR_REF (dr)); |
1374 | ||
1375 | /* The memset, memcpy and memmove library calls are only | |
1376 | able to deal with generic address space. */ | |
1377 | if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type))) | |
939cf90f | 1378 | return false; |
d002d099 | 1379 | |
d0582dc1 RG |
1380 | if (DR_IS_READ (dr)) |
1381 | { | |
939cf90f BC |
1382 | if (single_ld != NULL) |
1383 | return false; | |
1384 | single_ld = dr; | |
d0582dc1 RG |
1385 | } |
1386 | else | |
1387 | { | |
939cf90f BC |
1388 | if (single_st != NULL) |
1389 | return false; | |
1390 | single_st = dr; | |
d0582dc1 | 1391 | } |
30d55936 | 1392 | } |
30d55936 RG |
1393 | } |
1394 | ||
939cf90f BC |
1395 | if (!single_st) |
1396 | return false; | |
1397 | ||
1398 | /* Bail out if this is a bitfield memory reference. */ | |
1399 | if (TREE_CODE (DR_REF (single_st)) == COMPONENT_REF | |
1400 | && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (single_st), 1))) | |
1401 | return false; | |
818625cf | 1402 | |
85aa9ed6 BC |
1403 | /* Data reference must be executed exactly once per iteration of each |
1404 | loop in the loop nest. We only need to check dominance information | |
1405 | against the outermost one in a perfect loop nest because a bb can't | |
1406 | dominate outermost loop's latch without dominating inner loop's. */ | |
939cf90f | 1407 | basic_block bb_st = gimple_bb (DR_STMT (single_st)); |
85aa9ed6 | 1408 | if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb_st)) |
939cf90f BC |
1409 | return false; |
1410 | ||
1411 | if (single_ld) | |
163aa51b | 1412 | { |
939cf90f BC |
1413 | gimple *store = DR_STMT (single_st), *load = DR_STMT (single_ld); |
1414 | /* Direct aggregate copy or via an SSA name temporary. */ | |
1415 | if (load != store | |
1416 | && gimple_assign_lhs (load) != gimple_assign_rhs1 (store)) | |
1417 | return false; | |
163aa51b | 1418 | |
939cf90f BC |
1419 | /* Bail out if this is a bitfield memory reference. */ |
1420 | if (TREE_CODE (DR_REF (single_ld)) == COMPONENT_REF | |
1421 | && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (single_ld), 1))) | |
1422 | return false; | |
1423 | ||
1424 | /* Load and store must be in the same loop nest. */ | |
1425 | basic_block bb_ld = gimple_bb (DR_STMT (single_ld)); | |
85aa9ed6 | 1426 | if (bb_st->loop_father != bb_ld->loop_father) |
939cf90f BC |
1427 | return false; |
1428 | ||
85aa9ed6 BC |
1429 | /* Data reference must be executed exactly once per iteration. |
1430 | Same as single_st, we only need to check against the outermost | |
1431 | loop. */ | |
1432 | if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb_ld)) | |
939cf90f BC |
1433 | return false; |
1434 | ||
85aa9ed6 | 1435 | edge e = single_exit (bb_st->loop_father); |
939cf90f BC |
1436 | bool dom_ld = dominated_by_p (CDI_DOMINATORS, e->src, bb_ld); |
1437 | bool dom_st = dominated_by_p (CDI_DOMINATORS, e->src, bb_st); | |
1438 | if (dom_ld != dom_st) | |
1439 | return false; | |
1440 | } | |
1441 | ||
1442 | *src_dr = single_ld; | |
1443 | *dst_dr = single_st; | |
1444 | return true; | |
1445 | } | |
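
/* For instance (an illustrative sketch), a partition whose statements come
   from a loop body like

     |for (i = 0; i < n; i++)
     |  a[i] = b[i];

   has exactly one store data reference (a[i]) and one load (b[i]), which
   the function above records in DST_DR and SRC_DR; a body containing two
   distinct stores would make it return false.  */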
1446 | ||
1447 | /* Given data reference DR in LOOP_NEST, this function checks the enclosing | |
1448 | loops from inner to outer to see if the loop's step equals the access size | |
5955438a BC |
1449 | at each level of the loop. Return 2 if we can prove this at all loop levels; | |
1450 | record access base and size in BASE and SIZE; save loop's step at each | |
1451 | level of loop in STEPS if it is not null. For example: | |
939cf90f BC |
1452 | |
1453 | int arr[100][100][100]; | |
1454 | for (i = 0; i < 100; i++) ;steps[2] = 40000 | |
1455 | for (j = 100; j > 0; j--) ;steps[1] = -400 | |
1456 | for (k = 0; k < 100; k++) ;steps[0] = 4 | |
5955438a | 1457 | arr[i][j - 1][k] = 0; ;base = &arr, size = 4000000 |
939cf90f | 1458 | |
5955438a BC |
1459 | Return 1 if we can prove the equality at the innermost loop, but not at | |
1460 | all loop levels. In this case, no information is recorded. | |
1461 | ||
1462 | Return 0 if no equality can be proven at any loop level. */ | |
1463 | ||
1464 | static int | |
939cf90f BC |
1465 | compute_access_range (loop_p loop_nest, data_reference_p dr, tree *base, |
1466 | tree *size, vec<tree> *steps = NULL) | |
1467 | { | |
1468 | location_t loc = gimple_location (DR_STMT (dr)); | |
1469 | basic_block bb = gimple_bb (DR_STMT (dr)); | |
1470 | struct loop *loop = bb->loop_father; | |
1471 | tree ref = DR_REF (dr); | |
1472 | tree access_base = build_fold_addr_expr (ref); | |
1473 | tree access_size = TYPE_SIZE_UNIT (TREE_TYPE (ref)); | |
5955438a | 1474 | int res = 0; |
939cf90f BC |
1475 | |
1476 | do { | |
1477 | tree scev_fn = analyze_scalar_evolution (loop, access_base); | |
1478 | if (TREE_CODE (scev_fn) != POLYNOMIAL_CHREC) | |
5955438a | 1479 | return res; |
163aa51b | 1480 | |
939cf90f BC |
1481 | access_base = CHREC_LEFT (scev_fn); |
1482 | if (tree_contains_chrecs (access_base, NULL)) | |
5955438a | 1483 | return res; |
939cf90f BC |
1484 | |
1485 | tree scev_step = CHREC_RIGHT (scev_fn); | |
1486 | /* Only support constant steps. */ | |
1487 | if (TREE_CODE (scev_step) != INTEGER_CST) | |
5955438a | 1488 | return res; |
939cf90f BC |
1489 | |
1490 | enum ev_direction access_dir = scev_direction (scev_fn); | |
1491 | if (access_dir == EV_DIR_UNKNOWN) | |
5955438a | 1492 | return res; |
939cf90f BC |
1493 | |
1494 | if (steps != NULL) | |
1495 | steps->safe_push (scev_step); | |
1496 | ||
1497 | scev_step = fold_convert_loc (loc, sizetype, scev_step); | |
1498 | /* Compute absolute value of scev step. */ | |
1499 | if (access_dir == EV_DIR_DECREASES) | |
1500 | scev_step = fold_build1_loc (loc, NEGATE_EXPR, sizetype, scev_step); | |
1501 | ||
1502 | /* At each level of the loop, the scev step must equal the access size. In other | |
1503 | words, DR must access consecutive memory between loop iterations. */ | |
1504 | if (!operand_equal_p (scev_step, access_size, 0)) | |
5955438a BC |
1505 | return res; |
1506 | ||
1507 | /* Access stride can be computed for data reference at least for the | |
1508 | innermost loop. */ | |
1509 | res = 1; | |
939cf90f BC |
1510 | |
1511 | /* Compute DR's execution times in loop. */ | |
1512 | tree niters = number_of_latch_executions (loop); | |
1513 | niters = fold_convert_loc (loc, sizetype, niters); | |
1514 | if (dominated_by_p (CDI_DOMINATORS, single_exit (loop)->src, bb)) | |
1515 | niters = size_binop_loc (loc, PLUS_EXPR, niters, size_one_node); | |
1516 | ||
1517 | /* Compute DR's overall access size in loop. */ | |
1518 | access_size = fold_build2_loc (loc, MULT_EXPR, sizetype, | |
1519 | niters, scev_step); | |
1520 | /* Adjust base address in case of negative step. */ | |
1521 | if (access_dir == EV_DIR_DECREASES) | |
163aa51b | 1522 | { |
939cf90f BC |
1523 | tree adj = fold_build2_loc (loc, MINUS_EXPR, sizetype, |
1524 | scev_step, access_size); | |
1525 | access_base = fold_build_pointer_plus_loc (loc, access_base, adj); | |
163aa51b | 1526 | } |
939cf90f BC |
1527 | } while (loop != loop_nest && (loop = loop_outer (loop)) != NULL); |
1528 | ||
1529 | *base = access_base; | |
1530 | *size = access_size; | |
5955438a BC |
1531 | /* Access stride can be computed for data reference at each loop level. */ | |
1532 | return 2; | |
939cf90f BC |
1533 | } |
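
/* Working through the example in the comment above: at the innermost level
   the step 4 equals the size of one int, so the overall access size becomes
   100 * 4 = 400; at the middle level the absolute step 400 matches that,
   giving 100 * 400 = 40000 (with the base adjusted for the negative step);
   at the outermost level the step 40000 matches again, giving the final
   size 100 * 40000 = 4000000 that is recorded in SIZE.  */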
1534 | ||
1535 | /* Allocate and return builtin struct. Record information like DST_DR, | |
1536 | SRC_DR, DST_BASE, SRC_BASE and SIZE in the allocated struct. */ | |
1537 | ||
1538 | static struct builtin_info * | |
1539 | alloc_builtin (data_reference_p dst_dr, data_reference_p src_dr, | |
1540 | tree dst_base, tree src_base, tree size) | |
1541 | { | |
1542 | struct builtin_info *builtin = XNEW (struct builtin_info); | |
1543 | builtin->dst_dr = dst_dr; | |
1544 | builtin->src_dr = src_dr; | |
1545 | builtin->dst_base = dst_base; | |
1546 | builtin->src_base = src_base; | |
1547 | builtin->size = size; | |
1548 | return builtin; | |
1549 | } | |
1550 | ||
1551 | /* Given data reference DR in loop nest LOOP, classify whether it forms a | |
1552 | builtin memset call. */ | |
1553 | ||
1554 | static void | |
1555 | classify_builtin_st (loop_p loop, partition *partition, data_reference_p dr) | |
1556 | { | |
1557 | gimple *stmt = DR_STMT (dr); | |
1558 | tree base, size, rhs = gimple_assign_rhs1 (stmt); | |
1559 | ||
1560 | if (const_with_all_bytes_same (rhs) == -1 | |
1561 | && (!INTEGRAL_TYPE_P (TREE_TYPE (rhs)) | |
1562 | || (TYPE_MODE (TREE_TYPE (rhs)) | |
1563 | != TYPE_MODE (unsigned_char_type_node)))) | |
1564 | return; | |
1565 | ||
1566 | if (TREE_CODE (rhs) == SSA_NAME | |
1567 | && !SSA_NAME_IS_DEFAULT_DEF (rhs) | |
1568 | && flow_bb_inside_loop_p (loop, gimple_bb (SSA_NAME_DEF_STMT (rhs)))) | |
1569 | return; | |
1570 | ||
5955438a BC |
1571 | int res = compute_access_range (loop, dr, &base, &size); |
1572 | if (res == 0) | |
939cf90f | 1573 | return; |
5955438a BC |
1574 | if (res == 1) |
1575 | { | |
1576 | partition->kind = PKIND_PARTIAL_MEMSET; | |
1577 | return; | |
1578 | } | |
939cf90f | 1579 | |
d2fd6a04 RS |
1580 | poly_uint64 base_offset; |
1581 | unsigned HOST_WIDE_INT const_base_offset; | |
1582 | tree base_base = strip_offset (base, &base_offset); | |
1583 | if (!base_offset.is_constant (&const_base_offset)) | |
1584 | return; | |
1585 | ||
957f0d8f BC |
1586 | struct builtin_info *builtin; |
1587 | builtin = alloc_builtin (dr, NULL, base, NULL_TREE, size); | |
d2fd6a04 RS |
1588 | builtin->dst_base_base = base_base; |
1589 | builtin->dst_base_offset = const_base_offset; | |
957f0d8f | 1590 | partition->builtin = builtin; |
939cf90f BC |
1591 | partition->kind = PKIND_MEMSET; |
1592 | } | |
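
/* As a small sketch of the classification above: given int arr[100], a loop

     |for (i = 0; i < 100; i++)
     |  arr[i] = 0;

   stores a constant whose bytes are all the same, and the store's step (4)
   equals its access size at the only loop level, so the partition is
   classified as PKIND_MEMSET and can later be emitted as
   __builtin_memset (&arr, 0, 400).  */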
1593 | ||
1594 | /* Given data references DST_DR and SRC_DR in loop nest LOOP and RDG, classify | |
1595 | whether they form a builtin memcpy or memmove call. */ | |
1596 | ||
1597 | static void | |
1598 | classify_builtin_ldst (loop_p loop, struct graph *rdg, partition *partition, | |
1599 | data_reference_p dst_dr, data_reference_p src_dr) | |
1600 | { | |
1601 | tree base, size, src_base, src_size; | |
1602 | auto_vec<tree> dst_steps, src_steps; | |
1603 | ||
5955438a BC |
1604 | /* Compute access range of both load and store. */ |
1605 | int res = compute_access_range (loop, dst_dr, &base, &size, &dst_steps); | |
1606 | if (res != 2) | |
1607 | return; | |
1608 | res = compute_access_range (loop, src_dr, &src_base, &src_size, &src_steps); | |
1609 | if (res != 2) | |
1610 | return; | |
1611 | ||
1612 | /* They must have the same access size. */ | |
1613 | if (!operand_equal_p (size, src_size, 0)) | |
939cf90f BC |
1614 | return; |
1615 | ||
1616 | /* Load and store in loop nest must access memory in the same way, i.e., | |
1617 | they must have the same steps in each loop of the nest. */ | |
1618 | if (dst_steps.length () != src_steps.length ()) | |
1619 | return; | |
1620 | for (unsigned i = 0; i < dst_steps.length (); ++i) | |
1621 | if (!operand_equal_p (dst_steps[i], src_steps[i], 0)) | |
1622 | return; | |
1623 | ||
1624 | /* Now check whether there is a dependence. */ | |
1625 | ddr_p ddr = get_data_dependence (rdg, src_dr, dst_dr); | |
1626 | ||
1627 | /* Classify as memcpy if no dependence between load and store. */ | |
1628 | if (DDR_ARE_DEPENDENT (ddr) == chrec_known) | |
1629 | { | |
1630 | partition->builtin = alloc_builtin (dst_dr, src_dr, base, src_base, size); | |
1631 | partition->kind = PKIND_MEMCPY; | |
1632 | return; | |
163aa51b BC |
1633 | } |
1634 | ||
939cf90f BC |
1635 | /* Can't do memmove in case of unknown dependence or dependence without |
1636 | classical distance vector. */ | |
1637 | if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know | |
1638 | || DDR_NUM_DIST_VECTS (ddr) == 0) | |
1639 | return; | |
818625cf | 1640 | |
939cf90f BC |
1641 | unsigned i; |
1642 | lambda_vector dist_v; | |
1643 | int num_lev = (DDR_LOOP_NEST (ddr)).length (); | |
1644 | FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) | |
d0582dc1 | 1645 | { |
939cf90f BC |
1646 | unsigned dep_lev = dependence_level (dist_v, num_lev); |
1647 | /* Can't do memmove if load depends on store. */ | |
1648 | if (dep_lev > 0 && dist_v[dep_lev - 1] > 0 && !DDR_REVERSED_P (ddr)) | |
d0582dc1 | 1649 | return; |
d0582dc1 | 1650 | } |
939cf90f BC |
1651 | |
1652 | partition->builtin = alloc_builtin (dst_dr, src_dr, base, src_base, size); | |
1653 | partition->kind = PKIND_MEMMOVE; | |
1654 | return; | |
1655 | } | |
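
/* As illustrative sketches: for disjoint arrays

     |for (i = 0; i < n; i++)
     |  a[i] = b[i];

   the dependence is known not to exist and the partition becomes
   PKIND_MEMCPY, while a backward-overlapping self copy such as

     |for (i = 0; i < n; i++)
     |  p[i] = p[i + 2];

   reads each element before the store reaches it, matching memmove
   semantics, so it can be classified as PKIND_MEMMOVE; the forward
   overlap p[i + 2] = p[i] makes the load depend on the store and is
   rejected by the distance vector check above.  */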
1656 | ||
1657 | /* Classifies the builtin kind we can generate for PARTITION of RDG and LOOP. | |
1658 | For the moment we detect memset, memcpy and memmove patterns. Bitmap | |
1659 | STMT_IN_ALL_PARTITIONS contains statements belonging to all partitions. */ | |
1660 | ||
1661 | static void | |
1662 | classify_partition (loop_p loop, struct graph *rdg, partition *partition, | |
1663 | bitmap stmt_in_all_partitions) | |
1664 | { | |
1665 | bitmap_iterator bi; | |
1666 | unsigned i; | |
1667 | data_reference_p single_ld = NULL, single_st = NULL; | |
1668 | bool volatiles_p = false, has_reduction = false; | |
1669 | ||
1670 | EXECUTE_IF_SET_IN_BITMAP (partition->stmts, 0, i, bi) | |
d0582dc1 | 1671 | { |
939cf90f | 1672 | gimple *stmt = RDG_STMT (rdg, i); |
17c5cbdf | 1673 | |
939cf90f BC |
1674 | if (gimple_has_volatile_ops (stmt)) |
1675 | volatiles_p = true; | |
17c5cbdf | 1676 | |
939cf90f BC |
1677 | /* If the stmt is not included by all partitions and there are uses | |
1678 | outside of the loop, then mark the partition as reduction. */ | |
1679 | if (stmt_has_scalar_dependences_outside_loop (loop, stmt)) | |
1680 | { | |
1681 | /* Due to a limitation in the transform phase we have to fuse all | |
1682 | reduction partitions. As a result, this could cancel valid | |
1683 | loop distribution, especially for loops whose induction variable | |
1684 | is used outside of the loop. To work around this issue, we skip | |
1685 | marking the partition as a reduction if the reduction stmt belongs | |
1686 | to all partitions. In that case, the reduction will be computed | |
1687 | correctly no matter how partitions are fused/distributed. */ | |
1688 | if (!bitmap_bit_p (stmt_in_all_partitions, i)) | |
f20132e7 | 1689 | { |
939cf90f BC |
1690 | partition->reduction_p = true; |
1691 | return; | |
f20132e7 | 1692 | } |
939cf90f | 1693 | has_reduction = true; |
f20132e7 | 1694 | } |
d0582dc1 | 1695 | } |
939cf90f BC |
1696 | |
1697 | /* Perform general partition disqualification for builtins. */ | |
1698 | if (volatiles_p | |
1699 | /* Simple workaround to prevent classifying the partition as builtin | |
1700 | if it contains any use outside of loop. */ | |
1701 | || has_reduction | |
1702 | || !flag_tree_loop_distribute_patterns) | |
1703 | return; | |
1704 | ||
1705 | /* Find single load/store data references for builtin partition. */ | |
85aa9ed6 | 1706 | if (!find_single_drs (loop, rdg, partition, &single_st, &single_ld)) |
939cf90f BC |
1707 | return; |
1708 | ||
1709 | /* Classify the builtin kind. */ | |
1710 | if (single_ld == NULL) | |
1711 | classify_builtin_st (loop, partition, single_st); | |
1712 | else | |
1713 | classify_builtin_ldst (loop, rdg, partition, single_st, single_ld); | |
cfee318d SP |
1714 | } |
1715 | ||
95f7d11b BC |
1716 | /* Returns true when PARTITION1 and PARTITION2 access the same memory |
1717 | object in RDG. */ | |
cfee318d SP |
1718 | |
1719 | static bool | |
95f7d11b BC |
1720 | share_memory_accesses (struct graph *rdg, |
1721 | partition *partition1, partition *partition2) | |
cfee318d | 1722 | { |
95f7d11b | 1723 | unsigned i, j; |
cfee318d | 1724 | bitmap_iterator bi, bj; |
95f7d11b | 1725 | data_reference_p dr1, dr2; |
1fa0c180 RG |
1726 | |
1726 | /* First check whether the intersection of the two partitions contains | |
1727 | any loads or stores. Common loads are the situation that happens | |
1728 | most often. */ | |
1730 | EXECUTE_IF_AND_IN_BITMAP (partition1->stmts, partition2->stmts, 0, i, bi) | |
1731 | if (RDG_MEM_WRITE_STMT (rdg, i) | |
1732 | || RDG_MEM_READS_STMT (rdg, i)) | |
1733 | return true; | |
cfee318d | 1734 | |
95f7d11b BC |
1735 | /* Then check whether the two partitions access the same memory object. */ |
1736 | EXECUTE_IF_SET_IN_BITMAP (partition1->datarefs, 0, i, bi) | |
1737 | { | |
1738 | dr1 = datarefs_vec[i]; | |
1739 | ||
1740 | if (!DR_BASE_ADDRESS (dr1) | |
1741 | || !DR_OFFSET (dr1) || !DR_INIT (dr1) || !DR_STEP (dr1)) | |
1742 | continue; | |
1743 | ||
1744 | EXECUTE_IF_SET_IN_BITMAP (partition2->datarefs, 0, j, bj) | |
1745 | { | |
1746 | dr2 = datarefs_vec[j]; | |
1747 | ||
1748 | if (!DR_BASE_ADDRESS (dr2) | |
1749 | || !DR_OFFSET (dr2) || !DR_INIT (dr2) || !DR_STEP (dr2)) | |
1750 | continue; | |
1751 | ||
1752 | if (operand_equal_p (DR_BASE_ADDRESS (dr1), DR_BASE_ADDRESS (dr2), 0) | |
1753 | && operand_equal_p (DR_OFFSET (dr1), DR_OFFSET (dr2), 0) | |
1754 | && operand_equal_p (DR_INIT (dr1), DR_INIT (dr2), 0) | |
1755 | && operand_equal_p (DR_STEP (dr1), DR_STEP (dr2), 0)) | |
1756 | return true; | |
1757 | } | |
1758 | } | |
cfee318d SP |
1759 | |
1760 | return false; | |
1761 | } | |
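
/* For example, two partitions that both contain a load of a[i] (equal base
   address, offset, init and step) share a memory access; the caller can use
   this to fuse such partitions for better data locality.  */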
1762 | ||
4a52eb19 BC |
1763 | /* For each seed statement in STARTING_STMTS, this function builds |
1764 | a partition for it by adding dependent statements according to RDG. | |
1765 | All partitions are recorded in PARTITIONS. */ | |
dea61d92 SP |
1766 | |
1767 | static void | |
83a95546 | 1768 | rdg_build_partitions (struct graph *rdg, |
355fe088 | 1769 | vec<gimple *> starting_stmts, |
526ceb68 | 1770 | vec<partition *> *partitions) |
dea61d92 | 1771 | { |
0e3de1d4 | 1772 | auto_bitmap processed; |
2fd5894f | 1773 | int i; |
355fe088 | 1774 | gimple *stmt; |
dea61d92 | 1775 | |
2fd5894f | 1776 | FOR_EACH_VEC_ELT (starting_stmts, i, stmt) |
dea61d92 | 1777 | { |
2fd5894f RB |
1778 | int v = rdg_vertex_for_stmt (rdg, stmt); |
1779 | ||
1780 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1781 | fprintf (dump_file, | |
1782 | "ldist asked to generate code for vertex %d\n", v); | |
b8698a0f | 1783 | |
24f161fd RB |
1784 | /* If the vertex is already contained in another partition, so | |
1785 | is the partition rooted at it. */ | |
dea61d92 SP |
1786 | if (bitmap_bit_p (processed, v)) |
1787 | continue; | |
b8698a0f | 1788 | |
526ceb68 | 1789 | partition *partition = build_rdg_partition_for_vertex (rdg, v); |
24f161fd | 1790 | bitmap_ior_into (processed, partition->stmts); |
dea61d92 | 1791 | |
826a536d | 1792 | if (dump_file && (dump_flags & TDF_DETAILS)) |
dea61d92 | 1793 | { |
f1eb4621 BC |
1794 | fprintf (dump_file, "ldist creates useful %s partition:\n", |
1795 | partition->type == PTYPE_PARALLEL ? "parallel" : "sequent"); | |
1796 | bitmap_print (dump_file, partition->stmts, " ", "\n"); | |
dea61d92 | 1797 | } |
826a536d RB |
1798 | |
1799 | partitions->safe_push (partition); | |
dea61d92 SP |
1800 | } |
1801 | ||
83a95546 RB |
1802 | /* All vertices should have been assigned to at least one partition now, |
1803 | other than vertices belonging to dead code. */ | |
dea61d92 SP |
1804 | } |
1805 | ||
1806 | /* Dump to FILE the PARTITIONS. */ | |
1807 | ||
1808 | static void | |
526ceb68 | 1809 | dump_rdg_partitions (FILE *file, vec<partition *> partitions) |
dea61d92 SP |
1810 | { |
1811 | int i; | |
526ceb68 | 1812 | partition *partition; |
dea61d92 | 1813 | |
9771b263 | 1814 | FOR_EACH_VEC_ELT (partitions, i, partition) |
c61f8985 | 1815 | debug_bitmap_file (file, partition->stmts); |
dea61d92 SP |
1816 | } |
1817 | ||
1818 | /* Debug PARTITIONS. */ | |
526ceb68 | 1819 | extern void debug_rdg_partitions (vec<partition *> ); |
dea61d92 | 1820 | |
24e47c76 | 1821 | DEBUG_FUNCTION void |
526ceb68 | 1822 | debug_rdg_partitions (vec<partition *> partitions) |
dea61d92 SP |
1823 | { |
1824 | dump_rdg_partitions (stderr, partitions); | |
1825 | } | |
1826 | ||
2b8aee8e SP |
1827 | /* Returns the number of read and write operations in the RDG. */ |
1828 | ||
1829 | static int | |
1830 | number_of_rw_in_rdg (struct graph *rdg) | |
1831 | { | |
1832 | int i, res = 0; | |
1833 | ||
1834 | for (i = 0; i < rdg->n_vertices; i++) | |
1835 | { | |
1836 | if (RDG_MEM_WRITE_STMT (rdg, i)) | |
1837 | ++res; | |
1838 | ||
1839 | if (RDG_MEM_READS_STMT (rdg, i)) | |
1840 | ++res; | |
1841 | } | |
1842 | ||
1843 | return res; | |
1844 | } | |
1845 | ||
1846 | /* Returns the number of read and write operations in a PARTITION of | |
1847 | the RDG. */ | |
1848 | ||
1849 | static int | |
526ceb68 | 1850 | number_of_rw_in_partition (struct graph *rdg, partition *partition) |
2b8aee8e SP |
1851 | { |
1852 | int res = 0; | |
1853 | unsigned i; | |
1854 | bitmap_iterator ii; | |
1855 | ||
c61f8985 | 1856 | EXECUTE_IF_SET_IN_BITMAP (partition->stmts, 0, i, ii) |
2b8aee8e SP |
1857 | { |
1858 | if (RDG_MEM_WRITE_STMT (rdg, i)) | |
1859 | ++res; | |
1860 | ||
1861 | if (RDG_MEM_READS_STMT (rdg, i)) | |
1862 | ++res; | |
1863 | } | |
1864 | ||
1865 | return res; | |
1866 | } | |
1867 | ||
1868 | /* Returns true when one of the PARTITIONS contains all the read or | |
1869 | write operations of RDG. */ | |
1870 | ||
1871 | static bool | |
9771b263 | 1872 | partition_contains_all_rw (struct graph *rdg, |
526ceb68 | 1873 | vec<partition *> partitions) |
2b8aee8e SP |
1874 | { |
1875 | int i; | |
526ceb68 | 1876 | partition *partition; |
2b8aee8e SP |
1877 | int nrw = number_of_rw_in_rdg (rdg); |
1878 | ||
9771b263 | 1879 | FOR_EACH_VEC_ELT (partitions, i, partition) |
2b8aee8e SP |
1880 | if (nrw == number_of_rw_in_partition (rdg, partition)) |
1881 | return true; | |
1882 | ||
1883 | return false; | |
1884 | } | |
1885 | ||
447f3223 | 1886 | /* Compute partition dependence created by the data references in DRS1 |
a8745cc2 BC |
1887 | and DRS2, modify and return DIR according to that. If ALIAS_DDRS is | |
1888 | not NULL, we record dependence introduced by possible alias between | |
1889 | two data references in ALIAS_DDRS; otherwise, we simply ignore such | |
1890 | dependence as if it doesn't exist at all. */ | |
447f3223 RB |
1891 | |
1892 | static int | |
4084ea5f | 1893 | pg_add_dependence_edges (struct graph *rdg, int dir, |
a8745cc2 | 1894 | bitmap drs1, bitmap drs2, vec<ddr_p> *alias_ddrs) |
447f3223 | 1895 | { |
a7a44c07 BC |
1896 | unsigned i, j; |
1897 | bitmap_iterator bi, bj; | |
1898 | data_reference_p dr1, dr2, saved_dr1; | |
447f3223 RB |
1899 | |
1900 | /* dependence direction - 0 is no dependence, -1 is back, | |
1901 | 1 is forth, 2 is both (we can stop then, merging will occur). */ | |
a7a44c07 BC |
1902 | EXECUTE_IF_SET_IN_BITMAP (drs1, 0, i, bi) |
1903 | { | |
1904 | dr1 = datarefs_vec[i]; | |
1905 | ||
1906 | EXECUTE_IF_SET_IN_BITMAP (drs2, 0, j, bj) | |
1907 | { | |
a8745cc2 BC |
1908 | int res, this_dir = 1; |
1909 | ddr_p ddr; | |
1910 | ||
a7a44c07 BC |
1911 | dr2 = datarefs_vec[j]; |
1912 | ||
1913 | /* Skip all <read, read> data dependence. */ | |
1914 | if (DR_IS_READ (dr1) && DR_IS_READ (dr2)) | |
1915 | continue; | |
1916 | ||
1917 | saved_dr1 = dr1; | |
a8745cc2 | 1918 | /* Re-shuffle data-refs to be in topological order. */ |
a7a44c07 BC |
1919 | if (rdg_vertex_for_stmt (rdg, DR_STMT (dr1)) |
1920 | > rdg_vertex_for_stmt (rdg, DR_STMT (dr2))) | |
1921 | { | |
1922 | std::swap (dr1, dr2); | |
1923 | this_dir = -this_dir; | |
1924 | } | |
17c5cbdf | 1925 | ddr = get_data_dependence (rdg, dr1, dr2); |
a7a44c07 | 1926 | if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) |
a8745cc2 BC |
1927 | { |
1928 | this_dir = 0; | |
1929 | res = data_ref_compare_tree (DR_BASE_ADDRESS (dr1), | |
1930 | DR_BASE_ADDRESS (dr2)); | |
1931 | /* Be conservative. If data references are not well analyzed, | |
1932 | or the two data references have the same base address and | |
1933 | offset, add the dependence and consider them aliases of each other. | |
67914693 | 1934 | In other words, the dependence cannot be resolved by |
a8745cc2 BC |
1935 | runtime alias check. */ |
1936 | if (!DR_BASE_ADDRESS (dr1) || !DR_BASE_ADDRESS (dr2) | |
1937 | || !DR_OFFSET (dr1) || !DR_OFFSET (dr2) | |
1938 | || !DR_INIT (dr1) || !DR_INIT (dr2) | |
1939 | || !DR_STEP (dr1) || !tree_fits_uhwi_p (DR_STEP (dr1)) | |
1940 | || !DR_STEP (dr2) || !tree_fits_uhwi_p (DR_STEP (dr2)) | |
1941 | || res == 0) | |
1942 | this_dir = 2; | |
1943 | /* Data dependence could be resolved by runtime alias check, | |
1944 | record it in ALIAS_DDRS. */ | |
1945 | else if (alias_ddrs != NULL) | |
1946 | alias_ddrs->safe_push (ddr); | |
1947 | /* Or simply ignore it. */ | |
1948 | } | |
a7a44c07 BC |
1949 | else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE) |
1950 | { | |
1951 | if (DDR_REVERSED_P (ddr)) | |
a8745cc2 BC |
1952 | this_dir = -this_dir; |
1953 | ||
a7a44c07 BC |
1954 | /* Known dependences can still be unordered throughout the | |
1955 | iteration space, see gcc.dg/tree-ssa/ldist-16.c. */ | |
1956 | if (DDR_NUM_DIST_VECTS (ddr) != 1) | |
1957 | this_dir = 2; | |
1958 | /* If the overlap is exact preserve stmt order. */ | |
0df7c778 BC |
1959 | else if (lambda_vector_zerop (DDR_DIST_VECT (ddr, 0), |
1960 | DDR_NB_LOOPS (ddr))) | |
a7a44c07 | 1961 | ; |
a8745cc2 BC |
1962 | /* Else as the distance vector is lexicographically positive, swap | |
1963 | the dependence direction. */ | |
a7a44c07 | 1964 | else |
a8745cc2 | 1965 | this_dir = -this_dir; |
a7a44c07 BC |
1966 | } |
1967 | else | |
1968 | this_dir = 0; | |
a7a44c07 BC |
1969 | if (this_dir == 2) |
1970 | return 2; | |
1971 | else if (dir == 0) | |
1972 | dir = this_dir; | |
1973 | else if (this_dir != 0 && dir != this_dir) | |
1974 | return 2; | |
1975 | /* Shuffle "back" dr1. */ | |
1976 | dr1 = saved_dr1; | |
1977 | } | |
1978 | } | |
447f3223 RB |
1979 | return dir; |
1980 | } | |
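
/* To illustrate the encoding: if every dependence found above runs from a
   data reference in DRS1 to one in DRS2, the function returns 1; if they
   all run the other way it returns -1; a mix of both directions (or a
   dependence that cannot be ordered by a single distance vector) yields 2,
   which tells the caller the two partitions must be merged.  */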
1981 | ||
1982 | /* Compare postorder number of the partition graph vertices V1 and V2. */ | |
1983 | ||
1984 | static int | |
1985 | pgcmp (const void *v1_, const void *v2_) | |
1986 | { | |
1987 | const vertex *v1 = (const vertex *)v1_; | |
1988 | const vertex *v2 = (const vertex *)v2_; | |
1989 | return v2->post - v1->post; | |
1990 | } | |
2fd5894f | 1991 | |
a8745cc2 BC |
1992 | /* Data attached to vertices of partition dependence graph. */ |
1993 | struct pg_vdata | |
1994 | { | |
1995 | /* ID of the corresponding partition. */ | |
1996 | int id; | |
1997 | /* The partition. */ | |
1998 | struct partition *partition; | |
1999 | }; | |
2000 | ||
2001 | /* Data attached to edges of partition dependence graph. */ | |
2002 | struct pg_edata | |
2003 | { | |
2004 | /* If the dependence edge can be resolved by runtime alias check, | |
2005 | this vector contains data dependence relations for runtime alias | |
2006 | check. On the other hand, if the dependence edge is introduced | |
2007 | because of compilation time known data dependence, this vector | |
2008 | contains nothing. */ | |
2009 | vec<ddr_p> alias_ddrs; | |
2010 | }; | |
2011 | ||
2012 | /* Callback data for traversing edges in graph. */ | |
2013 | struct pg_edge_callback_data | |
2014 | { | |
2015 | /* Bitmap of strongly connected components that should be merged. */ | |
2016 | bitmap sccs_to_merge; | |
2017 | /* Array containing component information for all vertices. */ | |
2018 | int *vertices_component; | |
2019 | /* Vector to record all data dependence relations which are needed | |
2020 | to break strongly connected components by runtime alias checks. */ | |
2021 | vec<ddr_p> *alias_ddrs; | |
2022 | }; | |
2023 | ||
2024 | /* Initialize vertices' data for partition dependence graph PG with | |
2025 | PARTITIONS. */ | |
2026 | ||
2027 | static void | |
2028 | init_partition_graph_vertices (struct graph *pg, | |
2029 | vec<struct partition *> *partitions) | |
2030 | { | |
2031 | int i; | |
2032 | partition *partition; | |
2033 | struct pg_vdata *data; | |
2034 | ||
2035 | for (i = 0; partitions->iterate (i, &partition); ++i) | |
2036 | { | |
2037 | data = new pg_vdata; | |
2038 | pg->vertices[i].data = data; | |
2039 | data->id = i; | |
2040 | data->partition = partition; | |
2041 | } | |
2042 | } | |
2043 | ||
2044 | /* Add edge <I, J> to partition dependence graph PG. Attach vector of data | |
2045 | dependence relations to the EDGE if DDRS isn't NULL. */ | |
2046 | ||
2047 | static void | |
2048 | add_partition_graph_edge (struct graph *pg, int i, int j, vec<ddr_p> *ddrs) | |
2049 | { | |
2050 | struct graph_edge *e = add_edge (pg, i, j); | |
2051 | ||
2052 | /* If the edge is attached with data dependence relations, it means this | |
2053 | dependence edge can be resolved by runtime alias checks. */ | |
2054 | if (ddrs != NULL) | |
2055 | { | |
2056 | struct pg_edata *data = new pg_edata; | |
2057 | ||
2058 | gcc_assert (ddrs->length () > 0); | |
2059 | e->data = data; | |
2060 | data->alias_ddrs = vNULL; | |
2061 | data->alias_ddrs.safe_splice (*ddrs); | |
2062 | } | |
2063 | } | |
2064 | ||
2065 | /* Callback function for the graph traversal algorithm. It returns true | |
2066 | if edge E should be skipped when traversing the graph. */ | |
2067 | ||
2068 | static bool | |
2069 | pg_skip_alias_edge (struct graph_edge *e) | |
2070 | { | |
2071 | struct pg_edata *data = (struct pg_edata *)e->data; | |
2072 | return (data != NULL && data->alias_ddrs.length () > 0); | |
2073 | } | |
2074 | ||
2075 | /* Callback function freeing data attached to edge E of graph. */ | |
2076 | ||
2077 | static void | |
2078 | free_partition_graph_edata_cb (struct graph *, struct graph_edge *e, void *) | |
2079 | { | |
2080 | if (e->data != NULL) | |
2081 | { | |
2082 | struct pg_edata *data = (struct pg_edata *)e->data; | |
2083 | data->alias_ddrs.release (); | |
2084 | delete data; | |
2085 | } | |
2086 | } | |
2087 | ||
2088 | /* Free data attached to the vertices of partition dependence graph PG. */ | |
2089 | ||
2090 | static void | |
2091 | free_partition_graph_vdata (struct graph *pg) | |
2092 | { | |
2093 | int i; | |
2094 | struct pg_vdata *data; | |
2095 | ||
2096 | for (i = 0; i < pg->n_vertices; ++i) | |
2097 | { | |
2098 | data = (struct pg_vdata *)pg->vertices[i].data; | |
2099 | delete data; | |
2100 | } | |
2101 | } | |
2102 | ||
2103 | /* Build and return partition dependence graph for PARTITIONS. RDG is | |
2104 | reduced dependence graph for the loop to be distributed. If IGNORE_ALIAS_P | |
2105 | is true, data dependence caused by possible alias between references | |
2106 | is ignored, as if it doesn't exist at all; otherwise all dependences | |
2107 | are considered. */ | |
2108 | ||
2109 | static struct graph * | |
2110 | build_partition_graph (struct graph *rdg, | |
2111 | vec<struct partition *> *partitions, | |
2112 | bool ignore_alias_p) | |
2113 | { | |
2114 | int i, j; | |
2115 | struct partition *partition1, *partition2; | |
2116 | graph *pg = new_graph (partitions->length ()); | |
2117 | auto_vec<ddr_p> alias_ddrs, *alias_ddrs_p; | |
2118 | ||
2119 | alias_ddrs_p = ignore_alias_p ? NULL : &alias_ddrs; | |
2120 | ||
2121 | init_partition_graph_vertices (pg, partitions); | |
2122 | ||
2123 | for (i = 0; partitions->iterate (i, &partition1); ++i) | |
2124 | { | |
2125 | for (j = i + 1; partitions->iterate (j, &partition2); ++j) | |
2126 | { | |
2127 | /* dependence direction - 0 is no dependence, -1 is back, | |
2128 | 1 is forth, 2 is both (we can stop then, merging will occur). */ | |
2129 | int dir = 0; | |
2130 | ||
2131 | /* If the first partition has a reduction, add a back edge; if the | |
2132 | second partition has a reduction, add a forth edge. This makes | |
2133 | sure that the reduction partition will be sorted as the last one. */ | |
2134 | if (partition_reduction_p (partition1)) | |
2135 | dir = -1; | |
2136 | else if (partition_reduction_p (partition2)) | |
2137 | dir = 1; | |
2138 | ||
2139 | /* Cleanup the temporary vector. */ | |
2140 | alias_ddrs.truncate (0); | |
2141 | ||
2142 | dir = pg_add_dependence_edges (rdg, dir, partition1->datarefs, | |
2143 | partition2->datarefs, alias_ddrs_p); | |
2144 | ||
2145 | /* Add edge to partition graph if a dependence exists. There | |
2146 | are two types of edges. One type of edge is caused by compilation | |
67914693 | 2147 | time known dependence; this type cannot be resolved by runtime | |
a8745cc2 BC |
2148 | alias check. The other type can be resolved by runtime alias |
2149 | check. */ | |
2150 | if (dir == 1 || dir == 2 | |
2151 | || alias_ddrs.length () > 0) | |
2152 | { | |
2153 | /* Attach data dependence relations to edge that can be resolved | |
2154 | by runtime alias check. */ | |
2155 | bool alias_edge_p = (dir != 1 && dir != 2); | |
2156 | add_partition_graph_edge (pg, i, j, | |
2157 | (alias_edge_p) ? &alias_ddrs : NULL); | |
2158 | } | |
2159 | if (dir == -1 || dir == 2 | |
2160 | || alias_ddrs.length () > 0) | |
2161 | { | |
2162 | /* Attach data dependence relations to edge that can be resolved | |
2163 | by runtime alias check. */ | |
2164 | bool alias_edge_p = (dir != -1 && dir != 2); | |
2165 | add_partition_graph_edge (pg, j, i, | |
2166 | (alias_edge_p) ? &alias_ddrs : NULL); | |
2167 | } | |
2168 | } | |
2169 | } | |
2170 | return pg; | |
2171 | } | |
2172 | ||
b4ec1d31 BC |
2173 | /* Sort partitions in PG in descending post order and store them in |
2174 | PARTITIONS. */ | |
a8745cc2 BC |
2175 | |
2176 | static void | |
2177 | sort_partitions_by_post_order (struct graph *pg, | |
2178 | vec<struct partition *> *partitions) | |
2179 | { | |
2180 | int i; | |
2181 | struct pg_vdata *data; | |
2182 | ||
b4ec1d31 | 2183 | /* Now order the remaining nodes in descending postorder. */ |
a8745cc2 BC |
2184 | qsort (pg->vertices, pg->n_vertices, sizeof (vertex), pgcmp); |
2185 | partitions->truncate (0); | |
2186 | for (i = 0; i < pg->n_vertices; ++i) | |
2187 | { | |
2188 | data = (struct pg_vdata *)pg->vertices[i].data; | |
2189 | if (data->partition) | |
2190 | partitions->safe_push (data->partition); | |
2191 | } | |
2192 | } | |
2193 | ||
2194 | /* Given reduced dependence graph RDG, merge strongly connected components | |
163aa51b BC |
2195 | of PARTITIONS. If IGNORE_ALIAS_P is true, data dependence caused by |
2196 | possible alias between references is ignored, as if it doesn't exist | |
2197 | at all; otherwise all dependences are considered. */ | |
a8745cc2 BC |
2198 | |
2199 | static void | |
2200 | merge_dep_scc_partitions (struct graph *rdg, | |
163aa51b BC |
2201 | vec<struct partition *> *partitions, |
2202 | bool ignore_alias_p) | |
a8745cc2 BC |
2203 | { |
2204 | struct partition *partition1, *partition2; | |
2205 | struct pg_vdata *data; | |
163aa51b | 2206 | graph *pg = build_partition_graph (rdg, partitions, ignore_alias_p); |
a8745cc2 BC |
2207 | int i, j, num_sccs = graphds_scc (pg, NULL); |
2208 | ||
2209 | /* A strongly connected component means a dependence cycle, so we cannot | |
2210 | distribute it; fuse it together instead. */ | |
2211 | if ((unsigned) num_sccs < partitions->length ()) | |
2212 | { | |
2213 | for (i = 0; i < num_sccs; ++i) | |
2214 | { | |
2215 | for (j = 0; partitions->iterate (j, &partition1); ++j) | |
2216 | if (pg->vertices[j].component == i) | |
2217 | break; | |
2218 | for (j = j + 1; partitions->iterate (j, &partition2); ++j) | |
2219 | if (pg->vertices[j].component == i) | |
2220 | { | |
2221 | partition_merge_into (NULL, partition1, | |
2222 | partition2, FUSE_SAME_SCC); | |
2223 | partition1->type = PTYPE_SEQUENTIAL; | |
2224 | (*partitions)[j] = NULL; | |
2225 | partition_free (partition2); | |
2226 | data = (struct pg_vdata *)pg->vertices[j].data; | |
2227 | data->partition = NULL; | |
2228 | } | |
2229 | } | |
a8745cc2 | 2230 | } |
aa1528b5 BC |
2231 | |
2232 | sort_partitions_by_post_order (pg, partitions); | |
a8745cc2 BC |
2233 | gcc_assert (partitions->length () == (unsigned)num_sccs); |
2234 | free_partition_graph_vdata (pg); | |
2235 | free_graph (pg); | |
2236 | } | |
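
/* A sketch of the situation handled above: two partitions built from

     |a[i] = b[i - 1] + 1;
     |b[i] = a[i - 1] + 2;

   depend on each other across iterations in both directions, so they form
   a single SCC and are fused into one sequential partition.  */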
2237 | ||
2238 | /* Callback function for traversing edge E in graph G. DATA is private | |
2239 | callback data. */ | |
2240 | ||
2241 | static void | |
2242 | pg_collect_alias_ddrs (struct graph *g, struct graph_edge *e, void *data) | |
2243 | { | |
2244 | int i, j, component; | |
2245 | struct pg_edge_callback_data *cbdata; | |
2246 | struct pg_edata *edata = (struct pg_edata *) e->data; | |
2247 | ||
2248 | /* If the edge doesn't have attached data dependence, it represents | |
2249 | compilation time known dependences. This type of dependence cannot | |
2250 | be resolved by runtime alias check. */ | |
2251 | if (edata == NULL || edata->alias_ddrs.length () == 0) | |
2252 | return; | |
2253 | ||
2254 | cbdata = (struct pg_edge_callback_data *) data; | |
2255 | i = e->src; | |
2256 | j = e->dest; | |
2257 | component = cbdata->vertices_component[i]; | |
2258 | /* Vertices are topologically sorted according to compilation time | |
2259 | known dependences, so we can break strongly connected components | |
2260 | by removing edges of the opposite direction, i.e., edges pointing | |
2261 | from a vertex with smaller post number to a vertex with bigger post | |
2262 | number. */ | |
2263 | if (g->vertices[i].post < g->vertices[j].post | |
2264 | /* We only need to remove edges connecting vertices in the same | |
2265 | strongly connected component to break it. */ | |
2266 | && component == cbdata->vertices_component[j] | |
2267 | /* Check if we want to break the strong connected component or not. */ | |
2268 | && !bitmap_bit_p (cbdata->sccs_to_merge, component)) | |
2269 | cbdata->alias_ddrs->safe_splice (edata->alias_ddrs); | |
2270 | } | |
2271 | ||
2272 | /* This is the main function breaking strongly connected components in | |
2273 | PARTITIONS, given reduced dependence graph RDG. Store data dependence | |
2274 | relations for runtime alias check in ALIAS_DDRS. */ | |
2275 | ||
2276 | static void | |
2277 | break_alias_scc_partitions (struct graph *rdg, | |
2278 | vec<struct partition *> *partitions, | |
2279 | vec<ddr_p> *alias_ddrs) | |
2280 | { | |
b4ec1d31 | 2281 | int i, j, k, num_sccs, num_sccs_no_alias; |
a8745cc2 BC |
2282 | /* Build partition dependence graph. */ |
2283 | graph *pg = build_partition_graph (rdg, partitions, false); | |
2284 | ||
2285 | alias_ddrs->truncate (0); | |
2286 | /* Find strong connected components in the graph, with all dependence edges | |
2287 | considered. */ | |
2288 | num_sccs = graphds_scc (pg, NULL); | |
2289 | /* All SCCs now can be broken by runtime alias checks because SCCs caused by | |
2290 | compilation time known dependences are merged before this function. */ | |
2291 | if ((unsigned) num_sccs < partitions->length ()) | |
2292 | { | |
2293 | struct pg_edge_callback_data cbdata; | |
2294 | auto_bitmap sccs_to_merge; | |
2295 | auto_vec<enum partition_type> scc_types; | |
2296 | struct partition *partition, *first; | |
2297 | ||
6dc29d3a | 2298 | /* If all partitions in an SCC have the same type, we can simply merge the | |
a8745cc2 BC |
2299 | SCC. This loop finds such SCCs and records them in the bitmap. */ | |
2300 | bitmap_set_range (sccs_to_merge, 0, (unsigned) num_sccs); | |
2301 | for (i = 0; i < num_sccs; ++i) | |
2302 | { | |
2303 | for (j = 0; partitions->iterate (j, &first); ++j) | |
2304 | if (pg->vertices[j].component == i) | |
2305 | break; | |
1623d9f3 BC |
2306 | |
2307 | bool same_type = true, all_builtins = partition_builtin_p (first); | |
a8745cc2 BC |
2308 | for (++j; partitions->iterate (j, &partition); ++j) |
2309 | { | |
2310 | if (pg->vertices[j].component != i) | |
2311 | continue; | |
2312 | ||
2313 | if (first->type != partition->type) | |
2314 | { | |
1623d9f3 | 2315 | same_type = false; |
a8745cc2 BC |
2316 | break; |
2317 | } | |
1623d9f3 | 2318 | all_builtins &= partition_builtin_p (partition); |
a8745cc2 | 2319 | } |
1623d9f3 BC |
2320 | /* Merge the SCC if all partitions in the SCC have the same type, even | |
2321 | though the resulting partition is sequential, because the vectorizer | |
2322 | can do a better runtime alias check. One exception is when all | |
2323 | partitions in the SCC are builtins. */ | |
2324 | if (!same_type || all_builtins) | |
2325 | bitmap_clear_bit (sccs_to_merge, i); | |
a8745cc2 BC |
2326 | } |
2327 | ||
2328 | /* Initialize callback data for traversing. */ | |
2329 | cbdata.sccs_to_merge = sccs_to_merge; | |
2330 | cbdata.alias_ddrs = alias_ddrs; | |
2331 | cbdata.vertices_component = XNEWVEC (int, pg->n_vertices); | |
2332 | /* Record the component information which will be corrupted by the next | |
2333 | graph scc finding call. */ | |
2334 | for (i = 0; i < pg->n_vertices; ++i) | |
2335 | cbdata.vertices_component[i] = pg->vertices[i].component; | |
2336 | ||
2337 | /* Collect data dependences for runtime alias checks to break SCCs. */ | |
2338 | if (bitmap_count_bits (sccs_to_merge) != (unsigned) num_sccs) | |
2339 | { | |
2340 | /* Run SCC finding algorithm again, with alias dependence edges | |
6dc29d3a | 2341 | skipped. This is to topologically sort partitions according to |
a8745cc2 BC |
2342 | compilation time known dependence. Note the topological order |
2343 | is stored in the form of pg's post order number. */ | |
2344 | num_sccs_no_alias = graphds_scc (pg, NULL, pg_skip_alias_edge); | |
2345 | gcc_assert (partitions->length () == (unsigned) num_sccs_no_alias); | |
2346 | /* With topological order, we can construct two subgraphs L and R. | |
2347 | L contains edge <x, y> where x < y in terms of post order, while | |
2348 | R contains edge <x, y> where x > y. Edges for compilation time | |
2349 | known dependence all fall in R, so we break SCCs by removing all | |
2351 | (alias) edges in subgraph L. */ | |
2351 | for_each_edge (pg, pg_collect_alias_ddrs, &cbdata); | |
2352 | } | |
2353 | ||
2354 | /* For SCC that doesn't need to be broken, merge it. */ | |
2355 | for (i = 0; i < num_sccs; ++i) | |
2356 | { | |
2357 | if (!bitmap_bit_p (sccs_to_merge, i)) | |
2358 | continue; | |
2359 | ||
2360 | for (j = 0; partitions->iterate (j, &first); ++j) | |
2361 | if (cbdata.vertices_component[j] == i) | |
2362 | break; | |
b4ec1d31 | 2363 | for (k = j + 1; partitions->iterate (k, &partition); ++k) |
a8745cc2 BC |
2364 | { |
2365 | struct pg_vdata *data; | |
2366 | ||
b4ec1d31 | 2367 | if (cbdata.vertices_component[k] != i) |
a8745cc2 BC |
2368 | continue; |
2369 | ||
b4ec1d31 BC |
2370 | /* Update postorder number so that merged reduction partition is |
2371 | sorted after other partitions. */ | |
2372 | if (!partition_reduction_p (first) | |
2373 | && partition_reduction_p (partition)) | |
2374 | { | |
2375 | gcc_assert (pg->vertices[k].post < pg->vertices[j].post); | |
2376 | pg->vertices[j].post = pg->vertices[k].post; | |
2377 | } | |
a8745cc2 | 2378 | partition_merge_into (NULL, first, partition, FUSE_SAME_SCC); |
b4ec1d31 | 2379 | (*partitions)[k] = NULL; |
a8745cc2 | 2380 | partition_free (partition); |
b4ec1d31 BC |
2381 | data = (struct pg_vdata *)pg->vertices[k].data; |
2382 | gcc_assert (data->id == k); | |
a8745cc2 | 2383 | data->partition = NULL; |
6dc29d3a BC |
2384 | /* The resulting partition of a merged SCC must be sequential. */ | |
2385 | first->type = PTYPE_SEQUENTIAL; | |
a8745cc2 BC |
2386 | } |
2387 | } | |
2388 | } | |
2389 | ||
2390 | sort_partitions_by_post_order (pg, partitions); | |
2391 | free_partition_graph_vdata (pg); | |
2392 | for_each_edge (pg, free_partition_graph_edata_cb, NULL); | |
2393 | free_graph (pg); | |
2394 | ||
2395 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2396 | { | |
2397 | fprintf (dump_file, "Possible alias data dependence to break:\n"); | |
2398 | dump_data_dependence_relations (dump_file, *alias_ddrs); | |
2399 | } | |
2400 | } | |
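
/* For example, if pointers p and q may alias, the two partitions of

     |for (i = 0; i < n; i++)
     |  {
     |    p[i] = 0;
     |    q[i] = 1;
     |  }

   are connected by alias edges in both directions and hence form an SCC;
   since that SCC consists only of alias edges (and both partitions are
   builtins), it is broken here and the dependences are queued in
   ALIAS_DDRS for a runtime alias check.  */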
2401 | ||
2402 | /* Compute and return an expression whose value is the segment length which | |
2403 | will be accessed by DR in NITERS iterations. */ | |
2404 | ||
2405 | static tree | |
2406 | data_ref_segment_size (struct data_reference *dr, tree niters) | |
2407 | { | |
a57776a1 RS |
2408 | niters = size_binop (MINUS_EXPR, |
2409 | fold_convert (sizetype, niters), | |
2410 | size_one_node); | |
2411 | return size_binop (MULT_EXPR, | |
2412 | fold_convert (sizetype, DR_STEP (dr)), | |
2413 | fold_convert (sizetype, niters)); | |
a8745cc2 BC |
2414 | } |
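
/* So for a data reference with a DR_STEP of 4 bytes executing in
   NITERS = 100 iterations, the segment length is (100 - 1) * 4 = 396:
   the distance in bytes from the first access to the start of the
   last one.  */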
2415 | ||
2416 | /* Return true if LOOP's latch is dominated by statement for data reference | |
2417 | DR. */ | |
2418 | ||
2419 | static inline bool | |
2420 | latch_dominated_by_data_ref (struct loop *loop, data_reference *dr) | |
2421 | { | |
2422 | return dominated_by_p (CDI_DOMINATORS, single_exit (loop)->src, | |
2423 | gimple_bb (DR_STMT (dr))); | |
2424 | } | |
2425 | ||
2426 | /* Compute alias check pairs and store them in COMP_ALIAS_PAIRS for LOOP's | |
2427 | data dependence relations ALIAS_DDRS. */ | |
2428 | ||
2429 | static void | |
2430 | compute_alias_check_pairs (struct loop *loop, vec<ddr_p> *alias_ddrs, | |
2431 | vec<dr_with_seg_len_pair_t> *comp_alias_pairs) | |
2432 | { | |
2433 | unsigned int i; | |
2434 | unsigned HOST_WIDE_INT factor = 1; | |
2435 | tree niters_plus_one, niters = number_of_latch_executions (loop); | |
2436 | ||
2437 | gcc_assert (niters != NULL_TREE && niters != chrec_dont_know); | |
2438 | niters = fold_convert (sizetype, niters); | |
2439 | niters_plus_one = size_binop (PLUS_EXPR, niters, size_one_node); | |
2440 | ||
2441 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2442 | fprintf (dump_file, "Creating alias check pairs:\n"); | |
2443 | ||
2444 | /* Iterate all data dependence relations and compute alias check pairs. */ | |
2445 | for (i = 0; i < alias_ddrs->length (); i++) | |
2446 | { | |
2447 | ddr_p ddr = (*alias_ddrs)[i]; | |
2448 | struct data_reference *dr_a = DDR_A (ddr); | |
2449 | struct data_reference *dr_b = DDR_B (ddr); | |
2450 | tree seg_length_a, seg_length_b; | |
2451 | int comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a), | |
2452 | DR_BASE_ADDRESS (dr_b)); | |
2453 | ||
2454 | if (comp_res == 0) | |
2455 | comp_res = data_ref_compare_tree (DR_OFFSET (dr_a), DR_OFFSET (dr_b)); | |
2456 | gcc_assert (comp_res != 0); | |
2457 | ||
2458 | if (latch_dominated_by_data_ref (loop, dr_a)) | |
2459 | seg_length_a = data_ref_segment_size (dr_a, niters_plus_one); | |
2460 | else | |
2461 | seg_length_a = data_ref_segment_size (dr_a, niters); | |
2462 | ||
2463 | if (latch_dominated_by_data_ref (loop, dr_b)) | |
2464 | seg_length_b = data_ref_segment_size (dr_b, niters_plus_one); | |
2465 | else | |
2466 | seg_length_b = data_ref_segment_size (dr_b, niters); | |
2467 | ||
a57776a1 RS |
2468 | unsigned HOST_WIDE_INT access_size_a |
2469 | = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_a)))); | |
2470 | unsigned HOST_WIDE_INT access_size_b | |
2471 | = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_b)))); | |
2472 | unsigned int align_a = TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_a))); | |
2473 | unsigned int align_b = TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_b))); | |
2474 | ||
a8745cc2 | 2475 | dr_with_seg_len_pair_t dr_with_seg_len_pair |
a57776a1 RS |
2476 | (dr_with_seg_len (dr_a, seg_length_a, access_size_a, align_a), |
2477 | dr_with_seg_len (dr_b, seg_length_b, access_size_b, align_b)); | |
a8745cc2 BC |
2478 | |
2479 | /* Canonicalize pairs by sorting the two DR members. */ | |
2480 | if (comp_res > 0) | |
2481 | std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second); | |
2482 | ||
2483 | comp_alias_pairs->safe_push (dr_with_seg_len_pair); | |
2484 | } | |
2485 | ||
2486 | if (tree_fits_uhwi_p (niters)) | |
2487 | factor = tree_to_uhwi (niters); | |
2488 | ||
2489 | /* Prune alias check pairs. */ | |
2490 | prune_runtime_alias_test_list (comp_alias_pairs, factor); | |
2491 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2492 | fprintf (dump_file, | |
2493 | "Improved number of alias checks from %d to %d\n", | |
2494 | alias_ddrs->length (), comp_alias_pairs->length ()); | |
2495 | } | |
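
/* Each surviving pair is later expanded by create_runtime_alias_checks
   into a non-overlap test of the two accessed segments, roughly

     |addr_a + seg_len_a <= addr_b || addr_b + seg_len_b <= addr_a

   and prune_runtime_alias_test_list drops pairs whose checks are subsumed
   by others, which is what the dump message above reports.  */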
2496 | ||
2497 | /* Given data dependence relations in ALIAS_DDRS, generate runtime alias | |
2498 | checks and version LOOP under condition of these runtime alias checks. */ | |
2499 | ||
2500 | static void | |
1623d9f3 BC |
2501 | version_loop_by_alias_check (vec<struct partition *> *partitions, |
2502 | struct loop *loop, vec<ddr_p> *alias_ddrs) | |
a8745cc2 BC |
2503 | { |
2504 | profile_probability prob; | |
2505 | basic_block cond_bb; | |
2506 | struct loop *nloop; | |
2507 | tree lhs, arg0, cond_expr = NULL_TREE; | |
2508 | gimple_seq cond_stmts = NULL; | |
2509 | gimple *call_stmt = NULL; | |
2510 | auto_vec<dr_with_seg_len_pair_t> comp_alias_pairs; | |
2511 | ||
2512 | /* Generate code for runtime alias checks if necessary. */ | |
2513 | gcc_assert (alias_ddrs->length () > 0); | |
2514 | ||
2515 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2516 | fprintf (dump_file, | |
2517 | "Version loop <%d> with runtime alias check\n", loop->num); | |
2518 | ||
2519 | compute_alias_check_pairs (loop, alias_ddrs, &comp_alias_pairs); | |
2520 | create_runtime_alias_checks (loop, &comp_alias_pairs, &cond_expr); | |
2521 | cond_expr = force_gimple_operand_1 (cond_expr, &cond_stmts, | |
8d2d0de9 | 2522 | is_gimple_val, NULL_TREE); |
a8745cc2 BC |
2523 | |
2524 | /* Depend on vectorizer to fold IFN_LOOP_DIST_ALIAS. */ | |
1623d9f3 BC |
2525 | bool cancelable_p = flag_tree_loop_vectorize; |
2526 | if (cancelable_p) | |
2527 | { | |
2528 | unsigned i = 0; | |
2529 | struct partition *partition; | |
2530 | for (; partitions->iterate (i, &partition); ++i) | |
2531 | if (!partition_builtin_p (partition)) | |
2532 | break; | |
2533 | ||
2534 | /* If all partitions are builtins, distributing them would be profitable and | |
2535 | we don't want to cancel the runtime alias checks. */ | |
2536 | if (i == partitions->length ()) | |
2537 | cancelable_p = false; | |
2538 | } | |
2539 | ||
2540 | /* Generate internal function call for loop distribution alias check if the | |
2541 | runtime alias check should be cancelable. */ | |
2542 | if (cancelable_p) | |
a8745cc2 | 2543 | { |
a8745cc2 BC |
2544 | call_stmt = gimple_build_call_internal (IFN_LOOP_DIST_ALIAS, |
2545 | 2, NULL_TREE, cond_expr); | |
2546 | lhs = make_ssa_name (boolean_type_node); | |
2547 | gimple_call_set_lhs (call_stmt, lhs); | |
2548 | } | |
2549 | else | |
2550 | lhs = cond_expr; | |
2551 | ||
2552 | prob = profile_probability::guessed_always ().apply_scale (9, 10); | |
2553 | initialize_original_copy_tables (); | |
2554 | nloop = loop_version (loop, lhs, &cond_bb, prob, prob.invert (), | |
2555 | prob, prob.invert (), true); | |
2556 | free_original_copy_tables (); | |
2557 | /* Record the original loop number in newly generated loops. In case of | |
2558 | distribution, the original loop will be distributed and the new loop | |
2559 | is kept. */ | |
2560 | loop->orig_loop_num = nloop->num; | |
2561 | nloop->orig_loop_num = nloop->num; | |
2562 | nloop->dont_vectorize = true; | |
2563 | nloop->force_vectorize = false; | |
2564 | ||
2565 | if (call_stmt) | |
2566 | { | |
2567 | /* Record new loop's num in IFN_LOOP_DIST_ALIAS because the original | |
2568 | loop could be destroyed. */ | |
2569 | arg0 = build_int_cst (integer_type_node, loop->orig_loop_num); | |
2570 | gimple_call_set_arg (call_stmt, 0, arg0); | |
2571 | gimple_seq_add_stmt_without_update (&cond_stmts, call_stmt); | |
2572 | } | |
2573 | ||
2574 | if (cond_stmts) | |
2575 | { | |
2576 | gimple_stmt_iterator cond_gsi = gsi_last_bb (cond_bb); | |
2577 | gsi_insert_seq_before (&cond_gsi, cond_stmts, GSI_SAME_STMT); | |
2578 | } | |
2579 | update_ssa (TODO_update_ssa); | |
2580 | } | |
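
/* The control flow produced above is roughly

     |_t = .LOOP_DIST_ALIAS (<orig loop num>, <alias check condition>);
     |if (_t)
     |  <copy of LOOP that will be distributed>
     |else
     |  <fallback copy, with dont_vectorize set>

   where the internal function call is only generated when the check is
   cancelable, and is folded away later by the vectorizer.  */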
2581 | ||
2582 | /* Return true if loop versioning is needed to distribute PARTITIONS. | |
2583 | ALIAS_DDRS are data dependence relations for runtime alias check. */ | |
2584 | ||
2585 | static inline bool | |
2586 | version_for_distribution_p (vec<struct partition *> *partitions, | |
2587 | vec<ddr_p> *alias_ddrs) | |
2588 | { | |
2589 | /* No need to version loop if we have only one partition. */ | |
2590 | if (partitions->length () == 1) | |
2591 | return false; | |
2592 | ||
2593 | /* Need to version loop if runtime alias check is necessary. */ | |
2594 | return (alias_ddrs->length () > 0); | |
2595 | } | |
2596 | ||
957f0d8f BC |
2597 | /* Compare base offset of builtin mem* partitions P1 and P2. */ |
2598 | ||
d2391983 AM |
2599 | static int |
2600 | offset_cmp (const void *vp1, const void *vp2) | |
957f0d8f | 2601 | { |
d2391983 AM |
2602 | struct partition *p1 = *(struct partition *const *) vp1; |
2603 | struct partition *p2 = *(struct partition *const *) vp2; | |
2604 | unsigned HOST_WIDE_INT o1 = p1->builtin->dst_base_offset; | |
2605 | unsigned HOST_WIDE_INT o2 = p2->builtin->dst_base_offset; | |
2606 | return (o2 < o1) - (o1 < o2); | |
957f0d8f BC |
2607 | } |
2608 | ||
2609 | /* Fuse adjacent memset builtin PARTITIONS if possible. This is a special | |
2610 | case optimization transforming below code: | |
2611 | ||
2612 | __builtin_memset (&obj, 0, 100); | |
2613 | _1 = &obj + 100; | |
2614 | __builtin_memset (_1, 0, 200); | |
2615 | _2 = &obj + 300; | |
2616 | __builtin_memset (_2, 0, 100); | |
2617 | ||
2618 | into: | |
2619 | ||
2620 | __builtin_memset (&obj, 0, 400); | |
2621 | ||
2622 | Note we don't have dependence information between different partitions | |
2623 | at this point; as a result, we can't handle nonadjacent memset builtin | |
2624 | partitions since dependence might be broken. */ | |
2625 | ||
2626 | static void | |
2627 | fuse_memset_builtins (vec<struct partition *> *partitions) | |
2628 | { | |
2629 | unsigned i, j; | |
2630 | struct partition *part1, *part2; | |
49e4ca31 | 2631 | tree rhs1, rhs2; |
957f0d8f BC |
2632 | |
2633 | for (i = 0; partitions->iterate (i, &part1);) | |
2634 | { | |
2635 | if (part1->kind != PKIND_MEMSET) | |
2636 | { | |
2637 | i++; | |
2638 | continue; | |
2639 | } | |
2640 | ||
2641 | /* Find sub-array of memset builtins of the same base. Index range | |
2642 | of the sub-array is [i, j) with "j > i". */ | |
2643 | for (j = i + 1; partitions->iterate (j, &part2); ++j) | |
2644 | { | |
2645 | if (part2->kind != PKIND_MEMSET | |
2646 | || !operand_equal_p (part1->builtin->dst_base_base, | |
2647 | part2->builtin->dst_base_base, 0)) | |
2648 | break; | |
49e4ca31 BC |
2649 | |
2650 | /* Memset calls setting different values can't be merged. */ | |
2651 | rhs1 = gimple_assign_rhs1 (DR_STMT (part1->builtin->dst_dr)); | |
2652 | rhs2 = gimple_assign_rhs1 (DR_STMT (part2->builtin->dst_dr)); | |
2653 | if (!operand_equal_p (rhs1, rhs2, 0)) | |
2654 | break; | |
957f0d8f BC |
2655 | } |
2656 | ||
2657 | /* Stable sort is required in order to avoid breaking dependence. */ | |
d2391983 AM |
2658 | gcc_stablesort (&(*partitions)[i], j - i, sizeof (*partitions)[i], |
2659 | offset_cmp); | |
957f0d8f BC |
2660 | /* Continue with next partition. */ |
2661 | i = j; | |
2662 | } | |
2663 | ||
2664 | /* Merge all consecutive memset builtin partitions. */ | |
2665 | for (i = 0; i < partitions->length () - 1;) | |
2666 | { | |
2667 | part1 = (*partitions)[i]; | |
2668 | if (part1->kind != PKIND_MEMSET) | |
2669 | { | |
2670 | i++; | |
2671 | continue; | |
2672 | } | |
2673 | ||
2674 | part2 = (*partitions)[i + 1]; | |
2675 | /* Only merge memset partitions of the same base and with constant | |
2676 | access sizes. */ | |
2677 | if (part2->kind != PKIND_MEMSET | |
2678 | || TREE_CODE (part1->builtin->size) != INTEGER_CST | |
2679 | || TREE_CODE (part2->builtin->size) != INTEGER_CST | |
2680 | || !operand_equal_p (part1->builtin->dst_base_base, | |
2681 | part2->builtin->dst_base_base, 0)) | |
2682 | { | |
2683 | i++; | |
2684 | continue; | |
2685 | } | |
49e4ca31 BC |
2686 | rhs1 = gimple_assign_rhs1 (DR_STMT (part1->builtin->dst_dr)); |
2687 | rhs2 = gimple_assign_rhs1 (DR_STMT (part2->builtin->dst_dr)); | |
957f0d8f BC |
2688 | int bytev1 = const_with_all_bytes_same (rhs1); |
2689 | int bytev2 = const_with_all_bytes_same (rhs2); | |
2690 | /* Only merge memset partitions of the same value. */ | |
2691 | if (bytev1 != bytev2 || bytev1 == -1) | |
2692 | { | |
2693 | i++; | |
2694 | continue; | |
2695 | } | |
2696 | wide_int end1 = wi::add (part1->builtin->dst_base_offset, | |
2697 | wi::to_wide (part1->builtin->size)); | |
2698 | /* Only merge adjacent memset partitions. */ | |
2699 | if (wi::ne_p (end1, part2->builtin->dst_base_offset)) | |
2700 | { | |
2701 | i++; | |
2702 | continue; | |
2703 | } | |
2704 | /* Merge partitions[i] and partitions[i+1]. */ | |
2705 | part1->builtin->size = fold_build2 (PLUS_EXPR, sizetype, | |
2706 | part1->builtin->size, | |
2707 | part2->builtin->size); | |
2708 | partition_free (part2); | |
2709 | partitions->ordered_remove (i + 1); | |
2710 | } | |
2711 | } | |
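
/* In the example from the comment above, the adjacency test compares
   dst_base_offset + size with the next partition's offset: 0 + 100 == 100
   and 100 + 200 == 300, so the three memset partitions are consecutive in
   memory and collapse into the single call of size 100 + 200 + 100 = 400.  */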
2712 | ||
163aa51b BC |
2713 | /* Fuse PARTITIONS of LOOP if necessary before finalizing distribution. |
2714 | ALIAS_DDRS contains ddrs which need runtime alias check. */ | |
a8745cc2 BC |
2715 | |
2716 | static void | |
163aa51b | 2717 | finalize_partitions (struct loop *loop, vec<struct partition *> *partitions, |
a8745cc2 BC |
2718 | vec<ddr_p> *alias_ddrs) |
2719 | { | |
2720 | unsigned i; | |
163aa51b | 2721 | struct partition *partition, *a; |
a8745cc2 BC |
2722 | |
2723 | if (partitions->length () == 1 | |
2724 | || alias_ddrs->length () > 0) | |
2725 | return; | |
2726 | ||
5955438a | 2727 | unsigned num_builtin = 0, num_normal = 0, num_partial_memset = 0; |
163aa51b BC |
2728 | bool same_type_p = true; |
2729 | enum partition_type type = ((*partitions)[0])->type; | |
2730 | for (i = 0; partitions->iterate (i, &partition); ++i) | |
a8745cc2 | 2731 | { |
163aa51b | 2732 | same_type_p &= (type == partition->type); |
5955438a BC |
2733 | if (partition_builtin_p (partition)) |
2734 | { | |
2735 | num_builtin++; | |
2736 | continue; | |
2737 | } | |
2738 | num_normal++; | |
2739 | if (partition->kind == PKIND_PARTIAL_MEMSET) | |
2740 | num_partial_memset++; | |
a8745cc2 BC |
2741 | } |
2742 | ||
163aa51b BC |
2743 | /* Don't distribute the current loop into too many loops given we don't | |
2744 | have a memory stream cost model. Be even more conservative in case of | |
2745 | loop nest distribution. */ | |
5955438a BC |
2746 | if ((same_type_p && num_builtin == 0 |
2747 | && (loop->inner == NULL || num_normal != 2 || num_partial_memset != 1)) | |
163aa51b BC |
2748 | || (loop->inner != NULL |
2749 | && i >= NUM_PARTITION_THRESHOLD && num_normal > 1) | |
2750 | || (loop->inner == NULL | |
2751 | && i >= NUM_PARTITION_THRESHOLD && num_normal > num_builtin)) | |
a8745cc2 | 2752 | { |
163aa51b BC |
2753 | a = (*partitions)[0]; |
2754 | for (i = 1; partitions->iterate (i, &partition); ++i) | |
2755 | { | |
2756 | partition_merge_into (NULL, a, partition, FUSE_FINALIZE); | |
2757 | partition_free (partition); | |
2758 | } | |
2759 | partitions->truncate (1); | |
a8745cc2 | 2760 | } |
957f0d8f BC |
2761 | |
2762 | /* Fuse memset builtins if possible. */ | |
2763 | if (partitions->length () > 1) | |
2764 | fuse_memset_builtins (partitions); | |
a8745cc2 BC |
2765 | } |
2766 | ||
2767 | /* Distributes the code from LOOP in such a way that producer statements | |
2768 | are placed before consumer statements. Tries to separate only the | |
2769 | statements from STMTS into separate loops. Returns the number of | |
2770 | distributed loops. Set NB_CALLS to number of generated builtin calls. | |
2771 | Set *DESTROY_P to whether LOOP needs to be destroyed. */ | |

static int
distribute_loop (struct loop *loop, vec<gimple *> stmts,
		 control_dependences *cd, int *nb_calls, bool *destroy_p,
		 bool only_patterns_p)
{
  ddrs_table = new hash_table<ddr_hasher> (389);
  struct graph *rdg;
  partition *partition;
  bool any_builtin;
  int i, nbp;

  *destroy_p = false;
  *nb_calls = 0;
  loop_nest.create (0);
  if (!find_loop_nest (loop, &loop_nest))
    {
      loop_nest.release ();
      delete ddrs_table;
      return 0;
    }

  datarefs_vec.create (20);
  has_nonaddressable_dataref_p = false;
  rdg = build_rdg (loop, cd);
  if (!rdg)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Loop %d not distributed: failed to build the RDG.\n",
		 loop->num);

      loop_nest.release ();
      free_data_refs (datarefs_vec);
      delete ddrs_table;
      return 0;
    }

  if (datarefs_vec.length () > MAX_DATAREFS_NUM)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Loop %d not distributed: too many memory references.\n",
		 loop->num);

      free_rdg (rdg);
      loop_nest.release ();
      free_data_refs (datarefs_vec);
      delete ddrs_table;
      return 0;
    }

  data_reference_p dref;
  for (i = 0; datarefs_vec.iterate (i, &dref); ++i)
    dref->aux = (void *) (uintptr_t) i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg (dump_file, rdg);

  auto_vec<struct partition *, 3> partitions;
  rdg_build_partitions (rdg, stmts, &partitions);

  auto_vec<ddr_p> alias_ddrs;

  auto_bitmap stmt_in_all_partitions;
  bitmap_copy (stmt_in_all_partitions, partitions[0]->stmts);
  for (i = 1; partitions.iterate (i, &partition); ++i)
    bitmap_and_into (stmt_in_all_partitions, partitions[i]->stmts);

  any_builtin = false;
  FOR_EACH_VEC_ELT (partitions, i, partition)
    {
      classify_partition (loop, rdg, partition, stmt_in_all_partitions);
      any_builtin |= partition_builtin_p (partition);
    }

  /* If we are only distributing patterns but did not detect any,
     simply bail out.  */
  if (only_patterns_p
      && !any_builtin)
    {
      nbp = 0;
      goto ldist_done;
    }

  /* If we are only distributing patterns, fuse all partitions that
     were not classified as builtins.  This also avoids chopping
     a loop into pieces, separated by builtin calls.  That is, we
     only want no or a single loop body remaining.  */
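  /* As an illustrative sketch: if a loop yields three partitions of
     which only one is classified as a builtin, the two non-builtin
     partitions are fused below, so code generation emits one builtin
     call plus at most a single residual loop rather than three loops.  */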
  struct partition *into;
  if (only_patterns_p)
    {
      for (i = 0; partitions.iterate (i, &into); ++i)
	if (!partition_builtin_p (into))
	  break;
      for (++i; partitions.iterate (i, &partition); ++i)
	if (!partition_builtin_p (partition))
	  {
	    partition_merge_into (NULL, into, partition, FUSE_NON_BUILTIN);
	    partitions.unordered_remove (i);
	    partition_free (partition);
	    i--;
	  }
    }

  /* Due to limitations in the transform phase we have to fuse all
     reduction partitions into the last partition so the existing
     loop will contain all loop-closed PHI nodes.  */
  for (i = 0; partitions.iterate (i, &into); ++i)
    if (partition_reduction_p (into))
      break;
  for (i = i + 1; partitions.iterate (i, &partition); ++i)
    if (partition_reduction_p (partition))
      {
	partition_merge_into (rdg, into, partition, FUSE_REDUCTION);
	partitions.unordered_remove (i);
	partition_free (partition);
	i--;
      }

  /* Apply our simple cost model - fuse partitions with similar
     memory accesses.  */
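  /* For example (a sketch with a made-up array A): two partitions that
     both reference A[i] stream over the same memory, so keeping them in
     separate loops would traverse A twice; when share_memory_accesses
     detects this, the partitions are merged below.  */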
  for (i = 0; partitions.iterate (i, &into); ++i)
    {
      bool changed = false;
      if (partition_builtin_p (into) || into->kind == PKIND_PARTIAL_MEMSET)
	continue;
      for (int j = i + 1;
	   partitions.iterate (j, &partition); ++j)
	{
	  if (share_memory_accesses (rdg, into, partition))
	    {
	      partition_merge_into (rdg, into, partition, FUSE_SHARE_REF);
	      partitions.unordered_remove (j);
	      partition_free (partition);
	      j--;
	      changed = true;
	    }
	}
      /* If we fused partitions 0 1 2 in step 1 into 0,2 1 because 0
	 and 2 have similar accesses, and 1 and 2 have similar accesses
	 but 0 and 1 do not, then in the next iteration we would fail
	 to consider merging 1 into 0,2.  So try again if we did any
	 merging into 0.  */
      if (changed)
	i--;
    }

  /* Build the partition dependency graph and fuse partitions in
     strongly connected components.  */
  if (partitions.length () > 1)
    {
      /* Don't support loop nest distribution under runtime alias check
	 since it's not likely to enable many vectorization opportunities.
	 Also be conservative if the loop has any data reference which may
	 not be addressable, since the alias check needs to take and
	 compare the address of the object.  */
      if (loop->inner || has_nonaddressable_dataref_p)
	merge_dep_scc_partitions (rdg, &partitions, false);
      else
	{
	  merge_dep_scc_partitions (rdg, &partitions, true);
	  if (partitions.length () > 1)
	    break_alias_scc_partitions (rdg, &partitions, &alias_ddrs);
	}
    }

  finalize_partitions (loop, &partitions, &alias_ddrs);

  nbp = partitions.length ();
  if (nbp == 0
      || (nbp == 1 && !partition_builtin_p (partitions[0]))
      || (nbp > 1 && partition_contains_all_rw (rdg, partitions)))
    {
      nbp = 0;
      goto ldist_done;
    }

  if (version_for_distribution_p (&partitions, &alias_ddrs))
    version_loop_by_alias_check (&partitions, loop, &alias_ddrs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
	       "distribute loop <%d> into partitions:\n", loop->num);
      dump_rdg_partitions (dump_file, partitions);
    }

  FOR_EACH_VEC_ELT (partitions, i, partition)
    {
      if (partition_builtin_p (partition))
	(*nb_calls)++;
      *destroy_p |= generate_code_for_partition (loop, partition, i < nbp - 1);
    }

 ldist_done:
  loop_nest.release ();
  free_data_refs (datarefs_vec);
  for (hash_table<ddr_hasher>::iterator iter = ddrs_table->begin ();
       iter != ddrs_table->end (); ++iter)
    {
      free_dependence_relation (*iter);
      *iter = NULL;
    }
  delete ddrs_table;

  FOR_EACH_VEC_ELT (partitions, i, partition)
    partition_free (partition);

  free_rdg (rdg);
  return nbp - *nb_calls;
}

/* Distribute all loops in the current function.  */

namespace {

const pass_data pass_data_loop_distribution =
{
  GIMPLE_PASS, /* type */
  "ldist", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_LOOP_DISTRIBUTION, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_loop_distribution : public gimple_opt_pass
{
public:
  pass_loop_distribution (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_loop_distribution, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_tree_loop_distribution
	|| flag_tree_loop_distribute_patterns;
    }

  virtual unsigned int execute (function *);

}; // class pass_loop_distribution


/* Given LOOP, this function records seed statements for distribution in
   WORK_LIST.  Return false if there is nothing for distribution.  */
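
/* For example (a sketch with made-up variables): in

     |for (i = 0; i < n; i++)
     |  {
     |    a[i] = b[i] + c;	<- stores to memory
     |    s = s + b[i];		<- s is used after the loop
     |  }

   both statements are pushed onto WORK_LIST: the first because it has a
   virtual definition (a store), the second because its definition is
   used outside of the loop.  */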

static bool
find_seed_stmts_for_distribution (struct loop *loop, vec<gimple *> *work_list)
{
  basic_block *bbs = get_loop_body_in_dom_order (loop);

  /* Initialize the worklist with stmts we seed the partitions with.  */
  for (unsigned i = 0; i < loop->num_nodes; ++i)
    {
      for (gphi_iterator gsi = gsi_start_phis (bbs[i]);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;
	  /* Distribute stmts which have defs that are used outside of
	     the loop.  */
	  if (!stmt_has_scalar_dependences_outside_loop (loop, phi))
	    continue;
	  work_list->safe_push (phi);
	}
      for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  /* Ignore clobbers, they do not have true side effects.  */
	  if (gimple_clobber_p (stmt))
	    continue;

	  /* If there is a stmt with side-effects bail out - we
	     cannot and should not distribute this loop.  */
	  if (gimple_has_side_effects (stmt))
	    {
	      free (bbs);
	      return false;
	    }

	  /* Distribute stmts which have defs that are used outside of
	     the loop.  */
	  if (stmt_has_scalar_dependences_outside_loop (loop, stmt))
	    ;
	  /* Otherwise only distribute stores for now.  */
	  else if (!gimple_vdef (stmt))
	    continue;

	  work_list->safe_push (stmt);
	}
    }
  free (bbs);
  return work_list->length () > 0;
}

/* Given innermost LOOP, return the outermost enclosing loop that forms a
   perfect loop nest.  */
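
/* For example (a sketch): given the innermost loop of

     |for (i = 0; i < n; i++)
     |  for (j = 0; j < m; j++)
     |    a[i][j] = 0;

   the outer loop is returned, since the inner loop is its only child,
   the outer loop has a single exit, and its iteration count is
   computable.  */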

static struct loop *
prepare_perfect_loop_nest (struct loop *loop)
{
  struct loop *outer = loop_outer (loop);
  tree niters = number_of_latch_executions (loop);

  /* TODO: We only support the innermost 3-level loop nest distribution
     because of compile-time issues for now.  This should be relaxed
     in the future.  Note we only allow 3-level loop nest distribution
     when parallelizing loops.  */
  while ((loop->inner == NULL
	  || (loop->inner->inner == NULL && flag_tree_parallelize_loops > 1))
	 && loop_outer (outer)
	 && outer->inner == loop && loop->next == NULL
	 && single_exit (outer)
	 && !chrec_contains_symbols_defined_in_loop (niters, outer->num)
	 && (niters = number_of_latch_executions (outer)) != NULL_TREE
	 && niters != chrec_dont_know)
    {
      loop = outer;
      outer = loop_outer (loop);
    }

  return loop;
}

unsigned int
pass_loop_distribution::execute (function *fun)
{
  struct loop *loop;
  bool changed = false;
  basic_block bb;
  control_dependences *cd = NULL;
  auto_vec<loop_p> loops_to_be_destroyed;

  if (number_of_loops (fun) <= 1)
    return 0;

  /* Compute topological order for basic blocks.  Topological order is
     needed because data dependence is computed for data references in
     lexicographical order.  */
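  /* For instance (a sketch): if basic block BB2 precedes BB3 in the
     computed order, a data reference in BB2 receives a smaller
     bb_top_order_index than one in BB3, so dependence analysis can
     consistently treat the BB2 reference as the lexicographically
     earlier one.  */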
  if (bb_top_order_index == NULL)
    {
      int rpo_num;
      int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));

      bb_top_order_index = XNEWVEC (int, last_basic_block_for_fn (cfun));
      bb_top_order_index_size = last_basic_block_for_fn (cfun);
      rpo_num = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, true);
      for (int i = 0; i < rpo_num; i++)
	bb_top_order_index[rpo[i]] = i;

      free (rpo);
    }

  FOR_ALL_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	gimple_set_uid (gsi_stmt (gsi), -1);
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	gimple_set_uid (gsi_stmt (gsi), -1);
    }

  /* We can at the moment only distribute non-nested loops, thus restrict
     walking to innermost loops.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      /* Don't distribute loops with multiple exit edges, or cold loops
	 when not doing pattern detection.  */
      if (!single_exit (loop)
	  || (!flag_tree_loop_distribute_patterns
	      && !optimize_loop_for_speed_p (loop)))
	continue;

      /* Don't distribute loop if niters is unknown.  */
      tree niters = number_of_latch_executions (loop);
      if (niters == NULL_TREE || niters == chrec_dont_know)
	continue;

      /* Get the perfect loop nest for distribution.  */
      loop = prepare_perfect_loop_nest (loop);
      for (; loop; loop = loop->inner)
	{
	  auto_vec<gimple *> work_list;
	  if (!find_seed_stmts_for_distribution (loop, &work_list))
	    break;

	  const char *str = loop->inner ? " nest" : "";
	  dump_user_location_t loc = find_loop_location (loop);
	  if (!cd)
	    {
	      calculate_dominance_info (CDI_DOMINATORS);
	      calculate_dominance_info (CDI_POST_DOMINATORS);
	      cd = new control_dependences ();
	      free_dominance_info (CDI_POST_DOMINATORS);
	    }

	  bool destroy_p;
	  int nb_generated_loops, nb_generated_calls;
	  nb_generated_loops
	    = distribute_loop (loop, work_list, cd, &nb_generated_calls,
			       &destroy_p, (!optimize_loop_for_speed_p (loop)
					    || !flag_tree_loop_distribution));
	  if (destroy_p)
	    loops_to_be_destroyed.safe_push (loop);

	  if (nb_generated_loops + nb_generated_calls > 0)
	    {
	      changed = true;
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
				 loc, "Loop%s %d distributed: split to %d loops "
				 "and %d library calls.\n", str, loop->num,
				 nb_generated_loops, nb_generated_calls);

	      break;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Loop%s %d not distributed.\n", str, loop->num);
	}
    }

  if (cd)
    delete cd;

  if (bb_top_order_index != NULL)
    {
      free (bb_top_order_index);
      bb_top_order_index = NULL;
      bb_top_order_index_size = 0;
    }

  if (changed)
    {
      /* Destroy loop bodies that could not be reused.  Do this late as we
	 otherwise can end up referring to stale data in control
	 dependences.  */
      unsigned i;
      FOR_EACH_VEC_ELT (loops_to_be_destroyed, i, loop)
	destroy_loop (loop);

      /* Cached scalar evolutions now may refer to wrong or non-existing
	 loops.  */
      scev_reset_htab ();
      mark_virtual_operands_for_renaming (fun);
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }

  checking_verify_loop_structure ();

  return changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_loop_distribution (gcc::context *ctxt)
{
  return new pass_loop_distribution (ctxt);
}