/* Loop distribution.
   Copyright (C) 2006-2019 Free Software Foundation, Inc.
   Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
   and Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass performs loop distribution: for example, the loop

   |DO I = 2, N
   | A(I) = B(I) + C
   | D(I) = A(I-1)*E
   |ENDDO

   is transformed to

   |DOALL I = 2, N
   | A(I) = B(I) + C
   |ENDDO
   |
   |DOALL I = 2, N
   | D(I) = A(I-1)*E
   |ENDDO

   Loop distribution is the dual of loop fusion.  It separates statements
   of a loop (or loop nest) into multiple loops (or loop nests) with the
   same loop header.  The major goal is to separate statements which may
   be vectorized from those that can't.  This pass implements distribution
   in the following steps:

   1) Seed partitions with specific types of statements.  For now we
      support two types of seed statements: statements defining variables
      used outside of the loop; statements storing to memory.
   2) Build a reduced dependence graph (RDG) for the loop to be
      distributed.  The vertices (RDG:V) model all statements in the loop
      and the edges (RDG:E) model flow and control dependencies between
      statements.
   3) Apart from the RDG, compute data dependencies between memory
      references.
   4) Starting from a seed statement, build up a partition by adding
      dependent statements according to the RDG's dependence information.
      A partition is classified as parallel type if it can be executed in
      parallel, or as sequential type if it can't.  A parallel type
      partition is further classified as a specific builtin kind if it
      can be implemented as builtin function calls.
   5) Build a partition dependence graph (PG) based on data dependencies.
      The vertices (PG:V) model all partitions and the edges (PG:E) model
      all data dependencies between every pair of partitions.  In
      general, a data dependence is either known or unknown at
      compilation time.  In C family languages, quite a few dependencies
      are unknown at compilation time because of possible aliasing
      between data references.  We categorize PG's edges into two types:
      "true" edges, which represent compilation-time-known data
      dependencies, and "alias" edges for all other data dependencies.
   6) Traverse the subgraph of PG as if all "alias" edges didn't exist.
      Merge partitions in each strongly connected component (SCC)
      accordingly.  Build a new PG for the merged partitions.
   7) Traverse PG again, this time with both "true" and "alias" edges
      included.  We try to break SCCs by removing some edges.  Because
      SCCs formed by "true" edges are all fused in step 6), we can break
      SCCs by removing some "alias" edges.  It's NP-hard to choose an
      optimal edge set; fortunately a simple approximation is good enough
      for us given the small problem scale.
   8) Collect all data dependencies of the removed "alias" edges.  Create
      runtime alias checks for the collected data dependencies.
   9) Version the loop under the condition of the runtime alias checks.
      Since loop distribution generally introduces additional overhead,
      it is only useful if vectorization is achieved in a distributed
      loop.  We version the loop with the internal function call
      IFN_LOOP_DIST_ALIAS.  If no distributed loop can be vectorized, we
      simply remove the distributed loops and recover the original one.
      (See the illustrative sketch after this comment.)

   TODO:
   1) We only distribute innermost two-level loop nests now.  We should
      extend it for arbitrary loop nests in the future.
   2) We only fuse partitions in SCCs now.  A better fusion algorithm is
      desired to minimize loop overhead, maximize parallelism and
      maximize data reuse.  */
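
/* An illustrative sketch of step 9) above; the exact GIMPLE produced
   differs, this only shows the conceptual shape, with hypothetical
   pointers p, q and r that may alias:

   | for (i = 0; i < n; i++)
   |   {
   |     p[i] = 0;
   |     q[i] = q[i] + r[i];
   |   }

   is versioned roughly as

   | if (IFN_LOOP_DIST_ALIAS (<runtime alias checks>, ...))
   |   {
   |     __builtin_memset (p, 0, n * sizeof (*p));   <- builtin partition
   |     for (i = 0; i < n; i++)                     <- vectorizable loop
   |       q[i] = q[i] + r[i];
   |   }
   | else
   |   for (i = 0; i < n; i++)                       <- original loop
   |     {
   |       p[i] = 0;
   |       q[i] = q[i] + r[i];
   |     }

   and one of the two versions is removed later depending on whether the
   distributed loop actually gets vectorized.  */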

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "tree-vectorizer.h"
#include "tree-eh.h"


#define MAX_DATAREFS_NUM \
  ((unsigned) PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))

/* Threshold controlling the number of distributed partitions.  Since it
   may become unnecessary once a memory stream cost model is invented in
   the future, we define it as a temporary macro rather than a
   parameter.  */
#define NUM_PARTITION_THRESHOLD (4)

/* Hashtable helpers.  */

struct ddr_hasher : nofree_ptr_hash <struct data_dependence_relation>
{
  static inline hashval_t hash (const data_dependence_relation *);
  static inline bool equal (const data_dependence_relation *,
                            const data_dependence_relation *);
};

/* Hash function for data dependence.  */

inline hashval_t
ddr_hasher::hash (const data_dependence_relation *ddr)
{
  inchash::hash h;
  h.add_ptr (DDR_A (ddr));
  h.add_ptr (DDR_B (ddr));
  return h.end ();
}

/* Hash table equality function for data dependence.  */

inline bool
ddr_hasher::equal (const data_dependence_relation *ddr1,
                   const data_dependence_relation *ddr2)
{
  return (DDR_A (ddr1) == DDR_A (ddr2) && DDR_B (ddr1) == DDR_B (ddr2));
}

/* The loop (nest) to be distributed.  */
static vec<loop_p> loop_nest;

/* Vector of data references in the loop to be distributed.  */
static vec<data_reference_p> datarefs_vec;

/* True if there is a nonaddressable data reference in the above vector.  */
static bool has_nonaddressable_dataref_p;

/* Store the index of a data reference in its aux field.  */
#define DR_INDEX(dr)      ((uintptr_t) (dr)->aux)

/* Hash table for data dependence relations in the loop to be distributed.  */
static hash_table<ddr_hasher> *ddrs_table;

/* A Reduced Dependence Graph (RDG) vertex representing a statement.  */
struct rdg_vertex
{
  /* The statement represented by this vertex.  */
  gimple *stmt;

  /* Vector of data-references in this statement.  */
  vec<data_reference_p> datarefs;

  /* True when the statement contains a write to memory.  */
  bool has_mem_write;

  /* True when the statement contains a read from memory.  */
  bool has_mem_reads;
};

#define RDGV_STMT(V)     ((struct rdg_vertex *) ((V)->data))->stmt
#define RDGV_DATAREFS(V) ((struct rdg_vertex *) ((V)->data))->datarefs
#define RDGV_HAS_MEM_WRITE(V) ((struct rdg_vertex *) ((V)->data))->has_mem_write
#define RDGV_HAS_MEM_READS(V) ((struct rdg_vertex *) ((V)->data))->has_mem_reads
#define RDG_STMT(RDG, I) RDGV_STMT (&(RDG->vertices[I]))
#define RDG_DATAREFS(RDG, I) RDGV_DATAREFS (&(RDG->vertices[I]))
#define RDG_MEM_WRITE_STMT(RDG, I) RDGV_HAS_MEM_WRITE (&(RDG->vertices[I]))
#define RDG_MEM_READS_STMT(RDG, I) RDGV_HAS_MEM_READS (&(RDG->vertices[I]))

/* Data dependence type.  */

enum rdg_dep_type
{
  /* Read After Write (RAW).  */
  flow_dd = 'f',

  /* Control dependence (the dependent statement executes conditionally
     on the source statement).  */
  control_dd = 'c'
};

/* Dependence information attached to an edge of the RDG.  */

struct rdg_edge
{
  /* Type of the dependence.  */
  enum rdg_dep_type type;
};

#define RDGE_TYPE(E)        ((struct rdg_edge *) ((E)->data))->type

/* Dump vertex I in RDG to FILE.  */

static void
dump_rdg_vertex (FILE *file, struct graph *rdg, int i)
{
  struct vertex *v = &(rdg->vertices[i]);
  struct graph_edge *e;

  fprintf (file, "(vertex %d: (%s%s) (in:", i,
           RDG_MEM_WRITE_STMT (rdg, i) ? "w" : "",
           RDG_MEM_READS_STMT (rdg, i) ? "r" : "");

  if (v->pred)
    for (e = v->pred; e; e = e->pred_next)
      fprintf (file, " %d", e->src);

  fprintf (file, ") (out:");

  if (v->succ)
    for (e = v->succ; e; e = e->succ_next)
      fprintf (file, " %d", e->dest);

  fprintf (file, ")\n");
  print_gimple_stmt (file, RDGV_STMT (v), 0, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, ")\n");
}

/* Call dump_rdg_vertex on stderr.  */

DEBUG_FUNCTION void
debug_rdg_vertex (struct graph *rdg, int i)
{
  dump_rdg_vertex (stderr, rdg, i);
}

/* Dump the reduced dependence graph RDG to FILE.  */

static void
dump_rdg (FILE *file, struct graph *rdg)
{
  fprintf (file, "(rdg\n");
  for (int i = 0; i < rdg->n_vertices; i++)
    dump_rdg_vertex (file, rdg, i);
  fprintf (file, ")\n");
}

/* Call dump_rdg on stderr.  */

DEBUG_FUNCTION void
debug_rdg (struct graph *rdg)
{
  dump_rdg (stderr, rdg);
}

static void
dot_rdg_1 (FILE *file, struct graph *rdg)
{
  int i;
  pretty_printer buffer;
  pp_needs_newline (&buffer) = false;
  buffer.buffer->stream = file;

  fprintf (file, "digraph RDG {\n");

  for (i = 0; i < rdg->n_vertices; i++)
    {
      struct vertex *v = &(rdg->vertices[i]);
      struct graph_edge *e;

      fprintf (file, "%d [label=\"[%d] ", i, i);
      pp_gimple_stmt_1 (&buffer, RDGV_STMT (v), 0, TDF_SLIM);
      pp_flush (&buffer);
      fprintf (file, "\"]\n");

      /* Highlight reads from memory.  */
      if (RDG_MEM_READS_STMT (rdg, i))
        fprintf (file, "%d [style=filled, fillcolor=green]\n", i);

      /* Highlight stores to memory.  */
      if (RDG_MEM_WRITE_STMT (rdg, i))
        fprintf (file, "%d [style=filled, fillcolor=red]\n", i);

      if (v->succ)
        for (e = v->succ; e; e = e->succ_next)
          switch (RDGE_TYPE (e))
            {
            case flow_dd:
              /* These are the most common dependences: don't print these.  */
              fprintf (file, "%d -> %d \n", i, e->dest);
              break;

            case control_dd:
              fprintf (file, "%d -> %d [label=control] \n", i, e->dest);
              break;

            default:
              gcc_unreachable ();
            }
    }

  fprintf (file, "}\n\n");
}

/* Display the Reduced Dependence Graph using dotty.  */

DEBUG_FUNCTION void
dot_rdg (struct graph *rdg)
{
  /* When debugging, you may want to enable the following code.  */
#ifdef HAVE_POPEN
  FILE *file = popen ("dot -Tx11", "w");
  if (!file)
    return;
  dot_rdg_1 (file, rdg);
  fflush (file);
  close (fileno (file));
  pclose (file);
#else
  dot_rdg_1 (stderr, rdg);
#endif
}

/* Returns the index of STMT in RDG.  */

static int
rdg_vertex_for_stmt (struct graph *rdg ATTRIBUTE_UNUSED, gimple *stmt)
{
  int index = gimple_uid (stmt);
  gcc_checking_assert (index == -1 || RDG_STMT (rdg, index) == stmt);
  return index;
}

/* Creates dependence edges in RDG for all the uses of DEF.  IDEF is
   the index of DEF in RDG.  */

static void
create_rdg_edges_for_scalar (struct graph *rdg, tree def, int idef)
{
  use_operand_p imm_use_p;
  imm_use_iterator iterator;

  FOR_EACH_IMM_USE_FAST (imm_use_p, iterator, def)
    {
      struct graph_edge *e;
      int use = rdg_vertex_for_stmt (rdg, USE_STMT (imm_use_p));

      if (use < 0)
        continue;

      e = add_edge (rdg, idef, use);
      e->data = XNEW (struct rdg_edge);
      RDGE_TYPE (e) = flow_dd;
    }
}

/* Creates an edge for the control dependences of BB to the vertex V.  */

static void
create_edge_for_control_dependence (struct graph *rdg, basic_block bb,
                                    int v, control_dependences *cd)
{
  bitmap_iterator bi;
  unsigned edge_n;
  EXECUTE_IF_SET_IN_BITMAP (cd->get_edges_dependent_on (bb->index),
                            0, edge_n, bi)
    {
      basic_block cond_bb = cd->get_edge_src (edge_n);
      gimple *stmt = last_stmt (cond_bb);
      if (stmt && is_ctrl_stmt (stmt))
        {
          struct graph_edge *e;
          int c = rdg_vertex_for_stmt (rdg, stmt);
          if (c < 0)
            continue;

          e = add_edge (rdg, c, v);
          e->data = XNEW (struct rdg_edge);
          RDGE_TYPE (e) = control_dd;
        }
    }
}

/* Creates the flow dependence edges of the reduced dependence graph RDG.  */

static void
create_rdg_flow_edges (struct graph *rdg)
{
  int i;
  def_operand_p def_p;
  ssa_op_iter iter;

  for (i = 0; i < rdg->n_vertices; i++)
    FOR_EACH_PHI_OR_STMT_DEF (def_p, RDG_STMT (rdg, i),
                              iter, SSA_OP_DEF)
      create_rdg_edges_for_scalar (rdg, DEF_FROM_PTR (def_p), i);
}

/* Creates the control dependence edges of the reduced dependence graph
   RDG.  */

static void
create_rdg_cd_edges (struct graph *rdg, control_dependences *cd, loop_p loop)
{
  int i;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      gimple *stmt = RDG_STMT (rdg, i);
      if (gimple_code (stmt) == GIMPLE_PHI)
        {
          edge_iterator ei;
          edge e;
          FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
            if (flow_bb_inside_loop_p (loop, e->src))
              create_edge_for_control_dependence (rdg, e->src, i, cd);
        }
      else
        create_edge_for_control_dependence (rdg, gimple_bb (stmt), i, cd);
    }
}

/* Build the vertices of the reduced dependence graph RDG.  Return false
   if that failed.  */

static bool
create_rdg_vertices (struct graph *rdg, vec<gimple *> stmts, loop_p loop)
{
  int i;
  gimple *stmt;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      struct vertex *v = &(rdg->vertices[i]);

      /* Record statement to vertex mapping.  */
      gimple_set_uid (stmt, i);

      v->data = XNEW (struct rdg_vertex);
      RDGV_STMT (v) = stmt;
      RDGV_DATAREFS (v).create (0);
      RDGV_HAS_MEM_WRITE (v) = false;
      RDGV_HAS_MEM_READS (v) = false;
      if (gimple_code (stmt) == GIMPLE_PHI)
        continue;

      unsigned drp = datarefs_vec.length ();
      if (!find_data_references_in_stmt (loop, stmt, &datarefs_vec))
        return false;
      for (unsigned j = drp; j < datarefs_vec.length (); ++j)
        {
          data_reference_p dr = datarefs_vec[j];
          if (DR_IS_READ (dr))
            RDGV_HAS_MEM_READS (v) = true;
          else
            RDGV_HAS_MEM_WRITE (v) = true;
          RDGV_DATAREFS (v).safe_push (dr);
          has_nonaddressable_dataref_p |= may_be_nonaddressable_p (dr->ref);
        }
    }
  return true;
}

/* Array mapping basic block's index to its topological order.  */
static int *bb_top_order_index;
/* And size of the array.  */
static int bb_top_order_index_size;

/* If X has a smaller topological sort number than Y, returns -1;
   if greater, returns 1.  */

static int
bb_top_order_cmp (const void *x, const void *y)
{
  basic_block bb1 = *(const basic_block *) x;
  basic_block bb2 = *(const basic_block *) y;

  gcc_assert (bb1->index < bb_top_order_index_size
              && bb2->index < bb_top_order_index_size);
  gcc_assert (bb1 == bb2
              || bb_top_order_index[bb1->index]
                 != bb_top_order_index[bb2->index]);

  return (bb_top_order_index[bb1->index] - bb_top_order_index[bb2->index]);
}

/* Initialize STMTS with all the statements of LOOP.  We use topological
   order to discover all statements.  The order is important because
   generate_loops_for_partition is using the same traversal for
   identifying statements in loop copies.  */

static void
stmts_from_loop (struct loop *loop, vec<gimple *> *stmts)
{
  unsigned int i;
  basic_block *bbs = get_loop_body_in_custom_order (loop, bb_top_order_cmp);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
           gsi_next (&bsi))
        if (!virtual_operand_p (gimple_phi_result (bsi.phi ())))
          stmts->safe_push (bsi.phi ());

      for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
           gsi_next (&bsi))
        {
          gimple *stmt = gsi_stmt (bsi);
          if (gimple_code (stmt) != GIMPLE_LABEL && !is_gimple_debug (stmt))
            stmts->safe_push (stmt);
        }
    }

  free (bbs);
}

/* Free the reduced dependence graph RDG.  */

static void
free_rdg (struct graph *rdg)
{
  int i;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      struct vertex *v = &(rdg->vertices[i]);
      struct graph_edge *e;

      for (e = v->succ; e; e = e->succ_next)
        free (e->data);

      if (v->data)
        {
          gimple_set_uid (RDGV_STMT (v), -1);
          (RDGV_DATAREFS (v)).release ();
          free (v->data);
        }
    }

  free_graph (rdg);
}

/* Build the Reduced Dependence Graph (RDG) with one vertex per statement
   of LOOP, and one edge per flow dependence or control dependence from
   control dependence CD.  While visiting each statement, data references
   are also collected and recorded in the global data DATAREFS_VEC.  */

static struct graph *
build_rdg (struct loop *loop, control_dependences *cd)
{
  struct graph *rdg;

  /* Create the RDG vertices from the stmts of the loop nest.  */
  auto_vec<gimple *, 10> stmts;
  stmts_from_loop (loop, &stmts);
  rdg = new_graph (stmts.length ());
  if (!create_rdg_vertices (rdg, stmts, loop))
    {
      free_rdg (rdg);
      return NULL;
    }
  stmts.release ();

  create_rdg_flow_edges (rdg);
  if (cd)
    create_rdg_cd_edges (rdg, cd, loop);

  return rdg;
}


/* Kind of distributed loop.  */
enum partition_kind {
  PKIND_NORMAL,
  /* Partial memset stands for a partition that can be distributed into
     a loop of memset calls, rather than a single memset call.  It's
     handled just like a normal partition, i.e., distributed as a
     separate loop; no memset call is generated.

     Note: This is a temporary hack trying to distribute a ZERO-ing stmt
     in a loop nest as deep as possible.  As a result, parloop achieves
     better parallelization by parallelizing a deeper loop nest.  This
     hack should be unnecessary and removed once distributed memset can
     be understood and analyzed in data reference analysis.  See PR82604
     for more.  (See the sketch after this enum for an example.)  */
  PKIND_PARTIAL_MEMSET,
  PKIND_MEMSET, PKIND_MEMCPY, PKIND_MEMMOVE
};

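/* For example (an illustrative sketch with hypothetical names): in

   | for (i = 0; i < n; i++)
   |   for (j = 0; j < m; j++)
   |     a[i][j] = 0;

   where the rows of a are not provably contiguous, the zeroing could
   become one memset per row but not a single memset for the whole nest.
   Such a partition is classified as PKIND_PARTIAL_MEMSET and merely
   split off as a separate zeroing loop nest, which parloop can then
   parallelize at the outer level.  */
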
/* Type of distributed loop.  */
enum partition_type {
  /* The distributed loop can be executed in parallel.  */
  PTYPE_PARALLEL = 0,
  /* The distributed loop has to be executed sequentially.  */
  PTYPE_SEQUENTIAL
};

/* Builtin info for loop distribution.  */
struct builtin_info
{
  /* Data references that a kind != PKIND_NORMAL partition is about.  */
  data_reference_p dst_dr;
  data_reference_p src_dr;
  /* Base address and size of memory objects operated on by the builtin.
     Note both dest and source memory objects must have the same size.  */
  tree dst_base;
  tree src_base;
  tree size;
  /* Base and offset part of dst_base after stripping constant offset.
     This is only used in memset builtin distribution for now.  */
  tree dst_base_base;
  unsigned HOST_WIDE_INT dst_base_offset;
};

/* Partition for loop distribution.  */
struct partition
{
  /* Statements of the partition.  */
  bitmap stmts;
  /* True if the partition defines a variable which is used outside of
     the loop.  */
  bool reduction_p;
  enum partition_kind kind;
  enum partition_type type;
  /* Data references in the partition.  */
  bitmap datarefs;
  /* Information of the builtin partition.  */
  struct builtin_info *builtin;
};


/* Allocate and initialize a partition.  */

static partition *
partition_alloc (void)
{
  partition *partition = XCNEW (struct partition);
  partition->stmts = BITMAP_ALLOC (NULL);
  partition->reduction_p = false;
  partition->kind = PKIND_NORMAL;
  partition->datarefs = BITMAP_ALLOC (NULL);
  return partition;
}

/* Free PARTITION.  */

static void
partition_free (partition *partition)
{
  BITMAP_FREE (partition->stmts);
  BITMAP_FREE (partition->datarefs);
  if (partition->builtin)
    free (partition->builtin);

  free (partition);
}

/* Returns true if the partition can be generated as a builtin.  */

static bool
partition_builtin_p (partition *partition)
{
  return partition->kind > PKIND_PARTIAL_MEMSET;
}

/* Returns true if the partition contains a reduction.  */

static bool
partition_reduction_p (partition *partition)
{
  return partition->reduction_p;
}

/* Partitions are fused for different reasons.  */
enum fuse_type
{
  FUSE_NON_BUILTIN = 0,
  FUSE_REDUCTION = 1,
  FUSE_SHARE_REF = 2,
  FUSE_SAME_SCC = 3,
  FUSE_FINALIZE = 4
};

/* Descriptions of the different fusion reasons.  */
static const char *fuse_message[] = {
  "they are non-builtins",
  "they have reductions",
  "they have shared memory refs",
  "they are in the same dependence scc",
  "there is no point to distribute loop"};

static void
update_type_for_merge (struct graph *, partition *, partition *);

/* Merge PARTITION into the partition DEST.  RDG is the reduced
   dependence graph and we update the type of the result partition if it
   is non-NULL.  */

static void
partition_merge_into (struct graph *rdg, partition *dest,
                      partition *partition, enum fuse_type ft)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Fuse partitions because %s:\n", fuse_message[ft]);
      fprintf (dump_file, "  Part 1: ");
      dump_bitmap (dump_file, dest->stmts);
      fprintf (dump_file, "  Part 2: ");
      dump_bitmap (dump_file, partition->stmts);
    }

  dest->kind = PKIND_NORMAL;
  if (dest->type == PTYPE_PARALLEL)
    dest->type = partition->type;

  bitmap_ior_into (dest->stmts, partition->stmts);
  if (partition_reduction_p (partition))
    dest->reduction_p = true;

  /* Further check if any data dependence prevents us from executing the
     new partition in parallel.  */
  if (dest->type == PTYPE_PARALLEL && rdg != NULL)
    update_type_for_merge (rdg, dest, partition);

  bitmap_ior_into (dest->datarefs, partition->datarefs);
}


/* Returns true when DEF is an SSA_NAME defined in LOOP and used after
   the LOOP.  */

static bool
ssa_name_has_uses_outside_loop_p (tree def, loop_p loop)
{
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
    {
      if (is_gimple_debug (USE_STMT (use_p)))
        continue;

      basic_block use_bb = gimple_bb (USE_STMT (use_p));
      if (!flow_bb_inside_loop_p (loop, use_bb))
        return true;
    }

  return false;
}

/* Returns true when STMT defines a scalar variable used after the
   loop LOOP.  */

static bool
stmt_has_scalar_dependences_outside_loop (loop_p loop, gimple *stmt)
{
  def_operand_p def_p;
  ssa_op_iter op_iter;

  if (gimple_code (stmt) == GIMPLE_PHI)
    return ssa_name_has_uses_outside_loop_p (gimple_phi_result (stmt), loop);

  FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
    if (ssa_name_has_uses_outside_loop_p (DEF_FROM_PTR (def_p), loop))
      return true;

  return false;
}

/* Return a copy of LOOP placed before LOOP.  */

static struct loop *
copy_loop_before (struct loop *loop)
{
  struct loop *res;
  edge preheader = loop_preheader_edge (loop);

  initialize_original_copy_tables ();
  res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, NULL, preheader);
  gcc_assert (res != NULL);
  free_original_copy_tables ();
  delete_update_ssa ();

  return res;
}

/* Creates an empty basic block after LOOP.  */

static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}

/* Generate code for PARTITION from the code in LOOP.  The loop is
   copied when COPY_P is true.  All the statements not flagged in the
   PARTITION bitmap are removed from the loop or from its copy.  The
   statements are indexed in sequence inside a basic block, and the
   basic blocks of a loop are taken in dom order.  */

static void
generate_loops_for_partition (struct loop *loop, partition *partition,
                              bool copy_p)
{
  unsigned i;
  basic_block *bbs;

  if (copy_p)
    {
      int orig_loop_num = loop->orig_loop_num;
      loop = copy_loop_before (loop);
      gcc_assert (loop != NULL);
      loop->orig_loop_num = orig_loop_num;
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }
  else
    {
      /* Origin number is set to the new versioned loop's num.  */
      gcc_assert (loop->orig_loop_num != loop->num);
    }

  /* Remove stmts not in the PARTITION bitmap.  */
  bbs = get_loop_body_in_dom_order (loop);

  if (MAY_HAVE_DEBUG_BIND_STMTS)
    for (i = 0; i < loop->num_nodes; i++)
      {
        basic_block bb = bbs[i];

        for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
             gsi_next (&bsi))
          {
            gphi *phi = bsi.phi ();
            if (!virtual_operand_p (gimple_phi_result (phi))
                && !bitmap_bit_p (partition->stmts, gimple_uid (phi)))
              reset_debug_uses (phi);
          }

        for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
             gsi_next (&bsi))
          {
            gimple *stmt = gsi_stmt (bsi);
            if (gimple_code (stmt) != GIMPLE_LABEL
                && !is_gimple_debug (stmt)
                && !bitmap_bit_p (partition->stmts, gimple_uid (stmt)))
              reset_debug_uses (stmt);
          }
      }

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      edge inner_exit = NULL;

      if (loop != bb->loop_father)
        inner_exit = single_exit (bb->loop_father);

      for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
        {
          gphi *phi = bsi.phi ();
          if (!virtual_operand_p (gimple_phi_result (phi))
              && !bitmap_bit_p (partition->stmts, gimple_uid (phi)))
            remove_phi_node (&bsi, true);
          else
            gsi_next (&bsi);
        }

      for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
        {
          gimple *stmt = gsi_stmt (bsi);
          if (gimple_code (stmt) != GIMPLE_LABEL
              && !is_gimple_debug (stmt)
              && !bitmap_bit_p (partition->stmts, gimple_uid (stmt)))
            {
              /* In distribution of a loop nest, if bb is the inner
                 loop's exit_bb, we choose its exit edge/path in order
                 to avoid generating an infinite loop.  For all other
                 cases, we choose an arbitrary path through the empty
                 CFG part that this unnecessary control stmt controls.  */
              if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
                {
                  if (inner_exit && inner_exit->flags & EDGE_TRUE_VALUE)
                    gimple_cond_make_true (cond_stmt);
                  else
                    gimple_cond_make_false (cond_stmt);
                  update_stmt (stmt);
                }
              else if (gimple_code (stmt) == GIMPLE_SWITCH)
                {
                  gswitch *switch_stmt = as_a <gswitch *> (stmt);
                  gimple_switch_set_index
                    (switch_stmt,
                     CASE_LOW (gimple_switch_label (switch_stmt, 1)));
                  update_stmt (stmt);
                }
              else
                {
                  unlink_stmt_vdef (stmt);
                  gsi_remove (&bsi, true);
                  release_defs (stmt);
                  continue;
                }
            }
          gsi_next (&bsi);
        }
    }

  free (bbs);
}

/* If VAL's memory representation contains the same value in all bytes,
   return that value, otherwise return -1.
   E.g. for 0x24242424 return 0x24, for IEEE double
   747708026454360457216.0 return 0x44, etc.  */

static int
const_with_all_bytes_same (tree val)
{
  unsigned char buf[64];
  int i, len;

  if (integer_zerop (val)
      || (TREE_CODE (val) == CONSTRUCTOR
          && !TREE_CLOBBER_P (val)
          && CONSTRUCTOR_NELTS (val) == 0))
    return 0;

  if (real_zerop (val))
    {
      /* Only return 0 for +0.0, not for -0.0, which doesn't have
         an all bytes same memory representation.  Don't transform
         -0.0 stores into +0.0 even for !HONOR_SIGNED_ZEROS.  */
      switch (TREE_CODE (val))
        {
        case REAL_CST:
          if (!real_isneg (TREE_REAL_CST_PTR (val)))
            return 0;
          break;
        case COMPLEX_CST:
          if (!const_with_all_bytes_same (TREE_REALPART (val))
              && !const_with_all_bytes_same (TREE_IMAGPART (val)))
            return 0;
          break;
        case VECTOR_CST:
          {
            unsigned int count = vector_cst_encoded_nelts (val);
            unsigned int j;
            for (j = 0; j < count; ++j)
              if (const_with_all_bytes_same (VECTOR_CST_ENCODED_ELT (val, j)))
                break;
            if (j == count)
              return 0;
            break;
          }
        default:
          break;
        }
    }

  if (CHAR_BIT != 8 || BITS_PER_UNIT != 8)
    return -1;

  len = native_encode_expr (val, buf, sizeof (buf));
  if (len == 0)
    return -1;
  for (i = 1; i < len; i++)
    if (buf[i] != buf[0])
      return -1;
  return buf[0];
}
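
/* For example (an illustrative sketch), the byte check above is what
   lets the pass turn

   | for (i = 0; i < n; i++)
   |   a[i] = 0x24242424;

   into roughly

   | __builtin_memset (a, 0x24, n * 4);

   in generate_memset_builtin below, since every byte of the stored
   value is 0x24.  */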

/* Generate a call to memset for PARTITION in LOOP.  */

static void
generate_memset_builtin (struct loop *loop, partition *partition)
{
  gimple_stmt_iterator gsi;
  tree mem, fn, nb_bytes;
  tree val;
  struct builtin_info *builtin = partition->builtin;
  gimple *fn_call;

  /* The new statements will be placed before LOOP.  */
  gsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  nb_bytes = rewrite_to_non_trapping_overflow (builtin->size);
  nb_bytes = force_gimple_operand_gsi (&gsi, nb_bytes, true, NULL_TREE,
                                       false, GSI_CONTINUE_LINKING);
  mem = builtin->dst_base;
  mem = force_gimple_operand_gsi (&gsi, mem, true, NULL_TREE,
                                  false, GSI_CONTINUE_LINKING);

  /* This exactly matches the pattern recognition in classify_partition.  */
  val = gimple_assign_rhs1 (DR_STMT (builtin->dst_dr));
  /* Handle constants like 0x15151515 and similarly
     floating point constants etc. where all bytes are the same.  */
  int bytev = const_with_all_bytes_same (val);
  if (bytev != -1)
    val = build_int_cst (integer_type_node, bytev);
  else if (TREE_CODE (val) == INTEGER_CST)
    val = fold_convert (integer_type_node, val);
  else if (!useless_type_conversion_p (integer_type_node, TREE_TYPE (val)))
    {
      tree tem = make_ssa_name (integer_type_node);
      gimple *cstmt = gimple_build_assign (tem, NOP_EXPR, val);
      gsi_insert_after (&gsi, cstmt, GSI_CONTINUE_LINKING);
      val = tem;
    }

  fn = build_fold_addr_expr (builtin_decl_implicit (BUILT_IN_MEMSET));
  fn_call = gimple_build_call (fn, 3, mem, val, nb_bytes);
  gsi_insert_after (&gsi, fn_call, GSI_CONTINUE_LINKING);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "generated memset");
      if (bytev == 0)
        fprintf (dump_file, " zero\n");
      else
        fprintf (dump_file, "\n");
    }
}

/* Generate a call to memcpy for PARTITION in LOOP.  */

static void
generate_memcpy_builtin (struct loop *loop, partition *partition)
{
  gimple_stmt_iterator gsi;
  gimple *fn_call;
  tree dest, src, fn, nb_bytes;
  enum built_in_function kind;
  struct builtin_info *builtin = partition->builtin;

  /* The new statements will be placed before LOOP.  */
  gsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  nb_bytes = rewrite_to_non_trapping_overflow (builtin->size);
  nb_bytes = force_gimple_operand_gsi (&gsi, nb_bytes, true, NULL_TREE,
                                       false, GSI_CONTINUE_LINKING);
  dest = builtin->dst_base;
  src = builtin->src_base;
  if (partition->kind == PKIND_MEMCPY
      || ! ptr_derefs_may_alias_p (dest, src))
    kind = BUILT_IN_MEMCPY;
  else
    kind = BUILT_IN_MEMMOVE;

  dest = force_gimple_operand_gsi (&gsi, dest, true, NULL_TREE,
                                   false, GSI_CONTINUE_LINKING);
  src = force_gimple_operand_gsi (&gsi, src, true, NULL_TREE,
                                  false, GSI_CONTINUE_LINKING);
  fn = build_fold_addr_expr (builtin_decl_implicit (kind));
  fn_call = gimple_build_call (fn, 3, dest, src, nb_bytes);
  gsi_insert_after (&gsi, fn_call, GSI_CONTINUE_LINKING);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (kind == BUILT_IN_MEMCPY)
        fprintf (dump_file, "generated memcpy\n");
      else
        fprintf (dump_file, "generated memmove\n");
    }
}

/* Remove and destroy the loop LOOP.  */

static void
destroy_loop (struct loop *loop)
{
  unsigned nbbs = loop->num_nodes;
  edge exit = single_exit (loop);
  basic_block src = loop_preheader_edge (loop)->src, dest = exit->dest;
  basic_block *bbs;
  unsigned i;

  bbs = get_loop_body_in_dom_order (loop);

  redirect_edge_pred (exit, src);
  exit->flags &= ~(EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
  exit->flags |= EDGE_FALLTHRU;
  cancel_loop_tree (loop);
  rescan_loop_exit (exit, false, true);

  i = nbbs;
  do
    {
      /* We have made sure to not leave any dangling uses of SSA
         names defined in the loop.  With the exception of virtuals.
         Make sure we replace all uses of virtual defs that will remain
         outside of the loop with the bare symbol as delete_basic_block
         will release them.  */
      --i;
      for (gphi_iterator gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          if (virtual_operand_p (gimple_phi_result (phi)))
            mark_virtual_phi_result_for_renaming (phi);
        }
      for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          tree vdef = gimple_vdef (stmt);
          if (vdef && TREE_CODE (vdef) == SSA_NAME)
            mark_virtual_operand_for_renaming (vdef);
        }
      delete_basic_block (bbs[i]);
    }
  while (i != 0);

  free (bbs);

  set_immediate_dominator (CDI_DOMINATORS, dest,
                           recompute_dominator (CDI_DOMINATORS, dest));
}

/* Generates code for PARTITION.  Return whether LOOP needs to be
   destroyed.  */

static bool
generate_code_for_partition (struct loop *loop,
                             partition *partition, bool copy_p)
{
  switch (partition->kind)
    {
    case PKIND_NORMAL:
    case PKIND_PARTIAL_MEMSET:
      /* Reductions all have to be in the last partition.  */
      gcc_assert (!partition_reduction_p (partition)
                  || !copy_p);
      generate_loops_for_partition (loop, partition, copy_p);
      return false;

    case PKIND_MEMSET:
      generate_memset_builtin (loop, partition);
      break;

    case PKIND_MEMCPY:
    case PKIND_MEMMOVE:
      generate_memcpy_builtin (loop, partition);
      break;

    default:
      gcc_unreachable ();
    }

  /* Common tail for partitions we turn into a call.  If this was the
     last partition for which we generate code, we have to destroy the
     loop.  */
  if (!copy_p)
    return true;
  return false;
}

/* Return the data dependence relation for data references A and B.  The
   two data references must be in lexicographic order with respect to
   the reduced dependence graph RDG.  We first try to find the ddr in
   the global ddr hash table.  If it doesn't exist, compute the ddr and
   cache it.  */

static data_dependence_relation *
get_data_dependence (struct graph *rdg, data_reference_p a, data_reference_p b)
{
  struct data_dependence_relation ent, **slot;
  struct data_dependence_relation *ddr;

  gcc_assert (DR_IS_WRITE (a) || DR_IS_WRITE (b));
  gcc_assert (rdg_vertex_for_stmt (rdg, DR_STMT (a))
              <= rdg_vertex_for_stmt (rdg, DR_STMT (b)));
  ent.a = a;
  ent.b = b;
  slot = ddrs_table->find_slot (&ent, INSERT);
  if (*slot == NULL)
    {
      ddr = initialize_data_dependence_relation (a, b, loop_nest);
      compute_affine_dependence (ddr, loop_nest[0]);
      *slot = ddr;
    }

  return *slot;
}

/* In the reduced dependence graph RDG for loop distribution, return
   true if the dependence between references DR1 and DR2 leads to a
   dependence cycle and such a dependence cycle can't be resolved by a
   runtime alias check.  */

static bool
data_dep_in_cycle_p (struct graph *rdg,
                     data_reference_p dr1, data_reference_p dr2)
{
  struct data_dependence_relation *ddr;

  /* Re-shuffle data-refs to be in topological order.  */
  if (rdg_vertex_for_stmt (rdg, DR_STMT (dr1))
      > rdg_vertex_for_stmt (rdg, DR_STMT (dr2)))
    std::swap (dr1, dr2);

  ddr = get_data_dependence (rdg, dr1, dr2);

  /* In case of no data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;
  /* For an unknown data dependence, or a known data dependence which
     can't be expressed in a classic distance vector, check if it can be
     resolved by a runtime alias check.  If yes, we still consider the
     data dependence as one that won't introduce a dependence cycle.  */
  else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know
           || DDR_NUM_DIST_VECTS (ddr) == 0)
    return !runtime_alias_check_p (ddr, NULL, true);
  else if (DDR_NUM_DIST_VECTS (ddr) > 1)
    return true;
  else if (DDR_REVERSED_P (ddr)
           || lambda_vector_zerop (DDR_DIST_VECT (ddr, 0), 1))
    return false;

  return true;
}
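
/* For example (an illustrative sketch): for "a[i] = a[i-1] + 1" the
   store/load pair has a known nonzero distance vector, so
   data_dep_in_cycle_p returns true and the partition must stay
   sequential; for "a[i] = b[i]" with possibly aliasing pointers the
   dependence is unknown at compile time but can be resolved by a
   runtime alias check, so it returns false.  */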

/* Given the reduced dependence graph RDG, PARTITION1 and PARTITION2,
   update PARTITION1's type after merging PARTITION2 into PARTITION1.  */

static void
update_type_for_merge (struct graph *rdg,
                       partition *partition1, partition *partition2)
{
  unsigned i, j;
  bitmap_iterator bi, bj;
  data_reference_p dr1, dr2;

  EXECUTE_IF_SET_IN_BITMAP (partition1->datarefs, 0, i, bi)
    {
      unsigned start = (partition1 == partition2) ? i + 1 : 0;

      dr1 = datarefs_vec[i];
      EXECUTE_IF_SET_IN_BITMAP (partition2->datarefs, start, j, bj)
        {
          dr2 = datarefs_vec[j];
          if (DR_IS_READ (dr1) && DR_IS_READ (dr2))
            continue;

          /* The partition can only be executed sequentially if there is
             any data dependence cycle.  */
          if (data_dep_in_cycle_p (rdg, dr1, dr2))
            {
              partition1->type = PTYPE_SEQUENTIAL;
              return;
            }
        }
    }
}

/* Returns a partition with all the statements needed for computing
   the vertex V of the RDG, also including the loop exit conditions.  */

static partition *
build_rdg_partition_for_vertex (struct graph *rdg, int v)
{
  partition *partition = partition_alloc ();
  auto_vec<int, 3> nodes;
  unsigned i, j;
  int x;
  data_reference_p dr;

  graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

  FOR_EACH_VEC_ELT (nodes, i, x)
    {
      bitmap_set_bit (partition->stmts, x);

      for (j = 0; RDG_DATAREFS (rdg, x).iterate (j, &dr); ++j)
        {
          unsigned idx = (unsigned) DR_INDEX (dr);
          gcc_assert (idx < datarefs_vec.length ());

          /* The partition can only be executed sequentially if there is
             any unknown data reference.  */
          if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr)
              || !DR_INIT (dr) || !DR_STEP (dr))
            partition->type = PTYPE_SEQUENTIAL;

          bitmap_set_bit (partition->datarefs, idx);
        }
    }

  if (partition->type == PTYPE_SEQUENTIAL)
    return partition;

  /* Further check if any data dependence prevents us from executing the
     partition in parallel.  */
  update_type_for_merge (rdg, partition, partition);

  return partition;
}
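
/* For example (a sketch): for the loop in the file header comment, the
   partition seeded by the store "D(I) = A(I-1)*E" contains that store
   plus every statement it transitively depends on in the RDG,
   including the induction variable update and, via control dependence
   edges, the loop exit condition; this is why each distributed loop
   keeps its own copy of the loop control statements.  */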

/* Given PARTITION of LOOP and RDG, record single load/store data
   references for a builtin partition in SRC_DR/DST_DR, return false if
   there are no such data references.  */

static bool
find_single_drs (struct loop *loop, struct graph *rdg, partition *partition,
                 data_reference_p *dst_dr, data_reference_p *src_dr)
{
  unsigned i;
  data_reference_p single_ld = NULL, single_st = NULL;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (partition->stmts, 0, i, bi)
    {
      gimple *stmt = RDG_STMT (rdg, i);
      data_reference_p dr;

      if (gimple_code (stmt) == GIMPLE_PHI)
        continue;

      /* Any scalar stmts are ok.  */
      if (!gimple_vuse (stmt))
        continue;

      /* Otherwise just regular loads/stores.  */
      if (!gimple_assign_single_p (stmt))
        return false;

      /* But exactly one store and/or load.  */
      for (unsigned j = 0; RDG_DATAREFS (rdg, i).iterate (j, &dr); ++j)
        {
          tree type = TREE_TYPE (DR_REF (dr));

          /* The memset, memcpy and memmove library calls are only
             able to deal with the generic address space.  */
          if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type)))
            return false;

          if (DR_IS_READ (dr))
            {
              if (single_ld != NULL)
                return false;
              single_ld = dr;
            }
          else
            {
              if (single_st != NULL)
                return false;
              single_st = dr;
            }
        }
    }

  if (!single_st)
    return false;

  /* Bail out if this is a bitfield memory reference.  */
  if (TREE_CODE (DR_REF (single_st)) == COMPONENT_REF
      && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (single_st), 1)))
    return false;

  /* The data reference must be executed exactly once per iteration of
     each loop in the loop nest.  We only need to check dominance
     information against the outermost one in a perfect loop nest
     because a bb can't dominate the outermost loop's latch without
     dominating the inner loop's.  */
  basic_block bb_st = gimple_bb (DR_STMT (single_st));
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb_st))
    return false;

  if (single_ld)
    {
      gimple *store = DR_STMT (single_st), *load = DR_STMT (single_ld);
      /* Direct aggregate copy or via an SSA name temporary.  */
      if (load != store
          && gimple_assign_lhs (load) != gimple_assign_rhs1 (store))
        return false;

      /* Bail out if this is a bitfield memory reference.  */
      if (TREE_CODE (DR_REF (single_ld)) == COMPONENT_REF
          && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (single_ld), 1)))
        return false;

      /* Load and store must be in the same loop nest.  */
      basic_block bb_ld = gimple_bb (DR_STMT (single_ld));
      if (bb_st->loop_father != bb_ld->loop_father)
        return false;

      /* The data reference must be executed exactly once per iteration.
         As with single_st, we only need to check against the outermost
         loop.  */
      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb_ld))
        return false;

      edge e = single_exit (bb_st->loop_father);
      bool dom_ld = dominated_by_p (CDI_DOMINATORS, e->src, bb_ld);
      bool dom_st = dominated_by_p (CDI_DOMINATORS, e->src, bb_st);
      if (dom_ld != dom_st)
        return false;
    }

  *src_dr = single_ld;
  *dst_dr = single_st;
  return true;
}

/* Given data reference DR in LOOP_NEST, this function checks the
   enclosing loops from inner to outer to see if the loop's step equals
   the access size at each level of the loop nest.  Return 2 if we can
   prove this at all levels; record the access base and size in BASE and
   SIZE; save the loop's step at each level in STEPS if it is not null.
   For example:

     int arr[100][100][100];
     for (i = 0; i < 100; i++)        ;steps[2] = 40000
       for (j = 100; j > 0; j--)      ;steps[1] = -400
         for (k = 0; k < 100; k++)    ;steps[0] = 4
           arr[i][j - 1][k] = 0;      ;base = &arr, size = 4000000

   Return 1 if we can prove the equality at the innermost loop, but not
   at all levels.  In this case, no information is recorded.

   Return 0 if no equality can be proven at any level.  */

static int
compute_access_range (loop_p loop_nest, data_reference_p dr, tree *base,
                      tree *size, vec<tree> *steps = NULL)
{
  location_t loc = gimple_location (DR_STMT (dr));
  basic_block bb = gimple_bb (DR_STMT (dr));
  struct loop *loop = bb->loop_father;
  tree ref = DR_REF (dr);
  tree access_base = build_fold_addr_expr (ref);
  tree access_size = TYPE_SIZE_UNIT (TREE_TYPE (ref));
  int res = 0;

  do {
    tree scev_fn = analyze_scalar_evolution (loop, access_base);
    if (TREE_CODE (scev_fn) != POLYNOMIAL_CHREC)
      return res;

    access_base = CHREC_LEFT (scev_fn);
    if (tree_contains_chrecs (access_base, NULL))
      return res;

    tree scev_step = CHREC_RIGHT (scev_fn);
    /* Only support constant steps.  */
    if (TREE_CODE (scev_step) != INTEGER_CST)
      return res;

    enum ev_direction access_dir = scev_direction (scev_fn);
    if (access_dir == EV_DIR_UNKNOWN)
      return res;

    if (steps != NULL)
      steps->safe_push (scev_step);

    scev_step = fold_convert_loc (loc, sizetype, scev_step);
    /* Compute the absolute value of the scev step.  */
    if (access_dir == EV_DIR_DECREASES)
      scev_step = fold_build1_loc (loc, NEGATE_EXPR, sizetype, scev_step);

    /* At each level of the loop, the scev step must equal the access
       size.  In other words, DR must access consecutive memory between
       loop iterations.  */
    if (!operand_equal_p (scev_step, access_size, 0))
      return res;

    /* The access stride can be computed for the data reference at least
       for the innermost loop.  */
    res = 1;

    /* Compute DR's execution times in the loop.  */
    tree niters = number_of_latch_executions (loop);
    niters = fold_convert_loc (loc, sizetype, niters);
    if (dominated_by_p (CDI_DOMINATORS, single_exit (loop)->src, bb))
      niters = size_binop_loc (loc, PLUS_EXPR, niters, size_one_node);

    /* Compute DR's overall access size in the loop.  */
    access_size = fold_build2_loc (loc, MULT_EXPR, sizetype,
                                   niters, scev_step);
    /* Adjust the base address in case of a negative step.  */
    if (access_dir == EV_DIR_DECREASES)
      {
        tree adj = fold_build2_loc (loc, MINUS_EXPR, sizetype,
                                    scev_step, access_size);
        access_base = fold_build_pointer_plus_loc (loc, access_base, adj);
      }
  } while (loop != loop_nest && (loop = loop_outer (loop)) != NULL);

  *base = access_base;
  *size = access_size;
  /* The access stride can be computed for the data reference at each
     level of the loop nest.  */
  return 2;
}

/* Allocate and return a builtin struct.  Record information like DST_DR,
   SRC_DR, DST_BASE, SRC_BASE and SIZE in the allocated struct.  */

static struct builtin_info *
alloc_builtin (data_reference_p dst_dr, data_reference_p src_dr,
               tree dst_base, tree src_base, tree size)
{
  struct builtin_info *builtin = XNEW (struct builtin_info);
  builtin->dst_dr = dst_dr;
  builtin->src_dr = src_dr;
  builtin->dst_base = dst_base;
  builtin->src_base = src_base;
  builtin->size = size;
  return builtin;
}

/* Given data reference DR in loop nest LOOP, classify whether it forms
   a builtin memset call.  */

static void
classify_builtin_st (loop_p loop, partition *partition, data_reference_p dr)
{
  gimple *stmt = DR_STMT (dr);
  tree base, size, rhs = gimple_assign_rhs1 (stmt);

  if (const_with_all_bytes_same (rhs) == -1
      && (!INTEGRAL_TYPE_P (TREE_TYPE (rhs))
          || (TYPE_MODE (TREE_TYPE (rhs))
              != TYPE_MODE (unsigned_char_type_node))))
    return;

  if (TREE_CODE (rhs) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (rhs)
      && flow_bb_inside_loop_p (loop, gimple_bb (SSA_NAME_DEF_STMT (rhs))))
    return;

  int res = compute_access_range (loop, dr, &base, &size);
  if (res == 0)
    return;
  if (res == 1)
    {
      partition->kind = PKIND_PARTIAL_MEMSET;
      return;
    }

  poly_uint64 base_offset;
  unsigned HOST_WIDE_INT const_base_offset;
  tree base_base = strip_offset (base, &base_offset);
  if (!base_offset.is_constant (&const_base_offset))
    return;

  struct builtin_info *builtin;
  builtin = alloc_builtin (dr, NULL, base, NULL_TREE, size);
  builtin->dst_base_base = base_base;
  builtin->dst_base_offset = const_base_offset;
  partition->builtin = builtin;
  partition->kind = PKIND_MEMSET;
}

/* Given data references DST_DR and SRC_DR in loop nest LOOP and RDG,
   classify whether they form a builtin memcpy or memmove call.  */

static void
classify_builtin_ldst (loop_p loop, struct graph *rdg, partition *partition,
                       data_reference_p dst_dr, data_reference_p src_dr)
{
  tree base, size, src_base, src_size;
  auto_vec<tree> dst_steps, src_steps;

  /* Compute the access range of both the load and the store.  */
  int res = compute_access_range (loop, dst_dr, &base, &size, &dst_steps);
  if (res != 2)
    return;
  res = compute_access_range (loop, src_dr, &src_base, &src_size, &src_steps);
  if (res != 2)
    return;

  /* They must have the same access size.  */
  if (!operand_equal_p (size, src_size, 0))
    return;

  /* The load and store in the loop nest must access memory in the same
     way, i.e., they must have the same steps in each loop of the nest.  */
  if (dst_steps.length () != src_steps.length ())
    return;
  for (unsigned i = 0; i < dst_steps.length (); ++i)
    if (!operand_equal_p (dst_steps[i], src_steps[i], 0))
      return;

  /* Now check if there is a dependence.  */
  ddr_p ddr = get_data_dependence (rdg, src_dr, dst_dr);

  /* Classify as memcpy if there is no dependence between load and store.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    {
      partition->builtin = alloc_builtin (dst_dr, src_dr, base, src_base, size);
      partition->kind = PKIND_MEMCPY;
      return;
    }

  /* Can't do memmove in case of an unknown dependence or a dependence
     without a classical distance vector.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know
      || DDR_NUM_DIST_VECTS (ddr) == 0)
    return;

  unsigned i;
  lambda_vector dist_v;
  int num_lev = (DDR_LOOP_NEST (ddr)).length ();
  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
    {
      unsigned dep_lev = dependence_level (dist_v, num_lev);
      /* Can't do memmove if the load depends on the store.  */
      if (dep_lev > 0 && dist_v[dep_lev - 1] > 0 && !DDR_REVERSED_P (ddr))
        return;
    }

  partition->builtin = alloc_builtin (dst_dr, src_dr, base, src_base, size);
  partition->kind = PKIND_MEMMOVE;
  return;
}
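
/* For example (an illustrative sketch): in

   | for (i = 0; i < n; i++)
   |   a[i] = b[i];

   the load and store have no dependence for distinct arrays, so the
   partition classifies as PKIND_MEMCPY; in

   | for (i = 1; i < n; i++)
   |   a[i] = a[i-1];

   the load depends on the store of the previous iteration, so neither
   memcpy nor memmove applies and the partition stays PKIND_NORMAL.  */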
1626
1627/* Classifies the builtin kind we can generate for PARTITION of RDG and LOOP.
1628 For the moment we detect memset, memcpy and memmove patterns. Bitmap
1629 STMT_IN_ALL_PARTITIONS contains statements belonging to all partitions. */
1630
1631static void
1632classify_partition (loop_p loop, struct graph *rdg, partition *partition,
1633 bitmap stmt_in_all_partitions)
1634{
1635 bitmap_iterator bi;
1636 unsigned i;
1637 data_reference_p single_ld = NULL, single_st = NULL;
1638 bool volatiles_p = false, has_reduction = false;
1639
1640 EXECUTE_IF_SET_IN_BITMAP (partition->stmts, 0, i, bi)
f689d33d 1641 {
506fcb4f 1642 gimple *stmt = RDG_STMT (rdg, i);
50f5937e 1643
506fcb4f 1644 if (gimple_has_volatile_ops (stmt))
1645 volatiles_p = true;
50f5937e 1646
506fcb4f 1647 /* If the stmt is not included by all partitions and there is uses
1648 outside of the loop, then mark the partition as reduction. */
1649 if (stmt_has_scalar_dependences_outside_loop (loop, stmt))
1650 {
1651 /* Due to limitation in the transform phase we have to fuse all
1652 reduction partitions. As a result, this could cancel valid
1653 loop distribution especially for loop that induction variable
1654 is used outside of loop. To workaround this issue, we skip
1655 marking partition as reudction if the reduction stmt belongs
1656 to all partitions. In such case, reduction will be computed
1657 correctly no matter how partitions are fused/distributed. */
1658 if (!bitmap_bit_p (stmt_in_all_partitions, i))
7b6f8db4 1659 {
506fcb4f 1660 partition->reduction_p = true;
1661 return;
7b6f8db4 1662 }
506fcb4f 1663 has_reduction = true;
7b6f8db4 1664 }
f689d33d 1665 }
506fcb4f 1666
1667 /* Perform general partition disqualification for builtins. */
1668 if (volatiles_p
1669 /* Simple workaround to prevent classifying the partition as builtin
 1670	 if it contains any use outside of the loop.  */
1671 || has_reduction
1672 || !flag_tree_loop_distribute_patterns)
1673 return;
1674
1675 /* Find single load/store data references for builtin partition. */
b363c31b 1676 if (!find_single_drs (loop, rdg, partition, &single_st, &single_ld))
506fcb4f 1677 return;
1678
1679 /* Classify the builtin kind. */
1680 if (single_ld == NULL)
1681 classify_builtin_st (loop, partition, single_st);
1682 else
1683 classify_builtin_ldst (loop, rdg, partition, single_st, single_ld);
a136cad6 1684}
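
/* A hedged illustration (hypothetical loop) of the logic above:

     for (i = 0; i < n; i++)
       {
	 a[i] = 0;             (seed store, may become PKIND_MEMSET)
	 sum = sum + b[i];     (result used after the loop)
       }

   If the statement computing "sum" does not belong to all partitions,
   its partition is marked reduction_p and classification stops there;
   partitions with reductions are never classified as builtins.  */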
1685
fd34627b 1686/* Returns true when PARTITION1 and PARTITION2 access the same memory
1687 object in RDG. */
a136cad6 1688
1689static bool
fd34627b 1690share_memory_accesses (struct graph *rdg,
1691 partition *partition1, partition *partition2)
a136cad6 1692{
fd34627b 1693 unsigned i, j;
a136cad6 1694 bitmap_iterator bi, bj;
fd34627b 1695 data_reference_p dr1, dr2;
f83623cc 1696
 1697	  /* First check whether the intersection of the two partitions contains
 1698	     any loads or stores.  Common loads are the situation that happens
 1699	     most often.  */
1700 EXECUTE_IF_AND_IN_BITMAP (partition1->stmts, partition2->stmts, 0, i, bi)
1701 if (RDG_MEM_WRITE_STMT (rdg, i)
1702 || RDG_MEM_READS_STMT (rdg, i))
1703 return true;
a136cad6 1704
fd34627b 1705 /* Then check whether the two partitions access the same memory object. */
1706 EXECUTE_IF_SET_IN_BITMAP (partition1->datarefs, 0, i, bi)
1707 {
1708 dr1 = datarefs_vec[i];
1709
1710 if (!DR_BASE_ADDRESS (dr1)
1711 || !DR_OFFSET (dr1) || !DR_INIT (dr1) || !DR_STEP (dr1))
1712 continue;
1713
1714 EXECUTE_IF_SET_IN_BITMAP (partition2->datarefs, 0, j, bj)
1715 {
1716 dr2 = datarefs_vec[j];
1717
1718 if (!DR_BASE_ADDRESS (dr2)
1719 || !DR_OFFSET (dr2) || !DR_INIT (dr2) || !DR_STEP (dr2))
1720 continue;
1721
1722 if (operand_equal_p (DR_BASE_ADDRESS (dr1), DR_BASE_ADDRESS (dr2), 0)
1723 && operand_equal_p (DR_OFFSET (dr1), DR_OFFSET (dr2), 0)
1724 && operand_equal_p (DR_INIT (dr1), DR_INIT (dr2), 0)
1725 && operand_equal_p (DR_STEP (dr1), DR_STEP (dr2), 0))
1726 return true;
1727 }
1728 }
a136cad6 1729
1730 return false;
1731}
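
/* For instance (a sketch, assuming fully analyzed data references): two
   partitions that both access a[i] - same base address, offset, init
   and step - share the memory object, so the simple cost model in the
   caller fuses them; partitions touching only distinct arrays with
   different base addresses are left separate.  */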
1732
8d95fe31 1733	/* For each seed statement in STARTING_STMTS, this function builds a
 1734	   partition for it by adding dependent statements according to the RDG.
 1735	   All partitions are recorded in PARTITIONS.  */
801c5610 1736
1737static void
88e0cdd9 1738rdg_build_partitions (struct graph *rdg,
42acab1c 1739 vec<gimple *> starting_stmts,
04009ada 1740 vec<partition *> *partitions)
801c5610 1741{
035def86 1742 auto_bitmap processed;
15c8650d 1743 int i;
42acab1c 1744 gimple *stmt;
801c5610 1745
15c8650d 1746 FOR_EACH_VEC_ELT (starting_stmts, i, stmt)
801c5610 1747 {
15c8650d 1748 int v = rdg_vertex_for_stmt (rdg, stmt);
1749
1750 if (dump_file && (dump_flags & TDF_DETAILS))
1751 fprintf (dump_file,
1752 "ldist asked to generate code for vertex %d\n", v);
48e1416a 1753
b223e75a 1754	      /* If the vertex is already contained in another partition, so
 1755	 is the partition rooted at it.  */
801c5610 1756 if (bitmap_bit_p (processed, v))
1757 continue;
48e1416a 1758
04009ada 1759 partition *partition = build_rdg_partition_for_vertex (rdg, v);
b223e75a 1760 bitmap_ior_into (processed, partition->stmts);
801c5610 1761
df9892ff 1762 if (dump_file && (dump_flags & TDF_DETAILS))
801c5610 1763 {
f024aa04 1764 fprintf (dump_file, "ldist creates useful %s partition:\n",
1765 partition->type == PTYPE_PARALLEL ? "parallel" : "sequent");
1766 bitmap_print (dump_file, partition->stmts, " ", "\n");
801c5610 1767 }
df9892ff 1768
1769 partitions->safe_push (partition);
801c5610 1770 }
1771
88e0cdd9 1772 /* All vertices should have been assigned to at least one partition now,
1773 other than vertices belonging to dead code. */
801c5610 1774}
1775
1776/* Dump to FILE the PARTITIONS. */
1777
1778static void
04009ada 1779dump_rdg_partitions (FILE *file, vec<partition *> partitions)
801c5610 1780{
1781 int i;
04009ada 1782 partition *partition;
801c5610 1783
f1f41a6c 1784 FOR_EACH_VEC_ELT (partitions, i, partition)
543506e0 1785 debug_bitmap_file (file, partition->stmts);
801c5610 1786}
1787
1788/* Debug PARTITIONS. */
04009ada 1789extern void debug_rdg_partitions (vec<partition *> );
801c5610 1790
4b987fac 1791DEBUG_FUNCTION void
04009ada 1792debug_rdg_partitions (vec<partition *> partitions)
801c5610 1793{
1794 dump_rdg_partitions (stderr, partitions);
1795}
1796
577982d8 1797/* Returns the number of read and write operations in the RDG. */
1798
1799static int
1800number_of_rw_in_rdg (struct graph *rdg)
1801{
1802 int i, res = 0;
1803
1804 for (i = 0; i < rdg->n_vertices; i++)
1805 {
1806 if (RDG_MEM_WRITE_STMT (rdg, i))
1807 ++res;
1808
1809 if (RDG_MEM_READS_STMT (rdg, i))
1810 ++res;
1811 }
1812
1813 return res;
1814}
1815
1816/* Returns the number of read and write operations in a PARTITION of
1817 the RDG. */
1818
1819static int
04009ada 1820number_of_rw_in_partition (struct graph *rdg, partition *partition)
577982d8 1821{
1822 int res = 0;
1823 unsigned i;
1824 bitmap_iterator ii;
1825
543506e0 1826 EXECUTE_IF_SET_IN_BITMAP (partition->stmts, 0, i, ii)
577982d8 1827 {
1828 if (RDG_MEM_WRITE_STMT (rdg, i))
1829 ++res;
1830
1831 if (RDG_MEM_READS_STMT (rdg, i))
1832 ++res;
1833 }
1834
1835 return res;
1836}
1837
1838/* Returns true when one of the PARTITIONS contains all the read or
1839 write operations of RDG. */
1840
1841static bool
f1f41a6c 1842partition_contains_all_rw (struct graph *rdg,
04009ada 1843 vec<partition *> partitions)
577982d8 1844{
1845 int i;
04009ada 1846 partition *partition;
577982d8 1847 int nrw = number_of_rw_in_rdg (rdg);
1848
f1f41a6c 1849 FOR_EACH_VEC_ELT (partitions, i, partition)
577982d8 1850 if (nrw == number_of_rw_in_partition (rdg, partition))
1851 return true;
1852
1853 return false;
1854}
1855
7103facc 1856/* Compute partition dependence created by the data references in DRS1
f562e2ea 1857	   and DRS2, modify and return DIR according to that.  If ALIAS_DDRS is
 1858	   not NULL, we record dependences introduced by possible aliasing between
 1859	   two data references in ALIAS_DDRS; otherwise, we simply ignore such
 1860	   dependences as if they don't exist at all.  */
7103facc 1861
1862static int
209a62a6 1863pg_add_dependence_edges (struct graph *rdg, int dir,
f562e2ea 1864 bitmap drs1, bitmap drs2, vec<ddr_p> *alias_ddrs)
7103facc 1865{
889a3926 1866 unsigned i, j;
1867 bitmap_iterator bi, bj;
1868 data_reference_p dr1, dr2, saved_dr1;
7103facc 1869
 1870	  /* Dependence direction - 0 is no dependence, -1 is back,
 1871	     1 is forward, 2 is both (we can stop then, merging will occur).  */
889a3926 1872 EXECUTE_IF_SET_IN_BITMAP (drs1, 0, i, bi)
1873 {
1874 dr1 = datarefs_vec[i];
1875
1876 EXECUTE_IF_SET_IN_BITMAP (drs2, 0, j, bj)
1877 {
f562e2ea 1878 int res, this_dir = 1;
1879 ddr_p ddr;
1880
889a3926 1881 dr2 = datarefs_vec[j];
1882
1883 /* Skip all <read, read> data dependence. */
1884 if (DR_IS_READ (dr1) && DR_IS_READ (dr2))
1885 continue;
1886
1887 saved_dr1 = dr1;
f562e2ea 1888 /* Re-shuffle data-refs to be in topological order. */
889a3926 1889 if (rdg_vertex_for_stmt (rdg, DR_STMT (dr1))
1890 > rdg_vertex_for_stmt (rdg, DR_STMT (dr2)))
1891 {
1892 std::swap (dr1, dr2);
1893 this_dir = -this_dir;
1894 }
50f5937e 1895 ddr = get_data_dependence (rdg, dr1, dr2);
889a3926 1896 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
f562e2ea 1897 {
1898 this_dir = 0;
1899 res = data_ref_compare_tree (DR_BASE_ADDRESS (dr1),
1900 DR_BASE_ADDRESS (dr2));
 1901	      /* Be conservative.  If the data references are not well analyzed,
 1902	 or the two data references have the same base address and
 1903	 offset, add the dependence and consider them aliases of each other.
f4d3c071 1904 In other words, the dependence cannot be resolved by
f562e2ea 1905 runtime alias check. */
1906 if (!DR_BASE_ADDRESS (dr1) || !DR_BASE_ADDRESS (dr2)
1907 || !DR_OFFSET (dr1) || !DR_OFFSET (dr2)
1908 || !DR_INIT (dr1) || !DR_INIT (dr2)
1909 || !DR_STEP (dr1) || !tree_fits_uhwi_p (DR_STEP (dr1))
1910 || !DR_STEP (dr2) || !tree_fits_uhwi_p (DR_STEP (dr2))
1911 || res == 0)
1912 this_dir = 2;
1913 /* Data dependence could be resolved by runtime alias check,
1914 record it in ALIAS_DDRS. */
1915 else if (alias_ddrs != NULL)
1916 alias_ddrs->safe_push (ddr);
1917 /* Or simply ignore it. */
1918 }
889a3926 1919 else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
1920 {
1921 if (DDR_REVERSED_P (ddr))
f562e2ea 1922 this_dir = -this_dir;
1923
889a3926 1924	      /* Known dependences can still be unordered throughout the
1925 iteration space, see gcc.dg/tree-ssa/ldist-16.c. */
1926 if (DDR_NUM_DIST_VECTS (ddr) != 1)
1927 this_dir = 2;
 1928	      /* If the overlap is exact, preserve stmt order.  */
21e5a1b6 1929 else if (lambda_vector_zerop (DDR_DIST_VECT (ddr, 0),
1930 DDR_NB_LOOPS (ddr)))
889a3926 1931 ;
f562e2ea 1932	      /* Else, as the distance vector is lexicographically positive, swap
 1933	 the dependence direction.  */
889a3926 1934 else
f562e2ea 1935 this_dir = -this_dir;
889a3926 1936 }
1937 else
1938 this_dir = 0;
889a3926 1939 if (this_dir == 2)
1940 return 2;
1941 else if (dir == 0)
1942 dir = this_dir;
1943 else if (this_dir != 0 && dir != this_dir)
1944 return 2;
1945 /* Shuffle "back" dr1. */
1946 dr1 = saved_dr1;
1947 }
1948 }
7103facc 1949 return dir;
1950}
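
/* A worked illustration of the direction values computed above, where
   P1 and P2 are hypothetical partitions owning DRS1 and DRS2:

     dir =  0   no dependence between the two partitions
     dir =  1   all dependences go forward, from P1 to P2
     dir = -1   all dependences go back, from P2 to P1
     dir =  2   dependences go both ways; the caller must merge.

   For example, if P1 stores a[i] and P2 loads a[i] in the same
   iteration (zero distance vector), the overlap is exact, statement
   order is preserved and the direction is not swapped.  */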
1951
1952/* Compare postorder number of the partition graph vertices V1 and V2. */
1953
1954static int
1955pgcmp (const void *v1_, const void *v2_)
1956{
1957 const vertex *v1 = (const vertex *)v1_;
1958 const vertex *v2 = (const vertex *)v2_;
1959 return v2->post - v1->post;
1960}
15c8650d 1961
f562e2ea 1962/* Data attached to vertices of partition dependence graph. */
1963struct pg_vdata
1964{
1965 /* ID of the corresponding partition. */
1966 int id;
1967 /* The partition. */
1968 struct partition *partition;
1969};
1970
1971/* Data attached to edges of partition dependence graph. */
1972struct pg_edata
1973{
1974 /* If the dependence edge can be resolved by runtime alias check,
1975 this vector contains data dependence relations for runtime alias
1976 check. On the other hand, if the dependence edge is introduced
1977 because of compilation time known data dependence, this vector
1978 contains nothing. */
1979 vec<ddr_p> alias_ddrs;
1980};
1981
1982/* Callback data for traversing edges in graph. */
1983struct pg_edge_callback_data
1984{
 1985	  /* Bitmap of strongly connected components that should be merged.  */
1986 bitmap sccs_to_merge;
 1987	  /* Array containing component information for all vertices.  */
1988 int *vertices_component;
1989 /* Vector to record all data dependence relations which are needed
1990 to break strong connected components by runtime alias checks. */
1991 vec<ddr_p> *alias_ddrs;
1992};
1993
 1994	/* Initialize vertices' data for partition dependence graph PG with
1995 PARTITIONS. */
1996
1997static void
1998init_partition_graph_vertices (struct graph *pg,
1999 vec<struct partition *> *partitions)
2000{
2001 int i;
2002 partition *partition;
2003 struct pg_vdata *data;
2004
2005 for (i = 0; partitions->iterate (i, &partition); ++i)
2006 {
2007 data = new pg_vdata;
2008 pg->vertices[i].data = data;
2009 data->id = i;
2010 data->partition = partition;
2011 }
2012}
2013
2014/* Add edge <I, J> to partition dependence graph PG. Attach vector of data
2015 dependence relations to the EDGE if DDRS isn't NULL. */
2016
2017static void
2018add_partition_graph_edge (struct graph *pg, int i, int j, vec<ddr_p> *ddrs)
2019{
2020 struct graph_edge *e = add_edge (pg, i, j);
2021
 2022	  /* If the edge has data dependence relations attached, it means this
 2023	     dependence edge can be resolved by runtime alias checks.  */
2024 if (ddrs != NULL)
2025 {
2026 struct pg_edata *data = new pg_edata;
2027
2028 gcc_assert (ddrs->length () > 0);
2029 e->data = data;
2030 data->alias_ddrs = vNULL;
2031 data->alias_ddrs.safe_splice (*ddrs);
2032 }
2033}
2034
 2035	/* Callback function for the graph traversal algorithm.  It returns true
 2036	   if edge E should be skipped when traversing the graph.  */
2037
2038static bool
2039pg_skip_alias_edge (struct graph_edge *e)
2040{
2041 struct pg_edata *data = (struct pg_edata *)e->data;
2042 return (data != NULL && data->alias_ddrs.length () > 0);
2043}
2044
2045/* Callback function freeing data attached to edge E of graph. */
2046
2047static void
2048free_partition_graph_edata_cb (struct graph *, struct graph_edge *e, void *)
2049{
2050 if (e->data != NULL)
2051 {
2052 struct pg_edata *data = (struct pg_edata *)e->data;
2053 data->alias_ddrs.release ();
2054 delete data;
2055 }
2056}
2057
 2058	/* Free data attached to vertices of partition dependence graph PG.  */
2059
2060static void
2061free_partition_graph_vdata (struct graph *pg)
2062{
2063 int i;
2064 struct pg_vdata *data;
2065
2066 for (i = 0; i < pg->n_vertices; ++i)
2067 {
2068 data = (struct pg_vdata *)pg->vertices[i].data;
2069 delete data;
2070 }
2071}
2072
2073/* Build and return partition dependence graph for PARTITIONS. RDG is
2074 reduced dependence graph for the loop to be distributed. If IGNORE_ALIAS_P
2075 is true, data dependence caused by possible alias between references
 2076	   is ignored, as if it doesn't exist at all; otherwise all dependences
2077 are considered. */
2078
2079static struct graph *
2080build_partition_graph (struct graph *rdg,
2081 vec<struct partition *> *partitions,
2082 bool ignore_alias_p)
2083{
2084 int i, j;
2085 struct partition *partition1, *partition2;
2086 graph *pg = new_graph (partitions->length ());
2087 auto_vec<ddr_p> alias_ddrs, *alias_ddrs_p;
2088
2089 alias_ddrs_p = ignore_alias_p ? NULL : &alias_ddrs;
2090
2091 init_partition_graph_vertices (pg, partitions);
2092
2093 for (i = 0; partitions->iterate (i, &partition1); ++i)
2094 {
2095 for (j = i + 1; partitions->iterate (j, &partition2); ++j)
2096 {
 2097	  /* Dependence direction - 0 is no dependence, -1 is back,
 2098	     1 is forward, 2 is both (we can stop then, merging will occur).  */
2099 int dir = 0;
2100
 2101	  /* If the first partition has a reduction, add a back edge; if the
 2102	     second partition has a reduction, add a forward edge.  This makes
 2103	     sure that the reduction partition will be sorted as the last one.  */
2104 if (partition_reduction_p (partition1))
2105 dir = -1;
2106 else if (partition_reduction_p (partition2))
2107 dir = 1;
2108
2109 /* Cleanup the temporary vector. */
2110 alias_ddrs.truncate (0);
2111
2112 dir = pg_add_dependence_edges (rdg, dir, partition1->datarefs,
2113 partition2->datarefs, alias_ddrs_p);
2114
 2115	  /* Add an edge to the partition graph if there exists a dependence.
 2116	     There are two types of edges.  One type of edge is caused by
f4d3c071 2117	     compilation time known dependences; this type cannot be resolved
f562e2ea 2118	     by runtime alias checks.  The other type can be resolved by
 2119	     runtime alias checks.  */
2120 if (dir == 1 || dir == 2
2121 || alias_ddrs.length () > 0)
2122 {
2123 /* Attach data dependence relations to edge that can be resolved
2124 by runtime alias check. */
2125 bool alias_edge_p = (dir != 1 && dir != 2);
2126 add_partition_graph_edge (pg, i, j,
2127 (alias_edge_p) ? &alias_ddrs : NULL);
2128 }
2129 if (dir == -1 || dir == 2
2130 || alias_ddrs.length () > 0)
2131 {
2132 /* Attach data dependence relations to edge that can be resolved
2133 by runtime alias check. */
2134 bool alias_edge_p = (dir != -1 && dir != 2);
2135 add_partition_graph_edge (pg, j, i,
2136 (alias_edge_p) ? &alias_ddrs : NULL);
2137 }
2138 }
2139 }
2140 return pg;
2141}
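
/* Sketch of the two edge kinds (hypothetical partitions): if P1 writes
   a[] and P2 then reads a[] with a compile-time known dependence, a
   plain edge P1 -> P2 with no attached data is added; if instead P1
   writes *p and P2 reads *q where p and q may alias, the edge carries
   the ddrs in its pg_edata and may later be resolved, i.e. removed, by
   a runtime alias check.  */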
2142
cb072485 2143/* Sort partitions in PG in descending post order and store them in
2144 PARTITIONS. */
f562e2ea 2145
2146static void
2147sort_partitions_by_post_order (struct graph *pg,
2148 vec<struct partition *> *partitions)
2149{
2150 int i;
2151 struct pg_vdata *data;
2152
cb072485 2153 /* Now order the remaining nodes in descending postorder. */
f562e2ea 2154 qsort (pg->vertices, pg->n_vertices, sizeof (vertex), pgcmp);
2155 partitions->truncate (0);
2156 for (i = 0; i < pg->n_vertices; ++i)
2157 {
2158 data = (struct pg_vdata *)pg->vertices[i].data;
2159 if (data->partition)
2160 partitions->safe_push (data->partition);
2161 }
2162}
2163
 2164	/* Given reduced dependence graph RDG, merge strongly connected components
883b4905 2165	   of PARTITIONS.  If IGNORE_ALIAS_P is true, data dependences caused by
 2166	   possible aliasing between references are ignored, as if they don't exist
 2167	   at all; otherwise all dependences are considered.  */
f562e2ea 2168
2169static void
2170merge_dep_scc_partitions (struct graph *rdg,
883b4905 2171 vec<struct partition *> *partitions,
2172 bool ignore_alias_p)
f562e2ea 2173{
2174 struct partition *partition1, *partition2;
2175 struct pg_vdata *data;
883b4905 2176 graph *pg = build_partition_graph (rdg, partitions, ignore_alias_p);
f562e2ea 2177 int i, j, num_sccs = graphds_scc (pg, NULL);
2178
 2179	  /* A strongly connected component means a dependence cycle, which we
 2180	     cannot distribute.  So fuse the partitions together.  */
2181 if ((unsigned) num_sccs < partitions->length ())
2182 {
2183 for (i = 0; i < num_sccs; ++i)
2184 {
2185 for (j = 0; partitions->iterate (j, &partition1); ++j)
2186 if (pg->vertices[j].component == i)
2187 break;
2188 for (j = j + 1; partitions->iterate (j, &partition2); ++j)
2189 if (pg->vertices[j].component == i)
2190 {
2191 partition_merge_into (NULL, partition1,
2192 partition2, FUSE_SAME_SCC);
2193 partition1->type = PTYPE_SEQUENTIAL;
2194 (*partitions)[j] = NULL;
2195 partition_free (partition2);
2196 data = (struct pg_vdata *)pg->vertices[j].data;
2197 data->partition = NULL;
2198 }
2199 }
f562e2ea 2200 }
1df7b42b 2201
2202 sort_partitions_by_post_order (pg, partitions);
f562e2ea 2203 gcc_assert (partitions->length () == (unsigned)num_sccs);
2204 free_partition_graph_vdata (pg);
2205 free_graph (pg);
2206}
2207
2208/* Callback function for traversing edge E in graph G. DATA is private
2209 callback data. */
2210
2211static void
2212pg_collect_alias_ddrs (struct graph *g, struct graph_edge *e, void *data)
2213{
2214 int i, j, component;
2215 struct pg_edge_callback_data *cbdata;
2216 struct pg_edata *edata = (struct pg_edata *) e->data;
2217
 2218	  /* If the edge doesn't have attached data dependences, it represents
 2219	     compilation time known dependences.  This type of dependence cannot
 2220	     be resolved by runtime alias checks.  */
2221 if (edata == NULL || edata->alias_ddrs.length () == 0)
2222 return;
2223
2224 cbdata = (struct pg_edge_callback_data *) data;
2225 i = e->src;
2226 j = e->dest;
2227 component = cbdata->vertices_component[i];
 2228	  /* Vertices are topologically sorted according to compilation time
 2229	     known dependences, so we can break strongly connected components
 2230	     by removing edges of the opposite direction, i.e., edges pointing
 2231	     from a vertex with smaller post number to a vertex with bigger post
 2232	     number.  */
2233 if (g->vertices[i].post < g->vertices[j].post
2234 /* We only need to remove edges connecting vertices in the same
2235 strong connected component to break it. */
2236 && component == cbdata->vertices_component[j]
2237 /* Check if we want to break the strong connected component or not. */
2238 && !bitmap_bit_p (cbdata->sccs_to_merge, component))
2239 cbdata->alias_ddrs->safe_splice (edata->alias_ddrs);
2240}
2241
 2242	/* This is the main function breaking strongly connected components in
 2243	   PARTITIONS given reduced dependence graph RDG.  Store data dependence
 2244	   relations for runtime alias checks in ALIAS_DDRS.  */
2245
2246static void
2247break_alias_scc_partitions (struct graph *rdg,
2248 vec<struct partition *> *partitions,
2249 vec<ddr_p> *alias_ddrs)
2250{
cb072485 2251 int i, j, k, num_sccs, num_sccs_no_alias;
f562e2ea 2252 /* Build partition dependence graph. */
2253 graph *pg = build_partition_graph (rdg, partitions, false);
2254
2255 alias_ddrs->truncate (0);
2256 /* Find strong connected components in the graph, with all dependence edges
2257 considered. */
2258 num_sccs = graphds_scc (pg, NULL);
2259 /* All SCCs now can be broken by runtime alias checks because SCCs caused by
2260 compilation time known dependences are merged before this function. */
2261 if ((unsigned) num_sccs < partitions->length ())
2262 {
2263 struct pg_edge_callback_data cbdata;
2264 auto_bitmap sccs_to_merge;
2265 auto_vec<enum partition_type> scc_types;
2266 struct partition *partition, *first;
2267
85676b62 2268	      /* If all partitions in an SCC have the same type, we can simply merge
f562e2ea 2269	 the SCC.  This loop finds such SCCs and records them in the bitmap.  */
2270 bitmap_set_range (sccs_to_merge, 0, (unsigned) num_sccs);
2271 for (i = 0; i < num_sccs; ++i)
2272 {
2273 for (j = 0; partitions->iterate (j, &first); ++j)
2274 if (pg->vertices[j].component == i)
2275 break;
a1e9c80f 2276
2277 bool same_type = true, all_builtins = partition_builtin_p (first);
f562e2ea 2278 for (++j; partitions->iterate (j, &partition); ++j)
2279 {
2280 if (pg->vertices[j].component != i)
2281 continue;
2282
2283 if (first->type != partition->type)
2284 {
a1e9c80f 2285 same_type = false;
f562e2ea 2286 break;
2287 }
a1e9c80f 2288 all_builtins &= partition_builtin_p (partition);
f562e2ea 2289 }
a1e9c80f 2290	  /* Merge the SCC if all partitions in the SCC have the same type, even
 2291	     though the result partition is sequential, because the vectorizer
 2292	     can do a better runtime alias check.  One exception is when all
 2293	     partitions in the SCC are builtins.  */
2294 if (!same_type || all_builtins)
2295 bitmap_clear_bit (sccs_to_merge, i);
f562e2ea 2296 }
2297
2298 /* Initialize callback data for traversing. */
2299 cbdata.sccs_to_merge = sccs_to_merge;
2300 cbdata.alias_ddrs = alias_ddrs;
2301 cbdata.vertices_component = XNEWVEC (int, pg->n_vertices);
 2302	      /* Record the component information, which will be corrupted by the
 2303	 next graph SCC finding call.  */
2304 for (i = 0; i < pg->n_vertices; ++i)
2305 cbdata.vertices_component[i] = pg->vertices[i].component;
2306
2307 /* Collect data dependences for runtime alias checks to break SCCs. */
2308 if (bitmap_count_bits (sccs_to_merge) != (unsigned) num_sccs)
2309 {
2310 /* Run SCC finding algorithm again, with alias dependence edges
85676b62 2311 skipped. This is to topologically sort partitions according to
f562e2ea 2312	     compilation time known dependences.  Note the topological order
2313 is stored in the form of pg's post order number. */
2314 num_sccs_no_alias = graphds_scc (pg, NULL, pg_skip_alias_edge);
2315 gcc_assert (partitions->length () == (unsigned) num_sccs_no_alias);
2316 /* With topological order, we can construct two subgraphs L and R.
2317 L contains edge <x, y> where x < y in terms of post order, while
 2318	     R contains edge <x, y> where x > y.  Edges for compilation time
 2319	     known dependences all fall in R, so we break SCCs by removing all
 2320	     (alias) edges in subgraph L.  */
2321 for_each_edge (pg, pg_collect_alias_ddrs, &cbdata);
2322 }
2323
 2324	      /* For each SCC that doesn't need to be broken, merge it.  */
2325 for (i = 0; i < num_sccs; ++i)
2326 {
2327 if (!bitmap_bit_p (sccs_to_merge, i))
2328 continue;
2329
2330 for (j = 0; partitions->iterate (j, &first); ++j)
2331 if (cbdata.vertices_component[j] == i)
2332 break;
cb072485 2333 for (k = j + 1; partitions->iterate (k, &partition); ++k)
f562e2ea 2334 {
2335 struct pg_vdata *data;
2336
cb072485 2337 if (cbdata.vertices_component[k] != i)
f562e2ea 2338 continue;
2339
cb072485 2340	      /* Update the postorder number so that the merged reduction partition
 2341	 is sorted after the other partitions.  */
2342 if (!partition_reduction_p (first)
2343 && partition_reduction_p (partition))
2344 {
2345 gcc_assert (pg->vertices[k].post < pg->vertices[j].post);
2346 pg->vertices[j].post = pg->vertices[k].post;
2347 }
f562e2ea 2348 partition_merge_into (NULL, first, partition, FUSE_SAME_SCC);
cb072485 2349 (*partitions)[k] = NULL;
f562e2ea 2350 partition_free (partition);
cb072485 2351 data = (struct pg_vdata *)pg->vertices[k].data;
2352 gcc_assert (data->id == k);
f562e2ea 2353 data->partition = NULL;
85676b62 2354 /* The result partition of merged SCC must be sequential. */
2355 first->type = PTYPE_SEQUENTIAL;
f562e2ea 2356 }
2357 }
2358 }
2359
2360 sort_partitions_by_post_order (pg, partitions);
2361 free_partition_graph_vdata (pg);
2362 for_each_edge (pg, free_partition_graph_edata_cb, NULL);
2363 free_graph (pg);
2364
2365 if (dump_file && (dump_flags & TDF_DETAILS))
2366 {
2367 fprintf (dump_file, "Possible alias data dependence to break:\n");
2368 dump_data_dependence_relations (dump_file, *alias_ddrs);
2369 }
2370}
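
/* A small worked picture (assumed post numbers): suppose P0 and P1 form
   an SCC in which P0 -> P1 is a compile-time known dependence and
   P1 -> P0 only a possible alias.  The second SCC pass skips the alias
   edge, so P0 gets a bigger post number than P1.  The alias edge
   P1 -> P0 then points from smaller to bigger post number, falls in
   subgraph L, and its ddrs are queued for the runtime alias check.  */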
2371
2372/* Compute and return an expression whose value is the segment length which
2373 will be accessed by DR in NITERS iterations. */
2374
2375static tree
2376data_ref_segment_size (struct data_reference *dr, tree niters)
2377{
e85b4a5e 2378 niters = size_binop (MINUS_EXPR,
2379 fold_convert (sizetype, niters),
2380 size_one_node);
2381 return size_binop (MULT_EXPR,
2382 fold_convert (sizetype, DR_STEP (dr)),
2383 fold_convert (sizetype, niters));
f562e2ea 2384}
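
/* In other words, the segment length is DR_STEP * (NITERS - 1).  As a
   hedged example, a reference with a 4-byte step accessed in 10
   iterations gets a segment of 36 bytes; the size of the access itself
   is accounted for separately, via the access_size of the
   dr_with_seg_len objects built by the caller.  */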
2385
2386/* Return true if LOOP's latch is dominated by statement for data reference
2387 DR. */
2388
2389static inline bool
2390latch_dominated_by_data_ref (struct loop *loop, data_reference *dr)
2391{
2392 return dominated_by_p (CDI_DOMINATORS, single_exit (loop)->src,
2393 gimple_bb (DR_STMT (dr)));
2394}
2395
2396/* Compute alias check pairs and store them in COMP_ALIAS_PAIRS for LOOP's
2397 data dependence relations ALIAS_DDRS. */
2398
2399static void
2400compute_alias_check_pairs (struct loop *loop, vec<ddr_p> *alias_ddrs,
2401 vec<dr_with_seg_len_pair_t> *comp_alias_pairs)
2402{
2403 unsigned int i;
2404 unsigned HOST_WIDE_INT factor = 1;
2405 tree niters_plus_one, niters = number_of_latch_executions (loop);
2406
2407 gcc_assert (niters != NULL_TREE && niters != chrec_dont_know);
2408 niters = fold_convert (sizetype, niters);
2409 niters_plus_one = size_binop (PLUS_EXPR, niters, size_one_node);
2410
2411 if (dump_file && (dump_flags & TDF_DETAILS))
2412 fprintf (dump_file, "Creating alias check pairs:\n");
2413
2414 /* Iterate all data dependence relations and compute alias check pairs. */
2415 for (i = 0; i < alias_ddrs->length (); i++)
2416 {
2417 ddr_p ddr = (*alias_ddrs)[i];
2418 struct data_reference *dr_a = DDR_A (ddr);
2419 struct data_reference *dr_b = DDR_B (ddr);
2420 tree seg_length_a, seg_length_b;
2421 int comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a),
2422 DR_BASE_ADDRESS (dr_b));
2423
2424 if (comp_res == 0)
2425 comp_res = data_ref_compare_tree (DR_OFFSET (dr_a), DR_OFFSET (dr_b));
2426 gcc_assert (comp_res != 0);
2427
2428 if (latch_dominated_by_data_ref (loop, dr_a))
2429 seg_length_a = data_ref_segment_size (dr_a, niters_plus_one);
2430 else
2431 seg_length_a = data_ref_segment_size (dr_a, niters);
2432
2433 if (latch_dominated_by_data_ref (loop, dr_b))
2434 seg_length_b = data_ref_segment_size (dr_b, niters_plus_one);
2435 else
2436 seg_length_b = data_ref_segment_size (dr_b, niters);
2437
e85b4a5e 2438 unsigned HOST_WIDE_INT access_size_a
2439 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_a))));
2440 unsigned HOST_WIDE_INT access_size_b
2441 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_b))));
2442 unsigned int align_a = TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_a)));
2443 unsigned int align_b = TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_b)));
2444
f562e2ea 2445 dr_with_seg_len_pair_t dr_with_seg_len_pair
e85b4a5e 2446 (dr_with_seg_len (dr_a, seg_length_a, access_size_a, align_a),
2447 dr_with_seg_len (dr_b, seg_length_b, access_size_b, align_b));
f562e2ea 2448
2449 /* Canonicalize pairs by sorting the two DR members. */
2450 if (comp_res > 0)
2451 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
2452
2453 comp_alias_pairs->safe_push (dr_with_seg_len_pair);
2454 }
2455
2456 if (tree_fits_uhwi_p (niters))
2457 factor = tree_to_uhwi (niters);
2458
2459 /* Prune alias check pairs. */
2460 prune_runtime_alias_test_list (comp_alias_pairs, factor);
2461 if (dump_file && (dump_flags & TDF_DETAILS))
2462 fprintf (dump_file,
2463 "Improved number of alias checks from %d to %d\n",
2464 alias_ddrs->length (), comp_alias_pairs->length ());
2465}
2466
2467/* Given data dependence relations in ALIAS_DDRS, generate runtime alias
2468 checks and version LOOP under condition of these runtime alias checks. */
2469
2470static void
a1e9c80f 2471version_loop_by_alias_check (vec<struct partition *> *partitions,
2472 struct loop *loop, vec<ddr_p> *alias_ddrs)
f562e2ea 2473{
2474 profile_probability prob;
2475 basic_block cond_bb;
2476 struct loop *nloop;
2477 tree lhs, arg0, cond_expr = NULL_TREE;
2478 gimple_seq cond_stmts = NULL;
2479 gimple *call_stmt = NULL;
2480 auto_vec<dr_with_seg_len_pair_t> comp_alias_pairs;
2481
2482 /* Generate code for runtime alias checks if necessary. */
2483 gcc_assert (alias_ddrs->length () > 0);
2484
2485 if (dump_file && (dump_flags & TDF_DETAILS))
2486 fprintf (dump_file,
2487 "Version loop <%d> with runtime alias check\n", loop->num);
2488
2489 compute_alias_check_pairs (loop, alias_ddrs, &comp_alias_pairs);
2490 create_runtime_alias_checks (loop, &comp_alias_pairs, &cond_expr);
2491 cond_expr = force_gimple_operand_1 (cond_expr, &cond_stmts,
b48c230a 2492 is_gimple_val, NULL_TREE);
f562e2ea 2493
2494 /* Depend on vectorizer to fold IFN_LOOP_DIST_ALIAS. */
a1e9c80f 2495 bool cancelable_p = flag_tree_loop_vectorize;
2496 if (cancelable_p)
2497 {
2498 unsigned i = 0;
2499 struct partition *partition;
2500 for (; partitions->iterate (i, &partition); ++i)
2501 if (!partition_builtin_p (partition))
2502 break;
2503
 2504	      /* If all partitions are builtins, distributing them would be profitable
 2505	 and we don't want to cancel the runtime alias checks.  */
2506 if (i == partitions->length ())
2507 cancelable_p = false;
2508 }
2509
2510 /* Generate internal function call for loop distribution alias check if the
2511 runtime alias check should be cancelable. */
2512 if (cancelable_p)
f562e2ea 2513 {
f562e2ea 2514 call_stmt = gimple_build_call_internal (IFN_LOOP_DIST_ALIAS,
2515 2, NULL_TREE, cond_expr);
2516 lhs = make_ssa_name (boolean_type_node);
2517 gimple_call_set_lhs (call_stmt, lhs);
2518 }
2519 else
2520 lhs = cond_expr;
2521
2522 prob = profile_probability::guessed_always ().apply_scale (9, 10);
2523 initialize_original_copy_tables ();
2524 nloop = loop_version (loop, lhs, &cond_bb, prob, prob.invert (),
2525 prob, prob.invert (), true);
2526 free_original_copy_tables ();
2527 /* Record the original loop number in newly generated loops. In case of
2528 distribution, the original loop will be distributed and the new loop
2529 is kept. */
2530 loop->orig_loop_num = nloop->num;
2531 nloop->orig_loop_num = nloop->num;
2532 nloop->dont_vectorize = true;
2533 nloop->force_vectorize = false;
2534
2535 if (call_stmt)
2536 {
2537 /* Record new loop's num in IFN_LOOP_DIST_ALIAS because the original
2538 loop could be destroyed. */
2539 arg0 = build_int_cst (integer_type_node, loop->orig_loop_num);
2540 gimple_call_set_arg (call_stmt, 0, arg0);
2541 gimple_seq_add_stmt_without_update (&cond_stmts, call_stmt);
2542 }
2543
2544 if (cond_stmts)
2545 {
2546 gimple_stmt_iterator cond_gsi = gsi_last_bb (cond_bb);
2547 gsi_insert_seq_before (&cond_gsi, cond_stmts, GSI_SAME_STMT);
2548 }
2549 update_ssa (TODO_update_ssa);
2550}
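
/* Conceptually, the versioned code produced above looks like this
   sketch:

     if (.LOOP_DIST_ALIAS (orig_loop_num, <runtime alias checks>))
       ... original loop, to be distributed ...
     else
       ... new copy kept as fallback, with dont_vectorize set ...

   where the internal function call is expected to be folded to a
   constant by the vectorizer, as noted above.  */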
2551
 2552	/* Return true if loop versioning is needed to distribute PARTITIONS.
2553 ALIAS_DDRS are data dependence relations for runtime alias check. */
2554
2555static inline bool
2556version_for_distribution_p (vec<struct partition *> *partitions,
2557 vec<ddr_p> *alias_ddrs)
2558{
2559 /* No need to version loop if we have only one partition. */
2560 if (partitions->length () == 1)
2561 return false;
2562
2563 /* Need to version loop if runtime alias check is necessary. */
2564 return (alias_ddrs->length () > 0);
2565}
2566
05ebeee6 2567/* Compare base offset of builtin mem* partitions P1 and P2. */
2568
207c43a2 2569static int
2570offset_cmp (const void *vp1, const void *vp2)
05ebeee6 2571{
207c43a2 2572 struct partition *p1 = *(struct partition *const *) vp1;
2573 struct partition *p2 = *(struct partition *const *) vp2;
2574 unsigned HOST_WIDE_INT o1 = p1->builtin->dst_base_offset;
2575 unsigned HOST_WIDE_INT o2 = p2->builtin->dst_base_offset;
2576 return (o2 < o1) - (o1 < o2);
05ebeee6 2577}
2578
2579/* Fuse adjacent memset builtin PARTITIONS if possible. This is a special
 2580	   case optimization transforming the code below:
2581
2582 __builtin_memset (&obj, 0, 100);
2583 _1 = &obj + 100;
2584 __builtin_memset (_1, 0, 200);
2585 _2 = &obj + 300;
2586 __builtin_memset (_2, 0, 100);
2587
2588 into:
2589
2590 __builtin_memset (&obj, 0, 400);
2591
2592 Note we don't have dependence information between different partitions
 2593	   at this point; as a result, we can't handle nonadjacent memset builtin
 2594	   partitions since dependences might be broken.  */
2595
2596static void
2597fuse_memset_builtins (vec<struct partition *> *partitions)
2598{
2599 unsigned i, j;
2600 struct partition *part1, *part2;
79f9738a 2601 tree rhs1, rhs2;
05ebeee6 2602
2603 for (i = 0; partitions->iterate (i, &part1);)
2604 {
2605 if (part1->kind != PKIND_MEMSET)
2606 {
2607 i++;
2608 continue;
2609 }
2610
2611 /* Find sub-array of memset builtins of the same base. Index range
2612 of the sub-array is [i, j) with "j > i". */
2613 for (j = i + 1; partitions->iterate (j, &part2); ++j)
2614 {
2615 if (part2->kind != PKIND_MEMSET
2616 || !operand_equal_p (part1->builtin->dst_base_base,
2617 part2->builtin->dst_base_base, 0))
2618 break;
79f9738a 2619
2620 /* Memset calls setting different values can't be merged. */
2621 rhs1 = gimple_assign_rhs1 (DR_STMT (part1->builtin->dst_dr));
2622 rhs2 = gimple_assign_rhs1 (DR_STMT (part2->builtin->dst_dr));
2623 if (!operand_equal_p (rhs1, rhs2, 0))
2624 break;
05ebeee6 2625 }
2626
2627 /* Stable sort is required in order to avoid breaking dependence. */
207c43a2 2628 gcc_stablesort (&(*partitions)[i], j - i, sizeof (*partitions)[i],
2629 offset_cmp);
05ebeee6 2630 /* Continue with next partition. */
2631 i = j;
2632 }
2633
2634 /* Merge all consecutive memset builtin partitions. */
2635 for (i = 0; i < partitions->length () - 1;)
2636 {
2637 part1 = (*partitions)[i];
2638 if (part1->kind != PKIND_MEMSET)
2639 {
2640 i++;
2641 continue;
2642 }
2643
2644 part2 = (*partitions)[i + 1];
2645 /* Only merge memset partitions of the same base and with constant
2646 access sizes. */
2647 if (part2->kind != PKIND_MEMSET
2648 || TREE_CODE (part1->builtin->size) != INTEGER_CST
2649 || TREE_CODE (part2->builtin->size) != INTEGER_CST
2650 || !operand_equal_p (part1->builtin->dst_base_base,
2651 part2->builtin->dst_base_base, 0))
2652 {
2653 i++;
2654 continue;
2655 }
79f9738a 2656 rhs1 = gimple_assign_rhs1 (DR_STMT (part1->builtin->dst_dr));
2657 rhs2 = gimple_assign_rhs1 (DR_STMT (part2->builtin->dst_dr));
05ebeee6 2658 int bytev1 = const_with_all_bytes_same (rhs1);
2659 int bytev2 = const_with_all_bytes_same (rhs2);
2660 /* Only merge memset partitions of the same value. */
2661 if (bytev1 != bytev2 || bytev1 == -1)
2662 {
2663 i++;
2664 continue;
2665 }
2666 wide_int end1 = wi::add (part1->builtin->dst_base_offset,
2667 wi::to_wide (part1->builtin->size));
2668 /* Only merge adjacent memset partitions. */
2669 if (wi::ne_p (end1, part2->builtin->dst_base_offset))
2670 {
2671 i++;
2672 continue;
2673 }
2674 /* Merge partitions[i] and partitions[i+1]. */
2675 part1->builtin->size = fold_build2 (PLUS_EXPR, sizetype,
2676 part1->builtin->size,
2677 part2->builtin->size);
2678 partition_free (part2);
2679 partitions->ordered_remove (i + 1);
2680 }
2681}
2682
883b4905 2683/* Fuse PARTITIONS of LOOP if necessary before finalizing distribution.
2684 ALIAS_DDRS contains ddrs which need runtime alias check. */
f562e2ea 2685
2686static void
883b4905 2687finalize_partitions (struct loop *loop, vec<struct partition *> *partitions,
f562e2ea 2688 vec<ddr_p> *alias_ddrs)
2689{
2690 unsigned i;
883b4905 2691 struct partition *partition, *a;
f562e2ea 2692
2693 if (partitions->length () == 1
2694 || alias_ddrs->length () > 0)
2695 return;
2696
b1c062d8 2697 unsigned num_builtin = 0, num_normal = 0, num_partial_memset = 0;
883b4905 2698 bool same_type_p = true;
2699 enum partition_type type = ((*partitions)[0])->type;
2700 for (i = 0; partitions->iterate (i, &partition); ++i)
f562e2ea 2701 {
883b4905 2702 same_type_p &= (type == partition->type);
b1c062d8 2703 if (partition_builtin_p (partition))
2704 {
2705 num_builtin++;
2706 continue;
2707 }
2708 num_normal++;
2709 if (partition->kind == PKIND_PARTIAL_MEMSET)
2710 num_partial_memset++;
f562e2ea 2711 }
2712
883b4905 2713	  /* Don't distribute the current loop into too many loops given we don't
 2714	     have a memory stream cost model.  Be even more conservative in case
 2715	     of loop nest distribution.  */
b1c062d8 2716 if ((same_type_p && num_builtin == 0
2717 && (loop->inner == NULL || num_normal != 2 || num_partial_memset != 1))
883b4905 2718 || (loop->inner != NULL
2719 && i >= NUM_PARTITION_THRESHOLD && num_normal > 1)
2720 || (loop->inner == NULL
2721 && i >= NUM_PARTITION_THRESHOLD && num_normal > num_builtin))
f562e2ea 2722 {
883b4905 2723 a = (*partitions)[0];
2724 for (i = 1; partitions->iterate (i, &partition); ++i)
2725 {
2726 partition_merge_into (NULL, a, partition, FUSE_FINALIZE);
2727 partition_free (partition);
2728 }
2729 partitions->truncate (1);
f562e2ea 2730 }
05ebeee6 2731
2732 /* Fuse memset builtins if possible. */
2733 if (partitions->length () > 1)
2734 fuse_memset_builtins (partitions);
f562e2ea 2735}
2736
2737/* Distributes the code from LOOP in such a way that producer statements
2738 are placed before consumer statements. Tries to separate only the
2739 statements from STMTS into separate loops. Returns the number of
2740 distributed loops. Set NB_CALLS to number of generated builtin calls.
2741 Set *DESTROY_P to whether LOOP needs to be destroyed. */
801c5610 2742
2743static int
42acab1c 2744distribute_loop (struct loop *loop, vec<gimple *> stmts,
77d095c0 2745 control_dependences *cd, int *nb_calls, bool *destroy_p)
801c5610 2746{
bbb229ef 2747 ddrs_table = new hash_table<ddr_hasher> (389);
15c8650d 2748 struct graph *rdg;
04009ada 2749 partition *partition;
6198d968 2750 bool any_builtin;
15c8650d 2751 int i, nbp;
801c5610 2752
303e9267 2753 *destroy_p = false;
df9892ff 2754 *nb_calls = 0;
209a62a6 2755 loop_nest.create (0);
15c8650d 2756 if (!find_loop_nest (loop, &loop_nest))
209a62a6 2757 {
2758 loop_nest.release ();
bbb229ef 2759 delete ddrs_table;
209a62a6 2760 return 0;
2761 }
15c8650d 2762
f3754041 2763 datarefs_vec.create (20);
6079e9be 2764 has_nonaddressable_dataref_p = false;
209a62a6 2765 rdg = build_rdg (loop, cd);
15c8650d 2766 if (!rdg)
2767 {
2768 if (dump_file && (dump_flags & TDF_DETAILS))
2769 fprintf (dump_file,
2770 "Loop %d not distributed: failed to build the RDG.\n",
2771 loop->num);
2772
209a62a6 2773 loop_nest.release ();
f3754041 2774 free_data_refs (datarefs_vec);
bbb229ef 2775 delete ddrs_table;
f3754041 2776 return 0;
2777 }
2778
2779 if (datarefs_vec.length () > MAX_DATAREFS_NUM)
2780 {
2781 if (dump_file && (dump_flags & TDF_DETAILS))
2782 fprintf (dump_file,
2783 "Loop %d not distributed: too many memory references.\n",
2784 loop->num);
2785
2786 free_rdg (rdg);
2787 loop_nest.release ();
2788 free_data_refs (datarefs_vec);
bbb229ef 2789 delete ddrs_table;
15c8650d 2790 return 0;
2791 }
2792
f3754041 2793 data_reference_p dref;
2794 for (i = 0; datarefs_vec.iterate (i, &dref); ++i)
2795 dref->aux = (void *) (uintptr_t) i;
2796
15c8650d 2797 if (dump_file && (dump_flags & TDF_DETAILS))
2798 dump_rdg (dump_file, rdg);
2799
04009ada 2800 auto_vec<struct partition *, 3> partitions;
15c8650d 2801 rdg_build_partitions (rdg, stmts, &partitions);
801c5610 2802
f562e2ea 2803 auto_vec<ddr_p> alias_ddrs;
2804
8d95fe31 2805 auto_bitmap stmt_in_all_partitions;
2806 bitmap_copy (stmt_in_all_partitions, partitions[0]->stmts);
2807 for (i = 1; partitions.iterate (i, &partition); ++i)
2808 bitmap_and_into (stmt_in_all_partitions, partitions[i]->stmts);
2809
6198d968 2810 any_builtin = false;
f1f41a6c 2811 FOR_EACH_VEC_ELT (partitions, i, partition)
6198d968 2812 {
8d95fe31 2813 classify_partition (loop, rdg, partition, stmt_in_all_partitions);
6198d968 2814 any_builtin |= partition_builtin_p (partition);
2815 }
d32bc1d7 2816
7103facc 2817 /* If we are only distributing patterns but did not detect any,
2818 simply bail out. */
0c58611e 2819 if (!flag_tree_loop_distribution
2820 && !any_builtin)
2821 {
2822 nbp = 0;
2823 goto ldist_done;
2824 }
2825
7103facc 2826	  /* If we are only distributing patterns, fuse all partitions that
2827 were not classified as builtins. This also avoids chopping
2828 a loop into pieces, separated by builtin calls. That is, we
2829 only want no or a single loop body remaining. */
04009ada 2830 struct partition *into;
7103facc 2831 if (!flag_tree_loop_distribution)
2832 {
2833 for (i = 0; partitions.iterate (i, &into); ++i)
2834 if (!partition_builtin_p (into))
2835 break;
2836 for (++i; partitions.iterate (i, &partition); ++i)
2837 if (!partition_builtin_p (partition))
2838 {
f024aa04 2839 partition_merge_into (NULL, into, partition, FUSE_NON_BUILTIN);
7103facc 2840 partitions.unordered_remove (i);
2841 partition_free (partition);
2842 i--;
2843 }
2844 }
2845
2846 /* Due to limitations in the transform phase we have to fuse all
2847 reduction partitions into the last partition so the existing
2848 loop will contain all loop-closed PHI nodes. */
2849 for (i = 0; partitions.iterate (i, &into); ++i)
2850 if (partition_reduction_p (into))
2851 break;
2852 for (i = i + 1; partitions.iterate (i, &partition); ++i)
2853 if (partition_reduction_p (partition))
2854 {
f024aa04 2855 partition_merge_into (rdg, into, partition, FUSE_REDUCTION);
7103facc 2856 partitions.unordered_remove (i);
2857 partition_free (partition);
2858 i--;
2859 }
2860
0c58611e 2861 /* Apply our simple cost model - fuse partitions with similar
2862 memory accesses. */
0c58611e 2863 for (i = 0; partitions.iterate (i, &into); ++i)
2864 {
facf7bb7 2865 bool changed = false;
b1c062d8 2866 if (partition_builtin_p (into) || into->kind == PKIND_PARTIAL_MEMSET)
0c58611e 2867 continue;
2868 for (int j = i + 1;
2869 partitions.iterate (j, &partition); ++j)
2870 {
fd34627b 2871 if (share_memory_accesses (rdg, into, partition))
0c58611e 2872 {
f024aa04 2873 partition_merge_into (rdg, into, partition, FUSE_SHARE_REF);
7103facc 2874 partitions.unordered_remove (j);
0c58611e 2875 partition_free (partition);
2876 j--;
facf7bb7 2877 changed = true;
0c58611e 2878 }
2879 }
facf7bb7 2880	      /* If in step 1 we fused partitions 0 1 2 into "0,2 1" because 0 and 2
 2881	 have similar accesses, and 1 and 2 have similar accesses but 0 and 1
 2882	 do not, then in the next iteration we would fail to consider merging
 2883	 1 into "0,2".  So try again if we did any merging into 0.  */
2884 if (changed)
2885 i--;
0c58611e 2886 }
2887
883b4905 2888	  /* Build the partition dependence graph and fuse partitions in strongly
 2889	     connected components.  */
7103facc 2890 if (partitions.length () > 1)
54459dd6 2891 {
883b4905 2892 /* Don't support loop nest distribution under runtime alias check
6079e9be 2893 since it's not likely to enable many vectorization opportunities.
2894 Also if loop has any data reference which may be not addressable
2895 since alias check needs to take, compare address of the object. */
2896 if (loop->inner || has_nonaddressable_dataref_p)
883b4905 2897 merge_dep_scc_partitions (rdg, &partitions, false);
2898 else
2899 {
2900 merge_dep_scc_partitions (rdg, &partitions, true);
2901 if (partitions.length () > 1)
2902 break_alias_scc_partitions (rdg, &partitions, &alias_ddrs);
2903 }
ac7a1007 2904 }
2905
883b4905 2906 finalize_partitions (loop, &partitions, &alias_ddrs);
f562e2ea 2907
f1f41a6c 2908 nbp = partitions.length ();
58ccfbea 2909 if (nbp == 0
f1f41a6c 2910 || (nbp == 1 && !partition_builtin_p (partitions[0]))
2911 || (nbp > 1 && partition_contains_all_rw (rdg, partitions)))
54459dd6 2912 {
2913 nbp = 0;
2914 goto ldist_done;
2915 }
801c5610 2916
f562e2ea 2917 if (version_for_distribution_p (&partitions, &alias_ddrs))
a1e9c80f 2918 version_loop_by_alias_check (&partitions, loop, &alias_ddrs);
f562e2ea 2919
801c5610 2920 if (dump_file && (dump_flags & TDF_DETAILS))
f562e2ea 2921 {
2922 fprintf (dump_file,
2923 "distribute loop <%d> into partitions:\n", loop->num);
2924 dump_rdg_partitions (dump_file, partitions);
2925 }
801c5610 2926
f1f41a6c 2927 FOR_EACH_VEC_ELT (partitions, i, partition)
df9892ff 2928 {
2929 if (partition_builtin_p (partition))
2930 (*nb_calls)++;
77d095c0 2931 *destroy_p |= generate_code_for_partition (loop, partition, i < nbp - 1);
df9892ff 2932 }
801c5610 2933
801c5610 2934 ldist_done:
209a62a6 2935 loop_nest.release ();
f3754041 2936 free_data_refs (datarefs_vec);
bbb229ef 2937 for (hash_table<ddr_hasher>::iterator iter = ddrs_table->begin ();
2938 iter != ddrs_table->end (); ++iter)
50f5937e 2939 {
2940 free_dependence_relation (*iter);
2941 *iter = NULL;
2942 }
bbb229ef 2943 delete ddrs_table;
801c5610 2944
f1f41a6c 2945 FOR_EACH_VEC_ELT (partitions, i, partition)
543506e0 2946 partition_free (partition);
801c5610 2947
801c5610 2948 free_rdg (rdg);
df9892ff 2949 return nbp - *nb_calls;
801c5610 2950}
2951
2952/* Distribute all loops in the current function. */
2953
65b0537f 2954namespace {
2955
2956const pass_data pass_data_loop_distribution =
2957{
2958 GIMPLE_PASS, /* type */
2959 "ldist", /* name */
2960 OPTGROUP_LOOP, /* optinfo_flags */
65b0537f 2961 TV_TREE_LOOP_DISTRIBUTION, /* tv_id */
2962 ( PROP_cfg | PROP_ssa ), /* properties_required */
2963 0, /* properties_provided */
2964 0, /* properties_destroyed */
2965 0, /* todo_flags_start */
8b88439e 2966 0, /* todo_flags_finish */
65b0537f 2967};
2968
2969class pass_loop_distribution : public gimple_opt_pass
2970{
2971public:
2972 pass_loop_distribution (gcc::context *ctxt)
2973 : gimple_opt_pass (pass_data_loop_distribution, ctxt)
2974 {}
2975
2976 /* opt_pass methods: */
2977 virtual bool gate (function *)
2978 {
2979 return flag_tree_loop_distribution
2980 || flag_tree_loop_distribute_patterns;
2981 }
2982
2983 virtual unsigned int execute (function *);
2984
2985}; // class pass_loop_distribution
2986
883b4905 2987
2988/* Given LOOP, this function records seed statements for distribution in
2989 WORK_LIST. Return false if there is nothing for distribution. */
2990
2991static bool
2992find_seed_stmts_for_distribution (struct loop *loop, vec<gimple *> *work_list)
2993{
2994 basic_block *bbs = get_loop_body_in_dom_order (loop);
2995
2996 /* Initialize the worklist with stmts we seed the partitions with. */
2997 for (unsigned i = 0; i < loop->num_nodes; ++i)
2998 {
2999 for (gphi_iterator gsi = gsi_start_phis (bbs[i]);
3000 !gsi_end_p (gsi); gsi_next (&gsi))
3001 {
3002 gphi *phi = gsi.phi ();
3003 if (virtual_operand_p (gimple_phi_result (phi)))
3004 continue;
3005 /* Distribute stmts which have defs that are used outside of
3006 the loop. */
3007 if (!stmt_has_scalar_dependences_outside_loop (loop, phi))
3008 continue;
3009 work_list->safe_push (phi);
3010 }
3011 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
3012 !gsi_end_p (gsi); gsi_next (&gsi))
3013 {
3014 gimple *stmt = gsi_stmt (gsi);
3015
3016 /* If there is a stmt with side-effects bail out - we
3017 cannot and should not distribute this loop. */
3018 if (gimple_has_side_effects (stmt))
3019 {
3020 free (bbs);
3021 return false;
3022 }
3023
3024 /* Distribute stmts which have defs that are used outside of
3025 the loop. */
3026 if (stmt_has_scalar_dependences_outside_loop (loop, stmt))
3027 ;
3028 /* Otherwise only distribute stores for now. */
3029 else if (!gimple_vdef (stmt))
3030 continue;
3031
3032 work_list->safe_push (stmt);
3033 }
3034 }
3035 free (bbs);
3036 return work_list->length () > 0;
3037}
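
/* Illustrative seeds (hypothetical loop): a store such as
   "a[i] = b[i] + c" is always seeded because it has a virtual
   definition; a scalar statement or PHI is seeded only if its result
   is used after the loop, e.g. the final value of "s = s + b[i]"
   returned by the caller.  A statement with side effects, such as a
   call to printf, makes the whole loop ineligible.  */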
3038
3039/* Given innermost LOOP, return the outermost enclosing loop that forms a
3040 perfect loop nest. */
3041
3042static struct loop *
3043prepare_perfect_loop_nest (struct loop *loop)
3044{
3045 struct loop *outer = loop_outer (loop);
3046 tree niters = number_of_latch_executions (loop);
3047
b1c062d8 3048 /* TODO: We only support the innermost 3-level loop nest distribution
883b4905 3049	     because of compilation time issues for now.  This should be relaxed
b1c062d8 3050 in the future. Note we only allow 3-level loop nest distribution
3051 when parallelizing loops. */
3052 while ((loop->inner == NULL
3053 || (loop->inner->inner == NULL && flag_tree_parallelize_loops > 1))
883b4905 3054 && loop_outer (outer)
3055 && outer->inner == loop && loop->next == NULL
3056 && single_exit (outer)
3057 && optimize_loop_for_speed_p (outer)
3058 && !chrec_contains_symbols_defined_in_loop (niters, outer->num)
3059 && (niters = number_of_latch_executions (outer)) != NULL_TREE
3060 && niters != chrec_dont_know)
3061 {
3062 loop = outer;
3063 outer = loop_outer (loop);
3064 }
3065
3066 return loop;
3067}
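
/* For example (a sketch), given the perfect nest

     for (i = 0; i < n; i++)      single exit, known iteration count
       for (j = 0; j < m; j++)
	 a[i][j] = 0;

   this returns the outer "i" loop, so the whole nest can be considered
   for distribution (e.g. as a single memset) instead of only the
   innermost "j" loop.  */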
3068
65b0537f 3069unsigned int
3070pass_loop_distribution::execute (function *fun)
801c5610 3071{
3072 struct loop *loop;
54459dd6 3073 bool changed = false;
f83623cc 3074 basic_block bb;
f1ce84d9 3075 control_dependences *cd = NULL;
77d095c0 3076 auto_vec<loop_p> loops_to_be_destroyed;
f83623cc 3077
c4b1b865 3078 if (number_of_loops (fun) <= 1)
3079 return 0;
3080
50eda3a8 3081 /* Compute topological order for basic blocks. Topological order is
3082 needed because data dependence is computed for data references in
3083 lexicographical order. */
3084 if (bb_top_order_index == NULL)
3085 {
5c49e6ea 3086 int rpo_num;
50eda3a8 3087 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3088
3089 bb_top_order_index = XNEWVEC (int, last_basic_block_for_fn (cfun));
5c49e6ea 3090 bb_top_order_index_size = last_basic_block_for_fn (cfun);
3091 rpo_num = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, true);
3092 for (int i = 0; i < rpo_num; i++)
50eda3a8 3093 bb_top_order_index[rpo[i]] = i;
3094
3095 free (rpo);
3096 }
3097
65b0537f 3098 FOR_ALL_BB_FN (bb, fun)
f83623cc 3099 {
3100 gimple_stmt_iterator gsi;
3101 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3102 gimple_set_uid (gsi_stmt (gsi), -1);
3103 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3104 gimple_set_uid (gsi_stmt (gsi), -1);
3105 }
801c5610 3106
54459dd6 3107 /* We can at the moment only distribute non-nested loops, thus restrict
3108 walking to innermost loops. */
f21d4d00 3109 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
801c5610 3110 {
883b4905 3111	      /* Don't distribute loops with multiple exit edges, or cold loops.  */
3112 if (!single_exit (loop)
3113 || !optimize_loop_for_speed_p (loop))
5f38d9ef 3114 continue;
3115
1c4ee769 3116 /* Don't distribute loop if niters is unknown. */
3117 tree niters = number_of_latch_executions (loop);
3118 if (niters == NULL_TREE || niters == chrec_dont_know)
3119 continue;
3120
883b4905 3121 /* Get the perfect loop nest for distribution. */
3122 loop = prepare_perfect_loop_nest (loop);
3123 for (; loop; loop = loop->inner)
6198d968 3124 {
883b4905 3125 auto_vec<gimple *> work_list;
3126 if (!find_seed_stmts_for_distribution (loop, &work_list))
3127 break;
54459dd6 3128
883b4905 3129 const char *str = loop->inner ? " nest" : "";
c309657f 3130 dump_user_location_t loc = find_loop_location (loop);
f1ce84d9 3131 if (!cd)
3132 {
73423315 3133 calculate_dominance_info (CDI_DOMINATORS);
f1ce84d9 3134 calculate_dominance_info (CDI_POST_DOMINATORS);
ce143ff0 3135 cd = new control_dependences ();
f1ce84d9 3136 free_dominance_info (CDI_POST_DOMINATORS);
3137 }
883b4905 3138
77d095c0 3139 bool destroy_p;
883b4905 3140 int nb_generated_loops, nb_generated_calls;
df9892ff 3141 nb_generated_loops = distribute_loop (loop, work_list, cd,
77d095c0 3142 &nb_generated_calls,
3143 &destroy_p);
3144 if (destroy_p)
3145 loops_to_be_destroyed.safe_push (loop);
54459dd6 3146
883b4905 3147 if (nb_generated_loops + nb_generated_calls > 0)
3148 {
3149 changed = true;
91f42adc 3150 if (dump_enabled_p ())
3151 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
3152 loc, "Loop%s %d distributed: split to %d loops "
3153 "and %d library calls.\n", str, loop->num,
3154 nb_generated_loops, nb_generated_calls);
883b4905 3155
3156 break;
3157 }
3158
3159 if (dump_file && (dump_flags & TDF_DETAILS))
3160 fprintf (dump_file, "Loop%s %d not distributed.\n", str, loop->num);
801c5610 3161 }
801c5610 3162 }
3163
f1ce84d9 3164 if (cd)
3165 delete cd;
3166
50eda3a8 3167 if (bb_top_order_index != NULL)
3168 {
3169 free (bb_top_order_index);
3170 bb_top_order_index = NULL;
3171 bb_top_order_index_size = 0;
3172 }
3173
54459dd6 3174 if (changed)
3175 {
ce143ff0 3176 /* Destroy loop bodies that could not be reused. Do this late as we
 3177	 otherwise can end up referring to stale data in control dependences.  */
3178 unsigned i;
3179 FOR_EACH_VEC_ELT (loops_to_be_destroyed, i, loop)
50eda3a8 3180 destroy_loop (loop);
ce143ff0 3181
505d3633 3182 /* Cached scalar evolutions now may refer to wrong or non-existing
3183 loops. */
3184 scev_reset_htab ();
65b0537f 3185 mark_virtual_operands_for_renaming (fun);
54459dd6 3186 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
3187 }
3188
382ecba7 3189 checking_verify_loop_structure ();
54459dd6 3190
ade75d0f 3191 return changed ? TODO_cleanup_cfg : 0;
801c5610 3192}
3193
cbe8bda8 3194} // anon namespace
3195
3196gimple_opt_pass *
3197make_pass_loop_distribution (gcc::context *ctxt)
3198{
3199 return new pass_loop_distribution (ctxt);
3200}