1 /* Conversion of SESE regions to Polyhedra.
2 Copyright (C) 2009-2013 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <sebastian.pop@amd.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22
23 #ifdef HAVE_cloog
24 #include <isl/set.h>
25 #include <isl/map.h>
26 #include <isl/union_map.h>
27 #include <isl/constraint.h>
28 #include <isl/aff.h>
29 #include <cloog/cloog.h>
31 #include <cloog/isl/domain.h>
32 #endif
33
34 #include "system.h"
35 #include "coretypes.h"
36 #include "tree-flow.h"
37 #include "tree-pass.h"
38 #include "cfgloop.h"
39 #include "tree-chrec.h"
40 #include "tree-data-ref.h"
41 #include "tree-scalar-evolution.h"
42 #include "domwalk.h"
43 #include "sese.h"
44
45 #ifdef HAVE_cloog
46 #include "graphite-poly.h"
47 #include "graphite-sese-to-poly.h"
48
49
50 /* Assigns to RES the value of the INTEGER_CST T. */
51
52 static inline void
53 tree_int_to_gmp (tree t, mpz_t res)
54 {
55 double_int di = tree_to_double_int (t);
56 mpz_set_double_int (res, di, TYPE_UNSIGNED (TREE_TYPE (t)));
57 }
58
59 /* Returns the index of the PHI argument defined in the outermost
60 loop. */
61
62 static size_t
63 phi_arg_in_outermost_loop (gimple phi)
64 {
65 loop_p loop = gimple_bb (phi)->loop_father;
66 size_t i, res = 0;
67
68 for (i = 0; i < gimple_phi_num_args (phi); i++)
69 if (!flow_bb_inside_loop_p (loop, gimple_phi_arg_edge (phi, i)->src))
70 {
71 loop = gimple_phi_arg_edge (phi, i)->src->loop_father;
72 res = i;
73 }
74
75 return res;
76 }
77
78 /* Removes a simple copy phi node "RES = phi (INIT, RES)" at position
79 PSI by inserting on the loop ENTRY edge the assignment "RES = INIT". */
80
81 static void
82 remove_simple_copy_phi (gimple_stmt_iterator *psi)
83 {
84 gimple phi = gsi_stmt (*psi);
85 tree res = gimple_phi_result (phi);
86 size_t entry = phi_arg_in_outermost_loop (phi);
87 tree init = gimple_phi_arg_def (phi, entry);
88 gimple stmt = gimple_build_assign (res, init);
89 edge e = gimple_phi_arg_edge (phi, entry);
90
91 remove_phi_node (psi, false);
92 gsi_insert_on_edge_immediate (e, stmt);
93 SSA_NAME_DEF_STMT (res) = stmt;
94 }
95
96 /* Removes an invariant phi node at position PSI by inserting on the
97 loop ENTRY edge the assignment RES = INIT. */
98
99 static void
100 remove_invariant_phi (sese region, gimple_stmt_iterator *psi)
101 {
102 gimple phi = gsi_stmt (*psi);
103 loop_p loop = loop_containing_stmt (phi);
104 tree res = gimple_phi_result (phi);
105 tree scev = scalar_evolution_in_region (region, loop, res);
106 size_t entry = phi_arg_in_outermost_loop (phi);
107 edge e = gimple_phi_arg_edge (phi, entry);
108 tree var;
109 gimple stmt;
110 gimple_seq stmts = NULL;
111
112 if (tree_contains_chrecs (scev, NULL))
113 scev = gimple_phi_arg_def (phi, entry);
114
115 var = force_gimple_operand (scev, &stmts, true, NULL_TREE);
116 stmt = gimple_build_assign (res, var);
117 remove_phi_node (psi, false);
118
119 gimple_seq_add_stmt (&stmts, stmt);
120 gsi_insert_seq_on_edge (e, stmts);
121 gsi_commit_edge_inserts ();
122 SSA_NAME_DEF_STMT (res) = stmt;
123 }
124
125 /* Returns true when the phi node PHI is of the form "a = phi (a, x)". */
126
127 static inline bool
128 simple_copy_phi_p (gimple phi)
129 {
130 tree res;
131
132 if (gimple_phi_num_args (phi) != 2)
133 return false;
134
135 res = gimple_phi_result (phi);
136 return (res == gimple_phi_arg_def (phi, 0)
137 || res == gimple_phi_arg_def (phi, 1));
138 }
139
140 /* Returns true when the phi node at position PSI is a reduction phi
141 node in REGION. Otherwise moves the pointer PSI to the next phi to
142 be considered. */
143
144 static bool
145 reduction_phi_p (sese region, gimple_stmt_iterator *psi)
146 {
147 loop_p loop;
148 gimple phi = gsi_stmt (*psi);
149 tree res = gimple_phi_result (phi);
150
151 loop = loop_containing_stmt (phi);
152
153 if (simple_copy_phi_p (phi))
154 {
155 /* PRE introduces phi nodes like these, for an example,
156 see id-5.f in the fortran graphite testsuite:
157
158 # prephitmp.85_265 = PHI <prephitmp.85_258(33), prephitmp.85_265(18)>
159 */
160 remove_simple_copy_phi (psi);
161 return false;
162 }
163
164 if (scev_analyzable_p (res, region))
165 {
166 tree scev = scalar_evolution_in_region (region, loop, res);
167
168 if (evolution_function_is_invariant_p (scev, loop->num))
169 remove_invariant_phi (region, psi);
170 else
171 gsi_next (psi);
172
173 return false;
174 }
175
176 /* All the other cases are considered reductions. */
177 return true;
178 }
179
180 /* Store the GRAPHITE representation of BB. */
181
182 static gimple_bb_p
183 new_gimple_bb (basic_block bb, vec<data_reference_p> drs)
184 {
185 struct gimple_bb *gbb;
186
187 gbb = XNEW (struct gimple_bb);
188 bb->aux = gbb;
189 GBB_BB (gbb) = bb;
190 GBB_DATA_REFS (gbb) = drs;
191 GBB_CONDITIONS (gbb).create (0);
192 GBB_CONDITION_CASES (gbb).create (0);
193
194 return gbb;
195 }
196
197 static void
198 free_data_refs_aux (vec<data_reference_p> datarefs)
199 {
200 unsigned int i;
201 struct data_reference *dr;
202
203 FOR_EACH_VEC_ELT (datarefs, i, dr)
204 if (dr->aux)
205 {
206 base_alias_pair *bap = (base_alias_pair *)(dr->aux);
207
208 free (bap->alias_set);
209
210 free (bap);
211 dr->aux = NULL;
212 }
213 }
214 /* Frees GBB. */
215
216 static void
217 free_gimple_bb (struct gimple_bb *gbb)
218 {
219 free_data_refs_aux (GBB_DATA_REFS (gbb));
220 free_data_refs (GBB_DATA_REFS (gbb));
221
222 GBB_CONDITIONS (gbb).release ();
223 GBB_CONDITION_CASES (gbb).release ();
224 GBB_BB (gbb)->aux = 0;
225 XDELETE (gbb);
226 }
227
228 /* Deletes all gimple bbs in SCOP. */
229
230 static void
231 remove_gbbs_in_scop (scop_p scop)
232 {
233 int i;
234 poly_bb_p pbb;
235
236 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
237 free_gimple_bb (PBB_BLACK_BOX (pbb));
238 }
239
240 /* Deletes all scops in SCOPS. */
241
242 void
243 free_scops (vec<scop_p> scops)
244 {
245 int i;
246 scop_p scop;
247
248 FOR_EACH_VEC_ELT (scops, i, scop)
249 {
250 remove_gbbs_in_scop (scop);
251 free_sese (SCOP_REGION (scop));
252 free_scop (scop);
253 }
254
255 scops.release ();
256 }
257
258 /* Same as outermost_loop_in_sese: returns the outermost loop
259 containing BB in REGION, but makes sure that the returned loop
260 belongs to REGION; when the loop containing BB is not part of
261 REGION, the first loop of REGION is returned instead. */
262
263 static loop_p
264 outermost_loop_in_sese_1 (sese region, basic_block bb)
265 {
266 loop_p nest = outermost_loop_in_sese (region, bb);
267
268 if (loop_in_sese_p (nest, region))
269 return nest;
270
271 /* When the basic block BB does not belong to a loop in the region,
272 return the first loop in the region. */
273 nest = nest->inner;
274 while (nest)
275 if (loop_in_sese_p (nest, region))
276 break;
277 else
278 nest = nest->next;
279
280 gcc_assert (nest);
281 return nest;
282 }
283
284 /* Generates a polyhedral black box only if the bb contains interesting
285 information. */
286
287 static gimple_bb_p
288 try_generate_gimple_bb (scop_p scop, basic_block bb)
289 {
290 vec<data_reference_p> drs;
291 drs.create (5);
292 sese region = SCOP_REGION (scop);
293 loop_p nest = outermost_loop_in_sese_1 (region, bb);
294 gimple_stmt_iterator gsi;
295
296 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
297 {
298 gimple stmt = gsi_stmt (gsi);
299 loop_p loop;
300
301 if (is_gimple_debug (stmt))
302 continue;
303
304 loop = loop_containing_stmt (stmt);
305 if (!loop_in_sese_p (loop, region))
306 loop = nest;
307
308 graphite_find_data_references_in_stmt (nest, loop, stmt, &drs);
309 }
310
311 return new_gimple_bb (bb, drs);
312 }
313
314 /* Returns true if all predecessors of BB, that are not dominated by BB, are
315 marked in MAP. The predecessors dominated by BB are loop latches and will
316 be handled after BB. */
317
318 static bool
319 all_non_dominated_preds_marked_p (basic_block bb, sbitmap map)
320 {
321 edge e;
322 edge_iterator ei;
323
324 FOR_EACH_EDGE (e, ei, bb->preds)
325 if (!bitmap_bit_p (map, e->src->index)
326 && !dominated_by_p (CDI_DOMINATORS, e->src, bb))
327 return false;
328
329 return true;
330 }
331
332 /* Compare the loop depth of the two basic blocks P1 and P2. */
333
334 static int
335 compare_bb_depths (const void *p1, const void *p2)
336 {
337 const_basic_block const bb1 = *(const_basic_block const*)p1;
338 const_basic_block const bb2 = *(const_basic_block const*)p2;
339 int d1 = loop_depth (bb1->loop_father);
340 int d2 = loop_depth (bb2->loop_father);
341
342 if (d1 < d2)
343 return 1;
344
345 if (d1 > d2)
346 return -1;
347
348 return 0;
349 }
350
351 /* Sort the basic blocks in DOM so that the ones at the deepest loop
352 level come first. */
353
354 static void
355 graphite_sort_dominated_info (vec<basic_block> dom)
356 {
357 dom.qsort (compare_bb_depths);
358 }
359
360 /* Recursive helper function for build_scop_bbs. */
361
362 static void
363 build_scop_bbs_1 (scop_p scop, sbitmap visited, basic_block bb)
364 {
365 sese region = SCOP_REGION (scop);
366 vec<basic_block> dom;
367 poly_bb_p pbb;
368
369 if (bitmap_bit_p (visited, bb->index)
370 || !bb_in_sese_p (bb, region))
371 return;
372
373 pbb = new_poly_bb (scop, try_generate_gimple_bb (scop, bb));
374 SCOP_BBS (scop).safe_push (pbb);
375 bitmap_set_bit (visited, bb->index);
376
377 dom = get_dominated_by (CDI_DOMINATORS, bb);
378
379 if (!dom.exists ())
380 return;
381
382 graphite_sort_dominated_info (dom);
383
384 while (!dom.is_empty ())
385 {
386 int i;
387 basic_block dom_bb;
388
389 FOR_EACH_VEC_ELT (dom, i, dom_bb)
390 if (all_non_dominated_preds_marked_p (dom_bb, visited))
391 {
392 build_scop_bbs_1 (scop, visited, dom_bb);
393 dom.unordered_remove (i);
394 break;
395 }
396 }
397
398 dom.release ();
399 }
400
401 /* Gather the basic blocks belonging to the SCOP. */
402
403 static void
404 build_scop_bbs (scop_p scop)
405 {
406 sbitmap visited = sbitmap_alloc (last_basic_block);
407 sese region = SCOP_REGION (scop);
408
409 bitmap_clear (visited);
410 build_scop_bbs_1 (scop, visited, SESE_ENTRY_BB (region));
411 sbitmap_free (visited);
412 }
413
414 /* Return an ISL identifier for the polyhedral basic block PBB. */
415
416 static isl_id *
417 isl_id_for_pbb (scop_p s, poly_bb_p pbb)
418 {
419 char name[50];
420 snprintf (name, sizeof (name), "S_%d", pbb_index (pbb));
421 return isl_id_alloc (s->ctx, name, pbb);
422 }
423
424 /* Converts the STATIC_SCHEDULE of PBB into a scattering polyhedron.
425 We generate SCATTERING_DIMENSIONS scattering dimensions.
426
427 CLooG 0.15.0 and previous versions require that all scattering
428 functions of one CloogProgram have the same number of scattering
429 dimensions, therefore we allow it to be specified. This
430 requirement should be removed in future versions of CLooG.
431
432 The scattering polyhedron consists of these dimensions: scattering,
433 loop_iterators, parameters.
434
435 Example:
436
437 | scattering_dimensions = 5
438 | used_scattering_dimensions = 3
439 | nb_iterators = 1
440 | scop_nb_params = 2
441 |
442 | Schedule:
443 | i
444 | 4 5
445 |
446 | Scattering polyhedron:
447 |
448 | scattering: {s1, s2, s3, s4, s5}
449 | loop_iterators: {i}
450 | parameters: {p1, p2}
451 |
452 | s1 s2 s3 s4 s5 i p1 p2 1
453 | 1 0 0 0 0 0 0 0 -4 = 0
454 | 0 1 0 0 0 -1 0 0 0 = 0
455 | 0 0 1 0 0 0 0 0 -5 = 0 */
456
457 static void
458 build_pbb_scattering_polyhedrons (isl_aff *static_sched,
459 poly_bb_p pbb, int scattering_dimensions)
460 {
461 int i;
462 int nb_iterators = pbb_dim_iter_domain (pbb);
463 int used_scattering_dimensions = nb_iterators * 2 + 1;
464 isl_int val;
465 isl_space *dc, *dm;
466
467 gcc_assert (scattering_dimensions >= used_scattering_dimensions);
468
469 isl_int_init (val);
470
471 dc = isl_set_get_space (pbb->domain);
472 dm = isl_space_add_dims (isl_space_from_domain (dc),
473 isl_dim_out, scattering_dimensions);
474 pbb->schedule = isl_map_universe (dm);
475
476 for (i = 0; i < scattering_dimensions; i++)
477 {
478 /* Textual order inside this loop. */
479 if ((i % 2) == 0)
480 {
481 isl_constraint *c = isl_equality_alloc
482 (isl_local_space_from_space (isl_map_get_space (pbb->schedule)));
483
484 if (0 != isl_aff_get_coefficient (static_sched, isl_dim_in,
485 i / 2, &val))
486 gcc_unreachable ();
487
488 isl_int_neg (val, val);
489 c = isl_constraint_set_constant (c, val);
490 c = isl_constraint_set_coefficient_si (c, isl_dim_out, i, 1);
491 pbb->schedule = isl_map_add_constraint (pbb->schedule, c);
492 }
493
494 /* Iterations of this loop. */
495 else /* if ((i % 2) == 1) */
496 {
497 int loop = (i - 1) / 2;
498 pbb->schedule = isl_map_equate (pbb->schedule, isl_dim_in, loop,
499 isl_dim_out, i);
500 }
501 }
502
503 isl_int_clear (val);
504
505 pbb->transformed = isl_map_copy (pbb->schedule);
506 }
507
508 /* Build for BB the static schedule.
509
510 The static schedule is a Dewey numbering of the abstract syntax
511 tree: http://en.wikipedia.org/wiki/Dewey_Decimal_Classification
512
513 The following example informally defines the static schedule:
514
515 A
516 for (i: ...)
517 {
518 for (j: ...)
519 {
520 B
521 C
522 }
523
524 for (k: ...)
525 {
526 D
527 E
528 }
529 }
530 F
531
532 Static schedules for A to F:
533
534 DEPTH
535 0 1 2
536 A 0
537 B 1 0 0
538 C 1 0 1
539 D 1 1 0
540 E 1 1 1
541 F 2
542 */
543
544 static void
545 build_scop_scattering (scop_p scop)
546 {
547 int i;
548 poly_bb_p pbb;
549 gimple_bb_p previous_gbb = NULL;
550 isl_space *dc = isl_set_get_space (scop->context);
551 isl_aff *static_sched;
552
553 dc = isl_space_add_dims (dc, isl_dim_set, number_of_loops (cfun));
554 static_sched = isl_aff_zero_on_domain (isl_local_space_from_space (dc));
555
556 /* Schedules have to start at 0 on the first component. Since there
557 is no previous loop to compare common prefix loops against, PREFIX
558 is zero here, and that index will be incremented before
559 copying. */
560 static_sched = isl_aff_add_coefficient_si (static_sched, isl_dim_in, 0, -1);
561
562 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
563 {
564 gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
565 int prefix;
566 int nb_scat_dims = pbb_dim_iter_domain (pbb) * 2 + 1;
567
568 if (previous_gbb)
569 prefix = nb_common_loops (SCOP_REGION (scop), previous_gbb, gbb);
570 else
571 prefix = 0;
572
573 previous_gbb = gbb;
574
575 static_sched = isl_aff_add_coefficient_si (static_sched, isl_dim_in,
576 prefix, 1);
577 build_pbb_scattering_polyhedrons (static_sched, pbb, nb_scat_dims);
578 }
579
580 isl_aff_free (static_sched);
581 }
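
/* Illustrative sketch (not part of the pass): for statement B of the
   Dewey example above, with static schedule (1, 0, 0) and loop
   iterators i and j, the scattering map built by
   build_pbb_scattering_polyhedrons is, informally,

     { S_B[i, j] -> [1, i, 0, j, 0] }

   i.e. textual positions and loop iterators alternate, starting and
   ending with a textual position.  The tuple name S_B is informal; the
   actual name is the "S_%d" id assigned by isl_id_for_pbb.  */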
582
583 static isl_pw_aff *extract_affine (scop_p, tree, __isl_take isl_space *space);
584
585 /* Extract an affine expression from the chain of recurrence E. */
586
587 static isl_pw_aff *
588 extract_affine_chrec (scop_p s, tree e, __isl_take isl_space *space)
589 {
590 isl_pw_aff *lhs = extract_affine (s, CHREC_LEFT (e), isl_space_copy (space));
591 isl_pw_aff *rhs = extract_affine (s, CHREC_RIGHT (e), isl_space_copy (space));
592 isl_local_space *ls = isl_local_space_from_space (space);
593 unsigned pos = sese_loop_depth ((sese) s->region, get_chrec_loop (e)) - 1;
594 isl_aff *loop = isl_aff_set_coefficient_si
595 (isl_aff_zero_on_domain (ls), isl_dim_in, pos, 1);
596 isl_pw_aff *l = isl_pw_aff_from_aff (loop);
597
598 /* Before multiplying, make sure that the result is affine. */
599 gcc_assert (isl_pw_aff_is_cst (rhs)
600 || isl_pw_aff_is_cst (l));
601
602 return isl_pw_aff_add (lhs, isl_pw_aff_mul (rhs, l));
603 }
604
605 /* Extract an affine expression from the mult_expr E. */
606
607 static isl_pw_aff *
608 extract_affine_mul (scop_p s, tree e, __isl_take isl_space *space)
609 {
610 isl_pw_aff *lhs = extract_affine (s, TREE_OPERAND (e, 0),
611 isl_space_copy (space));
612 isl_pw_aff *rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
613
614 if (!isl_pw_aff_is_cst (lhs)
615 && !isl_pw_aff_is_cst (rhs))
616 {
617 isl_pw_aff_free (lhs);
618 isl_pw_aff_free (rhs);
619 return NULL;
620 }
621
622 return isl_pw_aff_mul (lhs, rhs);
623 }
624
625 /* Return an ISL identifier from the name of the ssa_name E. */
626
627 static isl_id *
628 isl_id_for_ssa_name (scop_p s, tree e)
629 {
630 const char *name = get_name (e);
631 isl_id *id;
632
633 if (name)
634 id = isl_id_alloc (s->ctx, name, e);
635 else
636 {
637 char name1[50];
638 snprintf (name1, sizeof (name1), "P_%d", SSA_NAME_VERSION (e));
639 id = isl_id_alloc (s->ctx, name1, e);
640 }
641
642 return id;
643 }
644
645 /* Return an ISL identifier for the data reference DR. */
646
647 static isl_id *
648 isl_id_for_dr (scop_p s, data_reference_p dr ATTRIBUTE_UNUSED)
649 {
650 /* Data references all get the same isl_id. They need to be comparable
651 and are distinguished through the first dimension, which contains the
652 alias set number. */
653 return isl_id_alloc (s->ctx, "", 0);
654 }
655
656 /* Extract an affine expression from the ssa_name E. */
657
658 static isl_pw_aff *
659 extract_affine_name (scop_p s, tree e, __isl_take isl_space *space)
660 {
661 isl_aff *aff;
662 isl_set *dom;
663 isl_id *id;
664 int dimension;
665
666 id = isl_id_for_ssa_name (s, e);
667 dimension = isl_space_find_dim_by_id (space, isl_dim_param, id);
668 isl_id_free(id);
669 dom = isl_set_universe (isl_space_copy (space));
670 aff = isl_aff_zero_on_domain (isl_local_space_from_space (space));
671 aff = isl_aff_add_coefficient_si (aff, isl_dim_param, dimension, 1);
672 return isl_pw_aff_alloc (dom, aff);
673 }
674
675 /* Extract an affine expression from the gmp constant G. */
676
677 static isl_pw_aff *
678 extract_affine_gmp (mpz_t g, __isl_take isl_space *space)
679 {
680 isl_local_space *ls = isl_local_space_from_space (isl_space_copy (space));
681 isl_aff *aff = isl_aff_zero_on_domain (ls);
682 isl_set *dom = isl_set_universe (space);
683 isl_int v;
684
685 isl_int_init (v);
686 isl_int_set_gmp (v, g);
687 aff = isl_aff_add_constant (aff, v);
688 isl_int_clear (v);
689
690 return isl_pw_aff_alloc (dom, aff);
691 }
692
693 /* Extract an affine expression from the integer_cst E. */
694
695 static isl_pw_aff *
696 extract_affine_int (tree e, __isl_take isl_space *space)
697 {
698 isl_pw_aff *res;
699 mpz_t g;
700
701 mpz_init (g);
702 tree_int_to_gmp (e, g);
703 res = extract_affine_gmp (g, space);
704 mpz_clear (g);
705
706 return res;
707 }
708
709 /* Compute pwaff mod 2^width. */
710
711 static isl_pw_aff *
712 wrap (isl_pw_aff *pwaff, unsigned width)
713 {
714 isl_int mod;
715
716 isl_int_init (mod);
717 isl_int_set_si (mod, 1);
718 isl_int_mul_2exp (mod, mod, width);
719
720 pwaff = isl_pw_aff_mod (pwaff, mod);
721
722 isl_int_clear (mod);
723
724 return pwaff;
725 }
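
/* Illustrative sketch (not part of the pass): WRAP reduces a piecewise
   affine value modulo 2^WIDTH so that it matches the modular semantics
   of the original unsigned type.  A plain C analogue, assuming
   WIDTH < HOST_BITS_PER_WIDE_INT, would be:  */
#if 0
static unsigned HOST_WIDE_INT
wrap_value_example (unsigned HOST_WIDE_INT v, unsigned width)
{
  /* 2^WIDTH, the modulus used by the isl_pw_aff_mod call above.  */
  unsigned HOST_WIDE_INT mod = (unsigned HOST_WIDE_INT) 1 << width;

  /* E.g. for width == 8, a value of 300 wraps to 300 % 256 == 44.  */
  return v % mod;
}
#endif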
726
727 /* When parameter NAME is in REGION, returns its index in SESE_PARAMS.
728 Otherwise returns -1. */
729
730 static inline int
731 parameter_index_in_region_1 (tree name, sese region)
732 {
733 int i;
734 tree p;
735
736 gcc_assert (TREE_CODE (name) == SSA_NAME);
737
738 FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, p)
739 if (p == name)
740 return i;
741
742 return -1;
743 }
744
745 /* When the parameter NAME is in REGION, returns its index in
746 SESE_PARAMS. Otherwise this function inserts NAME in SESE_PARAMS
747 and returns the index of NAME. */
748
749 static int
750 parameter_index_in_region (tree name, sese region)
751 {
752 int i;
753
754 gcc_assert (TREE_CODE (name) == SSA_NAME);
755
756 i = parameter_index_in_region_1 (name, region);
757 if (i != -1)
758 return i;
759
760 gcc_assert (SESE_ADD_PARAMS (region));
761
762 i = SESE_PARAMS (region).length ();
763 SESE_PARAMS (region).safe_push (name);
764 return i;
765 }
766
767 /* Extract an affine expression from the tree E in the scop S. */
768
769 static isl_pw_aff *
770 extract_affine (scop_p s, tree e, __isl_take isl_space *space)
771 {
772 isl_pw_aff *lhs, *rhs, *res;
773 tree type;
774
775 if (e == chrec_dont_know) {
776 isl_space_free (space);
777 return NULL;
778 }
779
780 switch (TREE_CODE (e))
781 {
782 case POLYNOMIAL_CHREC:
783 res = extract_affine_chrec (s, e, space);
784 break;
785
786 case MULT_EXPR:
787 res = extract_affine_mul (s, e, space);
788 break;
789
790 case PLUS_EXPR:
791 case POINTER_PLUS_EXPR:
792 lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
793 rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
794 res = isl_pw_aff_add (lhs, rhs);
795 break;
796
797 case MINUS_EXPR:
798 lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
799 rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
800 res = isl_pw_aff_sub (lhs, rhs);
801 break;
802
803 case NEGATE_EXPR:
804 case BIT_NOT_EXPR:
805 lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
806 rhs = extract_affine (s, integer_minus_one_node, space);
807 res = isl_pw_aff_mul (lhs, rhs);
808 break;
809
810 case SSA_NAME:
811 gcc_assert (-1 != parameter_index_in_region_1 (e, SCOP_REGION (s)));
812 res = extract_affine_name (s, e, space);
813 break;
814
815 case INTEGER_CST:
816 res = extract_affine_int (e, space);
817 /* No need to wrap a single integer. */
818 return res;
819
820 CASE_CONVERT:
821 case NON_LVALUE_EXPR:
822 res = extract_affine (s, TREE_OPERAND (e, 0), space);
823 break;
824
825 default:
826 gcc_unreachable ();
827 break;
828 }
829
830 type = TREE_TYPE (e);
831 if (TYPE_UNSIGNED (type))
832 res = wrap (res, TYPE_PRECISION (type));
833
834 return res;
835 }
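
/* Illustrative sketch (not part of the pass): assuming a region with a
   single loop iterator i and a single parameter n, a hypothetical
   expression "3 * i + n + 7" is translated by extract_affine into the
   piecewise quasi-affine expression

     [n] -> { [i] -> [(3 * i + n + 7)] }

   and, when the type of the expression is unsigned with precision W,
   the result is additionally wrapped modulo 2^W by WRAP above.  */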
836
837 /* In the context of sese S, scan the expression E and record in
838 SESE_PARAMS the parameters occurring in it (see
839 parameter_index_in_region). */
841
842 static void
843 scan_tree_for_params (sese s, tree e)
844 {
845 if (e == chrec_dont_know)
846 return;
847
848 switch (TREE_CODE (e))
849 {
850 case POLYNOMIAL_CHREC:
851 scan_tree_for_params (s, CHREC_LEFT (e));
852 break;
853
854 case MULT_EXPR:
855 if (chrec_contains_symbols (TREE_OPERAND (e, 0)))
856 scan_tree_for_params (s, TREE_OPERAND (e, 0));
857 else
858 scan_tree_for_params (s, TREE_OPERAND (e, 1));
859 break;
860
861 case PLUS_EXPR:
862 case POINTER_PLUS_EXPR:
863 case MINUS_EXPR:
864 scan_tree_for_params (s, TREE_OPERAND (e, 0));
865 scan_tree_for_params (s, TREE_OPERAND (e, 1));
866 break;
867
868 case NEGATE_EXPR:
869 case BIT_NOT_EXPR:
870 CASE_CONVERT:
871 case NON_LVALUE_EXPR:
872 scan_tree_for_params (s, TREE_OPERAND (e, 0));
873 break;
874
875 case SSA_NAME:
876 parameter_index_in_region (e, s);
877 break;
878
879 case INTEGER_CST:
880 case ADDR_EXPR:
881 break;
882
883 default:
884 gcc_unreachable ();
885 break;
886 }
887 }
888
889 /* Find parameters with respect to REGION in BB. We are looking in memory
890 access functions, conditions and loop bounds. */
891
892 static void
893 find_params_in_bb (sese region, gimple_bb_p gbb)
894 {
895 int i;
896 unsigned j;
897 data_reference_p dr;
898 gimple stmt;
899 loop_p loop = GBB_BB (gbb)->loop_father;
900
901 /* Find parameters in the access functions of data references. */
902 FOR_EACH_VEC_ELT (GBB_DATA_REFS (gbb), i, dr)
903 for (j = 0; j < DR_NUM_DIMENSIONS (dr); j++)
904 scan_tree_for_params (region, DR_ACCESS_FN (dr, j));
905
906 /* Find parameters in conditional statements. */
907 FOR_EACH_VEC_ELT (GBB_CONDITIONS (gbb), i, stmt)
908 {
909 tree lhs = scalar_evolution_in_region (region, loop,
910 gimple_cond_lhs (stmt));
911 tree rhs = scalar_evolution_in_region (region, loop,
912 gimple_cond_rhs (stmt));
913
914 scan_tree_for_params (region, lhs);
915 scan_tree_for_params (region, rhs);
916 }
917 }
918
919 /* Record the parameters used in the SCOP. A variable is a parameter
920 in a scop if it does not vary during the execution of that scop. */
921
922 static void
923 find_scop_parameters (scop_p scop)
924 {
925 poly_bb_p pbb;
926 unsigned i;
927 sese region = SCOP_REGION (scop);
928 struct loop *loop;
929 int nbp;
930
931 /* Find the parameters used in the loop bounds. */
932 FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop)
933 {
934 tree nb_iters = number_of_latch_executions (loop);
935
936 if (!chrec_contains_symbols (nb_iters))
937 continue;
938
939 nb_iters = scalar_evolution_in_region (region, loop, nb_iters);
940 scan_tree_for_params (region, nb_iters);
941 }
942
943 /* Find the parameters used in data accesses. */
944 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
945 find_params_in_bb (region, PBB_BLACK_BOX (pbb));
946
947 nbp = sese_nb_params (region);
948 scop_set_nb_params (scop, nbp);
949 SESE_ADD_PARAMS (region) = false;
950
951 {
952 tree e;
953 isl_space *space = isl_space_set_alloc (scop->ctx, nbp, 0);
954
955 FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, e)
956 space = isl_space_set_dim_id (space, isl_dim_param, i,
957 isl_id_for_ssa_name (scop, e));
958
959 scop->context = isl_set_universe (space);
960 }
961 }
962
963 /* Builds the constraint polyhedra for LOOP in SCOP. OUTER_PH gives
964 the constraints for the surrounding loops. */
965
966 static void
967 build_loop_iteration_domains (scop_p scop, struct loop *loop,
968 int nb,
969 isl_set *outer, isl_set **doms)
970 {
971 tree nb_iters = number_of_latch_executions (loop);
972 sese region = SCOP_REGION (scop);
973
974 isl_set *inner = isl_set_copy (outer);
975 isl_space *space;
976 isl_constraint *c;
977 int pos = isl_set_dim (outer, isl_dim_set);
978 isl_int v;
979 mpz_t g;
980
981 mpz_init (g);
982 isl_int_init (v);
983
984 inner = isl_set_add_dims (inner, isl_dim_set, 1);
985 space = isl_set_get_space (inner);
986
987 /* 0 <= loop_i */
988 c = isl_inequality_alloc
989 (isl_local_space_from_space (isl_space_copy (space)));
990 c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, 1);
991 inner = isl_set_add_constraint (inner, c);
992
993 /* loop_i <= cst_nb_iters */
994 if (TREE_CODE (nb_iters) == INTEGER_CST)
995 {
996 c = isl_inequality_alloc
997 (isl_local_space_from_space(isl_space_copy (space)));
998 c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, -1);
999 tree_int_to_gmp (nb_iters, g);
1000 isl_int_set_gmp (v, g);
1001 c = isl_constraint_set_constant (c, v);
1002 inner = isl_set_add_constraint (inner, c);
1003 }
1004
1005 /* loop_i <= expr_nb_iters */
1006 else if (!chrec_contains_undetermined (nb_iters))
1007 {
1008 double_int nit;
1009 isl_pw_aff *aff;
1010 isl_set *valid;
1011 isl_local_space *ls;
1012 isl_aff *al;
1013 isl_set *le;
1014
1015 nb_iters = scalar_evolution_in_region (region, loop, nb_iters);
1016
1017 aff = extract_affine (scop, nb_iters, isl_set_get_space (inner));
1018 valid = isl_pw_aff_nonneg_set (isl_pw_aff_copy (aff));
1019 valid = isl_set_project_out (valid, isl_dim_set, 0,
1020 isl_set_dim (valid, isl_dim_set));
1021 scop->context = isl_set_intersect (scop->context, valid);
1022
1023 ls = isl_local_space_from_space (isl_space_copy (space));
1024 al = isl_aff_set_coefficient_si (isl_aff_zero_on_domain (ls),
1025 isl_dim_in, pos, 1);
1026 le = isl_pw_aff_le_set (isl_pw_aff_from_aff (al),
1027 isl_pw_aff_copy (aff));
1028 inner = isl_set_intersect (inner, le);
1029
1030 if (max_stmt_executions (loop, &nit))
1031 {
1032 /* Insert in the context the constraints from the
1033 estimation of the number of iterations NIT and the
1034 symbolic number of iterations (involving parameter
1035 names) NB_ITERS. First, build the affine expression
1036 "NIT - NB_ITERS" and then say that it is positive,
1037 i.e., NIT approximates NB_ITERS: "NIT >= NB_ITERS". */
1038 isl_pw_aff *approx;
1039 mpz_t g;
1040 isl_set *x;
1041 isl_constraint *c;
1042
1043 mpz_init (g);
1044 mpz_set_double_int (g, nit, false);
1045 mpz_sub_ui (g, g, 1);
1046 approx = extract_affine_gmp (g, isl_set_get_space (inner));
1047 x = isl_pw_aff_ge_set (approx, aff);
1048 x = isl_set_project_out (x, isl_dim_set, 0,
1049 isl_set_dim (x, isl_dim_set));
1050 scop->context = isl_set_intersect (scop->context, x);
1051
1052 c = isl_inequality_alloc
1053 (isl_local_space_from_space (isl_space_copy (space)));
1054 c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, -1);
1055 isl_int_set_gmp (v, g);
1056 mpz_clear (g);
1057 c = isl_constraint_set_constant (c, v);
1058 inner = isl_set_add_constraint (inner, c);
1059 }
1060 else
1061 isl_pw_aff_free (aff);
1062 }
1063 else
1064 gcc_unreachable ();
1065
1066 if (loop->inner && loop_in_sese_p (loop->inner, region))
1067 build_loop_iteration_domains (scop, loop->inner, nb + 1,
1068 isl_set_copy (inner), doms);
1069
1070 if (nb != 0
1071 && loop->next
1072 && loop_in_sese_p (loop->next, region))
1073 build_loop_iteration_domains (scop, loop->next, nb,
1074 isl_set_copy (outer), doms);
1075
1076 doms[loop->num] = inner;
1077
1078 isl_set_free (outer);
1079 isl_space_free (space);
1080 isl_int_clear (v);
1081 mpz_clear (g);
1082 }
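
/* Illustrative sketch (not part of the pass): for a hypothetical source
   loop

     for (i = 0; i < n; i++)
       ...

   whose latch executes n - 1 times symbolically, the code above builds
   the inner domain

     { [i] : 0 <= i and i <= n - 1 }

   and intersects scop->context with the validity condition n - 1 >= 0.
   When an estimate NIT of the number of iterations is available, the
   context constraint NIT - 1 >= n - 1 and the domain constraint
   i <= NIT - 1 are added as well.  */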
1083
1084 /* Returns a linear expression for tree T evaluated in PBB. */
1085
1086 static isl_pw_aff *
1087 create_pw_aff_from_tree (poly_bb_p pbb, tree t)
1088 {
1089 scop_p scop = PBB_SCOP (pbb);
1090
1091 t = scalar_evolution_in_region (SCOP_REGION (scop), pbb_loop (pbb), t);
1092 gcc_assert (!automatically_generated_chrec_p (t));
1093
1094 return extract_affine (scop, t, isl_set_get_space (pbb->domain));
1095 }
1096
1097 /* Add conditional statement STMT to pbb. CODE is used as the comparison
1098 operator. This allows us to invert the condition or to handle
1099 inequalities. */
1100
1101 static void
1102 add_condition_to_pbb (poly_bb_p pbb, gimple stmt, enum tree_code code)
1103 {
1104 isl_pw_aff *lhs = create_pw_aff_from_tree (pbb, gimple_cond_lhs (stmt));
1105 isl_pw_aff *rhs = create_pw_aff_from_tree (pbb, gimple_cond_rhs (stmt));
1106 isl_set *cond;
1107
1108 switch (code)
1109 {
1110 case LT_EXPR:
1111 cond = isl_pw_aff_lt_set (lhs, rhs);
1112 break;
1113
1114 case GT_EXPR:
1115 cond = isl_pw_aff_gt_set (lhs, rhs);
1116 break;
1117
1118 case LE_EXPR:
1119 cond = isl_pw_aff_le_set (lhs, rhs);
1120 break;
1121
1122 case GE_EXPR:
1123 cond = isl_pw_aff_ge_set (lhs, rhs);
1124 break;
1125
1126 case EQ_EXPR:
1127 cond = isl_pw_aff_eq_set (lhs, rhs);
1128 break;
1129
1130 case NE_EXPR:
1131 cond = isl_pw_aff_ne_set (lhs, rhs);
1132 break;
1133
1134 default:
1135 isl_pw_aff_free(lhs);
1136 isl_pw_aff_free(rhs);
1137 return;
1138 }
1139
1140 cond = isl_set_coalesce (cond);
1141 cond = isl_set_set_tuple_id (cond, isl_set_get_tuple_id (pbb->domain));
1142 pbb->domain = isl_set_intersect (pbb->domain, cond);
1143 }
1144
1145 /* Add conditions to the domain of PBB. */
1146
1147 static void
1148 add_conditions_to_domain (poly_bb_p pbb)
1149 {
1150 unsigned int i;
1151 gimple stmt;
1152 gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
1153
1154 if (GBB_CONDITIONS (gbb).is_empty ())
1155 return;
1156
1157 FOR_EACH_VEC_ELT (GBB_CONDITIONS (gbb), i, stmt)
1158 switch (gimple_code (stmt))
1159 {
1160 case GIMPLE_COND:
1161 {
1162 enum tree_code code = gimple_cond_code (stmt);
1163
1164 /* The conditions for ELSE-branches are inverted. */
1165 if (!GBB_CONDITION_CASES (gbb)[i])
1166 code = invert_tree_comparison (code, false);
1167
1168 add_condition_to_pbb (pbb, stmt, code);
1169 break;
1170 }
1171
1172 case GIMPLE_SWITCH:
1173 /* Switch statements are not supported right now - fall through. */
1174
1175 default:
1176 gcc_unreachable ();
1177 break;
1178 }
1179 }
1180
1181 /* Traverses all the GBBs of the SCOP and adds their constraints to the
1182 iteration domains. */
1183
1184 static void
1185 add_conditions_to_constraints (scop_p scop)
1186 {
1187 int i;
1188 poly_bb_p pbb;
1189
1190 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
1191 add_conditions_to_domain (pbb);
1192 }
1193
1194 /* Structure used to pass data to dom_walk. */
1195
1196 struct bsc
1197 {
1198 vec<gimple> *conditions, *cases;
1199 sese region;
1200 };
1201
1202 /* Returns a COND_EXPR statement when BB has a single predecessor, the
1203 edge between BB and its predecessor is not a loop exit edge, and
1204 the last statement of the single predecessor is a COND_EXPR. */
1205
1206 static gimple
1207 single_pred_cond_non_loop_exit (basic_block bb)
1208 {
1209 if (single_pred_p (bb))
1210 {
1211 edge e = single_pred_edge (bb);
1212 basic_block pred = e->src;
1213 gimple stmt;
1214
1215 if (loop_depth (pred->loop_father) > loop_depth (bb->loop_father))
1216 return NULL;
1217
1218 stmt = last_stmt (pred);
1219
1220 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1221 return stmt;
1222 }
1223
1224 return NULL;
1225 }
1226
1227 /* Call-back for dom_walk executed before visiting the dominated
1228 blocks. */
1229
1230 static void
1231 build_sese_conditions_before (struct dom_walk_data *dw_data,
1232 basic_block bb)
1233 {
1234 struct bsc *data = (struct bsc *) dw_data->global_data;
1235 vec<gimple> *conditions = data->conditions;
1236 vec<gimple> *cases = data->cases;
1237 gimple_bb_p gbb;
1238 gimple stmt;
1239
1240 if (!bb_in_sese_p (bb, data->region))
1241 return;
1242
1243 stmt = single_pred_cond_non_loop_exit (bb);
1244
1245 if (stmt)
1246 {
1247 edge e = single_pred_edge (bb);
1248
1249 conditions->safe_push (stmt);
1250
1251 if (e->flags & EDGE_TRUE_VALUE)
1252 cases->safe_push (stmt);
1253 else
1254 cases->safe_push (NULL);
1255 }
1256
1257 gbb = gbb_from_bb (bb);
1258
1259 if (gbb)
1260 {
1261 GBB_CONDITIONS (gbb) = conditions->copy ();
1262 GBB_CONDITION_CASES (gbb) = cases->copy ();
1263 }
1264 }
1265
1266 /* Call-back for dom_walk executed after visiting the dominated
1267 blocks. */
1268
1269 static void
1270 build_sese_conditions_after (struct dom_walk_data *dw_data,
1271 basic_block bb)
1272 {
1273 struct bsc *data = (struct bsc *) dw_data->global_data;
1274 vec<gimple> *conditions = data->conditions;
1275 vec<gimple> *cases = data->cases;
1276
1277 if (!bb_in_sese_p (bb, data->region))
1278 return;
1279
1280 if (single_pred_cond_non_loop_exit (bb))
1281 {
1282 conditions->pop ();
1283 cases->pop ();
1284 }
1285 }
1286
1287 /* Record all conditions in REGION. */
1288
1289 static void
1290 build_sese_conditions (sese region)
1291 {
1292 struct dom_walk_data walk_data;
1293 vec<gimple> conditions;
1294 conditions.create (3);
1295 vec<gimple> cases;
1296 cases.create (3);
1297 struct bsc data;
1298
1299 data.conditions = &conditions;
1300 data.cases = &cases;
1301 data.region = region;
1302
1303 walk_data.dom_direction = CDI_DOMINATORS;
1304 walk_data.initialize_block_local_data = NULL;
1305 walk_data.before_dom_children = build_sese_conditions_before;
1306 walk_data.after_dom_children = build_sese_conditions_after;
1307 walk_data.global_data = &data;
1308 walk_data.block_local_data_size = 0;
1309
1310 init_walk_dominator_tree (&walk_data);
1311 walk_dominator_tree (&walk_data, SESE_ENTRY_BB (region));
1312 fini_walk_dominator_tree (&walk_data);
1313
1314 conditions.release ();
1315 cases.release ();
1316 }
1317
1318 /* Add constraints on the possible values of parameter P from the type
1319 of P. */
1320
1321 static void
1322 add_param_constraints (scop_p scop, graphite_dim_t p)
1323 {
1324 tree parameter = SESE_PARAMS (SCOP_REGION (scop))[p];
1325 tree type = TREE_TYPE (parameter);
1326 tree lb = NULL_TREE;
1327 tree ub = NULL_TREE;
1328
1329 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
1330 lb = lower_bound_in_type (type, type);
1331 else
1332 lb = TYPE_MIN_VALUE (type);
1333
1334 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
1335 ub = upper_bound_in_type (type, type);
1336 else
1337 ub = TYPE_MAX_VALUE (type);
1338
1339 if (lb)
1340 {
1341 isl_space *space = isl_set_get_space (scop->context);
1342 isl_constraint *c;
1343 mpz_t g;
1344 isl_int v;
1345
1346 c = isl_inequality_alloc (isl_local_space_from_space (space));
1347 mpz_init (g);
1348 isl_int_init (v);
1349 tree_int_to_gmp (lb, g);
1350 isl_int_set_gmp (v, g);
1351 isl_int_neg (v, v);
1352 mpz_clear (g);
1353 c = isl_constraint_set_constant (c, v);
1354 isl_int_clear (v);
1355 c = isl_constraint_set_coefficient_si (c, isl_dim_param, p, 1);
1356
1357 scop->context = isl_set_add_constraint (scop->context, c);
1358 }
1359
1360 if (ub)
1361 {
1362 isl_space *space = isl_set_get_space (scop->context);
1363 isl_constraint *c;
1364 mpz_t g;
1365 isl_int v;
1366
1367 c = isl_inequality_alloc (isl_local_space_from_space (space));
1368
1369 mpz_init (g);
1370 isl_int_init (v);
1371 tree_int_to_gmp (ub, g);
1372 isl_int_set_gmp (v, g);
1373 mpz_clear (g);
1374 c = isl_constraint_set_constant (c, v);
1375 isl_int_clear (v);
1376 c = isl_constraint_set_coefficient_si (c, isl_dim_param, p, -1);
1377
1378 scop->context = isl_set_add_constraint (scop->context, c);
1379 }
1380 }
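
/* Illustrative sketch (not part of the pass): for a parameter P of type
   "unsigned char" the constraints added to the context above are

     { : P >= 0 and P <= 255 }

   and for a 32-bit "int" parameter they are

     { : P >= -2147483648 and P <= 2147483647 }

   i.e. the natural range of the parameter's type.  */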
1381
1382 /* Build the context of the SCOP. The context usually contains extra
1383 constraints on the parameters that complement the constraints of
1384 the iteration domains. */
1385
1386 static void
1387 build_scop_context (scop_p scop)
1388 {
1389 graphite_dim_t p, n = scop_nb_params (scop);
1390
1391 for (p = 0; p < n; p++)
1392 add_param_constraints (scop, p);
1393 }
1394
1395 /* Build the iteration domains: the loops belonging to the current
1396 SCOP, and that vary for the execution of the current basic block.
1397 Blocks whose loop is not in the SCOP get the SCOP context as domain. */
1398
1399 static void
1400 build_scop_iteration_domain (scop_p scop)
1401 {
1402 struct loop *loop;
1403 sese region = SCOP_REGION (scop);
1404 int i;
1405 poly_bb_p pbb;
1406 int nb_loops = number_of_loops (cfun);
1407 isl_set **doms = XCNEWVEC (isl_set *, nb_loops);
1408
1409 FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop)
1410 if (!loop_in_sese_p (loop_outer (loop), region))
1411 build_loop_iteration_domains (scop, loop, 0,
1412 isl_set_copy (scop->context), doms);
1413
1414 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
1415 {
1416 loop = pbb_loop (pbb);
1417
1418 if (doms[loop->num])
1419 pbb->domain = isl_set_copy (doms[loop->num]);
1420 else
1421 pbb->domain = isl_set_copy (scop->context);
1422
1423 pbb->domain = isl_set_set_tuple_id (pbb->domain,
1424 isl_id_for_pbb (scop, pbb));
1425 }
1426
1427 for (i = 0; i < nb_loops; i++)
1428 if (doms[i])
1429 isl_set_free (doms[i]);
1430
1431 free (doms);
1432 }
1433
1434 /* Add a constraint to the accesses polyhedron ACC for the alias set of
1435 data reference DR: the first output dimension of the access map is
1436 constrained to equal the alias set number of DR. */
1438
1439 static isl_map *
1440 pdr_add_alias_set (isl_map *acc, data_reference_p dr)
1441 {
1442 isl_constraint *c;
1443 int alias_set_num = 0;
1444 base_alias_pair *bap = (base_alias_pair *)(dr->aux);
1445
1446 if (bap && bap->alias_set)
1447 alias_set_num = *(bap->alias_set);
1448
1449 c = isl_equality_alloc
1450 (isl_local_space_from_space (isl_map_get_space (acc)));
1451 c = isl_constraint_set_constant_si (c, -alias_set_num);
1452 c = isl_constraint_set_coefficient_si (c, isl_dim_out, 0, 1);
1453
1454 return isl_map_add_constraint (acc, c);
1455 }
1456
1457 /* Assign the affine expression INDEX to the output dimension POS of
1458 MAP and return the result. */
1459
1460 static isl_map *
1461 set_index (isl_map *map, int pos, isl_pw_aff *index)
1462 {
1463 isl_map *index_map;
1464 int len = isl_map_dim (map, isl_dim_out);
1465 isl_id *id;
1466
1467 index_map = isl_map_from_pw_aff (index);
1468 index_map = isl_map_insert_dims (index_map, isl_dim_out, 0, pos);
1469 index_map = isl_map_add_dims (index_map, isl_dim_out, len - pos - 1);
1470
1471 id = isl_map_get_tuple_id (map, isl_dim_out);
1472 index_map = isl_map_set_tuple_id (index_map, isl_dim_out, id);
1473 id = isl_map_get_tuple_id (map, isl_dim_in);
1474 index_map = isl_map_set_tuple_id (index_map, isl_dim_in, id);
1475
1476 return isl_map_intersect (map, index_map);
1477 }
1478
1479 /* Add to the accesses polyhedron ACC the equalities defining the
1480 access functions to memory for data reference DR. PBB is the
1481 poly_bb_p that contains DR. */
1483
1484 static isl_map *
1485 pdr_add_memory_accesses (isl_map *acc, data_reference_p dr, poly_bb_p pbb)
1486 {
1487 int i, nb_subscripts = DR_NUM_DIMENSIONS (dr);
1488 scop_p scop = PBB_SCOP (pbb);
1489
1490 for (i = 0; i < nb_subscripts; i++)
1491 {
1492 isl_pw_aff *aff;
1493 tree afn = DR_ACCESS_FN (dr, nb_subscripts - 1 - i);
1494
1495 aff = extract_affine (scop, afn,
1496 isl_space_domain (isl_map_get_space (acc)));
1497 acc = set_index (acc, i + 1, aff);
1498 }
1499
1500 return acc;
1501 }
1502
1503 /* Add constraints representing the size of the accessed data to the
1504 EXTENT polyhedron: each subscript of the reference with known array
1505 bounds is constrained to lie within those bounds. */
1507
1508 static isl_set *
1509 pdr_add_data_dimensions (isl_set *extent, scop_p scop, data_reference_p dr)
1510 {
1511 tree ref = DR_REF (dr);
1512 int i, nb_subscripts = DR_NUM_DIMENSIONS (dr);
1513
1514 for (i = nb_subscripts - 1; i >= 0; i--, ref = TREE_OPERAND (ref, 0))
1515 {
1516 tree low, high;
1517
1518 if (TREE_CODE (ref) != ARRAY_REF)
1519 break;
1520
1521 low = array_ref_low_bound (ref);
1522 high = array_ref_up_bound (ref);
1523
1524 /* XXX The PPL code dealt separately with
1525 subscript - low >= 0 and high - subscript >= 0 in case one of
1526 the two bounds isn't known. Do the same here? */
1527
1528 if (host_integerp (low, 0)
1529 && high
1530 && host_integerp (high, 0)
1531 /* 1-element arrays at end of structures may extend over
1532 their declared size. */
1533 && !(array_at_struct_end_p (ref)
1534 && operand_equal_p (low, high, 0)))
1535 {
1536 isl_id *id;
1537 isl_aff *aff;
1538 isl_set *univ, *lbs, *ubs;
1539 isl_pw_aff *index;
1540 isl_space *space;
1541 isl_set *valid;
1542 isl_pw_aff *lb = extract_affine_int (low, isl_set_get_space (extent));
1543 isl_pw_aff *ub = extract_affine_int (high, isl_set_get_space (extent));
1544
1545 /* high >= 0 */
1546 valid = isl_pw_aff_nonneg_set (isl_pw_aff_copy (ub));
1547 valid = isl_set_project_out (valid, isl_dim_set, 0,
1548 isl_set_dim (valid, isl_dim_set));
1549 scop->context = isl_set_intersect (scop->context, valid);
1550
1551 space = isl_set_get_space (extent);
1552 aff = isl_aff_zero_on_domain (isl_local_space_from_space (space));
1553 aff = isl_aff_add_coefficient_si (aff, isl_dim_in, i + 1, 1);
1554 univ = isl_set_universe (isl_space_domain (isl_aff_get_space (aff)));
1555 index = isl_pw_aff_alloc (univ, aff);
1556
1557 id = isl_set_get_tuple_id (extent);
1558 lb = isl_pw_aff_set_tuple_id (lb, isl_dim_in, isl_id_copy (id));
1559 ub = isl_pw_aff_set_tuple_id (ub, isl_dim_in, id);
1560
1561 /* low <= sub_i <= high */
1562 lbs = isl_pw_aff_ge_set (isl_pw_aff_copy (index), lb);
1563 ubs = isl_pw_aff_le_set (index, ub);
1564 extent = isl_set_intersect (extent, lbs);
1565 extent = isl_set_intersect (extent, ubs);
1566 }
1567 }
1568
1569 return extent;
1570 }
1571
1572 /* Build data accesses for DR in PBB. */
1573
1574 static void
1575 build_poly_dr (data_reference_p dr, poly_bb_p pbb)
1576 {
1577 int dr_base_object_set;
1578 isl_map *acc;
1579 isl_set *extent;
1580 scop_p scop = PBB_SCOP (pbb);
1581
1582 {
1583 isl_space *dc = isl_set_get_space (pbb->domain);
1584 int nb_out = 1 + DR_NUM_DIMENSIONS (dr);
1585 isl_space *space = isl_space_add_dims (isl_space_from_domain (dc),
1586 isl_dim_out, nb_out);
1587
1588 acc = isl_map_universe (space);
1589 acc = isl_map_set_tuple_id (acc, isl_dim_out, isl_id_for_dr (scop, dr));
1590 }
1591
1592 acc = pdr_add_alias_set (acc, dr);
1593 acc = pdr_add_memory_accesses (acc, dr, pbb);
1594
1595 {
1596 isl_id *id = isl_id_for_dr (scop, dr);
1597 int nb = 1 + DR_NUM_DIMENSIONS (dr);
1598 isl_space *space = isl_space_set_alloc (scop->ctx, 0, nb);
1599 int alias_set_num = 0;
1600 base_alias_pair *bap = (base_alias_pair *)(dr->aux);
1601
1602 if (bap && bap->alias_set)
1603 alias_set_num = *(bap->alias_set);
1604
1605 space = isl_space_set_tuple_id (space, isl_dim_set, id);
1606 extent = isl_set_nat_universe (space);
1607 extent = isl_set_fix_si (extent, isl_dim_set, 0, alias_set_num);
1608 extent = pdr_add_data_dimensions (extent, scop, dr);
1609 }
1610
1611 gcc_assert (dr->aux);
1612 dr_base_object_set = ((base_alias_pair *)(dr->aux))->base_obj_set;
1613
1614 new_poly_dr (pbb, dr_base_object_set,
1615 DR_IS_READ (dr) ? PDR_READ : PDR_WRITE,
1616 dr, DR_NUM_DIMENSIONS (dr), acc, extent);
1617 }
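
/* Illustrative sketch (not part of the pass): for a hypothetical read
   of A[i][j] in a statement S_3 whose alias set number is 1, the access
   map and extent built above are, informally,

     acc:    { S_3[i, j] -> [1, i, j] }
     extent: { [1, s_1, s_2] : lb_1 <= s_1 <= ub_1 and lb_2 <= s_2 <= ub_2 }

   where the first output dimension carries the alias set number and
   lb_k/ub_k are the declared array bounds when they are known.  */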
1618
1619 /* Write to FILE the alias graph of data references in DIMACS format. */
1620
1621 static inline bool
1622 write_alias_graph_to_ascii_dimacs (FILE *file, char *comment,
1623 vec<data_reference_p> drs)
1624 {
1625 int num_vertex = drs.length ();
1626 int edge_num = 0;
1627 data_reference_p dr1, dr2;
1628 int i, j;
1629
1630 if (num_vertex == 0)
1631 return true;
1632
1633 FOR_EACH_VEC_ELT (drs, i, dr1)
1634 for (j = i + 1; drs.iterate (j, &dr2); j++)
1635 if (dr_may_alias_p (dr1, dr2, true))
1636 edge_num++;
1637
1638 fprintf (file, "$\n");
1639
1640 if (comment)
1641 fprintf (file, "c %s\n", comment);
1642
1643 fprintf (file, "p edge %d %d\n", num_vertex, edge_num);
1644
1645 FOR_EACH_VEC_ELT (drs, i, dr1)
1646 for (j = i + 1; drs.iterate (j, &dr2); j++)
1647 if (dr_may_alias_p (dr1, dr2, true))
1648 fprintf (file, "e %d %d\n", i + 1, j + 1);
1649
1650 return true;
1651 }
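
/* Illustrative sketch (not part of the pass): for three data references
   of which only the first two may alias, the DIMACS output produced by
   the function above looks like

     $
     c foo.c bar
     p edge 3 1
     e 1 2

   assuming "foo.c bar" is the comment string passed by the caller.  */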
1652
1653 /* Write to FILE the alias graph of data references in DOT format. */
1654
1655 static inline bool
1656 write_alias_graph_to_ascii_dot (FILE *file, char *comment,
1657 vec<data_reference_p> drs)
1658 {
1659 int num_vertex = drs.length ();
1660 data_reference_p dr1, dr2;
1661 int i, j;
1662
1663 if (num_vertex == 0)
1664 return true;
1665
1666 fprintf (file, "$\n");
1667
1668 if (comment)
1669 fprintf (file, "c %s\n", comment);
1670
1671 /* First print all the vertices. */
1672 FOR_EACH_VEC_ELT (drs, i, dr1)
1673 fprintf (file, "n%d;\n", i);
1674
1675 FOR_EACH_VEC_ELT (drs, i, dr1)
1676 for (j = i + 1; drs.iterate (j, &dr2); j++)
1677 if (dr_may_alias_p (dr1, dr2, true))
1678 fprintf (file, "n%d n%d\n", i, j);
1679
1680 return true;
1681 }
1682
1683 /* Write to FILE the alias graph of data references in ECC format. */
1684
1685 static inline bool
1686 write_alias_graph_to_ascii_ecc (FILE *file, char *comment,
1687 vec<data_reference_p> drs)
1688 {
1689 int num_vertex = drs.length ();
1690 data_reference_p dr1, dr2;
1691 int i, j;
1692
1693 if (num_vertex == 0)
1694 return true;
1695
1696 fprintf (file, "$\n");
1697
1698 if (comment)
1699 fprintf (file, "c %s\n", comment);
1700
1701 FOR_EACH_VEC_ELT (drs, i, dr1)
1702 for (j = i + 1; drs.iterate (j, &dr2); j++)
1703 if (dr_may_alias_p (dr1, dr2, true))
1704 fprintf (file, "%d %d\n", i, j);
1705
1706 return true;
1707 }
1708
1709 /* Check if DR1 and DR2 are in the same object set. */
1710
1711 static bool
1712 dr_same_base_object_p (const struct data_reference *dr1,
1713 const struct data_reference *dr2)
1714 {
1715 return operand_equal_p (DR_BASE_OBJECT (dr1), DR_BASE_OBJECT (dr2), 0);
1716 }
1717
1718 /* Uses DFS component number as representative of alias-sets. Also tests for
1719 optimality by verifying if every connected component is a clique. Returns
1720 true (1) if the above test is true, and false (0) otherwise. */
1721
1722 static int
1723 build_alias_set_optimal_p (vec<data_reference_p> drs)
1724 {
1725 int num_vertices = drs.length ();
1726 struct graph *g = new_graph (num_vertices);
1727 data_reference_p dr1, dr2;
1728 int i, j;
1729 int num_connected_components;
1730 int v_indx1, v_indx2, num_vertices_in_component;
1731 int *all_vertices;
1732 int *vertices;
1733 struct graph_edge *e;
1734 int this_component_is_clique;
1735 int all_components_are_cliques = 1;
1736
1737 FOR_EACH_VEC_ELT (drs, i, dr1)
1738 for (j = i+1; drs.iterate (j, &dr2); j++)
1739 if (dr_may_alias_p (dr1, dr2, true))
1740 {
1741 add_edge (g, i, j);
1742 add_edge (g, j, i);
1743 }
1744
1745 all_vertices = XNEWVEC (int, num_vertices);
1746 vertices = XNEWVEC (int, num_vertices);
1747 for (i = 0; i < num_vertices; i++)
1748 all_vertices[i] = i;
1749
1750 num_connected_components = graphds_dfs (g, all_vertices, num_vertices,
1751 NULL, true, NULL);
1752 for (i = 0; i < g->n_vertices; i++)
1753 {
1754 data_reference_p dr = drs[i];
1755 base_alias_pair *bap;
1756
1757 gcc_assert (dr->aux);
1758 bap = (base_alias_pair *)(dr->aux);
1759
1760 bap->alias_set = XNEW (int);
1761 *(bap->alias_set) = g->vertices[i].component + 1;
1762 }
1763
1764 /* Verify if the DFS numbering results in an optimal solution. */
1765 for (i = 0; i < num_connected_components; i++)
1766 {
1767 num_vertices_in_component = 0;
1768 /* Get all vertices whose DFS component number is the same as i. */
1769 for (j = 0; j < num_vertices; j++)
1770 if (g->vertices[j].component == i)
1771 vertices[num_vertices_in_component++] = j;
1772
1773 /* Now test if the vertices in 'vertices' form a clique, by testing
1774 for edges among each pair. */
1775 this_component_is_clique = 1;
1776 for (v_indx1 = 0; v_indx1 < num_vertices_in_component; v_indx1++)
1777 {
1778 for (v_indx2 = v_indx1+1; v_indx2 < num_vertices_in_component; v_indx2++)
1779 {
1780 /* Check if the two vertices are connected by iterating
1781 through all the edges which have one of them as source. */
1782 e = g->vertices[vertices[v_indx2]].pred;
1783 while (e)
1784 {
1785 if (e->src == vertices[v_indx1])
1786 break;
1787 e = e->pred_next;
1788 }
1789 if (!e)
1790 {
1791 this_component_is_clique = 0;
1792 break;
1793 }
1794 }
1795 if (!this_component_is_clique)
1796 all_components_are_cliques = 0;
1797 }
1798 }
1799
1800 free (all_vertices);
1801 free (vertices);
1802 free_graph (g);
1803 return all_components_are_cliques;
1804 }
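
/* Illustrative sketch (not part of the pass): with three data
   references A, B and C where A may alias B and B may alias C, but A
   and C do not alias, all three end up in the same DFS component and
   thus get the same alias set number, yet the component is not a
   clique because the edge A-C is missing, so the function returns 0.  */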
1805
1806 /* Group each data reference in DRS with its base object set num. */
1807
1808 static void
1809 build_base_obj_set_for_drs (vec<data_reference_p> drs)
1810 {
1811 int num_vertex = drs.length ();
1812 struct graph *g = new_graph (num_vertex);
1813 data_reference_p dr1, dr2;
1814 int i, j;
1815 int *queue;
1816
1817 FOR_EACH_VEC_ELT (drs, i, dr1)
1818 for (j = i + 1; drs.iterate (j, &dr2); j++)
1819 if (dr_same_base_object_p (dr1, dr2))
1820 {
1821 add_edge (g, i, j);
1822 add_edge (g, j, i);
1823 }
1824
1825 queue = XNEWVEC (int, num_vertex);
1826 for (i = 0; i < num_vertex; i++)
1827 queue[i] = i;
1828
1829 graphds_dfs (g, queue, num_vertex, NULL, true, NULL);
1830
1831 for (i = 0; i < g->n_vertices; i++)
1832 {
1833 data_reference_p dr = drs[i];
1834 base_alias_pair *bap;
1835
1836 gcc_assert (dr->aux);
1837 bap = (base_alias_pair *)(dr->aux);
1838
1839 bap->base_obj_set = g->vertices[i].component + 1;
1840 }
1841
1842 free (queue);
1843 free_graph (g);
1844 }
1845
1846 /* Build the data references for PBB. */
1847
1848 static void
1849 build_pbb_drs (poly_bb_p pbb)
1850 {
1851 int j;
1852 data_reference_p dr;
1853 vec<data_reference_p> gbb_drs = GBB_DATA_REFS (PBB_BLACK_BOX (pbb));
1854
1855 FOR_EACH_VEC_ELT (gbb_drs, j, dr)
1856 build_poly_dr (dr, pbb);
1857 }
1858
1859 /* Dump to file the alias graphs for the data references in DRS. */
1860
1861 static void
1862 dump_alias_graphs (vec<data_reference_p> drs)
1863 {
1864 char comment[100];
1865 FILE *file_dimacs, *file_ecc, *file_dot;
1866
1867 file_dimacs = fopen ("/tmp/dr_alias_graph_dimacs", "ab");
1868 if (file_dimacs)
1869 {
1870 snprintf (comment, sizeof (comment), "%s %s", main_input_filename,
1871 current_function_name ());
1872 write_alias_graph_to_ascii_dimacs (file_dimacs, comment, drs);
1873 fclose (file_dimacs);
1874 }
1875
1876 file_ecc = fopen ("/tmp/dr_alias_graph_ecc", "ab");
1877 if (file_ecc)
1878 {
1879 snprintf (comment, sizeof (comment), "%s %s", main_input_filename,
1880 current_function_name ());
1881 write_alias_graph_to_ascii_ecc (file_ecc, comment, drs);
1882 fclose (file_ecc);
1883 }
1884
1885 file_dot = fopen ("/tmp/dr_alias_graph_dot", "ab");
1886 if (file_dot)
1887 {
1888 snprintf (comment, sizeof (comment), "%s %s", main_input_filename,
1889 current_function_name ());
1890 write_alias_graph_to_ascii_dot (file_dot, comment, drs);
1891 fclose (file_dot);
1892 }
1893 }
1894
1895 /* Build data references in SCOP. */
1896
1897 static void
1898 build_scop_drs (scop_p scop)
1899 {
1900 int i, j;
1901 poly_bb_p pbb;
1902 data_reference_p dr;
1903 vec<data_reference_p> drs;
1904 drs.create (3);
1905
1906 /* Remove all the PBBs that do not have data references: these basic
1907 blocks are not handled in the polyhedral representation. */
1908 for (i = 0; SCOP_BBS (scop).iterate (i, &pbb); i++)
1909 if (GBB_DATA_REFS (PBB_BLACK_BOX (pbb)).is_empty ())
1910 {
1911 free_gimple_bb (PBB_BLACK_BOX (pbb));
1912 free_poly_bb (pbb);
1913 SCOP_BBS (scop).ordered_remove (i);
1914 i--;
1915 }
1916
1917 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
1918 for (j = 0; GBB_DATA_REFS (PBB_BLACK_BOX (pbb)).iterate (j, &dr); j++)
1919 drs.safe_push (dr);
1920
1921 FOR_EACH_VEC_ELT (drs, i, dr)
1922 dr->aux = XNEW (base_alias_pair);
1923
1924 if (!build_alias_set_optimal_p (drs))
1925 {
1926 /* TODO: Add support when building alias set is not optimal. */
1927 ;
1928 }
1929
1930 build_base_obj_set_for_drs (drs);
1931
1932 /* When debugging, enable the following code. This cannot be used
1933 in production compilers. */
1934 if (0)
1935 dump_alias_graphs (drs);
1936
1937 drs.release ();
1938
1939 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
1940 build_pbb_drs (pbb);
1941 }
1942
1943 /* Return a gsi at the position of the phi node STMT. */
1944
1945 static gimple_stmt_iterator
1946 gsi_for_phi_node (gimple stmt)
1947 {
1948 gimple_stmt_iterator psi;
1949 basic_block bb = gimple_bb (stmt);
1950
1951 for (psi = gsi_start_phis (bb); !gsi_end_p (psi); gsi_next (&psi))
1952 if (stmt == gsi_stmt (psi))
1953 return psi;
1954
1955 gcc_unreachable ();
1956 return psi;
1957 }
1958
1959 /* Analyze all the data references of STMTS and add them to the
1960 GBB_DATA_REFS vector of BB. */
1961
1962 static void
1963 analyze_drs_in_stmts (scop_p scop, basic_block bb, vec<gimple> stmts)
1964 {
1965 loop_p nest;
1966 gimple_bb_p gbb;
1967 gimple stmt;
1968 int i;
1969 sese region = SCOP_REGION (scop);
1970
1971 if (!bb_in_sese_p (bb, region))
1972 return;
1973
1974 nest = outermost_loop_in_sese_1 (region, bb);
1975 gbb = gbb_from_bb (bb);
1976
1977 FOR_EACH_VEC_ELT (stmts, i, stmt)
1978 {
1979 loop_p loop;
1980
1981 if (is_gimple_debug (stmt))
1982 continue;
1983
1984 loop = loop_containing_stmt (stmt);
1985 if (!loop_in_sese_p (loop, region))
1986 loop = nest;
1987
1988 graphite_find_data_references_in_stmt (nest, loop, stmt,
1989 &GBB_DATA_REFS (gbb));
1990 }
1991 }
1992
1993 /* Append STMT to the STMTS sequence, insert the statements from STMTS
1994 before INSERT_GSI, and call analyze_drs_in_stmts on them. */
1996
1997 static void
1998 insert_stmts (scop_p scop, gimple stmt, gimple_seq stmts,
1999 gimple_stmt_iterator insert_gsi)
2000 {
2001 gimple_stmt_iterator gsi;
2002 vec<gimple> x;
2003 x.create (3);
2004
2005 gimple_seq_add_stmt (&stmts, stmt);
2006 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
2007 x.safe_push (gsi_stmt (gsi));
2008
2009 gsi_insert_seq_before (&insert_gsi, stmts, GSI_SAME_STMT);
2010 analyze_drs_in_stmts (scop, gsi_bb (insert_gsi), x);
2011 x.release ();
2012 }
2013
2014 /* Insert the assignment "RES := EXPR" just after AFTER_STMT. */
2015
2016 static void
2017 insert_out_of_ssa_copy (scop_p scop, tree res, tree expr, gimple after_stmt)
2018 {
2019 gimple_seq stmts;
2020 gimple_stmt_iterator gsi;
2021 tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
2022 gimple stmt = gimple_build_assign (unshare_expr (res), var);
2023 vec<gimple> x;
2024 x.create (3);
2025
2026 gimple_seq_add_stmt (&stmts, stmt);
2027 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
2028 x.safe_push (gsi_stmt (gsi));
2029
2030 if (gimple_code (after_stmt) == GIMPLE_PHI)
2031 {
2032 gsi = gsi_after_labels (gimple_bb (after_stmt));
2033 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
2034 }
2035 else
2036 {
2037 gsi = gsi_for_stmt (after_stmt);
2038 gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
2039 }
2040
2041 analyze_drs_in_stmts (scop, gimple_bb (after_stmt), x);
2042 x.release ();
2043 }
2044
2045 /* Creates a poly_bb_p for basic_block BB from the existing PBB. */
2046
2047 static void
2048 new_pbb_from_pbb (scop_p scop, poly_bb_p pbb, basic_block bb)
2049 {
2050 vec<data_reference_p> drs;
2051 drs.create (3);
2052 gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
2053 gimple_bb_p gbb1 = new_gimple_bb (bb, drs);
2054 poly_bb_p pbb1 = new_poly_bb (scop, gbb1);
2055 int index, n = SCOP_BBS (scop).length ();
2056
2057 /* The INDEX of PBB in SCOP_BBS. */
2058 for (index = 0; index < n; index++)
2059 if (SCOP_BBS (scop)[index] == pbb)
2060 break;
2061
2062 pbb1->domain = isl_set_copy (pbb->domain);
2063
2064 GBB_PBB (gbb1) = pbb1;
2065 GBB_CONDITIONS (gbb1) = GBB_CONDITIONS (gbb).copy ();
2066 GBB_CONDITION_CASES (gbb1) = GBB_CONDITION_CASES (gbb).copy ();
2067 SCOP_BBS (scop).safe_insert (index + 1, pbb1);
2068 }
2069
2070 /* Insert on edge E the assignment "RES := EXPR". */
2071
2072 static void
2073 insert_out_of_ssa_copy_on_edge (scop_p scop, edge e, tree res, tree expr)
2074 {
2075 gimple_stmt_iterator gsi;
2076 gimple_seq stmts = NULL;
2077 tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
2078 gimple stmt = gimple_build_assign (unshare_expr (res), var);
2079 basic_block bb;
2080 vec<gimple> x;
2081 x.create (3);
2082
2083 gimple_seq_add_stmt (&stmts, stmt);
2084 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
2085 x.safe_push (gsi_stmt (gsi));
2086
2087 gsi_insert_seq_on_edge (e, stmts);
2088 gsi_commit_edge_inserts ();
2089 bb = gimple_bb (stmt);
2090
2091 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
2092 return;
2093
2094 if (!gbb_from_bb (bb))
2095 new_pbb_from_pbb (scop, pbb_from_bb (e->src), bb);
2096
2097 analyze_drs_in_stmts (scop, bb, x);
2098 x.release ();
2099 }
2100
2101 /* Creates a zero dimension array of the same type as VAR. */
2102
2103 static tree
2104 create_zero_dim_array (tree var, const char *base_name)
2105 {
2106 tree index_type = build_index_type (integer_zero_node);
2107 tree elt_type = TREE_TYPE (var);
2108 tree array_type = build_array_type (elt_type, index_type);
2109 tree base = create_tmp_var (array_type, base_name);
2110
2111 return build4 (ARRAY_REF, elt_type, base, integer_zero_node, NULL_TREE,
2112 NULL_TREE);
2113 }
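
/* For example, for a scalar of type double this builds a one-element
   temporary array

     double tmp[1];   <- "tmp" stands for the variable created from BASE_NAME

   and returns the reference tmp[0], so that a scalar value can be
   spilled to and reloaded from memory by the out of SSA rewrites
   below.  */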
2114
2115 /* Returns true when PHI is a loop close phi node. */
2116
2117 static bool
2118 scalar_close_phi_node_p (gimple phi)
2119 {
2120 if (gimple_code (phi) != GIMPLE_PHI
2121 || virtual_operand_p (gimple_phi_result (phi)))
2122 return false;
2123
2124 /* Note that loop close phi nodes should have a single argument
2125 because we translated the representation into a canonical form
2126 before Graphite: see canonicalize_loop_closed_ssa_form. */
2127 return (gimple_phi_num_args (phi) == 1);
2128 }
2129
2130 /* For a definition DEF in REGION, propagates the expression EXPR in
2131 all the uses of DEF outside REGION. */
2132
2133 static void
2134 propagate_expr_outside_region (tree def, tree expr, sese region)
2135 {
2136 imm_use_iterator imm_iter;
2137 gimple use_stmt;
2138 gimple_seq stmts;
2139 bool replaced_once = false;
2140
2141 gcc_assert (TREE_CODE (def) == SSA_NAME);
2142
2143 expr = force_gimple_operand (unshare_expr (expr), &stmts, true,
2144 NULL_TREE);
2145
2146 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
2147 if (!is_gimple_debug (use_stmt)
2148 && !bb_in_sese_p (gimple_bb (use_stmt), region))
2149 {
2150 ssa_op_iter iter;
2151 use_operand_p use_p;
2152
2153 FOR_EACH_PHI_OR_STMT_USE (use_p, use_stmt, iter, SSA_OP_ALL_USES)
2154 if (operand_equal_p (def, USE_FROM_PTR (use_p), 0)
2155 && (replaced_once = true))
2156 replace_exp (use_p, expr);
2157
2158 update_stmt (use_stmt);
2159 }
2160
2161 if (replaced_once)
2162 {
2163 gsi_insert_seq_on_edge (SESE_ENTRY (region), stmts);
2164 gsi_commit_edge_inserts ();
2165 }
2166 }
2167
2168 /* Rewrite out of SSA the loop close phi node at PSI by creating a zero
2169    dimension array for it.  */
2170
2171 static void
2172 rewrite_close_phi_out_of_ssa (scop_p scop, gimple_stmt_iterator *psi)
2173 {
2174 sese region = SCOP_REGION (scop);
2175 gimple phi = gsi_stmt (*psi);
2176 tree res = gimple_phi_result (phi);
2177 basic_block bb = gimple_bb (phi);
2178 gimple_stmt_iterator gsi = gsi_after_labels (bb);
2179 tree arg = gimple_phi_arg_def (phi, 0);
2180 gimple stmt;
2181
2182 /* Note that loop close phi nodes should have a single argument
2183 because we translated the representation into a canonical form
2184 before Graphite: see canonicalize_loop_closed_ssa_form. */
2185 gcc_assert (gimple_phi_num_args (phi) == 1);
2186
2187   /* The phi node can be a non close phi node, when its argument is
2188      invariant or is a default definition.  */
2189 if (is_gimple_min_invariant (arg)
2190 || SSA_NAME_IS_DEFAULT_DEF (arg))
2191 {
2192 propagate_expr_outside_region (res, arg, region);
2193 gsi_next (psi);
2194 return;
2195 }
2196
2197 else if (gimple_bb (SSA_NAME_DEF_STMT (arg))->loop_father == bb->loop_father)
2198 {
2199 propagate_expr_outside_region (res, arg, region);
2200 stmt = gimple_build_assign (res, arg);
2201 remove_phi_node (psi, false);
2202 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
2203 SSA_NAME_DEF_STMT (res) = stmt;
2204 return;
2205 }
2206
2207   /* If RES is scev analyzable, it is safe to ignore the close
2208      phi node: it will be code generated in the out of Graphite
2209      pass.  */
2210 else if (scev_analyzable_p (res, region))
2211 {
2212 loop_p loop = loop_containing_stmt (SSA_NAME_DEF_STMT (res));
2213 tree scev;
2214
2215 if (!loop_in_sese_p (loop, region))
2216 {
2217 loop = loop_containing_stmt (SSA_NAME_DEF_STMT (arg));
2218 scev = scalar_evolution_in_region (region, loop, arg);
2219 scev = compute_overall_effect_of_inner_loop (loop, scev);
2220 }
2221 else
2222 scev = scalar_evolution_in_region (region, loop, res);
2223
2224 if (tree_does_not_contain_chrecs (scev))
2225 propagate_expr_outside_region (res, scev, region);
2226
2227 gsi_next (psi);
2228 return;
2229 }
2230 else
2231 {
2232 tree zero_dim_array = create_zero_dim_array (res, "Close_Phi");
2233
2234 stmt = gimple_build_assign (res, unshare_expr (zero_dim_array));
2235
2236 if (TREE_CODE (arg) == SSA_NAME)
2237 insert_out_of_ssa_copy (scop, zero_dim_array, arg,
2238 SSA_NAME_DEF_STMT (arg));
2239 else
2240 insert_out_of_ssa_copy_on_edge (scop, single_pred_edge (bb),
2241 zero_dim_array, arg);
2242 }
2243
2244 remove_phi_node (psi, false);
2245 SSA_NAME_DEF_STMT (res) = stmt;
2246
2247 insert_stmts (scop, stmt, NULL, gsi_after_labels (bb));
2248 }
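
/* As an illustration, with hypothetical SSA names, a close phi node

     # res_2 = PHI <arg_1(loop exit edge)>

   handled by the last case above is rewritten into

     Close_Phi[0] = arg_1;    <- inserted just after the definition of arg_1
     res_2 = Close_Phi[0];    <- replaces the phi node at the start of BB

   where Close_Phi is the zero dimension array created for RES.  */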
2249
2250 /* Rewrite out of SSA the reduction phi node at PSI by creating a zero
2251 dimension array for it. */
2252
2253 static void
2254 rewrite_phi_out_of_ssa (scop_p scop, gimple_stmt_iterator *psi)
2255 {
2256 size_t i;
2257 gimple phi = gsi_stmt (*psi);
2258 basic_block bb = gimple_bb (phi);
2259 tree res = gimple_phi_result (phi);
2260 tree zero_dim_array = create_zero_dim_array (res, "phi_out_of_ssa");
2261 gimple stmt;
2262
2263 for (i = 0; i < gimple_phi_num_args (phi); i++)
2264 {
2265 tree arg = gimple_phi_arg_def (phi, i);
2266 edge e = gimple_phi_arg_edge (phi, i);
2267
2268 /* Avoid the insertion of code in the loop latch to please the
2269 pattern matching of the vectorizer. */
2270 if (TREE_CODE (arg) == SSA_NAME
2271 && e->src == bb->loop_father->latch)
2272 insert_out_of_ssa_copy (scop, zero_dim_array, arg,
2273 SSA_NAME_DEF_STMT (arg));
2274 else
2275 insert_out_of_ssa_copy_on_edge (scop, e, zero_dim_array, arg);
2276 }
2277
2278 stmt = gimple_build_assign (res, unshare_expr (zero_dim_array));
2279 remove_phi_node (psi, false);
2280 SSA_NAME_DEF_STMT (res) = stmt;
2281 insert_stmts (scop, stmt, NULL, gsi_after_labels (bb));
2282 }
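
/* As an illustration, with hypothetical SSA names, a loop phi node

     # res_1 = PHI <init_2(preheader), next_3(latch)>

   is rewritten into

     phi_out_of_ssa[0] = init_2;   <- on the preheader edge
     phi_out_of_ssa[0] = next_3;   <- just after the definition of next_3
     res_1 = phi_out_of_ssa[0];    <- at the start of BB

   The copy for the latch argument is placed after the definition of
   next_3 rather than on the latch edge, as explained above.  */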
2283
2284 /* Rewrite the degenerate phi node at position PSI, of the form
2285    "x = phi (y, y, ..., y)", into "x = y".  */
2286
2287 static void
2288 rewrite_degenerate_phi (gimple_stmt_iterator *psi)
2289 {
2290 tree rhs;
2291 gimple stmt;
2292 gimple_stmt_iterator gsi;
2293 gimple phi = gsi_stmt (*psi);
2294 tree res = gimple_phi_result (phi);
2295 basic_block bb;
2296
2297 bb = gimple_bb (phi);
2298 rhs = degenerate_phi_result (phi);
2299 gcc_assert (rhs);
2300
2301 stmt = gimple_build_assign (res, rhs);
2302 remove_phi_node (psi, false);
2303 SSA_NAME_DEF_STMT (res) = stmt;
2304
2305 gsi = gsi_after_labels (bb);
2306 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
2307 }
2308
2309 /* Rewrite out of SSA all the reduction phi nodes of SCOP. */
2310
2311 static void
2312 rewrite_reductions_out_of_ssa (scop_p scop)
2313 {
2314 basic_block bb;
2315 gimple_stmt_iterator psi;
2316 sese region = SCOP_REGION (scop);
2317
2318 FOR_EACH_BB (bb)
2319 if (bb_in_sese_p (bb, region))
2320 for (psi = gsi_start_phis (bb); !gsi_end_p (psi);)
2321 {
2322 gimple phi = gsi_stmt (psi);
2323
2324 if (virtual_operand_p (gimple_phi_result (phi)))
2325 {
2326 gsi_next (&psi);
2327 continue;
2328 }
2329
2330 if (gimple_phi_num_args (phi) > 1
2331 && degenerate_phi_result (phi))
2332 rewrite_degenerate_phi (&psi);
2333
2334 else if (scalar_close_phi_node_p (phi))
2335 rewrite_close_phi_out_of_ssa (scop, &psi);
2336
2337 else if (reduction_phi_p (region, &psi))
2338 rewrite_phi_out_of_ssa (scop, &psi);
2339 }
2340
2341 update_ssa (TODO_update_ssa);
2342 #ifdef ENABLE_CHECKING
2343 verify_loop_closed_ssa (true);
2344 #endif
2345 }
2346
2347 /* Rewrite the scalar dependence of DEF used in USE_STMT with a memory
2348 read from ZERO_DIM_ARRAY. */
2349
2350 static void
2351 rewrite_cross_bb_scalar_dependence (scop_p scop, tree zero_dim_array,
2352 tree def, gimple use_stmt)
2353 {
2354 gimple name_stmt;
2355 tree name;
2356 ssa_op_iter iter;
2357 use_operand_p use_p;
2358
2359 gcc_assert (gimple_code (use_stmt) != GIMPLE_PHI);
2360
2361 name = copy_ssa_name (def, NULL);
2362 name_stmt = gimple_build_assign (name, zero_dim_array);
2363
2364 gimple_assign_set_lhs (name_stmt, name);
2365 insert_stmts (scop, name_stmt, NULL, gsi_for_stmt (use_stmt));
2366
2367 FOR_EACH_SSA_USE_OPERAND (use_p, use_stmt, iter, SSA_OP_ALL_USES)
2368 if (operand_equal_p (def, USE_FROM_PTR (use_p), 0))
2369 replace_exp (use_p, name);
2370
2371 update_stmt (use_stmt);
2372 }
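
/* For example, with hypothetical SSA names, a use of def_1 located in
   another basic block, as in

     c_3 = def_1 * 2;

   is rewritten into

     def_4 = Cross_BB_scalar_dependence[0];
     c_3 = def_4 * 2;

   where def_4 is a fresh copy of DEF, the load is inserted right before
   USE_STMT, and Cross_BB_scalar_dependence is the ZERO_DIM_ARRAY
   created by the caller.  */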
2373
2374 /* When the definition DEF defined by STMT in the SCOP is used outside
2375    the scop, redirect those uses to a new name and insert its definition
2376    in the basic block just after this SCOP.  */
2377
2378 static void
2379 handle_scalar_deps_crossing_scop_limits (scop_p scop, tree def, gimple stmt)
2380 {
2381 tree var = create_tmp_reg (TREE_TYPE (def), NULL);
2382 tree new_name = make_ssa_name (var, stmt);
2383 bool needs_copy = false;
2384 use_operand_p use_p;
2385 imm_use_iterator imm_iter;
2386 gimple use_stmt;
2387 sese region = SCOP_REGION (scop);
2388
2389 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
2390 {
2391 if (!bb_in_sese_p (gimple_bb (use_stmt), region))
2392 {
2393 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
2394 {
2395 SET_USE (use_p, new_name);
2396 }
2397 update_stmt (use_stmt);
2398 needs_copy = true;
2399 }
2400 }
2401
2402 /* Insert in the empty BB just after the scop a use of DEF such
2403 that the rewrite of cross_bb_scalar_dependences won't insert
2404 arrays everywhere else. */
2405 if (needs_copy)
2406 {
2407 gimple assign = gimple_build_assign (new_name, def);
2408 gimple_stmt_iterator psi = gsi_after_labels (SESE_EXIT (region)->dest);
2409
2410 SSA_NAME_DEF_STMT (new_name) = assign;
2411 update_stmt (assign);
2412 gsi_insert_before (&psi, assign, GSI_SAME_STMT);
2413 }
2414 }
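
/* For instance, when DEF is used by statements located after the scop,
   those uses are redirected to a hypothetical fresh name new_name_5 and
   the copy

     new_name_5 = DEF;

   is inserted in the empty basic block following the scop exit, so that
   the rewrite of cross basic block scalar dependences only has to
   handle this single use just after the region.  */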
2415
2416 /* Rewrite the scalar dependences crossing the boundary of the BB
2417 containing STMT with an array. Return true when something has been
2418 changed. */
2419
2420 static bool
2421 rewrite_cross_bb_scalar_deps (scop_p scop, gimple_stmt_iterator *gsi)
2422 {
2423 sese region = SCOP_REGION (scop);
2424 gimple stmt = gsi_stmt (*gsi);
2425 imm_use_iterator imm_iter;
2426 tree def;
2427 basic_block def_bb;
2428 tree zero_dim_array = NULL_TREE;
2429 gimple use_stmt;
2430 bool res = false;
2431
2432 switch (gimple_code (stmt))
2433 {
2434 case GIMPLE_ASSIGN:
2435 def = gimple_assign_lhs (stmt);
2436 break;
2437
2438 case GIMPLE_CALL:
2439 def = gimple_call_lhs (stmt);
2440 break;
2441
2442 default:
2443 return false;
2444 }
2445
2446 if (!def
2447 || !is_gimple_reg (def))
2448 return false;
2449
2450 if (scev_analyzable_p (def, region))
2451 {
2452 loop_p loop = loop_containing_stmt (SSA_NAME_DEF_STMT (def));
2453 tree scev = scalar_evolution_in_region (region, loop, def);
2454
2455 if (tree_contains_chrecs (scev, NULL))
2456 return false;
2457
2458 propagate_expr_outside_region (def, scev, region);
2459 return true;
2460 }
2461
2462 def_bb = gimple_bb (stmt);
2463
2464 handle_scalar_deps_crossing_scop_limits (scop, def, stmt);
2465
2466 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
2467 if (gimple_code (use_stmt) == GIMPLE_PHI
2468 && (res = true))
2469 {
2470 gimple_stmt_iterator psi = gsi_for_stmt (use_stmt);
2471
2472 if (scalar_close_phi_node_p (gsi_stmt (psi)))
2473 rewrite_close_phi_out_of_ssa (scop, &psi);
2474 else
2475 rewrite_phi_out_of_ssa (scop, &psi);
2476 }
2477
2478 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
2479 if (gimple_code (use_stmt) != GIMPLE_PHI
2480 && def_bb != gimple_bb (use_stmt)
2481 && !is_gimple_debug (use_stmt)
2482 && (res = true))
2483 {
2484 if (!zero_dim_array)
2485 {
2486 zero_dim_array = create_zero_dim_array
2487 (def, "Cross_BB_scalar_dependence");
2488 insert_out_of_ssa_copy (scop, zero_dim_array, def,
2489 SSA_NAME_DEF_STMT (def));
2490 gsi_next (gsi);
2491 }
2492
2493 rewrite_cross_bb_scalar_dependence (scop, zero_dim_array,
2494 def, use_stmt);
2495 }
2496
2497 return res;
2498 }
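
/* Putting the above together on a hypothetical region where

     bb_1:  a_1 = b_2 + 1;
     bb_2:  c_3 = a_1 * 2;

   the store "Cross_BB_scalar_dependence[0] = a_1" is inserted right
   after the definition of a_1 and the use in bb_2 is rewritten to read
   from that array.  When the value of the definition is scev analyzable
   to an expression free of chrecs, that expression is propagated to the
   uses instead and no array is created.  */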
2499
2500 /* Rewrite out of SSA all the cross basic block scalar dependences of SCOP. */
2501
2502 static void
2503 rewrite_cross_bb_scalar_deps_out_of_ssa (scop_p scop)
2504 {
2505 basic_block bb;
2506 gimple_stmt_iterator psi;
2507 sese region = SCOP_REGION (scop);
2508 bool changed = false;
2509
2510 /* Create an extra empty BB after the scop. */
2511 split_edge (SESE_EXIT (region));
2512
2513 FOR_EACH_BB (bb)
2514 if (bb_in_sese_p (bb, region))
2515 for (psi = gsi_start_bb (bb); !gsi_end_p (psi); gsi_next (&psi))
2516 changed |= rewrite_cross_bb_scalar_deps (scop, &psi);
2517
2518 if (changed)
2519 {
2520 scev_reset_htab ();
2521 update_ssa (TODO_update_ssa);
2522 #ifdef ENABLE_CHECKING
2523 verify_loop_closed_ssa (true);
2524 #endif
2525 }
2526 }
2527
2528 /* Returns the number of pbbs that are in loops contained in SCOP. */
2529
2530 static int
2531 nb_pbbs_in_loops (scop_p scop)
2532 {
2533 int i;
2534 poly_bb_p pbb;
2535 int res = 0;
2536
2537 FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
2538 if (loop_in_sese_p (gbb_loop (PBB_BLACK_BOX (pbb)), SCOP_REGION (scop)))
2539 res++;
2540
2541 return res;
2542 }
2543
2544 /* Return the number of statements in BB that write to
2545    memory.  */
2546
2547 static int
2548 nb_data_writes_in_bb (basic_block bb)
2549 {
2550 int res = 0;
2551 gimple_stmt_iterator gsi;
2552
2553 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2554 if (gimple_vdef (gsi_stmt (gsi)))
2555 res++;
2556
2557 return res;
2558 }
2559
2560 /* Splits at STMT the basic block BB represented as PBB in the
2561 polyhedral form. */
2562
2563 static edge
2564 split_pbb (scop_p scop, poly_bb_p pbb, basic_block bb, gimple stmt)
2565 {
2566 edge e1 = split_block (bb, stmt);
2567 new_pbb_from_pbb (scop, pbb, e1->dest);
2568 return e1;
2569 }
2570
2571 /* Splits STMT out of its current BB. This is done for reduction
2572 statements for which we want to ignore data dependences. */
2573
2574 static basic_block
2575 split_reduction_stmt (scop_p scop, gimple stmt)
2576 {
2577 basic_block bb = gimple_bb (stmt);
2578 poly_bb_p pbb = pbb_from_bb (bb);
2579 gimple_bb_p gbb = gbb_from_bb (bb);
2580 edge e1;
2581 int i;
2582 data_reference_p dr;
2583
2584 /* Do not split basic blocks with no writes to memory: the reduction
2585 will be the only write to memory. */
2586 if (nb_data_writes_in_bb (bb) == 0
2587 /* Or if we have already marked BB as a reduction. */
2588 || PBB_IS_REDUCTION (pbb_from_bb (bb)))
2589 return bb;
2590
2591 e1 = split_pbb (scop, pbb, bb, stmt);
2592
2593 /* Split once more only when the reduction stmt is not the only one
2594 left in the original BB. */
2595 if (!gsi_one_before_end_p (gsi_start_nondebug_bb (bb)))
2596 {
2597 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2598 gsi_prev (&gsi);
2599 e1 = split_pbb (scop, pbb, bb, gsi_stmt (gsi));
2600 }
2601
2602 /* A part of the data references will end in a different basic block
2603 after the split: move the DRs from the original GBB to the newly
2604 created GBB1. */
2605 FOR_EACH_VEC_ELT (GBB_DATA_REFS (gbb), i, dr)
2606 {
2607 basic_block bb1 = gimple_bb (DR_STMT (dr));
2608
2609 if (bb1 != bb)
2610 {
2611 gimple_bb_p gbb1 = gbb_from_bb (bb1);
2612 GBB_DATA_REFS (gbb1).safe_push (dr);
2613 GBB_DATA_REFS (gbb).ordered_remove (i);
2614 i--;
2615 }
2616 }
2617
2618 return e1->dest;
2619 }
2620
2621 /* Return true when STMT is a reduction operation.  */
2622
2623 static inline bool
2624 is_reduction_operation_p (gimple stmt)
2625 {
2626 enum tree_code code;
2627
2628 gcc_assert (is_gimple_assign (stmt));
2629 code = gimple_assign_rhs_code (stmt);
2630
2631 return flag_associative_math
2632 && commutative_tree_code (code)
2633 && associative_tree_code (code);
2634 }
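
/* For example, under -fassociative-math, additions and multiplications
   (PLUS_EXPR, MULT_EXPR) qualify as reduction operations, whereas a
   subtraction (MINUS_EXPR) does not, as it is neither commutative nor
   associative.  */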
2635
2636 /* Returns true when PHI contains an argument ARG. */
2637
2638 static bool
2639 phi_contains_arg (gimple phi, tree arg)
2640 {
2641 size_t i;
2642
2643 for (i = 0; i < gimple_phi_num_args (phi); i++)
2644 if (operand_equal_p (arg, gimple_phi_arg_def (phi, i), 0))
2645 return true;
2646
2647 return false;
2648 }
2649
2650 /* Return a loop phi node that corresponds to a reduction containing LHS. */
2651
2652 static gimple
2653 follow_ssa_with_commutative_ops (tree arg, tree lhs)
2654 {
2655 gimple stmt;
2656
2657 if (TREE_CODE (arg) != SSA_NAME)
2658 return NULL;
2659
2660 stmt = SSA_NAME_DEF_STMT (arg);
2661
2662 if (gimple_code (stmt) == GIMPLE_NOP
2663 || gimple_code (stmt) == GIMPLE_CALL)
2664 return NULL;
2665
2666 if (gimple_code (stmt) == GIMPLE_PHI)
2667 {
2668 if (phi_contains_arg (stmt, lhs))
2669 return stmt;
2670 return NULL;
2671 }
2672
2673 if (!is_gimple_assign (stmt))
2674 return NULL;
2675
2676 if (gimple_num_ops (stmt) == 2)
2677 return follow_ssa_with_commutative_ops (gimple_assign_rhs1 (stmt), lhs);
2678
2679 if (is_reduction_operation_p (stmt))
2680 {
2681 gimple res = follow_ssa_with_commutative_ops (gimple_assign_rhs1 (stmt), lhs);
2682
2683 return res ? res :
2684 follow_ssa_with_commutative_ops (gimple_assign_rhs2 (stmt), lhs);
2685 }
2686
2687 return NULL;
2688 }
2689
2690 /* Detect commutative and associative scalar reductions starting at
2691 the STMT. Return the phi node of the reduction cycle, or NULL. */
2692
2693 static gimple
2694 detect_commutative_reduction_arg (tree lhs, gimple stmt, tree arg,
2695 vec<gimple> *in,
2696 vec<gimple> *out)
2697 {
2698 gimple phi = follow_ssa_with_commutative_ops (arg, lhs);
2699
2700 if (!phi)
2701 return NULL;
2702
2703 in->safe_push (stmt);
2704 out->safe_push (stmt);
2705 return phi;
2706 }
2707
2708 /* Detect commutative and associative scalar reductions starting at
2709 STMT. Return the phi node of the reduction cycle, or NULL. */
2710
2711 static gimple
2712 detect_commutative_reduction_assign (gimple stmt, vec<gimple> *in,
2713 vec<gimple> *out)
2714 {
2715 tree lhs = gimple_assign_lhs (stmt);
2716
2717 if (gimple_num_ops (stmt) == 2)
2718 return detect_commutative_reduction_arg (lhs, stmt,
2719 gimple_assign_rhs1 (stmt),
2720 in, out);
2721
2722 if (is_reduction_operation_p (stmt))
2723 {
2724 gimple res = detect_commutative_reduction_arg (lhs, stmt,
2725 gimple_assign_rhs1 (stmt),
2726 in, out);
2727 return res ? res
2728 : detect_commutative_reduction_arg (lhs, stmt,
2729 gimple_assign_rhs2 (stmt),
2730 in, out);
2731 }
2732
2733 return NULL;
2734 }
2735
2736 /* Return a loop phi node that corresponds to a reduction containing LHS. */
2737
2738 static gimple
2739 follow_inital_value_to_phi (tree arg, tree lhs)
2740 {
2741 gimple stmt;
2742
2743 if (!arg || TREE_CODE (arg) != SSA_NAME)
2744 return NULL;
2745
2746 stmt = SSA_NAME_DEF_STMT (arg);
2747
2748 if (gimple_code (stmt) == GIMPLE_PHI
2749 && phi_contains_arg (stmt, lhs))
2750 return stmt;
2751
2752 return NULL;
2753 }
2754
2755
2756 /* Return the edge through which the initial value of the loop PHI
2757    comes from outside the loop.  */
2758
2759 static edge
2760 edge_initial_value_for_loop_phi (gimple phi)
2761 {
2762 size_t i;
2763
2764 for (i = 0; i < gimple_phi_num_args (phi); i++)
2765 {
2766 edge e = gimple_phi_arg_edge (phi, i);
2767
2768 if (loop_depth (e->src->loop_father)
2769 < loop_depth (e->dest->loop_father))
2770 return e;
2771 }
2772
2773 return NULL;
2774 }
2775
2776 /* Return the argument of the loop PHI that is the initial value coming
2777 from outside the loop. */
2778
2779 static tree
2780 initial_value_for_loop_phi (gimple phi)
2781 {
2782 size_t i;
2783
2784 for (i = 0; i < gimple_phi_num_args (phi); i++)
2785 {
2786 edge e = gimple_phi_arg_edge (phi, i);
2787
2788 if (loop_depth (e->src->loop_father)
2789 < loop_depth (e->dest->loop_father))
2790 return gimple_phi_arg_def (phi, i);
2791 }
2792
2793 return NULL_TREE;
2794 }
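
/* For instance, for the loop phi node

     # s_1 = PHI <s_0(preheader), s_2(latch)>

   the preheader edge comes from a shallower loop than the loop header,
   so edge_initial_value_for_loop_phi returns that edge and
   initial_value_for_loop_phi returns s_0.  */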
2795
2796 /* Returns true when DEF is used outside the reduction cycle of
2797 LOOP_PHI. */
2798
2799 static bool
2800 used_outside_reduction (tree def, gimple loop_phi)
2801 {
2802 use_operand_p use_p;
2803 imm_use_iterator imm_iter;
2804 loop_p loop = loop_containing_stmt (loop_phi);
2805
2806 /* In LOOP, DEF should be used only in LOOP_PHI. */
2807 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
2808 {
2809 gimple stmt = USE_STMT (use_p);
2810
2811 if (stmt != loop_phi
2812 && !is_gimple_debug (stmt)
2813 && flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
2814 return true;
2815 }
2816
2817 return false;
2818 }
2819
2820 /* Detect commutative and associative scalar reductions belonging to
2821 the SCOP starting at the loop closed phi node STMT. Return the phi
2822 node of the reduction cycle, or NULL. */
2823
2824 static gimple
2825 detect_commutative_reduction (scop_p scop, gimple stmt, vec<gimple> *in,
2826 vec<gimple> *out)
2827 {
2828 if (scalar_close_phi_node_p (stmt))
2829 {
2830 gimple def, loop_phi, phi, close_phi = stmt;
2831 tree init, lhs, arg = gimple_phi_arg_def (close_phi, 0);
2832
2833 if (TREE_CODE (arg) != SSA_NAME)
2834 return NULL;
2835
2836 /* Note that loop close phi nodes should have a single argument
2837 because we translated the representation into a canonical form
2838 before Graphite: see canonicalize_loop_closed_ssa_form. */
2839 gcc_assert (gimple_phi_num_args (close_phi) == 1);
2840
2841 def = SSA_NAME_DEF_STMT (arg);
2842 if (!stmt_in_sese_p (def, SCOP_REGION (scop))
2843 || !(loop_phi = detect_commutative_reduction (scop, def, in, out)))
2844 return NULL;
2845
2846 lhs = gimple_phi_result (close_phi);
2847 init = initial_value_for_loop_phi (loop_phi);
2848 phi = follow_inital_value_to_phi (init, lhs);
2849
2850 if (phi && (used_outside_reduction (lhs, phi)
2851 || !has_single_use (gimple_phi_result (phi))))
2852 return NULL;
2853
2854 in->safe_push (loop_phi);
2855 out->safe_push (close_phi);
2856 return phi;
2857 }
2858
2859 if (gimple_code (stmt) == GIMPLE_ASSIGN)
2860 return detect_commutative_reduction_assign (stmt, in, out);
2861
2862 return NULL;
2863 }
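
/* As an illustration, for a simple sum reduction

     for (i = 0; i < n; i++)
       s += a[i];

   with the loop phi "s_1 = PHI <s_0(preheader), s_2(latch)>", the
   statement "s_2 = s_1 + a[i]" and the close phi "s_3 = PHI <s_2(exit)>",
   the detection starting at the close phi fills IN with the statement
   followed by the loop phi, OUT with the statement followed by the
   close phi, and returns NULL because s_0 is not defined by a phi of an
   enclosing reduction loop.  */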
2864
2865 /* Translate the scalar reduction statement STMT to an array RED
2866 knowing that its recursive phi node is LOOP_PHI. */
2867
2868 static void
2869 translate_scalar_reduction_to_array_for_stmt (scop_p scop, tree red,
2870 gimple stmt, gimple loop_phi)
2871 {
2872 tree res = gimple_phi_result (loop_phi);
2873 gimple assign = gimple_build_assign (res, unshare_expr (red));
2874 gimple_stmt_iterator gsi;
2875
2876 insert_stmts (scop, assign, NULL, gsi_after_labels (gimple_bb (loop_phi)));
2877
2878 assign = gimple_build_assign (unshare_expr (red), gimple_assign_lhs (stmt));
2879 gsi = gsi_for_stmt (stmt);
2880 gsi_next (&gsi);
2881 insert_stmts (scop, assign, NULL, gsi);
2882 }
2883
2884 /* Removes the PHI node and resets all the debug stmts that are using
2885 the PHI_RESULT. */
2886
2887 static void
2888 remove_phi (gimple phi)
2889 {
2890 imm_use_iterator imm_iter;
2891 tree def;
2892 use_operand_p use_p;
2893 gimple_stmt_iterator gsi;
2894 vec<gimple> update;
2895 update.create (3);
2896 unsigned int i;
2897 gimple stmt;
2898
2899 def = PHI_RESULT (phi);
2900 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
2901 {
2902 stmt = USE_STMT (use_p);
2903
2904 if (is_gimple_debug (stmt))
2905 {
2906 gimple_debug_bind_reset_value (stmt);
2907 update.safe_push (stmt);
2908 }
2909 }
2910
2911 FOR_EACH_VEC_ELT (update, i, stmt)
2912 update_stmt (stmt);
2913
2914 update.release ();
2915
2916 gsi = gsi_for_phi_node (phi);
2917 remove_phi_node (&gsi, false);
2918 }
2919
2920 /* Helper function for for_each_index. For each INDEX of the data
2921 reference REF, returns true when its indices are valid in the loop
2922 nest LOOP passed in as DATA. */
2923
2924 static bool
2925 dr_indices_valid_in_loop (tree ref ATTRIBUTE_UNUSED, tree *index, void *data)
2926 {
2927 loop_p loop;
2928 basic_block header, def_bb;
2929 gimple stmt;
2930
2931 if (TREE_CODE (*index) != SSA_NAME)
2932 return true;
2933
2934 loop = *((loop_p *) data);
2935 header = loop->header;
2936 stmt = SSA_NAME_DEF_STMT (*index);
2937
2938 if (!stmt)
2939 return true;
2940
2941 def_bb = gimple_bb (stmt);
2942
2943 if (!def_bb)
2944 return true;
2945
2946 return dominated_by_p (CDI_DOMINATORS, header, def_bb);
2947 }
2948
2949 /* When the result of a CLOSE_PHI is written to a memory location,
2950 return a pointer to that memory reference, otherwise return
2951 NULL_TREE. */
2952
2953 static tree
2954 close_phi_written_to_memory (gimple close_phi)
2955 {
2956 imm_use_iterator imm_iter;
2957 use_operand_p use_p;
2958 gimple stmt;
2959 tree res, def = gimple_phi_result (close_phi);
2960
2961 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
2962 if ((stmt = USE_STMT (use_p))
2963 && gimple_code (stmt) == GIMPLE_ASSIGN
2964 && (res = gimple_assign_lhs (stmt)))
2965 {
2966 switch (TREE_CODE (res))
2967 {
2968 case VAR_DECL:
2969 case PARM_DECL:
2970 case RESULT_DECL:
2971 return res;
2972
2973 case ARRAY_REF:
2974 case MEM_REF:
2975 {
2976 tree arg = gimple_phi_arg_def (close_phi, 0);
2977 loop_p nest = loop_containing_stmt (SSA_NAME_DEF_STMT (arg));
2978
2979 /* FIXME: this restriction is for id-{24,25}.f and
2980 could be handled by duplicating the computation of
2981 array indices before the loop of the close_phi. */
2982 if (for_each_index (&res, dr_indices_valid_in_loop, &nest))
2983 return res;
2984 }
2985 /* Fallthru. */
2986
2987 default:
2988 continue;
2989 }
2990 }
2991 return NULL_TREE;
2992 }
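
/* For example, when the result of the close phi is immediately stored
   to memory, as in

     # s_3 = PHI <s_2(loop exit edge)>
     A[j_4] = s_3;

   this returns the reference A[j_4], so that the reduction can reuse
   that location instead of a new zero dimension array, provided the
   array indices are computable before the loop.  */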
2993
2994 /* Rewrite out of SSA the reduction described by the loop phi nodes
2995 IN, and the close phi nodes OUT. IN and OUT are structured by loop
2996 levels like this:
2997
2998 IN: stmt, loop_n, ..., loop_0
2999 OUT: stmt, close_n, ..., close_0
3000
3001 the first element is the reduction statement, and the next elements
3002 are the loop and close phi nodes of each of the outer loops. */
3003
3004 static void
3005 translate_scalar_reduction_to_array (scop_p scop,
3006 vec<gimple> in,
3007 vec<gimple> out)
3008 {
3009 gimple loop_phi;
3010 unsigned int i = out.length () - 1;
3011 tree red = close_phi_written_to_memory (out[i]);
3012
3013 FOR_EACH_VEC_ELT (in, i, loop_phi)
3014 {
3015 gimple close_phi = out[i];
3016
3017 if (i == 0)
3018 {
3019 gimple stmt = loop_phi;
3020 basic_block bb = split_reduction_stmt (scop, stmt);
3021 poly_bb_p pbb = pbb_from_bb (bb);
3022 PBB_IS_REDUCTION (pbb) = true;
3023 gcc_assert (close_phi == loop_phi);
3024
3025 if (!red)
3026 red = create_zero_dim_array
3027 (gimple_assign_lhs (stmt), "Commutative_Associative_Reduction");
3028
3029 translate_scalar_reduction_to_array_for_stmt (scop, red, stmt, in[1]);
3030 continue;
3031 }
3032
3033 if (i == in.length () - 1)
3034 {
3035 insert_out_of_ssa_copy (scop, gimple_phi_result (close_phi),
3036 unshare_expr (red), close_phi);
3037 insert_out_of_ssa_copy_on_edge
3038 (scop, edge_initial_value_for_loop_phi (loop_phi),
3039 unshare_expr (red), initial_value_for_loop_phi (loop_phi));
3040 }
3041
3042 remove_phi (loop_phi);
3043 remove_phi (close_phi);
3044 }
3045 }
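
/* For a simple sum reduction "s += a[i]", with red[0] standing for the
   reduction location (the memory reference found by
   close_phi_written_to_memory, or else a new zero dimension array), the
   generated code is roughly:

     red[0] = s_0;        <- on the preheader edge
     loop:
       s_1 = red[0];      <- at the start of the loop header
       s_2 = s_1 + a[i];
       red[0] = s_2;      <- right after the reduction statement
     after the loop:
       s_3 = red[0];      <- replaces the result of the close phi

   and the loop phi and close phi nodes are removed.  */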
3046
3047 /* Rewrites out of SSA a commutative reduction at CLOSE_PHI. Returns
3048 true when something has been changed. */
3049
3050 static bool
3051 rewrite_commutative_reductions_out_of_ssa_close_phi (scop_p scop,
3052 gimple close_phi)
3053 {
3054 bool res;
3055 vec<gimple> in;
3056 in.create (10);
3057 vec<gimple> out;
3058 out.create (10);
3059
3060 detect_commutative_reduction (scop, close_phi, &in, &out);
3061 res = in.length () > 1;
3062 if (res)
3063 translate_scalar_reduction_to_array (scop, in, out);
3064
3065 in.release ();
3066 out.release ();
3067 return res;
3068 }
3069
3070 /* Rewrites all the commutative reductions from LOOP out of SSA.
3071 Returns true when something has been changed. */
3072
3073 static bool
3074 rewrite_commutative_reductions_out_of_ssa_loop (scop_p scop,
3075 loop_p loop)
3076 {
3077 gimple_stmt_iterator gsi;
3078 edge exit = single_exit (loop);
3079 tree res;
3080 bool changed = false;
3081
3082 if (!exit)
3083 return false;
3084
3085 for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi))
3086 if ((res = gimple_phi_result (gsi_stmt (gsi)))
3087 && !virtual_operand_p (res)
3088 && !scev_analyzable_p (res, SCOP_REGION (scop)))
3089 changed |= rewrite_commutative_reductions_out_of_ssa_close_phi
3090 (scop, gsi_stmt (gsi));
3091
3092 return changed;
3093 }
3094
3095 /* Rewrites all the commutative reductions from SCOP out of SSA. */
3096
3097 static void
3098 rewrite_commutative_reductions_out_of_ssa (scop_p scop)
3099 {
3100 loop_iterator li;
3101 loop_p loop;
3102 bool changed = false;
3103 sese region = SCOP_REGION (scop);
3104
3105 FOR_EACH_LOOP (li, loop, 0)
3106 if (loop_in_sese_p (loop, region))
3107 changed |= rewrite_commutative_reductions_out_of_ssa_loop (scop, loop);
3108
3109 if (changed)
3110 {
3111 scev_reset_htab ();
3112 gsi_commit_edge_inserts ();
3113 update_ssa (TODO_update_ssa);
3114 #ifdef ENABLE_CHECKING
3115 verify_loop_closed_ssa (true);
3116 #endif
3117 }
3118 }
3119
3120 /* Can all ivs be represented by a signed integer?
3121 As CLooG might generate negative values in its expressions, signed loop ivs
3122 are required in the backend. */
3123
3124 static bool
3125 scop_ivs_can_be_represented (scop_p scop)
3126 {
3127 loop_iterator li;
3128 loop_p loop;
3129 gimple_stmt_iterator psi;
3130 bool result = true;
3131
3132 FOR_EACH_LOOP (li, loop, 0)
3133 {
3134 if (!loop_in_sese_p (loop, SCOP_REGION (scop)))
3135 continue;
3136
3137 for (psi = gsi_start_phis (loop->header);
3138 !gsi_end_p (psi); gsi_next (&psi))
3139 {
3140 gimple phi = gsi_stmt (psi);
3141 tree res = PHI_RESULT (phi);
3142 tree type = TREE_TYPE (res);
3143
3144 if (TYPE_UNSIGNED (type)
3145 && TYPE_PRECISION (type) >= TYPE_PRECISION (long_long_integer_type_node))
3146 {
3147 result = false;
3148 break;
3149 }
3150 }
3151 if (!result)
3152 FOR_EACH_LOOP_BREAK (li);
3153 }
3154
3155 return result;
3156 }
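
/* For instance, on a target where long long is 64 bits wide, a loop
   with an induction variable of type unsigned long long (precision 64)
   is rejected, whereas an unsigned int induction variable (precision
   32) can still be represented by a wider signed type.  */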
3157
3158 /* Builds the polyhedral representation for a SESE region. */
3159
3160 void
3161 build_poly_scop (scop_p scop)
3162 {
3163 sese region = SCOP_REGION (scop);
3164 graphite_dim_t max_dim;
3165
3166 build_scop_bbs (scop);
3167
3168   /* FIXME: This restriction is needed to avoid a problem in CLooG.
3169      Once CLooG is fixed, remove this guard.  Anyway, it makes no
3170      sense to optimize a scop containing only PBBs that do not belong
3171      to any loops.  */
3172 if (nb_pbbs_in_loops (scop) == 0)
3173 return;
3174
3175 if (!scop_ivs_can_be_represented (scop))
3176 return;
3177
3178 if (flag_associative_math)
3179 rewrite_commutative_reductions_out_of_ssa (scop);
3180
3181 build_sese_loop_nests (region);
3182 build_sese_conditions (region);
3183 find_scop_parameters (scop);
3184
3185 max_dim = PARAM_VALUE (PARAM_GRAPHITE_MAX_NB_SCOP_PARAMS);
3186 if (scop_nb_params (scop) > max_dim)
3187 return;
3188
3189 build_scop_iteration_domain (scop);
3190 build_scop_context (scop);
3191 add_conditions_to_constraints (scop);
3192
3193   /* Rewrite out of SSA only after having translated the program to the
3194      polyhedral representation, to avoid scev analysis failures.  This
3195      means that these functions insert the new data references they
3196      create in the right place themselves.  */
3197 rewrite_reductions_out_of_ssa (scop);
3198 rewrite_cross_bb_scalar_deps_out_of_ssa (scop);
3199
3200 build_scop_drs (scop);
3201 scop_to_lst (scop);
3202 build_scop_scattering (scop);
3203
3204 /* This SCoP has been translated to the polyhedral
3205 representation. */
3206 POLY_SCOP_P (scop) = true;
3207 }
3208 #endif