/* Data references and dependences detectors.
   Copyright (C) 2003-2020 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass walks a given loop structure searching for array
   references.  The information about the array accesses is recorded
   in DATA_REFERENCE structures.

   The basic test for determining the dependences is:
   given two access functions chrec1 and chrec2 to the same array, and
   x and y two vectors from the iteration domain, the same element of
   the array is accessed twice at iterations x and y if and only if:
   |   chrec1 (x) == chrec2 (y).
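
   For example (an illustrative loop, not taken from the sources), in

   |   for (i = 0; i < N; i++)
   |     A[i + 1] = A[i] + 1;

   the store has access function chrec1 = {1, +, 1}_1 and the load has
   chrec2 = {0, +, 1}_1; chrec1 (x) == chrec2 (y) holds exactly when
   y == x + 1, i.e. the same element is accessed one iteration apart,
   giving distance vector (1).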

   The goals of this analysis are:

   - to determine the independence: the relation between two
     independent accesses is qualified with chrec_known (this
     information allows loop parallelization),

   - when two data references access the same data, to qualify the
     dependence relation with classic dependence representations:

       - distance vectors
       - direction vectors
       - loop carried level dependence
       - polyhedron dependence
     or with the chains of recurrences based representation,

   - to define a knowledge base for storing the data dependence
     information,

   - to define an interface to access this data.


   Definitions:

   - subscript: given two array accesses, a subscript is the tuple
     composed of the access functions for a given dimension.  Example:
     given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts:
     (f1, g1), (f2, g2), (f3, g3).

   - Diophantine equation: an equation whose coefficients and
     solutions are integer constants, for example the equation
     |   3*x + 2*y = 1
     has an integer solution x = 1 and y = -1.

   References:

   - "Advanced Compilation for High Performance Computing" by Randy
     Allen and Ken Kennedy.
     http://citeseer.ist.psu.edu/goff91practical.html

   - "Loop Transformations for Restructuring Compilers - The Foundations"
     by Utpal Banerjee.


*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "expr.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "dumpfile.h"
#include "tree-affine.h"
#include "builtins.h"
#include "tree-eh.h"
#include "ssa.h"
#include "internal-fn.h"

static struct datadep_stats
{
  int num_dependence_tests;
  int num_dependence_dependent;
  int num_dependence_independent;
  int num_dependence_undetermined;

  int num_subscript_tests;
  int num_subscript_undetermined;
  int num_same_subscript_function;

  int num_ziv;
  int num_ziv_independent;
  int num_ziv_dependent;
  int num_ziv_unimplemented;

  int num_siv;
  int num_siv_independent;
  int num_siv_dependent;
  int num_siv_unimplemented;

  int num_miv;
  int num_miv_independent;
  int num_miv_dependent;
  int num_miv_unimplemented;
} dependence_stats;

static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
					   unsigned int, unsigned int,
					   class loop *);
/* Returns true iff A divides B.  */

static inline bool
tree_fold_divides_p (const_tree a, const_tree b)
{
  gcc_assert (TREE_CODE (a) == INTEGER_CST);
  gcc_assert (TREE_CODE (b) == INTEGER_CST);
  return integer_zerop (int_const_binop (TRUNC_MOD_EXPR, b, a));
}

/* Returns true iff A divides B.  */

static inline bool
int_divides_p (int a, int b)
{
  return ((b % a) == 0);
}

/* Return true if reference REF contains a union access.  */

static bool
ref_contains_union_access_p (tree ref)
{
  while (handled_component_p (ref))
    {
      ref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (ref)) == UNION_TYPE
	  || TREE_CODE (TREE_TYPE (ref)) == QUAL_UNION_TYPE)
	return true;
    }
  return false;
}

\f

/* Dump into FILE all the data references from DATAREFS.  */

static void
dump_data_references (FILE *file, vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    dump_data_reference (file, dr);
}

/* Unified dump function for all the data references in REF, printed
   to stderr.  */

DEBUG_FUNCTION void
debug (vec<data_reference_p> &ref)
{
  dump_data_references (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<data_reference_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dump into STDERR all the data references from DATAREFS.  */

DEBUG_FUNCTION void
debug_data_references (vec<data_reference_p> datarefs)
{
  dump_data_references (stderr, datarefs);
}

/* Print to STDERR the data_reference DR.  */

DEBUG_FUNCTION void
debug_data_reference (struct data_reference *dr)
{
  dump_data_reference (stderr, dr);
}

/* Dump function for a DATA_REFERENCE structure.  */

void
dump_data_reference (FILE *outf,
		     struct data_reference *dr)
{
  unsigned int i;

  fprintf (outf, "#(Data Ref: \n");
  fprintf (outf, "# bb: %d \n", gimple_bb (DR_STMT (dr))->index);
  fprintf (outf, "# stmt: ");
  print_gimple_stmt (outf, DR_STMT (dr), 0);
  fprintf (outf, "# ref: ");
  print_generic_stmt (outf, DR_REF (dr));
  fprintf (outf, "# base_object: ");
  print_generic_stmt (outf, DR_BASE_OBJECT (dr));

  for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
    {
      fprintf (outf, "# Access function %d: ", i);
      print_generic_stmt (outf, DR_ACCESS_FN (dr, i));
    }
  fprintf (outf, "#)\n");
}

/* Unified dump function for a DATA_REFERENCE structure.  */

DEBUG_FUNCTION void
debug (data_reference &ref)
{
  dump_data_reference (stderr, &ref);
}

DEBUG_FUNCTION void
debug (data_reference *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dumps the affine function described by FN to the file OUTF.  */

DEBUG_FUNCTION void
dump_affine_function (FILE *outf, affine_fn fn)
{
  unsigned i;
  tree coef;

  print_generic_expr (outf, fn[0], TDF_SLIM);
  for (i = 1; fn.iterate (i, &coef); i++)
    {
      fprintf (outf, " + ");
      print_generic_expr (outf, coef, TDF_SLIM);
      fprintf (outf, " * x_%u", i);
    }
}

/* Dumps the conflict function CF to the file OUTF.  */

DEBUG_FUNCTION void
dump_conflict_function (FILE *outf, conflict_function *cf)
{
  unsigned i;

  if (cf->n == NO_DEPENDENCE)
    fprintf (outf, "no dependence");
  else if (cf->n == NOT_KNOWN)
    fprintf (outf, "not known");
  else
    {
      for (i = 0; i < cf->n; i++)
	{
	  if (i != 0)
	    fprintf (outf, " ");
	  fprintf (outf, "[");
	  dump_affine_function (outf, cf->fns[i]);
	  fprintf (outf, "]");
	}
    }
}

/* Dump function for a SUBSCRIPT structure.  */

DEBUG_FUNCTION void
dump_subscript (FILE *outf, struct subscript *subscript)
{
  conflict_function *cf = SUB_CONFLICTS_IN_A (subscript);

  fprintf (outf, "\n (subscript \n");
  fprintf (outf, " iterations_that_access_an_element_twice_in_A: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n last_conflict: ");
      print_generic_expr (outf, last_iteration);
    }

  cf = SUB_CONFLICTS_IN_B (subscript);
  fprintf (outf, "\n iterations_that_access_an_element_twice_in_B: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n last_conflict: ");
      print_generic_expr (outf, last_iteration);
    }

  fprintf (outf, "\n (Subscript distance: ");
  print_generic_expr (outf, SUB_DISTANCE (subscript));
  fprintf (outf, " ))\n");
}

/* Print the classic direction vector DIRV to OUTF.  */

DEBUG_FUNCTION void
print_direction_vector (FILE *outf,
			lambda_vector dirv,
			int length)
{
  int eq;

  for (eq = 0; eq < length; eq++)
    {
      enum data_dependence_direction dir = ((enum data_dependence_direction)
					    dirv[eq]);

      switch (dir)
	{
	case dir_positive:
	  fprintf (outf, " +");
	  break;
	case dir_negative:
	  fprintf (outf, " -");
	  break;
	case dir_equal:
	  fprintf (outf, " =");
	  break;
	case dir_positive_or_equal:
	  fprintf (outf, " +=");
	  break;
	case dir_positive_or_negative:
	  fprintf (outf, " +-");
	  break;
	case dir_negative_or_equal:
	  fprintf (outf, " -=");
	  break;
	case dir_star:
	  fprintf (outf, " *");
	  break;
	default:
	  fprintf (outf, "indep");
	  break;
	}
    }
  fprintf (outf, "\n");
}

/* Print a vector of direction vectors.  */

DEBUG_FUNCTION void
print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects,
		   int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dir_vects, j, v)
    print_direction_vector (outf, v, length);
}

/* Print out a vector VEC of length N to OUTFILE.  */

DEBUG_FUNCTION void
print_lambda_vector (FILE * outfile, lambda_vector vector, int n)
{
  int i;

  for (i = 0; i < n; i++)
    fprintf (outfile, "%3d ", (int)vector[i]);
  fprintf (outfile, "\n");
}

/* Print a vector of distance vectors.  */

DEBUG_FUNCTION void
print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects,
		    int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dist_vects, j, v)
    print_lambda_vector (outf, v, length);
}

/* Dump function for a DATA_DEPENDENCE_RELATION structure.  */

DEBUG_FUNCTION void
dump_data_dependence_relation (FILE *outf,
			       struct data_dependence_relation *ddr)
{
  struct data_reference *dra, *drb;

  fprintf (outf, "(Data Dep: \n");

  if (!ddr || DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (ddr)
	{
	  dra = DDR_A (ddr);
	  drb = DDR_B (ddr);
	  if (dra)
	    dump_data_reference (outf, dra);
	  else
	    fprintf (outf, " (nil)\n");
	  if (drb)
	    dump_data_reference (outf, drb);
	  else
	    fprintf (outf, " (nil)\n");
	}
      fprintf (outf, " (don't know)\n)\n");
      return;
    }

  dra = DDR_A (ddr);
  drb = DDR_B (ddr);
  dump_data_reference (outf, dra);
  dump_data_reference (outf, drb);

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    fprintf (outf, " (no dependence)\n");

  else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      unsigned int i;
      class loop *loopi;

      subscript *sub;
      FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
	{
	  fprintf (outf, " access_fn_A: ");
	  print_generic_stmt (outf, SUB_ACCESS_FN (sub, 0));
	  fprintf (outf, " access_fn_B: ");
	  print_generic_stmt (outf, SUB_ACCESS_FN (sub, 1));
	  dump_subscript (outf, sub);
	}

      fprintf (outf, " loop nest: (");
      FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi)
	fprintf (outf, "%d ", loopi->num);
      fprintf (outf, ")\n");

      for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
	{
	  fprintf (outf, " distance_vector: ");
	  print_lambda_vector (outf, DDR_DIST_VECT (ddr, i),
			       DDR_NB_LOOPS (ddr));
	}

      for (i = 0; i < DDR_NUM_DIR_VECTS (ddr); i++)
	{
	  fprintf (outf, " direction_vector: ");
	  print_direction_vector (outf, DDR_DIR_VECT (ddr, i),
				  DDR_NB_LOOPS (ddr));
	}
    }

  fprintf (outf, ")\n");
}

/* Debug version.  */

DEBUG_FUNCTION void
debug_data_dependence_relation (struct data_dependence_relation *ddr)
{
  dump_data_dependence_relation (stderr, ddr);
}

/* Dump into FILE all the dependence relations from DDRS.  */

DEBUG_FUNCTION void
dump_data_dependence_relations (FILE *file,
				vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> &ref)
{
  dump_data_dependence_relations (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dump to STDERR all the dependence relations from DDRS.  */

DEBUG_FUNCTION void
debug_data_dependence_relations (vec<ddr_p> ddrs)
{
  dump_data_dependence_relations (stderr, ddrs);
}

/* Dumps into FILE the distance and direction vectors of the dependence
   relations in DDRS.  The length of each vector is the number of loops
   in the considered nest.  */

DEBUG_FUNCTION void
dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i, j;
  struct data_dependence_relation *ddr;
  lambda_vector v;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr))
      {
	FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DISTANCE_V (");
	    print_lambda_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }

	FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DIRECTION_V (");
	    print_direction_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }
      }

  fprintf (file, "\n\n");
}

/* Dumps the data dependence relations DDRS in FILE.  */

DEBUG_FUNCTION void
dump_ddrs (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);

  fprintf (file, "\n\n");
}

DEBUG_FUNCTION void
debug_ddrs (vec<ddr_p> ddrs)
{
  dump_ddrs (stderr, ddrs);
}

static void
split_constant_offset (tree exp, tree *var, tree *off,
		       hash_map<tree, std::pair<tree, tree> > &cache,
		       unsigned *limit);

/* Helper function for split_constant_offset.  Expresses OP0 CODE OP1
   (the type of the result is TYPE) as VAR + OFF, where OFF is a nonzero
   constant of type ssizetype, and returns true.  If we cannot do this
   with OFF nonzero, OFF and VAR are set to NULL_TREE instead and false
   is returned.  */
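
/* For instance (illustrative values, not taken from the sources): with
   TYPE == ssizetype, "i + 7" splits into VAR = i, OFF = 7, and
   "(i + 7) * 8" splits into VAR = i * 8, OFF = 56, since the MULT_EXPR
   case below multiplies both components of the split operand by the
   constant factor.  */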

static bool
split_constant_offset_1 (tree type, tree op0, enum tree_code code, tree op1,
			 tree *var, tree *off,
			 hash_map<tree, std::pair<tree, tree> > &cache,
			 unsigned *limit)
{
  tree var0, var1;
  tree off0, off1;
  enum tree_code ocode = code;

  *var = NULL_TREE;
  *off = NULL_TREE;

  switch (code)
    {
    case INTEGER_CST:
      *var = build_int_cst (type, 0);
      *off = fold_convert (ssizetype, op0);
      return true;

    case POINTER_PLUS_EXPR:
      ocode = PLUS_EXPR;
      /* FALLTHROUGH */
    case PLUS_EXPR:
    case MINUS_EXPR:
      if (TREE_CODE (op1) == INTEGER_CST)
	{
	  split_constant_offset (op0, &var0, &off0, cache, limit);
	  *var = var0;
	  *off = size_binop (ocode, off0, fold_convert (ssizetype, op1));
	  return true;
	}
      split_constant_offset (op0, &var0, &off0, cache, limit);
      split_constant_offset (op1, &var1, &off1, cache, limit);
      *var = fold_build2 (code, type, var0, var1);
      *off = size_binop (ocode, off0, off1);
      return true;

    case MULT_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST)
	return false;

      split_constant_offset (op0, &var0, &off0, cache, limit);
      *var = fold_build2 (MULT_EXPR, type, var0, op1);
      *off = size_binop (MULT_EXPR, off0, fold_convert (ssizetype, op1));
      return true;

    case ADDR_EXPR:
      {
	tree base, poffset;
	poly_int64 pbitsize, pbitpos, pbytepos;
	machine_mode pmode;
	int punsignedp, preversep, pvolatilep;

	op0 = TREE_OPERAND (op0, 0);
	base
	  = get_inner_reference (op0, &pbitsize, &pbitpos, &poffset, &pmode,
				 &punsignedp, &preversep, &pvolatilep);

	if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos))
	  return false;
	base = build_fold_addr_expr (base);
	off0 = ssize_int (pbytepos);

	if (poffset)
	  {
	    split_constant_offset (poffset, &poffset, &off1, cache, limit);
	    off0 = size_binop (PLUS_EXPR, off0, off1);
	    if (POINTER_TYPE_P (TREE_TYPE (base)))
	      base = fold_build_pointer_plus (base, poffset);
	    else
	      base = fold_build2 (PLUS_EXPR, TREE_TYPE (base), base,
				  fold_convert (TREE_TYPE (base), poffset));
	  }

	var0 = fold_convert (type, base);

	/* If variable-length types are involved, punt: otherwise casts
	   might be converted into ARRAY_REFs in gimplify_conversion.
	   To compute such an ARRAY_REF's element size, TYPE_SIZE_UNIT,
	   which possibly no longer appears in the current GIMPLE, might
	   resurface.  This perhaps could run
	   if (CONVERT_EXPR_P (var0))
	     {
	       gimplify_conversion (&var0);
	       // Attempt to fill in the element size of any ARRAY_REF
	       // found within var0 from the corresponding ARRAY_REF
	       // embedded in op; if unsuccessful, just punt.
	     }  */
	while (POINTER_TYPE_P (type))
	  type = TREE_TYPE (type);
	if (int_size_in_bytes (type) < 0)
	  return false;

	*var = var0;
	*off = off0;
	return true;
      }

    case SSA_NAME:
      {
	if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op0))
	  return false;

	gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
	enum tree_code subcode;

	if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
	  return false;

	subcode = gimple_assign_rhs_code (def_stmt);

	/* We are using a cache to avoid un-CSEing large amounts of code.  */
	bool use_cache = false;
	if (!has_single_use (op0)
	    && (subcode == POINTER_PLUS_EXPR
		|| subcode == PLUS_EXPR
		|| subcode == MINUS_EXPR
		|| subcode == MULT_EXPR
		|| subcode == ADDR_EXPR
		|| CONVERT_EXPR_CODE_P (subcode)))
	  {
	    use_cache = true;
	    bool existed;
	    std::pair<tree, tree> &e = cache.get_or_insert (op0, &existed);
	    if (existed)
	      {
		if (integer_zerop (e.second))
		  return false;
		*var = e.first;
		*off = e.second;
		return true;
	      }
	    e = std::make_pair (op0, ssize_int (0));
	  }

	if (*limit == 0)
	  return false;
	--*limit;

	var0 = gimple_assign_rhs1 (def_stmt);
	var1 = gimple_assign_rhs2 (def_stmt);

	bool res = split_constant_offset_1 (type, var0, subcode, var1,
					    var, off, cache, limit);
	if (res && use_cache)
	  *cache.get (op0) = std::make_pair (*var, *off);
	return res;
      }
    CASE_CONVERT:
      {
	/* We must not introduce undefined overflow, and we must not change
	   the value.  Hence we're okay if the inner type doesn't overflow
	   to start with (pointer or signed), the outer type also is an
	   integer or pointer and the outer precision is at least as large
	   as the inner.  */
	tree itype = TREE_TYPE (op0);
	if ((POINTER_TYPE_P (itype)
	     || (INTEGRAL_TYPE_P (itype) && !TYPE_OVERFLOW_TRAPS (itype)))
	    && TYPE_PRECISION (type) >= TYPE_PRECISION (itype)
	    && (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)))
	  {
	    if (INTEGRAL_TYPE_P (itype) && TYPE_OVERFLOW_WRAPS (itype)
		&& (TYPE_PRECISION (type) > TYPE_PRECISION (itype)
		    || TYPE_UNSIGNED (itype) != TYPE_UNSIGNED (type)))
	      {
		/* Split the unconverted operand and try to prove that
		   wrapping isn't a problem.  */
		tree tmp_var, tmp_off;
		split_constant_offset (op0, &tmp_var, &tmp_off, cache, limit);

		/* See whether we have an SSA_NAME whose range is known
		   to be [A, B].  */
		if (TREE_CODE (tmp_var) != SSA_NAME)
		  return false;
		wide_int var_min, var_max;
		value_range_kind vr_type = get_range_info (tmp_var, &var_min,
							   &var_max);
		wide_int var_nonzero = get_nonzero_bits (tmp_var);
		signop sgn = TYPE_SIGN (itype);
		if (intersect_range_with_nonzero_bits (vr_type, &var_min,
						       &var_max, var_nonzero,
						       sgn) != VR_RANGE)
		  return false;

		/* See whether the range of OP0 (i.e. TMP_VAR + TMP_OFF)
		   is known to be [A + TMP_OFF, B + TMP_OFF], with all
		   operations done in ITYPE.  The addition must overflow
		   at both ends of the range or at neither.  */
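		/* For example (illustrative numbers only): with ITYPE an
		   unsigned 8-bit type, TMP_VAR's range [250, 252] and
		   TMP_OFF 10, both 250 + 10 and 252 + 10 wrap, so OP0's
		   range is [4, 6] and the split below remains valid with
		   OFF = 4 - 250 = -246.  If only one end wrapped, OP0's
		   values would not form a single shifted interval and we
		   would have to give up.  */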
		wi::overflow_type overflow[2];
		unsigned int prec = TYPE_PRECISION (itype);
		wide_int woff = wi::to_wide (tmp_off, prec);
		wide_int op0_min = wi::add (var_min, woff, sgn, &overflow[0]);
		wi::add (var_max, woff, sgn, &overflow[1]);
		if ((overflow[0] != wi::OVF_NONE) != (overflow[1] != wi::OVF_NONE))
		  return false;

		/* Calculate (ssizetype) OP0 - (ssizetype) TMP_VAR.  */
		widest_int diff = (widest_int::from (op0_min, sgn)
				   - widest_int::from (var_min, sgn));
		var0 = tmp_var;
		*off = wide_int_to_tree (ssizetype, diff);
	      }
	    else
	      split_constant_offset (op0, &var0, off, cache, limit);
	    *var = fold_convert (type, var0);
	    return true;
	  }
	return false;
      }

    default:
      return false;
    }
}

/* Expresses EXP as VAR + OFF, where OFF is a constant.  The type of OFF
   will be ssizetype.  */

static void
split_constant_offset (tree exp, tree *var, tree *off,
		       hash_map<tree, std::pair<tree, tree> > &cache,
		       unsigned *limit)
{
  tree type = TREE_TYPE (exp), op0, op1, e, o;
  enum tree_code code;

  *var = exp;
  *off = ssize_int (0);

  if (tree_is_chrec (exp)
      || get_gimple_rhs_class (TREE_CODE (exp)) == GIMPLE_TERNARY_RHS)
    return;

  code = TREE_CODE (exp);
  extract_ops_from_tree (exp, &code, &op0, &op1);
  if (split_constant_offset_1 (type, op0, code, op1, &e, &o, cache, limit))
    {
      *var = e;
      *off = o;
    }
}

void
split_constant_offset (tree exp, tree *var, tree *off)
{
  unsigned limit = param_ssa_name_def_chain_limit;
  static hash_map<tree, std::pair<tree, tree> > *cache;
  if (!cache)
    cache = new hash_map<tree, std::pair<tree, tree> > (37);
  split_constant_offset (exp, var, off, *cache, &limit);
  cache->empty ();
}

/* Returns the address ADDR of an object in a canonical shape (without
   nop casts, and with the type of a pointer to the object).  */
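
/* For example (hypothetical GIMPLE, for illustration only): given
   ADDR = (char *) &a, STRIP_NOPS yields &a, which is then rebuilt by
   build_fold_addr_expr so that its type is a pointer to a's type.
   An address computed by casting from an integer, e.g. (int *) ivar,
   keeps the cast and is returned unchanged.  */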

static tree
canonicalize_base_object_address (tree addr)
{
  tree orig = addr;

  STRIP_NOPS (addr);

  /* The base address may be obtained by casting from integer, in that case
     keep the cast.  */
  if (!POINTER_TYPE_P (TREE_TYPE (addr)))
    return orig;

  if (TREE_CODE (addr) != ADDR_EXPR)
    return addr;

  return build_fold_addr_expr (TREE_OPERAND (addr, 0));
}

/* Analyze the behavior of memory reference REF within STMT.
   There are two modes:

   - BB analysis.  In this case we simply split the address into base,
     init and offset components, without reference to any containing loop.
     The resulting base and offset are general expressions and they can
     vary arbitrarily from one iteration of the containing loop to the next.
     The step is always zero.

   - loop analysis.  In this case we analyze the reference both wrt LOOP
     and on the basis that the reference occurs (is "used") in LOOP;
     see the comment above analyze_scalar_evolution_in_loop for more
     information about this distinction.  The base, init, offset and
     step fields are all invariant in LOOP.

   Perform BB analysis if LOOP is null, or if LOOP is the function's
   dummy outermost loop.  In other cases perform loop analysis.

   Return true if the analysis succeeded and store the results in DRB if so.
   BB analysis can only fail for bitfield or reversed-storage accesses.  */
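
/* As a sketch of the loop-analysis mode (illustrative values only): for
   an access a[i + 1] to a global array of 4-byte ints, analyzed in the
   loop over i, one would expect roughly

   |   base_address: &a
   |   offset:       0
   |   init:         4
   |   step:         4

   i.e. the constant displacement ends up in INIT and STEP is the
   per-iteration advance in bytes.  */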

opt_result
dr_analyze_innermost (innermost_loop_behavior *drb, tree ref,
		      class loop *loop, const gimple *stmt)
{
  poly_int64 pbitsize, pbitpos;
  tree base, poffset;
  machine_mode pmode;
  int punsignedp, preversep, pvolatilep;
  affine_iv base_iv, offset_iv;
  tree init, dinit, step;
  bool in_loop = (loop && loop->num);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "analyze_innermost: ");

  base = get_inner_reference (ref, &pbitsize, &pbitpos, &poffset, &pmode,
			      &punsignedp, &preversep, &pvolatilep);
  gcc_assert (base != NULL_TREE);

  poly_int64 pbytepos;
  if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos))
    return opt_result::failure_at (stmt,
				   "failed: bit offset alignment.\n");

  if (preversep)
    return opt_result::failure_at (stmt,
				   "failed: reverse storage order.\n");

  /* Calculate the alignment and misalignment for the inner reference.  */
  unsigned HOST_WIDE_INT bit_base_misalignment;
  unsigned int bit_base_alignment;
  get_object_alignment_1 (base, &bit_base_alignment, &bit_base_misalignment);

  /* There are no bitfield references remaining in BASE, so the values
     we got back must be whole bytes.  */
  gcc_assert (bit_base_alignment % BITS_PER_UNIT == 0
	      && bit_base_misalignment % BITS_PER_UNIT == 0);
  unsigned int base_alignment = bit_base_alignment / BITS_PER_UNIT;
  poly_int64 base_misalignment = bit_base_misalignment / BITS_PER_UNIT;

  if (TREE_CODE (base) == MEM_REF)
    {
      if (!integer_zerop (TREE_OPERAND (base, 1)))
	{
	  /* Subtract MOFF from the base and add it to POFFSET instead.
	     Adjust the misalignment to reflect the amount we subtracted.  */
	  poly_offset_int moff = mem_ref_offset (base);
	  base_misalignment -= moff.force_shwi ();
	  tree mofft = wide_int_to_tree (sizetype, moff);
	  if (!poffset)
	    poffset = mofft;
	  else
	    poffset = size_binop (PLUS_EXPR, poffset, mofft);
	}
      base = TREE_OPERAND (base, 0);
    }
  else
    base = build_fold_addr_expr (base);

  if (in_loop)
    {
      if (!simple_iv (loop, loop, base, &base_iv, true))
	return opt_result::failure_at
	  (stmt, "failed: evolution of base is not affine.\n");
    }
  else
    {
      base_iv.base = base;
      base_iv.step = ssize_int (0);
      base_iv.no_overflow = true;
    }

  if (!poffset)
    {
      offset_iv.base = ssize_int (0);
      offset_iv.step = ssize_int (0);
    }
  else
    {
      if (!in_loop)
	{
	  offset_iv.base = poffset;
	  offset_iv.step = ssize_int (0);
	}
      else if (!simple_iv (loop, loop, poffset, &offset_iv, true))
	return opt_result::failure_at
	  (stmt, "failed: evolution of offset is not affine.\n");
    }

  init = ssize_int (pbytepos);

  /* Subtract any constant component from the base and add it to INIT instead.
     Adjust the misalignment to reflect the amount we subtracted.  */
  split_constant_offset (base_iv.base, &base_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);
  base_misalignment -= TREE_INT_CST_LOW (dinit);

  split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);

  step = size_binop (PLUS_EXPR,
		     fold_convert (ssizetype, base_iv.step),
		     fold_convert (ssizetype, offset_iv.step));

  base = canonicalize_base_object_address (base_iv.base);

  /* See if get_pointer_alignment can guarantee a higher alignment than
     the one we calculated above.  */
  unsigned HOST_WIDE_INT alt_misalignment;
  unsigned int alt_alignment;
  get_pointer_alignment_1 (base, &alt_alignment, &alt_misalignment);

  /* As above, these values must be whole bytes.  */
  gcc_assert (alt_alignment % BITS_PER_UNIT == 0
	      && alt_misalignment % BITS_PER_UNIT == 0);
  alt_alignment /= BITS_PER_UNIT;
  alt_misalignment /= BITS_PER_UNIT;

  if (base_alignment < alt_alignment)
    {
      base_alignment = alt_alignment;
      base_misalignment = alt_misalignment;
    }

  drb->base_address = base;
  drb->offset = fold_convert (ssizetype, offset_iv.base);
  drb->init = init;
  drb->step = step;
  if (known_misalignment (base_misalignment, base_alignment,
			  &drb->base_misalignment))
    drb->base_alignment = base_alignment;
  else
    {
      drb->base_alignment = known_alignment (base_misalignment);
      drb->base_misalignment = 0;
    }
  drb->offset_alignment = highest_pow2_factor (offset_iv.base);
  drb->step_alignment = highest_pow2_factor (step);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "success.\n");

  return opt_result::success ();
}

/* Return true if OP is a valid component reference for a DR access
   function.  This accepts a subset of what handled_component_p accepts.  */

static bool
access_fn_component_p (tree op)
{
  switch (TREE_CODE (op))
    {
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case ARRAY_REF:
      return true;

    case COMPONENT_REF:
      return TREE_CODE (TREE_TYPE (TREE_OPERAND (op, 0))) == RECORD_TYPE;

    default:
      return false;
    }
}

/* Determines the base object and the list of indices of memory reference
   DR, analyzed in LOOP and instantiated before NEST.  */
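
/* For instance (an illustrative access, not taken from the sources):
   for a[i][j] analyzed in the nest over i and j, one would expect the
   base object a and the two independent access functions

   |   Access function 0: {0, +, 1}_2   (the j subscript)
   |   Access function 1: {0, +, 1}_1   (the i subscript)

   since the innermost subscript is pushed first while walking the
   component references from the outside in.  */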

static void
dr_analyze_indices (struct data_reference *dr, edge nest, loop_p loop)
{
  vec<tree> access_fns = vNULL;
  tree ref, op;
  tree base, off, access_fn;

  /* If analyzing a basic-block there are no indices to analyze
     and thus no access functions.  */
  if (!nest)
    {
      DR_BASE_OBJECT (dr) = DR_REF (dr);
      DR_ACCESS_FNS (dr).create (0);
      return;
    }

  ref = DR_REF (dr);

  /* REALPART_EXPR and IMAGPART_EXPR can be handled like accesses
     into a two element array with a constant index.  The base is
     then just the immediate underlying object.  */
  if (TREE_CODE (ref) == REALPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_zero_node);
    }
  else if (TREE_CODE (ref) == IMAGPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_one_node);
    }

  /* Analyze access functions of dimensions we know to be independent.
     The list of component references handled here should be kept in
     sync with access_fn_component_p.  */
  while (handled_component_p (ref))
    {
      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  op = TREE_OPERAND (ref, 1);
	  access_fn = analyze_scalar_evolution (loop, op);
	  access_fn = instantiate_scev (nest, loop, access_fn);
	  access_fns.safe_push (access_fn);
	}
      else if (TREE_CODE (ref) == COMPONENT_REF
	       && TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == RECORD_TYPE)
	{
	  /* For COMPONENT_REFs of records (but not unions!) use the
	     FIELD_DECL offset as constant access function so we can
	     disambiguate a[i].f1 and a[i].f2.  */
	  tree off = component_ref_field_offset (ref);
	  off = size_binop (PLUS_EXPR,
			    size_binop (MULT_EXPR,
					fold_convert (bitsizetype, off),
					bitsize_int (BITS_PER_UNIT)),
			    DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)));
	  access_fns.safe_push (off);
	}
      else
	/* If we have an unhandled component we could not translate
	   to an access function stop analyzing.  We have determined
	   our base object in this case.  */
	break;

      ref = TREE_OPERAND (ref, 0);
    }

  /* If the address operand of a MEM_REF base has an evolution in the
     analyzed nest, add it as an additional independent access-function.  */
  if (TREE_CODE (ref) == MEM_REF)
    {
      op = TREE_OPERAND (ref, 0);
      access_fn = analyze_scalar_evolution (loop, op);
      access_fn = instantiate_scev (nest, loop, access_fn);
      if (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
	{
	  tree orig_type;
	  tree memoff = TREE_OPERAND (ref, 1);
	  base = initial_condition (access_fn);
	  orig_type = TREE_TYPE (base);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  split_constant_offset (base, &base, &off);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  /* Fold the MEM_REF offset into the evolution's initial
	     value to make more bases comparable.  */
	  if (!integer_zerop (memoff))
	    {
	      off = size_binop (PLUS_EXPR, off,
				fold_convert (ssizetype, memoff));
	      memoff = build_int_cst (TREE_TYPE (memoff), 0);
	    }
	  /* Adjust the offset so it is a multiple of the access type
	     size and thus we separate bases that can possibly be used
	     to produce partial overlaps (which the access_fn machinery
	     cannot handle).  */
	  wide_int rem;
	  if (TYPE_SIZE_UNIT (TREE_TYPE (ref))
	      && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST
	      && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref))))
	    rem = wi::mod_trunc
	      (wi::to_wide (off),
	       wi::to_wide (TYPE_SIZE_UNIT (TREE_TYPE (ref))),
	       SIGNED);
	  else
	    /* If we can't compute the remainder simply force the initial
	       condition to zero.  */
	    rem = wi::to_wide (off);
	  off = wide_int_to_tree (ssizetype, wi::to_wide (off) - rem);
	  memoff = wide_int_to_tree (TREE_TYPE (memoff), rem);
	  /* And finally replace the initial condition.  */
	  access_fn = chrec_replace_initial_condition
	    (access_fn, fold_convert (orig_type, off));
	  /* ??? This is still not a suitable base object for
	     dr_may_alias_p - the base object needs to be an
	     access that covers the object as a whole.  With
	     an evolution in the pointer this cannot be
	     guaranteed.
	     As a band-aid, mark the access so we can special-case
	     it in dr_may_alias_p.  */
	  tree old = ref;
	  ref = fold_build2_loc (EXPR_LOCATION (ref),
				 MEM_REF, TREE_TYPE (ref),
				 base, memoff);
	  MR_DEPENDENCE_CLIQUE (ref) = MR_DEPENDENCE_CLIQUE (old);
	  MR_DEPENDENCE_BASE (ref) = MR_DEPENDENCE_BASE (old);
	  DR_UNCONSTRAINED_BASE (dr) = true;
	  access_fns.safe_push (access_fn);
	}
    }
  else if (DECL_P (ref))
    {
      /* Canonicalize DR_BASE_OBJECT to MEM_REF form.  */
      ref = build2 (MEM_REF, TREE_TYPE (ref),
		    build_fold_addr_expr (ref),
		    build_int_cst (reference_alias_ptr_type (ref), 0));
    }

  DR_BASE_OBJECT (dr) = ref;
  DR_ACCESS_FNS (dr) = access_fns;
}

/* Extracts the alias analysis information from the memory reference DR.  */

static void
dr_analyze_alias (struct data_reference *dr)
{
  tree ref = DR_REF (dr);
  tree base = get_base_address (ref), addr;

  if (INDIRECT_REF_P (base)
      || TREE_CODE (base) == MEM_REF)
    {
      addr = TREE_OPERAND (base, 0);
      if (TREE_CODE (addr) == SSA_NAME)
	DR_PTR_INFO (dr) = SSA_NAME_PTR_INFO (addr);
    }
}

/* Frees data reference DR.  */

void
free_data_ref (data_reference_p dr)
{
  DR_ACCESS_FNS (dr).release ();
  free (dr);
}

/* Analyze memory reference MEMREF, which is accessed in STMT.
   The reference is a read if IS_READ is true, otherwise it is a write.
   IS_CONDITIONAL_IN_STMT indicates that the reference is conditional
   within STMT, i.e. that it might not occur even if STMT is executed
   and runs to completion.

   Return the data_reference description of MEMREF.  NEST is the outermost
   loop in which the reference should be instantiated, LOOP is the loop
   in which the data reference should be analyzed.  */

struct data_reference *
create_data_ref (edge nest, loop_p loop, tree memref, gimple *stmt,
		 bool is_read, bool is_conditional_in_stmt)
{
  struct data_reference *dr;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Creating dr for ");
      print_generic_expr (dump_file, memref, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  dr = XCNEW (struct data_reference);
  DR_STMT (dr) = stmt;
  DR_REF (dr) = memref;
  DR_IS_READ (dr) = is_read;
  DR_IS_CONDITIONAL_IN_STMT (dr) = is_conditional_in_stmt;

  dr_analyze_innermost (&DR_INNERMOST (dr), memref,
			nest != NULL ? loop : NULL, stmt);
  dr_analyze_indices (dr, nest, loop);
  dr_analyze_alias (dr);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      unsigned i;
      fprintf (dump_file, "\tbase_address: ");
      print_generic_expr (dump_file, DR_BASE_ADDRESS (dr), TDF_SLIM);
      fprintf (dump_file, "\n\toffset from base address: ");
      print_generic_expr (dump_file, DR_OFFSET (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tconstant offset from base address: ");
      print_generic_expr (dump_file, DR_INIT (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tstep: ");
      print_generic_expr (dump_file, DR_STEP (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tbase alignment: %d", DR_BASE_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tbase misalignment: %d",
	       DR_BASE_MISALIGNMENT (dr));
      fprintf (dump_file, "\n\toffset alignment: %d",
	       DR_OFFSET_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tstep alignment: %d", DR_STEP_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tbase_object: ");
      print_generic_expr (dump_file, DR_BASE_OBJECT (dr), TDF_SLIM);
      fprintf (dump_file, "\n");
      for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
	{
	  fprintf (dump_file, "\tAccess function %d: ", i);
	  print_generic_stmt (dump_file, DR_ACCESS_FN (dr, i), TDF_SLIM);
	}
    }

  return dr;
}

/* A helper function that computes the order between two tree expressions
   T1 and T2.  It is used in comparator functions when sorting objects
   based on the order of tree expressions.  The function returns -1, 0,
   or 1.  */

int
data_ref_compare_tree (tree t1, tree t2)
{
  int i, cmp;
  enum tree_code code;
  char tclass;

  if (t1 == t2)
    return 0;
  if (t1 == NULL)
    return -1;
  if (t2 == NULL)
    return 1;

  STRIP_USELESS_TYPE_CONVERSION (t1);
  STRIP_USELESS_TYPE_CONVERSION (t2);
  if (t1 == t2)
    return 0;

  if (TREE_CODE (t1) != TREE_CODE (t2)
      && ! (CONVERT_EXPR_P (t1) && CONVERT_EXPR_P (t2)))
    return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;

  code = TREE_CODE (t1);
  switch (code)
    {
    case INTEGER_CST:
      return tree_int_cst_compare (t1, t2);

    case STRING_CST:
      if (TREE_STRING_LENGTH (t1) != TREE_STRING_LENGTH (t2))
	return TREE_STRING_LENGTH (t1) < TREE_STRING_LENGTH (t2) ? -1 : 1;
      return memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
		     TREE_STRING_LENGTH (t1));

    case SSA_NAME:
      if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
	return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
      break;

    default:
      if (POLY_INT_CST_P (t1))
	return compare_sizes_for_sort (wi::to_poly_widest (t1),
				       wi::to_poly_widest (t2));

      tclass = TREE_CODE_CLASS (code);

      /* For decls, compare their UIDs.  */
      if (tclass == tcc_declaration)
	{
	  if (DECL_UID (t1) != DECL_UID (t2))
	    return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
	  break;
	}
      /* For expressions, compare their operands recursively.  */
      else if (IS_EXPR_CODE_CLASS (tclass))
	{
	  for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
	    {
	      cmp = data_ref_compare_tree (TREE_OPERAND (t1, i),
					   TREE_OPERAND (t2, i));
	      if (cmp != 0)
		return cmp;
	    }
	}
      else
	gcc_unreachable ();
    }

  return 0;
}

/* Return TRUE if it's possible to resolve data dependence DDR by a
   runtime alias check.  */

opt_result
runtime_alias_check_p (ddr_p ddr, class loop *loop, bool speed_p)
{
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "consider run-time aliasing test between %T and %T\n",
		 DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));

  if (!speed_p)
    return opt_result::failure_at (DR_STMT (DDR_A (ddr)),
				   "runtime alias check not supported when"
				   " optimizing for size.\n");

  /* FORNOW: We don't support versioning with outer-loop in either
     vectorization or loop distribution.  */
  if (loop != NULL && loop->inner != NULL)
    return opt_result::failure_at (DR_STMT (DDR_A (ddr)),
				   "runtime alias check not supported for"
				   " outer loop.\n");

  return opt_result::success ();
}

/* Operator == between two dr_with_seg_len objects.

   This equality operator is used to check whether two data refs
   are the same, so that we can consider combining the aliasing
   checks of two pairs of data-dependent data refs.  */

static bool
operator == (const dr_with_seg_len& d1,
	     const dr_with_seg_len& d2)
{
  return (operand_equal_p (DR_BASE_ADDRESS (d1.dr),
			   DR_BASE_ADDRESS (d2.dr), 0)
	  && data_ref_compare_tree (DR_OFFSET (d1.dr), DR_OFFSET (d2.dr)) == 0
	  && data_ref_compare_tree (DR_INIT (d1.dr), DR_INIT (d2.dr)) == 0
	  && data_ref_compare_tree (d1.seg_len, d2.seg_len) == 0
	  && known_eq (d1.access_size, d2.access_size)
	  && d1.align == d2.align);
}

/* Comparison function for sorting objects of dr_with_seg_len_pair_t
   so that we can combine aliasing checks in one scan.  */

static int
comp_dr_with_seg_len_pair (const void *pa_, const void *pb_)
{
  const dr_with_seg_len_pair_t* pa = (const dr_with_seg_len_pair_t *) pa_;
  const dr_with_seg_len_pair_t* pb = (const dr_with_seg_len_pair_t *) pb_;
  const dr_with_seg_len &a1 = pa->first, &a2 = pa->second;
  const dr_with_seg_len &b1 = pb->first, &b2 = pb->second;

  /* For DR pairs (a, b) and (c, d), we only consider merging the alias
     checks if a and c have the same base address and step, and b and d
     have the same base address and step.  Therefore, if either a and c
     or b and d do not share the same address and step, we don't care
     about the order of those two pairs after sorting.  */
  int comp_res;

  if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a1.dr),
					 DR_BASE_ADDRESS (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a2.dr),
					 DR_BASE_ADDRESS (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_STEP (a1.dr),
					 DR_STEP (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_STEP (a2.dr),
					 DR_STEP (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_OFFSET (a1.dr),
					 DR_OFFSET (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_INIT (a1.dr),
					 DR_INIT (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_OFFSET (a2.dr),
					 DR_OFFSET (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_INIT (a2.dr),
					 DR_INIT (b2.dr))) != 0)
    return comp_res;

  return 0;
}

/* Dump information about ALIAS_PAIR, indenting each line by INDENT.  */

static void
dump_alias_pair (dr_with_seg_len_pair_t *alias_pair, const char *indent)
{
  dump_printf (MSG_NOTE, "%sreference: %T vs. %T\n", indent,
	       DR_REF (alias_pair->first.dr),
	       DR_REF (alias_pair->second.dr));

  dump_printf (MSG_NOTE, "%ssegment length: %T", indent,
	       alias_pair->first.seg_len);
  if (!operand_equal_p (alias_pair->first.seg_len,
			alias_pair->second.seg_len, 0))
    dump_printf (MSG_NOTE, " vs. %T", alias_pair->second.seg_len);

  dump_printf (MSG_NOTE, "\n%saccess size: ", indent);
  dump_dec (MSG_NOTE, alias_pair->first.access_size);
  if (maybe_ne (alias_pair->first.access_size, alias_pair->second.access_size))
    {
      dump_printf (MSG_NOTE, " vs. ");
      dump_dec (MSG_NOTE, alias_pair->second.access_size);
    }

  dump_printf (MSG_NOTE, "\n%salignment: %d", indent,
	       alias_pair->first.align);
  if (alias_pair->first.align != alias_pair->second.align)
    dump_printf (MSG_NOTE, " vs. %d", alias_pair->second.align);

  dump_printf (MSG_NOTE, "\n%sflags: ", indent);
  if (alias_pair->flags & DR_ALIAS_RAW)
    dump_printf (MSG_NOTE, " RAW");
  if (alias_pair->flags & DR_ALIAS_WAR)
    dump_printf (MSG_NOTE, " WAR");
  if (alias_pair->flags & DR_ALIAS_WAW)
    dump_printf (MSG_NOTE, " WAW");
  if (alias_pair->flags & DR_ALIAS_ARBITRARY)
    dump_printf (MSG_NOTE, " ARBITRARY");
  if (alias_pair->flags & DR_ALIAS_SWAPPED)
    dump_printf (MSG_NOTE, " SWAPPED");
  if (alias_pair->flags & DR_ALIAS_UNSWAPPED)
    dump_printf (MSG_NOTE, " UNSWAPPED");
  if (alias_pair->flags & DR_ALIAS_MIXED_STEPS)
    dump_printf (MSG_NOTE, " MIXED_STEPS");
  if (alias_pair->flags == 0)
    dump_printf (MSG_NOTE, " <none>");
  dump_printf (MSG_NOTE, "\n");
}

/* Merge alias checks recorded in ALIAS_PAIRS and remove redundant ones.
   FACTOR is the number of iterations for which each data reference is
   accessed.

   Basically, for each pair of dependent data refs store_ptr_0 & load_ptr_0,
   we create an expression:

   ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
   || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))

   for aliasing checks.  However, in some cases we can decrease the number
   of checks by combining two checks into one.  For example, suppose we have
   another pair of data refs store_ptr_0 & load_ptr_1, and if the following
   condition is satisfied:

   load_ptr_0 < load_ptr_1  &&
   load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0

   (this condition means, in each iteration of vectorized loop, the accessed
   memory of store_ptr_0 cannot be between the memory of load_ptr_0 and
   load_ptr_1.)

   we can then use only the following expression to finish the aliasing
   checks between store_ptr_0 & load_ptr_0 and store_ptr_0 & load_ptr_1:

   ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
   || (load_ptr_1 + load_segment_length_1 <= store_ptr_0))

   Note that we only consider the case in which load_ptr_0 and load_ptr_1
   have the same base address.  */
1535
1536 void
1537 prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs,
1538 poly_uint64)
1539 {
1540 if (alias_pairs->is_empty ())
1541 return;
1542
1543 /* Canonicalize each pair so that the base components are ordered wrt
1544 data_ref_compare_tree. This allows the loop below to merge more
1545 cases. */
1546 unsigned int i;
1547 dr_with_seg_len_pair_t *alias_pair;
1548 FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
1549 {
1550 data_reference_p dr_a = alias_pair->first.dr;
1551 data_reference_p dr_b = alias_pair->second.dr;
1552 int comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a),
1553 DR_BASE_ADDRESS (dr_b));
1554 if (comp_res == 0)
1555 comp_res = data_ref_compare_tree (DR_OFFSET (dr_a), DR_OFFSET (dr_b));
1556 if (comp_res == 0)
1557 comp_res = data_ref_compare_tree (DR_INIT (dr_a), DR_INIT (dr_b));
1558 if (comp_res > 0)
1559 {
1560 std::swap (alias_pair->first, alias_pair->second);
1561 alias_pair->flags |= DR_ALIAS_SWAPPED;
1562 }
1563 else
1564 alias_pair->flags |= DR_ALIAS_UNSWAPPED;
1565 }
1566
1567 /* Sort the collected data ref pairs so that we can scan them once to
1568 combine all possible aliasing checks. */
1569 alias_pairs->qsort (comp_dr_with_seg_len_pair);
1570
1571 /* Scan the sorted dr pairs and check if we can combine alias checks
1572 of two neighboring dr pairs. */
1573 unsigned int last = 0;
1574 for (i = 1; i < alias_pairs->length (); ++i)
1575 {
1576 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
1577 dr_with_seg_len_pair_t *alias_pair1 = &(*alias_pairs)[last];
1578 dr_with_seg_len_pair_t *alias_pair2 = &(*alias_pairs)[i];
1579
1580 dr_with_seg_len *dr_a1 = &alias_pair1->first;
1581 dr_with_seg_len *dr_b1 = &alias_pair1->second;
1582 dr_with_seg_len *dr_a2 = &alias_pair2->first;
1583 dr_with_seg_len *dr_b2 = &alias_pair2->second;
1584
1585 /* Remove duplicate data ref pairs. */
1586 if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
1587 {
1588 if (dump_enabled_p ())
1589 dump_printf (MSG_NOTE, "found equal ranges %T, %T and %T, %T\n",
1590 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
1591 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
1592 alias_pair1->flags |= alias_pair2->flags;
1593 continue;
1594 }
1595
1596 /* Assume that we won't be able to merge the pairs, then correct
1597 if we do. */
1598 last += 1;
1599 if (last != i)
1600 (*alias_pairs)[last] = (*alias_pairs)[i];
1601
1602 if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
1603 {
	  /* We consider the case in which DR_B1 and DR_B2 are the same
	     memrefs, and DR_A1 and DR_A2 are two consecutive memrefs.  */
	  if (*dr_a1 == *dr_a2)
	    {
	      std::swap (dr_a1, dr_b1);
	      std::swap (dr_a2, dr_b2);
	    }

	  poly_int64 init_a1, init_a2;
	  /* Only consider cases in which the distance between the initial
	     DR_A1 and the initial DR_A2 is known at compile time.  */
	  if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
				DR_BASE_ADDRESS (dr_a2->dr), 0)
	      || !operand_equal_p (DR_OFFSET (dr_a1->dr),
				   DR_OFFSET (dr_a2->dr), 0)
	      || !poly_int_tree_p (DR_INIT (dr_a1->dr), &init_a1)
	      || !poly_int_tree_p (DR_INIT (dr_a2->dr), &init_a2))
	    continue;

	  /* Don't combine if we can't tell which one comes first.  */
	  if (!ordered_p (init_a1, init_a2))
	    continue;

	  /* Work out what the segment length would be if we did combine
	     DR_A1 and DR_A2:

	     - If DR_A1 and DR_A2 have equal lengths, that length is
	       also the combined length.

	     - If DR_A1 and DR_A2 both have negative "lengths", the combined
	       length is the lower bound on those lengths.

	     - If DR_A1 and DR_A2 both have positive lengths, the combined
	       length is the upper bound on those lengths.

	     Other cases are unlikely to give a useful combination.

	     The lengths both have sizetype, so the sign is taken from
	     the step instead.  */
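	  /* E.g. (made-up numbers): if DR_A1 and DR_A2 both have positive
	     steps and segment lengths 16 and 32, the combined segment
	     length is 32, the upper bound, so the merged check still
	     covers both of the original segments.  */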
	  poly_uint64 new_seg_len = 0;
	  bool new_seg_len_p = !operand_equal_p (dr_a1->seg_len,
						 dr_a2->seg_len, 0);
	  if (new_seg_len_p)
	    {
	      poly_uint64 seg_len_a1, seg_len_a2;
	      if (!poly_int_tree_p (dr_a1->seg_len, &seg_len_a1)
		  || !poly_int_tree_p (dr_a2->seg_len, &seg_len_a2))
		continue;

	      tree indicator_a = dr_direction_indicator (dr_a1->dr);
	      if (TREE_CODE (indicator_a) != INTEGER_CST)
		continue;

	      tree indicator_b = dr_direction_indicator (dr_a2->dr);
	      if (TREE_CODE (indicator_b) != INTEGER_CST)
		continue;

	      int sign_a = tree_int_cst_sgn (indicator_a);
	      int sign_b = tree_int_cst_sgn (indicator_b);

	      if (sign_a <= 0 && sign_b <= 0)
		new_seg_len = lower_bound (seg_len_a1, seg_len_a2);
	      else if (sign_a >= 0 && sign_b >= 0)
		new_seg_len = upper_bound (seg_len_a1, seg_len_a2);
	      else
		continue;
	    }
	  /* At this point we're committed to merging the refs.  */

	  /* Make sure dr_a1 starts left of dr_a2.  */
	  if (maybe_gt (init_a1, init_a2))
	    {
	      std::swap (*dr_a1, *dr_a2);
	      std::swap (init_a1, init_a2);
	    }

	  /* The DR_Bs are equal, so only the DR_As can introduce
	     mixed steps.  */
	  if (!operand_equal_p (DR_STEP (dr_a1->dr), DR_STEP (dr_a2->dr), 0))
	    alias_pair1->flags |= DR_ALIAS_MIXED_STEPS;

	  if (new_seg_len_p)
	    {
	      dr_a1->seg_len = build_int_cst (TREE_TYPE (dr_a1->seg_len),
					      new_seg_len);
	      dr_a1->align = MIN (dr_a1->align, known_alignment (new_seg_len));
	    }

	  /* This is always positive due to the swap above.  */
	  poly_uint64 diff = init_a2 - init_a1;

	  /* The new check will start at DR_A1.  Make sure that its access
	     size encompasses the initial DR_A2.  */
	  if (maybe_lt (dr_a1->access_size, diff + dr_a2->access_size))
	    {
	      dr_a1->access_size = upper_bound (dr_a1->access_size,
						diff + dr_a2->access_size);
	      unsigned int new_align = known_alignment (dr_a1->access_size);
	      dr_a1->align = MIN (dr_a1->align, new_align);
	    }
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "merging ranges for %T, %T and %T, %T\n",
			 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
			 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
	  alias_pair1->flags |= alias_pair2->flags;
	  last -= 1;
	}
    }
  alias_pairs->truncate (last + 1);

  /* Try to restore the original dr_with_seg_len order within each
     dr_with_seg_len_pair_t.  If we ended up combining swapped and
     unswapped pairs into the same check, we have to invalidate any
     RAW, WAR and WAW information for it.  */
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE, "merged alias checks:\n");
  FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
    {
      unsigned int swap_mask = (DR_ALIAS_SWAPPED | DR_ALIAS_UNSWAPPED);
      unsigned int swapped = (alias_pair->flags & swap_mask);
      if (swapped == DR_ALIAS_SWAPPED)
	std::swap (alias_pair->first, alias_pair->second);
      else if (swapped != DR_ALIAS_UNSWAPPED)
	alias_pair->flags |= DR_ALIAS_ARBITRARY;
      alias_pair->flags &= ~swap_mask;
      if (dump_enabled_p ())
	dump_alias_pair (alias_pair, "  ");
    }
}

/* A subroutine of create_intersect_range_checks, with a subset of the
   same arguments.  Try to use IFN_CHECK_RAW_PTRS and IFN_CHECK_WAR_PTRS
   to optimize cases in which the references form a simple RAW, WAR or
   WAW dependence.  */
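
/* On targets that support these internal functions, the generated
   condition is a call of the form (illustrative GIMPLE; the operand
   names are made up)

   |   _1 = .CHECK_WAR_PTRS (addr_a, addr_b, full_length, align);

   which evaluates to true when the two accesses are independent enough
   for the given dependence kind.  */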

static bool
create_ifn_alias_checks (tree *cond_expr,
			 const dr_with_seg_len_pair_t &alias_pair)
{
  const dr_with_seg_len& dr_a = alias_pair.first;
  const dr_with_seg_len& dr_b = alias_pair.second;

  /* Check for cases in which:

     (a) we have a known RAW, WAR or WAW dependence;
     (b) the accesses are well-ordered in both the original and new code
	 (see the comment above the DR_ALIAS_* flags for details); and
     (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR.  */
  if (alias_pair.flags & ~(DR_ALIAS_RAW | DR_ALIAS_WAR | DR_ALIAS_WAW))
    return false;

  /* Make sure that both DRs access the same pattern of bytes,
     with a constant length and step.  */
  poly_uint64 seg_len;
  if (!operand_equal_p (dr_a.seg_len, dr_b.seg_len, 0)
      || !poly_int_tree_p (dr_a.seg_len, &seg_len)
      || maybe_ne (dr_a.access_size, dr_b.access_size)
      || !operand_equal_p (DR_STEP (dr_a.dr), DR_STEP (dr_b.dr), 0)
      || !tree_fits_uhwi_p (DR_STEP (dr_a.dr)))
    return false;

  unsigned HOST_WIDE_INT bytes = tree_to_uhwi (DR_STEP (dr_a.dr));
  tree addr_a = DR_BASE_ADDRESS (dr_a.dr);
  tree addr_b = DR_BASE_ADDRESS (dr_b.dr);

  /* See whether the target supports what we want to do.  WAW checks are
     equivalent to WAR checks here.  */
1771 internal_fn ifn = (alias_pair.flags & DR_ALIAS_RAW
1772 ? IFN_CHECK_RAW_PTRS
1773 : IFN_CHECK_WAR_PTRS);
1774 unsigned int align = MIN (dr_a.align, dr_b.align);
1775 poly_uint64 full_length = seg_len + bytes;
1776 if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a),
1777 full_length, align))
1778 {
1779 full_length = seg_len + dr_a.access_size;
1780 if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a),
1781 full_length, align))
1782 return false;
1783 }
1784
1785 /* Commit to using this form of test. */
1786 addr_a = fold_build_pointer_plus (addr_a, DR_OFFSET (dr_a.dr));
1787 addr_a = fold_build_pointer_plus (addr_a, DR_INIT (dr_a.dr));
1788
1789 addr_b = fold_build_pointer_plus (addr_b, DR_OFFSET (dr_b.dr));
1790 addr_b = fold_build_pointer_plus (addr_b, DR_INIT (dr_b.dr));
1791
1792 *cond_expr = build_call_expr_internal_loc (UNKNOWN_LOCATION,
1793 ifn, boolean_type_node,
1794 4, addr_a, addr_b,
1795 size_int (full_length),
1796 size_int (align));
1797
1798 if (dump_enabled_p ())
1799 {
1800 if (ifn == IFN_CHECK_RAW_PTRS)
1801 dump_printf (MSG_NOTE, "using an IFN_CHECK_RAW_PTRS test\n");
1802 else
1803 dump_printf (MSG_NOTE, "using an IFN_CHECK_WAR_PTRS test\n");
1804 }
1805 return true;
1806 }
1807
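/* For illustration (a sketch of the shape, not the exact emitted GIMPLE):
   for a pair with a known RAW dependence the condition built above
   appears in dumps as

     .CHECK_RAW_PTRS (addr_a, addr_b, full_length, align)

   and the target expands it to a cheap address comparison that is true
   whenever the vector accesses preserve the scalar ordering.  */
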
1808 /* Try to generate a runtime condition that is true if ALIAS_PAIR is
1809 free of aliases, using a condition based on index values instead
1810 of a condition based on addresses. Return true on success,
1811 storing the condition in *COND_EXPR.
1812
1813 This can only be done if the two data references in ALIAS_PAIR access
1814 the same array object and the index is the only difference. For example,
1815 if the two data references are DR_A and DR_B:
1816
1817 DR_A DR_B
1818 data-ref arr[i] arr[j]
1819 base_object arr arr
1820 index {i_0, +, 1}_loop {j_0, +, 1}_loop
1821
1822 The addresses and their indices are like:
1823
1824 |<- ADDR_A ->| |<- ADDR_B ->|
1825 ------------------------------------------------------->
1826 | | | | | | | | | |
1827 ------------------------------------------------------->
1828 i_0 ... i_0+4 j_0 ... j_0+4
1829
1830 We can create an expression based on the indices rather than the addresses:
1831
1832 (unsigned) (i_0 - j_0 + 3) <= 6
1833
1834 i.e. the indices are less than 4 apart.
1835
1836 Note that the evolution step of the index needs to be considered in the comparison. */
1837
1838 static bool
1839 create_intersect_range_checks_index (class loop *loop, tree *cond_expr,
1840 const dr_with_seg_len_pair_t &alias_pair)
1841 {
1842 const dr_with_seg_len &dr_a = alias_pair.first;
1843 const dr_with_seg_len &dr_b = alias_pair.second;
1844 if ((alias_pair.flags & DR_ALIAS_MIXED_STEPS)
1845 || integer_zerop (DR_STEP (dr_a.dr))
1846 || integer_zerop (DR_STEP (dr_b.dr))
1847 || DR_NUM_DIMENSIONS (dr_a.dr) != DR_NUM_DIMENSIONS (dr_b.dr))
1848 return false;
1849
1850 poly_uint64 seg_len1, seg_len2;
1851 if (!poly_int_tree_p (dr_a.seg_len, &seg_len1)
1852 || !poly_int_tree_p (dr_b.seg_len, &seg_len2))
1853 return false;
1854
1855 if (!tree_fits_shwi_p (DR_STEP (dr_a.dr)))
1856 return false;
1857
1858 if (!operand_equal_p (DR_BASE_OBJECT (dr_a.dr), DR_BASE_OBJECT (dr_b.dr), 0))
1859 return false;
1860
1861 if (!operand_equal_p (DR_STEP (dr_a.dr), DR_STEP (dr_b.dr), 0))
1862 return false;
1863
1864 gcc_assert (TREE_CODE (DR_STEP (dr_a.dr)) == INTEGER_CST);
1865
1866 bool neg_step = tree_int_cst_compare (DR_STEP (dr_a.dr), size_zero_node) < 0;
1867 unsigned HOST_WIDE_INT abs_step = tree_to_shwi (DR_STEP (dr_a.dr));
1868 if (neg_step)
1869 {
1870 abs_step = -abs_step;
1871 seg_len1 = (-wi::to_poly_wide (dr_a.seg_len)).force_uhwi ();
1872 seg_len2 = (-wi::to_poly_wide (dr_b.seg_len)).force_uhwi ();
1873 }
1874
1875 /* Infer the number of iterations with which the memory segment is accessed
1876 by DR. In other words, aliasing is checked if the memory segment
1877 accessed by DR_A in some iterations intersects with the memory segment
1878 accessed by DR_B in the same number of iterations.
1879 Note that segment length is a linear function of the number of
1880 iterations, with DR_STEP as the coefficient. */
1881 poly_uint64 niter_len1, niter_len2;
1882 if (!can_div_trunc_p (seg_len1 + abs_step - 1, abs_step, &niter_len1)
1883 || !can_div_trunc_p (seg_len2 + abs_step - 1, abs_step, &niter_len2))
1884 return false;
1885
1886 /* Divide each access size by the byte step, rounding up. */
1887 poly_uint64 niter_access1, niter_access2;
1888 if (!can_div_trunc_p (dr_a.access_size + abs_step - 1,
1889 abs_step, &niter_access1)
1890 || !can_div_trunc_p (dr_b.access_size + abs_step - 1,
1891 abs_step, &niter_access2))
1892 return false;
1893
1894 bool waw_or_war_p = (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW)) == 0;
1895
1896 unsigned int i;
1897 for (i = 0; i < DR_NUM_DIMENSIONS (dr_a.dr); i++)
1898 {
1899 tree access1 = DR_ACCESS_FN (dr_a.dr, i);
1900 tree access2 = DR_ACCESS_FN (dr_b.dr, i);
1901 /* The two indices must be the same if they are not SCEVs, or are not
1902 SCEVs with respect to the loop being vectorized. */
1903 if (TREE_CODE (access1) != POLYNOMIAL_CHREC
1904 || TREE_CODE (access2) != POLYNOMIAL_CHREC
1905 || CHREC_VARIABLE (access1) != (unsigned)loop->num
1906 || CHREC_VARIABLE (access2) != (unsigned)loop->num)
1907 {
1908 if (operand_equal_p (access1, access2, 0))
1909 continue;
1910
1911 return false;
1912 }
1913 /* The two indices must have the same step. */
1914 if (!operand_equal_p (CHREC_RIGHT (access1), CHREC_RIGHT (access2), 0))
1915 return false;
1916
1917 tree idx_step = CHREC_RIGHT (access1);
1918 /* Index must have const step, otherwise DR_STEP won't be constant. */
1919 gcc_assert (TREE_CODE (idx_step) == INTEGER_CST);
1920 /* Index must evaluate in the same direction as DR. */
1921 gcc_assert (!neg_step || tree_int_cst_sign_bit (idx_step) == 1);
1922
1923 tree min1 = CHREC_LEFT (access1);
1924 tree min2 = CHREC_LEFT (access2);
1925 if (!types_compatible_p (TREE_TYPE (min1), TREE_TYPE (min2)))
1926 return false;
1927
1928 /* Ideally, aliasing would be checked against the loop's control IV, but
1929 that requires proving a linear mapping between the control IV and the
1930 reference index. Instead we check against the (array) index of the
1931 data reference. Like the segment length, the index length is a linear
1932 function of the number of iterations, with idx_step as the
1933 coefficient, i.e. niter_len * idx_step. */
1934 offset_int abs_idx_step = offset_int::from (wi::to_wide (idx_step),
1935 SIGNED);
1936 if (neg_step)
1937 abs_idx_step = -abs_idx_step;
1938 poly_offset_int idx_len1 = abs_idx_step * niter_len1;
1939 poly_offset_int idx_len2 = abs_idx_step * niter_len2;
1940 poly_offset_int idx_access1 = abs_idx_step * niter_access1;
1941 poly_offset_int idx_access2 = abs_idx_step * niter_access2;
1942
1943 gcc_assert (known_ge (idx_len1, 0)
1944 && known_ge (idx_len2, 0)
1945 && known_ge (idx_access1, 0)
1946 && known_ge (idx_access2, 0));
1947
1948 /* Each access has the following pattern, with lengths measured
1949 in units of INDEX:
1950
1951 <-- idx_len -->
1952 <--- A: -ve step --->
1953 +-----+-------+-----+-------+-----+
1954 | n-1 | ..... | 0 | ..... | n-1 |
1955 +-----+-------+-----+-------+-----+
1956 <--- B: +ve step --->
1957 <-- idx_len -->
1958 |
1959 min
1960
1961 where "n" is the number of scalar iterations covered by the segment
1962 and where each access spans idx_access units.
1963
1964 A is the range of bytes accessed when the step is negative,
1965 B is the range when the step is positive.
1966
1967 When checking for general overlap, we need to test whether
1968 the range:
1969
1970 [min1 + low_offset1, min1 + high_offset1 + idx_access1 - 1]
1971
1972 overlaps:
1973
1974 [min2 + low_offset2, min2 + high_offset2 + idx_access2 - 1]
1975
1976 where:
1977
1978 low_offsetN = +ve step ? 0 : -idx_lenN;
1979 high_offsetN = +ve step ? idx_lenN : 0;
1980
1981 This is equivalent to testing whether:
1982
1983 min1 + low_offset1 <= min2 + high_offset2 + idx_access2 - 1
1984 && min2 + low_offset2 <= min1 + high_offset1 + idx_access1 - 1
1985
1986 Converting this into a single test, there is an overlap if:
1987
1988 0 <= min2 - min1 + bias <= limit
1989
1990 where bias = high_offset2 + idx_access2 - 1 - low_offset1
1991 limit = (high_offset1 - low_offset1 + idx_access1 - 1)
1992 + (high_offset2 - low_offset2 + idx_access2 - 1)
1993 i.e. limit = idx_len1 + idx_access1 - 1 + idx_len2 + idx_access2 - 1
1994
1995 Combining the tests requires limit to be computable in an unsigned
1996 form of the index type; if it isn't, we fall back to the usual
1997 pointer-based checks.
1998
1999 We can do better if DR_B is a write and if DR_A and DR_B are
2000 well-ordered in both the original and the new code (see the
2001 comment above the DR_ALIAS_* flags for details). In this case
2002 we know that for each i in [0, n-1], the write performed by
2003 access i of DR_B occurs after access numbers j<=i of DR_A in
2004 both the original and the new code. Any write or anti
2005 dependencies wrt those DR_A accesses are therefore maintained.
2006
2007 We just need to make sure that each individual write in DR_B does not
2008 overlap any higher-indexed access in DR_A; such DR_A accesses happen
2009 after the DR_B access in the original code but happen before it in
2010 the new code.
2011
2012 We know the steps for both accesses are equal, so by induction, we
2013 just need to test whether the first write of DR_B overlaps a later
2014 access of DR_A. In other words, we need to move min1 along by
2015 one iteration:
2016
2017 min1' = min1 + idx_step
2018
2019 and use the ranges:
2020
2021 [min1' + low_offset1', min1' + high_offset1' + idx_access1 - 1]
2022
2023 and:
2024
2025 [min2, min2 + idx_access2 - 1]
2026
2027 where:
2028
2029 low_offset1' = +ve step ? 0 : -(idx_len1 - |idx_step|)
2030 high_offset1' = +ve step ? idx_len1 - |idx_step| : 0. */
2031 if (waw_or_war_p)
2032 idx_len1 -= abs_idx_step;
2033
2034 poly_offset_int limit = idx_len1 + idx_access1 - 1 + idx_access2 - 1;
2035 if (!waw_or_war_p)
2036 limit += idx_len2;
2037
2038 tree utype = unsigned_type_for (TREE_TYPE (min1));
2039 if (!wi::fits_to_tree_p (limit, utype))
2040 return false;
2041
2042 poly_offset_int low_offset1 = neg_step ? -idx_len1 : 0;
2043 poly_offset_int high_offset2 = neg_step || waw_or_war_p ? 0 : idx_len2;
2044 poly_offset_int bias = high_offset2 + idx_access2 - 1 - low_offset1;
2045 /* Equivalent to adding IDX_STEP to MIN1. */
2046 if (waw_or_war_p)
2047 bias -= wi::to_offset (idx_step);
2048
2049 tree subject = fold_build2 (MINUS_EXPR, utype,
2050 fold_convert (utype, min2),
2051 fold_convert (utype, min1));
2052 subject = fold_build2 (PLUS_EXPR, utype, subject,
2053 wide_int_to_tree (utype, bias));
2054 tree part_cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject,
2055 wide_int_to_tree (utype, limit));
2056 if (*cond_expr)
2057 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2058 *cond_expr, part_cond_expr);
2059 else
2060 *cond_expr = part_cond_expr;
2061 }
2062 if (dump_enabled_p ())
2063 {
2064 if (waw_or_war_p)
2065 dump_printf (MSG_NOTE, "using an index-based WAR/WAW test\n");
2066 else
2067 dump_printf (MSG_NOTE, "using an index-based overlap test\n");
2068 }
2069 return true;
2070 }
2071
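/* Worked example for the general overlap test, matching the function
   comment above: with a positive step, idx_step == 1,
   idx_len1 == idx_len2 == 3 and idx_access1 == idx_access2 == 1:

     low_offset1 = 0, high_offset2 = 3
     bias = 3 + 1 - 1 - 0 = 3
     limit = 3 + 1 - 1 + 3 + 1 - 1 = 6

   so the emitted no-alias condition is (unsigned) (min2 - min1 + 3) > 6,
   the negation of the overlap test (unsigned) (i_0 - j_0 + 3) <= 6
   shown earlier.  */
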
2072 /* A subroutine of create_intersect_range_checks, with a subset of the
2073 same arguments. Try to optimize cases in which the second access
2074 is a write and in which some overlap is valid. */
2075
2076 static bool
2077 create_waw_or_war_checks (tree *cond_expr,
2078 const dr_with_seg_len_pair_t &alias_pair)
2079 {
2080 const dr_with_seg_len& dr_a = alias_pair.first;
2081 const dr_with_seg_len& dr_b = alias_pair.second;
2082
2083 /* Check for cases in which:
2084
2085 (a) DR_B is always a write;
2086 (b) the accesses are well-ordered in both the original and new code
2087 (see the comment above the DR_ALIAS_* flags for details); and
2088 (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */
2089 if (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW))
2090 return false;
2091
2092 /* Check for equal (but possibly variable) steps. */
2093 tree step = DR_STEP (dr_a.dr);
2094 if (!operand_equal_p (step, DR_STEP (dr_b.dr)))
2095 return false;
2096
2097 /* Make sure that we can operate on sizetype without loss of precision. */
2098 tree addr_type = TREE_TYPE (DR_BASE_ADDRESS (dr_a.dr));
2099 if (TYPE_PRECISION (addr_type) != TYPE_PRECISION (sizetype))
2100 return false;
2101
2102 /* All addresses involved are known to have a common alignment ALIGN.
2103 We can therefore subtract ALIGN from an exclusive endpoint to get
2104 an inclusive endpoint. In the best (and common) case, ALIGN is the
2105 same as the access sizes of both DRs, and so subtracting ALIGN
2106 cancels out the addition of an access size. */
2107 unsigned int align = MIN (dr_a.align, dr_b.align);
2108 poly_uint64 last_chunk_a = dr_a.access_size - align;
2109 poly_uint64 last_chunk_b = dr_b.access_size - align;
2110
2111 /* Get a boolean expression that is true when the step is negative. */
2112 tree indicator = dr_direction_indicator (dr_a.dr);
2113 tree neg_step = fold_build2 (LT_EXPR, boolean_type_node,
2114 fold_convert (ssizetype, indicator),
2115 ssize_int (0));
2116
2117 /* Get lengths in sizetype. */
2118 tree seg_len_a
2119 = fold_convert (sizetype, rewrite_to_non_trapping_overflow (dr_a.seg_len));
2120 step = fold_convert (sizetype, rewrite_to_non_trapping_overflow (step));
2121
2122 /* Each access has the following pattern:
2123
2124 <- |seg_len| ->
2125 <--- A: -ve step --->
2126 +-----+-------+-----+-------+-----+
2127 | n-1 | ..... | 0 | ..... | n-1 |
2128 +-----+-------+-----+-------+-----+
2129 <--- B: +ve step --->
2130 <- |seg_len| ->
2131 |
2132 base address
2133
2134 where "n" is the number of scalar iterations covered by the segment.
2135
2136 A is the range of bytes accessed when the step is negative,
2137 B is the range when the step is positive.
2138
2139 We know that DR_B is a write. We also know (from checking that
2140 DR_A and DR_B are well-ordered) that for each i in [0, n-1],
2141 the write performed by access i of DR_B occurs after access numbers
2142 j<=i of DR_A in both the original and the new code. Any write or
2143 anti dependencies wrt those DR_A accesses are therefore maintained.
2144
2145 We just need to make sure that each individual write in DR_B does not
2146 overlap any higher-indexed access in DR_A; such DR_A accesses happen
2147 after the DR_B access in the original code but happen before it in
2148 the new code.
2149
2150 We know the steps for both accesses are equal, so by induction, we
2151 just need to test whether the first write of DR_B overlaps a later
2152 access of DR_A. In other words, we need to move addr_a along by
2153 one iteration:
2154
2155 addr_a' = addr_a + step
2156
2157 and check whether:
2158
2159 [addr_b, addr_b + last_chunk_b]
2160
2161 overlaps:
2162
2163 [addr_a' + low_offset_a, addr_a' + high_offset_a + last_chunk_a]
2164
2165 where [low_offset_a, high_offset_a] spans accesses [1, n-1]. I.e.:
2166
2167 low_offset_a = +ve step ? 0 : seg_len_a - step
2168 high_offset_a = +ve step ? seg_len_a - step : 0
2169
2170 This is equivalent to testing whether:
2171
2172 addr_a' + low_offset_a <= addr_b + last_chunk_b
2173 && addr_b <= addr_a' + high_offset_a + last_chunk_a
2174
2175 Converting this into a single test, there is an overlap if:
2176
2177 0 <= addr_b + last_chunk_b - addr_a' - low_offset_a <= limit
2178
2179 where limit = high_offset_a - low_offset_a + last_chunk_a + last_chunk_b
2180
2181 If DR_A is performed, limit + |step| - last_chunk_b is known to be
2182 less than the size of the object underlying DR_A. We also know
2183 that last_chunk_b <= |step|; this is checked elsewhere if it isn't
2184 guaranteed at compile time. There can therefore be no overflow if
2185 "limit" is calculated in an unsigned type with pointer precision. */
2186 tree addr_a = fold_build_pointer_plus (DR_BASE_ADDRESS (dr_a.dr),
2187 DR_OFFSET (dr_a.dr));
2188 addr_a = fold_build_pointer_plus (addr_a, DR_INIT (dr_a.dr));
2189
2190 tree addr_b = fold_build_pointer_plus (DR_BASE_ADDRESS (dr_b.dr),
2191 DR_OFFSET (dr_b.dr));
2192 addr_b = fold_build_pointer_plus (addr_b, DR_INIT (dr_b.dr));
2193
2194 /* Advance ADDR_A by one iteration and adjust the length to compensate. */
2195 addr_a = fold_build_pointer_plus (addr_a, step);
2196 tree seg_len_a_minus_step = fold_build2 (MINUS_EXPR, sizetype,
2197 seg_len_a, step);
2198 if (!CONSTANT_CLASS_P (seg_len_a_minus_step))
2199 seg_len_a_minus_step = build1 (SAVE_EXPR, sizetype, seg_len_a_minus_step);
2200
2201 tree low_offset_a = fold_build3 (COND_EXPR, sizetype, neg_step,
2202 seg_len_a_minus_step, size_zero_node);
2203 if (!CONSTANT_CLASS_P (low_offset_a))
2204 low_offset_a = build1 (SAVE_EXPR, sizetype, low_offset_a);
2205
2206 /* We could use COND_EXPR <neg_step, size_zero_node, seg_len_a_minus_step>,
2207 but it's usually more efficient to reuse the LOW_OFFSET_A result. */
2208 tree high_offset_a = fold_build2 (MINUS_EXPR, sizetype, seg_len_a_minus_step,
2209 low_offset_a);
2210
2211 /* The amount added to addr_b - addr_a'. */
2212 tree bias = fold_build2 (MINUS_EXPR, sizetype,
2213 size_int (last_chunk_b), low_offset_a);
2214
2215 tree limit = fold_build2 (MINUS_EXPR, sizetype, high_offset_a, low_offset_a);
2216 limit = fold_build2 (PLUS_EXPR, sizetype, limit,
2217 size_int (last_chunk_a + last_chunk_b));
2218
2219 tree subject = fold_build2 (POINTER_DIFF_EXPR, ssizetype, addr_b, addr_a);
2220 subject = fold_build2 (PLUS_EXPR, sizetype,
2221 fold_convert (sizetype, subject), bias);
2222
2223 *cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject, limit);
2224 if (dump_enabled_p ())
2225 dump_printf (MSG_NOTE, "using an address-based WAR/WAW test\n");
2226 return true;
2227 }
2228
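/* Worked example (a sketch): with a positive step and access_size equal
   to align on both sides, last_chunk_a == last_chunk_b == 0,
   low_offset_a == 0 and high_offset_a == seg_len_a - step, so the
   emitted no-alias condition reduces to

     (sizetype) (addr_b - addr_a - step) > seg_len_a - step

   i.e. DR_B's first write must fall outside all later DR_A accesses.  */
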
2229 /* If ALIGN is nonzero, set up *SEG_MIN_OUT and *SEG_MAX_OUT so that for
2230 every address ADDR accessed by D:
2231
2232 *SEG_MIN_OUT <= ADDR (== ADDR & -ALIGN) <= *SEG_MAX_OUT
2233
2234 In this case, every element accessed by D is aligned to at least
2235 ALIGN bytes.
2236
2237 If ALIGN is zero then instead set *SEG_MIN_OUT and *SEG_MAX_OUT so that:
2238
2239 *SEG_MIN_OUT <= ADDR < *SEG_MAX_OUT. */
2240
2241 static void
2242 get_segment_min_max (const dr_with_seg_len &d, tree *seg_min_out,
2243 tree *seg_max_out, HOST_WIDE_INT align)
2244 {
2245 /* Each access has the following pattern:
2246
2247 <- |seg_len| ->
2248 <--- A: -ve step --->
2249 +-----+-------+-----+-------+-----+
2250 | n-1 | ..... | 0 | ..... | n-1 |
2251 +-----+-------+-----+-------+-----+
2252 <--- B: +ve step --->
2253 <- |seg_len| ->
2254 |
2255 base address
2256
2257 where "n" is the number of scalar iterations covered by the segment.
2258 (This should be VF for a particular pair if we know that both steps
2259 are the same, otherwise it will be the full number of scalar loop
2260 iterations.)
2261
2262 A is the range of bytes accessed when the step is negative,
2263 B is the range when the step is positive.
2264
2265 If the access size is "access_size" bytes, the lowest addressed byte is:
2266
2267 base + (step < 0 ? seg_len : 0) [LB]
2268
2269 and the highest addressed byte is always below:
2270
2271 base + (step < 0 ? 0 : seg_len) + access_size [UB]
2272
2273 Thus:
2274
2275 LB <= ADDR < UB
2276
2277 If ALIGN is nonzero, all three values are aligned to at least ALIGN
2278 bytes, so:
2279
2280 LB <= ADDR <= UB - ALIGN
2281
2282 where "- ALIGN" folds naturally with the "+ access_size" and often
2283 cancels it out.
2284
2285 We don't try to simplify LB and UB beyond this (e.g. by using
2286 MIN and MAX based on whether seg_len rather than the stride is
2287 negative) because it is possible for the absolute size of the
2288 segment to overflow the range of a ssize_t.
2289
2290 Keeping the pointer_plus outside of the cond_expr should allow
2291 the cond_exprs to be shared with other alias checks. */
2292 tree indicator = dr_direction_indicator (d.dr);
2293 tree neg_step = fold_build2 (LT_EXPR, boolean_type_node,
2294 fold_convert (ssizetype, indicator),
2295 ssize_int (0));
2296 tree addr_base = fold_build_pointer_plus (DR_BASE_ADDRESS (d.dr),
2297 DR_OFFSET (d.dr));
2298 addr_base = fold_build_pointer_plus (addr_base, DR_INIT (d.dr));
2299 tree seg_len
2300 = fold_convert (sizetype, rewrite_to_non_trapping_overflow (d.seg_len));
2301
2302 tree min_reach = fold_build3 (COND_EXPR, sizetype, neg_step,
2303 seg_len, size_zero_node);
2304 tree max_reach = fold_build3 (COND_EXPR, sizetype, neg_step,
2305 size_zero_node, seg_len);
2306 max_reach = fold_build2 (PLUS_EXPR, sizetype, max_reach,
2307 size_int (d.access_size - align));
2308
2309 *seg_min_out = fold_build_pointer_plus (addr_base, min_reach);
2310 *seg_max_out = fold_build_pointer_plus (addr_base, max_reach);
2311 }
2312
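/* For example, with a positive step, seg_len == 16, access_size == 4
   and align == 4:

     min_reach = 0, max_reach = 16 + 4 - 4 = 16

   so *SEG_MIN_OUT == addr_base and *SEG_MAX_OUT == addr_base + 16,
   an inclusive range as described above.  */
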
2313 /* Generate a runtime condition that is true if ALIAS_PAIR is free of aliases,
2314 storing the condition in *COND_EXPR. The fallback is to generate a
2315 test that the two accesses do not overlap:
2316
2317 end_a <= start_b || end_b <= start_a. */
2318
2319 static void
2320 create_intersect_range_checks (class loop *loop, tree *cond_expr,
2321 const dr_with_seg_len_pair_t &alias_pair)
2322 {
2323 const dr_with_seg_len& dr_a = alias_pair.first;
2324 const dr_with_seg_len& dr_b = alias_pair.second;
2325 *cond_expr = NULL_TREE;
2326 if (create_intersect_range_checks_index (loop, cond_expr, alias_pair))
2327 return;
2328
2329 if (create_ifn_alias_checks (cond_expr, alias_pair))
2330 return;
2331
2332 if (create_waw_or_war_checks (cond_expr, alias_pair))
2333 return;
2334
2335 unsigned HOST_WIDE_INT min_align;
2336 tree_code cmp_code;
2337 /* We don't have to check DR_ALIAS_MIXED_STEPS here, since both versions
2338 are equivalent. This is just an optimization heuristic. */
2339 if (TREE_CODE (DR_STEP (dr_a.dr)) == INTEGER_CST
2340 && TREE_CODE (DR_STEP (dr_b.dr)) == INTEGER_CST)
2341 {
2342 /* In this case adding access_size to seg_len is likely to give
2343 a simple X * step, where X is either the number of scalar
2344 iterations or the vectorization factor. We're better off
2345 keeping that, rather than subtracting an alignment from it.
2346
2347 In this case the maximum values are exclusive and so there is
2348 no alias if the maximum of one segment equals the minimum
2349 of another. */
2350 min_align = 0;
2351 cmp_code = LE_EXPR;
2352 }
2353 else
2354 {
2355 /* Calculate the minimum alignment shared by all four pointers,
2356 then arrange for this alignment to be subtracted from the
2357 exclusive maximum values to get inclusive maximum values.
2358 This "- min_align" is cumulative with a "+ access_size"
2359 in the calculation of the maximum values. In the best
2360 (and common) case, the two cancel each other out, leaving
2361 us with an inclusive bound based only on seg_len. In the
2362 worst case we're simply adding a smaller number than before.
2363
2364 Because the maximum values are inclusive, there is an alias
2365 if the maximum value of one segment is equal to the minimum
2366 value of the other. */
2367 min_align = MIN (dr_a.align, dr_b.align);
2368 cmp_code = LT_EXPR;
2369 }
2370
2371 tree seg_a_min, seg_a_max, seg_b_min, seg_b_max;
2372 get_segment_min_max (dr_a, &seg_a_min, &seg_a_max, min_align);
2373 get_segment_min_max (dr_b, &seg_b_min, &seg_b_max, min_align);
2374
2375 *cond_expr
2376 = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
2377 fold_build2 (cmp_code, boolean_type_node, seg_a_max, seg_b_min),
2378 fold_build2 (cmp_code, boolean_type_node, seg_b_max, seg_a_min));
2379 if (dump_enabled_p ())
2380 dump_printf (MSG_NOTE, "using an address-based overlap test\n");
2381 }
2382
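/* Shape of the fallback condition: for two constant-step accesses the
   maxima are exclusive (min_align == 0) and the emitted test is

     seg_a_max <= seg_b_min || seg_b_max <= seg_a_min

   while for variable steps the maxima are made inclusive by subtracting
   min_align, and LT_EXPR is used instead of LE_EXPR.  */
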
2383 /* Create a conditional expression that represents the run-time checks for
2384 overlapping of address ranges represented by a list of data references
2385 pairs passed in ALIAS_PAIRS. Data references are in LOOP. The returned
2386 COND_EXPR is the conditional expression to be used in the if statement
2387 that controls which version of the loop gets executed at runtime. */
2388
2389 void
2390 create_runtime_alias_checks (class loop *loop,
2391 vec<dr_with_seg_len_pair_t> *alias_pairs,
2392 tree *cond_expr)
2393 {
2394 tree part_cond_expr;
2395
2396 fold_defer_overflow_warnings ();
2397 dr_with_seg_len_pair_t *alias_pair;
2398 unsigned int i;
2399 FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
2400 {
2401 gcc_assert (alias_pair->flags);
2402 if (dump_enabled_p ())
2403 dump_printf (MSG_NOTE,
2404 "create runtime check for data references %T and %T\n",
2405 DR_REF (alias_pair->first.dr),
2406 DR_REF (alias_pair->second.dr));
2407
2408 /* Create a condition expression for each pair of data references. */
2409 create_intersect_range_checks (loop, &part_cond_expr, *alias_pair);
2410 if (*cond_expr)
2411 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2412 *cond_expr, part_cond_expr);
2413 else
2414 *cond_expr = part_cond_expr;
2415 }
2416 fold_undefer_and_ignore_overflow_warnings ();
2417 }
2418
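/* Usage sketch (hedged; see the vectorizer and loop distribution for the
   real callers): *COND_EXPR may already hold earlier runtime checks, the
   per-pair alias conditions are simply ANDed onto it, and the result is
   used to version the loop:

     tree cond = NULL_TREE;
     create_runtime_alias_checks (loop, &alias_pairs, &cond);
     // if (cond) run the optimized copy, else the original loop.  */
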
2419 /* Check if OFFSET1 and OFFSET2 (DR_OFFSETs of some data-refs) are identical
2420 expressions. */
2421 static bool
2422 dr_equal_offsets_p1 (tree offset1, tree offset2)
2423 {
2424 bool res;
2425
2426 STRIP_NOPS (offset1);
2427 STRIP_NOPS (offset2);
2428
2429 if (offset1 == offset2)
2430 return true;
2431
2432 if (TREE_CODE (offset1) != TREE_CODE (offset2)
2433 || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1)))
2434 return false;
2435
2436 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 0),
2437 TREE_OPERAND (offset2, 0));
2438
2439 if (!res || !BINARY_CLASS_P (offset1))
2440 return res;
2441
2442 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 1),
2443 TREE_OPERAND (offset2, 1));
2444
2445 return res;
2446 }
2447
2448 /* Check if DRA and DRB have equal offsets. */
2449 bool
2450 dr_equal_offsets_p (struct data_reference *dra,
2451 struct data_reference *drb)
2452 {
2453 tree offset1, offset2;
2454
2455 offset1 = DR_OFFSET (dra);
2456 offset2 = DR_OFFSET (drb);
2457
2458 return dr_equal_offsets_p1 (offset1, offset2);
2459 }
2460
2461 /* Returns true if FNA == FNB. */
2462
2463 static bool
2464 affine_function_equal_p (affine_fn fna, affine_fn fnb)
2465 {
2466 unsigned i, n = fna.length ();
2467
2468 if (n != fnb.length ())
2469 return false;
2470
2471 for (i = 0; i < n; i++)
2472 if (!operand_equal_p (fna[i], fnb[i], 0))
2473 return false;
2474
2475 return true;
2476 }
2477
2478 /* If all the functions in CF are the same, returns one of them,
2479 otherwise returns NULL. */
2480
2481 static affine_fn
2482 common_affine_function (conflict_function *cf)
2483 {
2484 unsigned i;
2485 affine_fn comm;
2486
2487 if (!CF_NONTRIVIAL_P (cf))
2488 return affine_fn ();
2489
2490 comm = cf->fns[0];
2491
2492 for (i = 1; i < cf->n; i++)
2493 if (!affine_function_equal_p (comm, cf->fns[i]))
2494 return affine_fn ();
2495
2496 return comm;
2497 }
2498
2499 /* Returns the base of the affine function FN. */
2500
2501 static tree
2502 affine_function_base (affine_fn fn)
2503 {
2504 return fn[0];
2505 }
2506
2507 /* Returns true if FN is a constant. */
2508
2509 static bool
2510 affine_function_constant_p (affine_fn fn)
2511 {
2512 unsigned i;
2513 tree coef;
2514
2515 for (i = 1; fn.iterate (i, &coef); i++)
2516 if (!integer_zerop (coef))
2517 return false;
2518
2519 return true;
2520 }
2521
2522 /* Returns true if FN is the zero constant function. */
2523
2524 static bool
2525 affine_function_zero_p (affine_fn fn)
2526 {
2527 return (integer_zerop (affine_function_base (fn))
2528 && affine_function_constant_p (fn));
2529 }
2530
2531 /* Returns a signed integer type with the largest precision from TA
2532 and TB. */
2533
2534 static tree
2535 signed_type_for_types (tree ta, tree tb)
2536 {
2537 if (TYPE_PRECISION (ta) > TYPE_PRECISION (tb))
2538 return signed_type_for (ta);
2539 else
2540 return signed_type_for (tb);
2541 }
2542
2543 /* Applies operation OP on affine functions FNA and FNB, and returns the
2544 result. */
2545
2546 static affine_fn
2547 affine_fn_op (enum tree_code op, affine_fn fna, affine_fn fnb)
2548 {
2549 unsigned i, n, m;
2550 affine_fn ret;
2551 tree coef;
2552
2553 if (fnb.length () > fna.length ())
2554 {
2555 n = fna.length ();
2556 m = fnb.length ();
2557 }
2558 else
2559 {
2560 n = fnb.length ();
2561 m = fna.length ();
2562 }
2563
2564 ret.create (m);
2565 for (i = 0; i < n; i++)
2566 {
2567 tree type = signed_type_for_types (TREE_TYPE (fna[i]),
2568 TREE_TYPE (fnb[i]));
2569 ret.quick_push (fold_build2 (op, type, fna[i], fnb[i]));
2570 }
2571
2572 for (; fna.iterate (i, &coef); i++)
2573 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
2574 coef, integer_zero_node));
2575 for (; fnb.iterate (i, &coef); i++)
2576 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
2577 integer_zero_node, coef));
2578
2579 return ret;
2580 }
2581
2582 /* Returns the sum of affine functions FNA and FNB. */
2583
2584 static affine_fn
2585 affine_fn_plus (affine_fn fna, affine_fn fnb)
2586 {
2587 return affine_fn_op (PLUS_EXPR, fna, fnb);
2588 }
2589
2590 /* Returns the difference of affine functions FNA and FNB. */
2591
2592 static affine_fn
2593 affine_fn_minus (affine_fn fna, affine_fn fnb)
2594 {
2595 return affine_fn_op (MINUS_EXPR, fna, fnb);
2596 }
2597
2598 /* Frees affine function FN. */
2599
2600 static void
2601 affine_fn_free (affine_fn fn)
2602 {
2603 fn.release ();
2604 }
2605
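/* Representation note: an affine_fn is a vector of trees in which
   element 0 is the constant base and element i (i >= 1) is the
   coefficient of the i-th induction variable, so {3, 0, 2} denotes
   3 + 0 * x_1 + 2 * x_2. affine_fn_plus and affine_fn_minus combine
   the vectors coefficient-wise; e.g. {3, 1} minus {1, 1} gives {2, 0},
   a constant function.  */
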
2606 /* For each subscript in the data dependence relation DDR, determine
2607 its distance. */
2608
2609 static void
2610 compute_subscript_distance (struct data_dependence_relation *ddr)
2611 {
2612 conflict_function *cf_a, *cf_b;
2613 affine_fn fn_a, fn_b, diff;
2614
2615 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
2616 {
2617 unsigned int i;
2618
2619 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
2620 {
2621 struct subscript *subscript;
2622
2623 subscript = DDR_SUBSCRIPT (ddr, i);
2624 cf_a = SUB_CONFLICTS_IN_A (subscript);
2625 cf_b = SUB_CONFLICTS_IN_B (subscript);
2626
2627 fn_a = common_affine_function (cf_a);
2628 fn_b = common_affine_function (cf_b);
2629 if (!fn_a.exists () || !fn_b.exists ())
2630 {
2631 SUB_DISTANCE (subscript) = chrec_dont_know;
2632 return;
2633 }
2634 diff = affine_fn_minus (fn_a, fn_b);
2635
2636 if (affine_function_constant_p (diff))
2637 SUB_DISTANCE (subscript) = affine_function_base (diff);
2638 else
2639 SUB_DISTANCE (subscript) = chrec_dont_know;
2640
2641 affine_fn_free (diff);
2642 }
2643 }
2644 }
2645
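/* For example, if the conflicting iterations of A and B are described by
   the common affine functions fn_a (k) = k and fn_b (k) = k + 2 (as for
   a[i + 2] versus a[i]), the difference is the constant -2 and
   SUB_DISTANCE is set to it; a difference that still depends on k would
   leave SUB_DISTANCE as chrec_dont_know.  */
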
2646 /* Returns the conflict function for "unknown". */
2647
2648 static conflict_function *
2649 conflict_fn_not_known (void)
2650 {
2651 conflict_function *fn = XCNEW (conflict_function);
2652 fn->n = NOT_KNOWN;
2653
2654 return fn;
2655 }
2656
2657 /* Returns the conflict function for "independent". */
2658
2659 static conflict_function *
2660 conflict_fn_no_dependence (void)
2661 {
2662 conflict_function *fn = XCNEW (conflict_function);
2663 fn->n = NO_DEPENDENCE;
2664
2665 return fn;
2666 }
2667
2668 /* Returns true if the address of OBJ is invariant in LOOP. */
2669
2670 static bool
2671 object_address_invariant_in_loop_p (const class loop *loop, const_tree obj)
2672 {
2673 while (handled_component_p (obj))
2674 {
2675 if (TREE_CODE (obj) == ARRAY_REF)
2676 {
2677 for (int i = 1; i < 4; ++i)
2678 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, i),
2679 loop->num))
2680 return false;
2681 }
2682 else if (TREE_CODE (obj) == COMPONENT_REF)
2683 {
2684 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2),
2685 loop->num))
2686 return false;
2687 }
2688 obj = TREE_OPERAND (obj, 0);
2689 }
2690
2691 if (!INDIRECT_REF_P (obj)
2692 && TREE_CODE (obj) != MEM_REF)
2693 return true;
2694
2695 return !chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 0),
2696 loop->num);
2697 }
2698
2699 /* Returns false if we can prove that data references A and B do not alias,
2700 true otherwise. If LOOP_NEST is NULL, no cross-iteration aliases are
2701 considered. */
2702
2703 bool
2704 dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
2705 class loop *loop_nest)
2706 {
2707 tree addr_a = DR_BASE_OBJECT (a);
2708 tree addr_b = DR_BASE_OBJECT (b);
2709
2710 /* If we are not processing a loop nest but scalar code we
2711 do not need to care about possible cross-iteration dependences
2712 and thus can process the full original reference. Do so,
2713 similar to how loop invariant motion applies extra offset-based
2714 disambiguation. */
2715 if (!loop_nest)
2716 {
2717 aff_tree off1, off2;
2718 poly_widest_int size1, size2;
2719 get_inner_reference_aff (DR_REF (a), &off1, &size1);
2720 get_inner_reference_aff (DR_REF (b), &off2, &size2);
2721 aff_combination_scale (&off1, -1);
2722 aff_combination_add (&off2, &off1);
2723 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
2724 return false;
2725 }
2726
2727 if ((TREE_CODE (addr_a) == MEM_REF || TREE_CODE (addr_a) == TARGET_MEM_REF)
2728 && (TREE_CODE (addr_b) == MEM_REF || TREE_CODE (addr_b) == TARGET_MEM_REF)
2729 /* For cross-iteration dependences the cliques must be valid for the
2730 whole loop, not just individual iterations. */
2731 && (!loop_nest
2732 || MR_DEPENDENCE_CLIQUE (addr_a) == 1
2733 || MR_DEPENDENCE_CLIQUE (addr_a) == loop_nest->owned_clique)
2734 && MR_DEPENDENCE_CLIQUE (addr_a) == MR_DEPENDENCE_CLIQUE (addr_b)
2735 && MR_DEPENDENCE_BASE (addr_a) != MR_DEPENDENCE_BASE (addr_b))
2736 return false;
2737
2738 /* If we had an evolution in a pointer-based MEM_REF BASE_OBJECT we
2739 do not know the size of the base-object. So we cannot do any
2740 offset/overlap based analysis but have to rely on points-to
2741 information only. */
2742 if (TREE_CODE (addr_a) == MEM_REF
2743 && (DR_UNCONSTRAINED_BASE (a)
2744 || TREE_CODE (TREE_OPERAND (addr_a, 0)) == SSA_NAME))
2745 {
2746 /* For true dependences we can apply TBAA. */
2747 if (flag_strict_aliasing
2748 && DR_IS_WRITE (a) && DR_IS_READ (b)
2749 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
2750 get_alias_set (DR_REF (b))))
2751 return false;
2752 if (TREE_CODE (addr_b) == MEM_REF)
2753 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
2754 TREE_OPERAND (addr_b, 0));
2755 else
2756 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
2757 build_fold_addr_expr (addr_b));
2758 }
2759 else if (TREE_CODE (addr_b) == MEM_REF
2760 && (DR_UNCONSTRAINED_BASE (b)
2761 || TREE_CODE (TREE_OPERAND (addr_b, 0)) == SSA_NAME))
2762 {
2763 /* For true dependences we can apply TBAA. */
2764 if (flag_strict_aliasing
2765 && DR_IS_WRITE (a) && DR_IS_READ (b)
2766 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
2767 get_alias_set (DR_REF (b))))
2768 return false;
2769 if (TREE_CODE (addr_a) == MEM_REF)
2770 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
2771 TREE_OPERAND (addr_b, 0));
2772 else
2773 return ptr_derefs_may_alias_p (build_fold_addr_expr (addr_a),
2774 TREE_OPERAND (addr_b, 0));
2775 }
2776
2777 /* Otherwise DR_BASE_OBJECT is an access that covers the whole object
2778 that is being subsetted in the loop nest. */
2779 if (DR_IS_WRITE (a) && DR_IS_WRITE (b))
2780 return refs_output_dependent_p (addr_a, addr_b);
2781 else if (DR_IS_READ (a) && DR_IS_WRITE (b))
2782 return refs_anti_dependent_p (addr_a, addr_b);
2783 return refs_may_alias_p (addr_a, addr_b);
2784 }
2785
2786 /* REF_A and REF_B both satisfy access_fn_component_p. Return true
2787 if it is meaningful to compare their associated access functions
2788 when checking for dependencies. */
2789
2790 static bool
2791 access_fn_components_comparable_p (tree ref_a, tree ref_b)
2792 {
2793 /* Allow pairs of component refs from the following sets:
2794
2795 { REALPART_EXPR, IMAGPART_EXPR }
2796 { COMPONENT_REF }
2797 { ARRAY_REF }. */
2798 tree_code code_a = TREE_CODE (ref_a);
2799 tree_code code_b = TREE_CODE (ref_b);
2800 if (code_a == IMAGPART_EXPR)
2801 code_a = REALPART_EXPR;
2802 if (code_b == IMAGPART_EXPR)
2803 code_b = REALPART_EXPR;
2804 if (code_a != code_b)
2805 return false;
2806
2807 if (TREE_CODE (ref_a) == COMPONENT_REF)
2808 /* ??? We cannot simply use the type of operand #0 of the refs here as
2809 the Fortran compiler smuggles type punning into COMPONENT_REFs.
2810 Use the DECL_CONTEXT of the FIELD_DECLs instead. */
2811 return (DECL_CONTEXT (TREE_OPERAND (ref_a, 1))
2812 == DECL_CONTEXT (TREE_OPERAND (ref_b, 1)));
2813
2814 return types_compatible_p (TREE_TYPE (TREE_OPERAND (ref_a, 0)),
2815 TREE_TYPE (TREE_OPERAND (ref_b, 0)));
2816 }
2817
2818 /* Initialize a data dependence relation between data accesses A and
2819 B. LOOP_NEST is the vector of loops surrounding the references; its
2820 length gives the size of the classic distance/direction vectors. */
2821
2822 struct data_dependence_relation *
2823 initialize_data_dependence_relation (struct data_reference *a,
2824 struct data_reference *b,
2825 vec<loop_p> loop_nest)
2826 {
2827 struct data_dependence_relation *res;
2828 unsigned int i;
2829
2830 res = XCNEW (struct data_dependence_relation);
2831 DDR_A (res) = a;
2832 DDR_B (res) = b;
2833 DDR_LOOP_NEST (res).create (0);
2834 DDR_SUBSCRIPTS (res).create (0);
2835 DDR_DIR_VECTS (res).create (0);
2836 DDR_DIST_VECTS (res).create (0);
2837
2838 if (a == NULL || b == NULL)
2839 {
2840 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
2841 return res;
2842 }
2843
2844 /* If the data references do not alias, then they are independent. */
2845 if (!dr_may_alias_p (a, b, loop_nest.exists () ? loop_nest[0] : NULL))
2846 {
2847 DDR_ARE_DEPENDENT (res) = chrec_known;
2848 return res;
2849 }
2850
2851 unsigned int num_dimensions_a = DR_NUM_DIMENSIONS (a);
2852 unsigned int num_dimensions_b = DR_NUM_DIMENSIONS (b);
2853 if (num_dimensions_a == 0 || num_dimensions_b == 0)
2854 {
2855 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
2856 return res;
2857 }
2858
2859 /* For unconstrained bases, the root (highest-indexed) subscript
2860 describes a variation in the base of the original DR_REF rather
2861 than a component access. We have no type that accurately describes
2862 the new DR_BASE_OBJECT (whose TREE_TYPE describes the type *after*
2863 applying this subscript) so limit the search to the last real
2864 component access.
2865
2866 E.g. for:
2867
2868 void
2869 f (int a[][8], int b[][8])
2870 {
2871 for (int i = 0; i < 8; ++i)
2872 a[i * 2][0] = b[i][0];
2873 }
2874
2875 the a and b accesses have a single ARRAY_REF component reference [0]
2876 but have two subscripts. */
2877 if (DR_UNCONSTRAINED_BASE (a))
2878 num_dimensions_a -= 1;
2879 if (DR_UNCONSTRAINED_BASE (b))
2880 num_dimensions_b -= 1;
2881
2882 /* These structures describe sequences of component references in
2883 DR_REF (A) and DR_REF (B). Each component reference is tied to a
2884 specific access function. */
2885 struct {
2886 /* The sequence starts at DR_ACCESS_FN (A, START_A) of A and
2887 DR_ACCESS_FN (B, START_B) of B (inclusive) and extends to higher
2888 indices. In C notation, these are the indices of the rightmost
2889 component references; e.g. for a sequence .b.c.d, the start
2890 index is for .d. */
2891 unsigned int start_a;
2892 unsigned int start_b;
2893
2894 /* The sequence contains LENGTH consecutive access functions from
2895 each DR. */
2896 unsigned int length;
2897
2898 /* The enclosing objects for the A and B sequences respectively,
2899 i.e. the objects to which DR_ACCESS_FN (A, START_A + LENGTH - 1)
2900 and DR_ACCESS_FN (B, START_B + LENGTH - 1) are applied. */
2901 tree object_a;
2902 tree object_b;
2903 } full_seq = {}, struct_seq = {};
2904
2905 /* Before each iteration of the loop:
2906
2907 - REF_A is what you get after applying DR_ACCESS_FN (A, INDEX_A) and
2908 - REF_B is what you get after applying DR_ACCESS_FN (B, INDEX_B). */
2909 unsigned int index_a = 0;
2910 unsigned int index_b = 0;
2911 tree ref_a = DR_REF (a);
2912 tree ref_b = DR_REF (b);
2913
2914 /* Now walk the component references from the final DR_REFs back up to
2915 the enclosing base objects. Each component reference corresponds
2916 to one access function in the DR, with access function 0 being for
2917 the final DR_REF and the highest-indexed access function being the
2918 one that is applied to the base of the DR.
2919
2920 Look for a sequence of component references whose access functions
2921 are comparable (see access_fn_components_comparable_p). If more
2922 than one such sequence exists, pick the one nearest the base
2923 (which is the leftmost sequence in C notation). Store this sequence
2924 in FULL_SEQ.
2925
2926 For example, if we have:
2927
2928 struct foo { struct bar s; ... } (*a)[10], (*b)[10];
2929
2930 A: a[0][i].s.c.d
2931 B: __real b[0][i].s.e[i].f
2932
2933 (where d is the same type as the real component of f) then the access
2934 functions would be:
2935
2936 0 1 2 3
2937 A: .d .c .s [i]
2938
2939 0 1 2 3 4 5
2940 B: __real .f [i] .e .s [i]
2941
2942 The A0/B2 column isn't comparable, since .d is a COMPONENT_REF
2943 and [i] is an ARRAY_REF. However, the A1/B3 column contains two
2944 COMPONENT_REF accesses for struct bar, so is comparable. Likewise
2945 the A2/B4 column contains two COMPONENT_REF accesses for struct foo,
2946 so is comparable. The A3/B5 column contains two ARRAY_REFs that
2947 index foo[10] arrays, so is again comparable. The sequence is
2948 therefore:
2949
2950 A: [1, 3] (i.e. [i].s.c)
2951 B: [3, 5] (i.e. [i].s.e)
2952
2953 Also look for sequences of component references whose access
2954 functions are comparable and whose enclosing objects have the same
2955 RECORD_TYPE. Store this sequence in STRUCT_SEQ. In the above
2956 example, STRUCT_SEQ would be:
2957
2958 A: [1, 2] (i.e. s.c)
2959 B: [3, 4] (i.e. s.e) */
2960 while (index_a < num_dimensions_a && index_b < num_dimensions_b)
2961 {
2962 /* REF_A and REF_B must be one of the component access types
2963 allowed by dr_analyze_indices. */
2964 gcc_checking_assert (access_fn_component_p (ref_a));
2965 gcc_checking_assert (access_fn_component_p (ref_b));
2966
2967 /* Get the immediately-enclosing objects for REF_A and REF_B,
2968 i.e. the references *before* applying DR_ACCESS_FN (A, INDEX_A)
2969 and DR_ACCESS_FN (B, INDEX_B). */
2970 tree object_a = TREE_OPERAND (ref_a, 0);
2971 tree object_b = TREE_OPERAND (ref_b, 0);
2972
2973 tree type_a = TREE_TYPE (object_a);
2974 tree type_b = TREE_TYPE (object_b);
2975 if (access_fn_components_comparable_p (ref_a, ref_b))
2976 {
2977 /* This pair of component accesses is comparable for dependence
2978 analysis, so we can include DR_ACCESS_FN (A, INDEX_A) and
2979 DR_ACCESS_FN (B, INDEX_B) in the sequence. */
2980 if (full_seq.start_a + full_seq.length != index_a
2981 || full_seq.start_b + full_seq.length != index_b)
2982 {
2983 /* The accesses don't extend the current sequence,
2984 so start a new one here. */
2985 full_seq.start_a = index_a;
2986 full_seq.start_b = index_b;
2987 full_seq.length = 0;
2988 }
2989
2990 /* Add this pair of references to the sequence. */
2991 full_seq.length += 1;
2992 full_seq.object_a = object_a;
2993 full_seq.object_b = object_b;
2994
2995 /* If the enclosing objects are structures (and thus have the
2996 same RECORD_TYPE), record the new sequence in STRUCT_SEQ. */
2997 if (TREE_CODE (type_a) == RECORD_TYPE)
2998 struct_seq = full_seq;
2999
3000 /* Move to the next containing reference for both A and B. */
3001 ref_a = object_a;
3002 ref_b = object_b;
3003 index_a += 1;
3004 index_b += 1;
3005 continue;
3006 }
3007
3008 /* Otherwise try to step the references toward equal type sizes. */
3009 if (!COMPLETE_TYPE_P (type_a)
3010 || !COMPLETE_TYPE_P (type_b)
3011 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_a))
3012 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_b)))
3013 break;
3014
3015 unsigned HOST_WIDE_INT size_a = tree_to_uhwi (TYPE_SIZE_UNIT (type_a));
3016 unsigned HOST_WIDE_INT size_b = tree_to_uhwi (TYPE_SIZE_UNIT (type_b));
3017 if (size_a <= size_b)
3018 {
3019 index_a += 1;
3020 ref_a = object_a;
3021 }
3022 if (size_b <= size_a)
3023 {
3024 index_b += 1;
3025 ref_b = object_b;
3026 }
3027 }
3028
3029 /* See whether FULL_SEQ ends at the base and whether the two bases
3030 are equal. We do not care about TBAA or alignment info so we can
3031 use OEP_ADDRESS_OF to avoid false negatives. */
3032 tree base_a = DR_BASE_OBJECT (a);
3033 tree base_b = DR_BASE_OBJECT (b);
3034 bool same_base_p = (full_seq.start_a + full_seq.length == num_dimensions_a
3035 && full_seq.start_b + full_seq.length == num_dimensions_b
3036 && DR_UNCONSTRAINED_BASE (a) == DR_UNCONSTRAINED_BASE (b)
3037 && operand_equal_p (base_a, base_b, OEP_ADDRESS_OF)
3038 && types_compatible_p (TREE_TYPE (base_a),
3039 TREE_TYPE (base_b))
3040 && (!loop_nest.exists ()
3041 || (object_address_invariant_in_loop_p
3042 (loop_nest[0], base_a))));
3043
3044 /* If the bases are the same, we can include the base variation too.
3045 E.g. the b accesses in:
3046
3047 for (int i = 0; i < n; ++i)
3048 b[i + 4][0] = b[i][0];
3049
3050 have a definite dependence distance of 4, while for:
3051
3052 for (int i = 0; i < n; ++i)
3053 a[i + 4][0] = b[i][0];
3054
3055 the dependence distance depends on the gap between a and b.
3056
3057 If the bases are different then we can only rely on the sequence
3058 rooted at a structure access, since arrays are allowed to overlap
3059 arbitrarily and change shape arbitrarily. E.g. we treat this as
3060 valid code:
3061
3062 int a[256];
3063 ...
3064 ((int (*)[4][3]) &a[1])[i][0] += ((int (*)[4][3]) &a[2])[i][0];
3065
3066 where two lvalues with the same int[4][3] type overlap, and where
3067 both lvalues are distinct from the object's declared type. */
3068 if (same_base_p)
3069 {
3070 if (DR_UNCONSTRAINED_BASE (a))
3071 full_seq.length += 1;
3072 }
3073 else
3074 full_seq = struct_seq;
3075
3076 /* Punt if we didn't find a suitable sequence. */
3077 if (full_seq.length == 0)
3078 {
3079 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3080 return res;
3081 }
3082
3083 if (!same_base_p)
3084 {
3085 /* Partial overlap is possible for different bases when strict aliasing
3086 is not in effect. It's also possible if either base involves a union
3087 access; e.g. for:
3088
3089 struct s1 { int a[2]; };
3090 struct s2 { struct s1 b; int c; };
3091 struct s3 { int d; struct s1 e; };
3092 union u { struct s2 f; struct s3 g; } *p, *q;
3093
3094 the s1 at "p->f.b" (base "p->f") partially overlaps the s1 at
3095 "p->g.e" (base "p->g") and might partially overlap the s1 at
3096 "q->g.e" (base "q->g"). */
3097 if (!flag_strict_aliasing
3098 || ref_contains_union_access_p (full_seq.object_a)
3099 || ref_contains_union_access_p (full_seq.object_b))
3100 {
3101 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3102 return res;
3103 }
3104
3105 DDR_COULD_BE_INDEPENDENT_P (res) = true;
3106 if (!loop_nest.exists ()
3107 || (object_address_invariant_in_loop_p (loop_nest[0],
3108 full_seq.object_a)
3109 && object_address_invariant_in_loop_p (loop_nest[0],
3110 full_seq.object_b)))
3111 {
3112 DDR_OBJECT_A (res) = full_seq.object_a;
3113 DDR_OBJECT_B (res) = full_seq.object_b;
3114 }
3115 }
3116
3117 DDR_AFFINE_P (res) = true;
3118 DDR_ARE_DEPENDENT (res) = NULL_TREE;
3119 DDR_SUBSCRIPTS (res).create (full_seq.length);
3120 DDR_LOOP_NEST (res) = loop_nest;
3121 DDR_SELF_REFERENCE (res) = false;
3122
3123 for (i = 0; i < full_seq.length; ++i)
3124 {
3125 struct subscript *subscript;
3126
3127 subscript = XNEW (struct subscript);
3128 SUB_ACCESS_FN (subscript, 0) = DR_ACCESS_FN (a, full_seq.start_a + i);
3129 SUB_ACCESS_FN (subscript, 1) = DR_ACCESS_FN (b, full_seq.start_b + i);
3130 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known ();
3131 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
3132 SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
3133 SUB_DISTANCE (subscript) = chrec_dont_know;
3134 DDR_SUBSCRIPTS (res).safe_push (subscript);
3135 }
3136
3137 return res;
3138 }
3139
3140 /* Frees memory used by the conflict function F. */
3141
3142 static void
3143 free_conflict_function (conflict_function *f)
3144 {
3145 unsigned i;
3146
3147 if (CF_NONTRIVIAL_P (f))
3148 {
3149 for (i = 0; i < f->n; i++)
3150 affine_fn_free (f->fns[i]);
3151 }
3152 free (f);
3153 }
3154
3155 /* Frees memory used by SUBSCRIPTS. */
3156
3157 static void
3158 free_subscripts (vec<subscript_p> subscripts)
3159 {
3160 unsigned i;
3161 subscript_p s;
3162
3163 FOR_EACH_VEC_ELT (subscripts, i, s)
3164 {
3165 free_conflict_function (s->conflicting_iterations_in_a);
3166 free_conflict_function (s->conflicting_iterations_in_b);
3167 free (s);
3168 }
3169 subscripts.release ();
3170 }
3171
3172 /* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap
3173 description. */
3174
3175 static inline void
3176 finalize_ddr_dependent (struct data_dependence_relation *ddr,
3177 tree chrec)
3178 {
3179 DDR_ARE_DEPENDENT (ddr) = chrec;
3180 free_subscripts (DDR_SUBSCRIPTS (ddr));
3181 DDR_SUBSCRIPTS (ddr).create (0);
3182 }
3183
3184 /* The dependence relation DDR cannot be represented by a distance
3185 vector. */
3186
3187 static inline void
3188 non_affine_dependence_relation (struct data_dependence_relation *ddr)
3189 {
3190 if (dump_file && (dump_flags & TDF_DETAILS))
3191 fprintf (dump_file, "(Dependence relation cannot be represented by distance vector.) \n");
3192
3193 DDR_AFFINE_P (ddr) = false;
3194 }
3195
3196 \f
3197
3198 /* This section contains the classic Banerjee tests. */
3199
3200 /* Returns true iff CHREC_A and CHREC_B are not dependent on any index
3201 variables, i.e., if the ZIV (Zero Index Variable) test is true. */
3202
3203 static inline bool
3204 ziv_subscript_p (const_tree chrec_a, const_tree chrec_b)
3205 {
3206 return (evolution_function_is_constant_p (chrec_a)
3207 && evolution_function_is_constant_p (chrec_b));
3208 }
3209
3210 /* Returns true iff CHREC_A and CHREC_B are dependent on an index
3211 variable, i.e., if the SIV (Single Index Variable) test is true. */
3212
3213 static bool
3214 siv_subscript_p (const_tree chrec_a, const_tree chrec_b)
3215 {
3216 if ((evolution_function_is_constant_p (chrec_a)
3217 && evolution_function_is_univariate_p (chrec_b))
3218 || (evolution_function_is_constant_p (chrec_b)
3219 && evolution_function_is_univariate_p (chrec_a)))
3220 return true;
3221
3222 if (evolution_function_is_univariate_p (chrec_a)
3223 && evolution_function_is_univariate_p (chrec_b))
3224 {
3225 switch (TREE_CODE (chrec_a))
3226 {
3227 case POLYNOMIAL_CHREC:
3228 switch (TREE_CODE (chrec_b))
3229 {
3230 case POLYNOMIAL_CHREC:
3231 if (CHREC_VARIABLE (chrec_a) != CHREC_VARIABLE (chrec_b))
3232 return false;
3233 /* FALLTHRU */
3234
3235 default:
3236 return true;
3237 }
3238
3239 default:
3240 return true;
3241 }
3242 }
3243
3244 return false;
3245 }
3246
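/* Classification examples: a[5] versus a[10] gives two constant access
   functions and is a ZIV pair; a[i] versus a[i + 1] in loop 1 gives
   {0, +, 1}_1 and {1, +, 1}_1, an SIV pair; two univariate chrecs in
   different loops fail the CHREC_VARIABLE check above and are left to
   the MIV handling.  */
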
3247 /* Creates a conflict function with N dimensions. The affine functions
3248 in each dimension follow. */
3249
3250 static conflict_function *
3251 conflict_fn (unsigned n, ...)
3252 {
3253 unsigned i;
3254 conflict_function *ret = XCNEW (conflict_function);
3255 va_list ap;
3256
3257 gcc_assert (n > 0 && n <= MAX_DIM);
3258 va_start (ap, n);
3259
3260 ret->n = n;
3261 for (i = 0; i < n; i++)
3262 ret->fns[i] = va_arg (ap, affine_fn);
3263 va_end (ap);
3264
3265 return ret;
3266 }
3267
3268 /* Returns constant affine function with value CST. */
3269
3270 static affine_fn
3271 affine_fn_cst (tree cst)
3272 {
3273 affine_fn fn;
3274 fn.create (1);
3275 fn.quick_push (cst);
3276 return fn;
3277 }
3278
3279 /* Returns affine function with single variable, CST + COEF * x_DIM. */
3280
3281 static affine_fn
3282 affine_fn_univar (tree cst, unsigned dim, tree coef)
3283 {
3284 affine_fn fn;
3285 fn.create (dim + 1);
3286 unsigned i;
3287
3288 gcc_assert (dim > 0);
3289 fn.quick_push (cst);
3290 for (i = 1; i < dim; i++)
3291 fn.quick_push (integer_zero_node);
3292 fn.quick_push (coef);
3293 return fn;
3294 }
3295
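/* E.g. affine_fn_cst (integer_zero_node) builds {0}, the zero constant
   function, and affine_fn_univar (cst, 2, coef) builds {cst, 0, coef},
   i.e. cst + coef * x_2.  */
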
3296 /* Analyze a ZIV (Zero Index Variable) subscript. *OVERLAPS_A and
3297 *OVERLAPS_B are initialized to the functions that describe the
3298 relation between the elements accessed twice by CHREC_A and
3299 CHREC_B. For k >= 0, the following property is verified:
3300
3301 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
3302
3303 static void
3304 analyze_ziv_subscript (tree chrec_a,
3305 tree chrec_b,
3306 conflict_function **overlaps_a,
3307 conflict_function **overlaps_b,
3308 tree *last_conflicts)
3309 {
3310 tree type, difference;
3311 dependence_stats.num_ziv++;
3312
3313 if (dump_file && (dump_flags & TDF_DETAILS))
3314 fprintf (dump_file, "(analyze_ziv_subscript \n");
3315
3316 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
3317 chrec_a = chrec_convert (type, chrec_a, NULL);
3318 chrec_b = chrec_convert (type, chrec_b, NULL);
3319 difference = chrec_fold_minus (type, chrec_a, chrec_b);
3320
3321 switch (TREE_CODE (difference))
3322 {
3323 case INTEGER_CST:
3324 if (integer_zerop (difference))
3325 {
3326 /* The difference is equal to zero: the accessed index
3327 overlaps for each iteration in the loop. */
3328 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3329 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
3330 *last_conflicts = chrec_dont_know;
3331 dependence_stats.num_ziv_dependent++;
3332 }
3333 else
3334 {
3335 /* The accesses do not overlap. */
3336 *overlaps_a = conflict_fn_no_dependence ();
3337 *overlaps_b = conflict_fn_no_dependence ();
3338 *last_conflicts = integer_zero_node;
3339 dependence_stats.num_ziv_independent++;
3340 }
3341 break;
3342
3343 default:
3344 /* We're not sure whether the indexes overlap. For the moment,
3345 conservatively answer "don't know". */
3346 if (dump_file && (dump_flags & TDF_DETAILS))
3347 fprintf (dump_file, "ziv test failed: difference is non-integer.\n");
3348
3349 *overlaps_a = conflict_fn_not_known ();
3350 *overlaps_b = conflict_fn_not_known ();
3351 *last_conflicts = chrec_dont_know;
3352 dependence_stats.num_ziv_unimplemented++;
3353 break;
3354 }
3355
3356 if (dump_file && (dump_flags & TDF_DETAILS))
3357 fprintf (dump_file, ")\n");
3358 }
3359
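/* For example, a[5] versus a[5] gives difference 0: the element
   conflicts in every iteration, so both overlap functions are the
   constant 0 and *LAST_CONFLICTS is chrec_dont_know. a[5] versus a[7]
   gives a nonzero constant difference, so the accesses are
   independent.  */
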
3360 /* Similar to max_stmt_executions_int, but returns the bound as a tree,
3361 and only if it fits the unsigned int type. If this is not the case, or the
3362 bound on the number of iterations of LOOP could not be derived, returns
3363 chrec_dont_know. */
3364
3365 static tree
3366 max_stmt_executions_tree (class loop *loop)
3367 {
3368 widest_int nit;
3369
3370 if (!max_stmt_executions (loop, &nit))
3371 return chrec_dont_know;
3372
3373 if (!wi::fits_to_tree_p (nit, unsigned_type_node))
3374 return chrec_dont_know;
3375
3376 return wide_int_to_tree (unsigned_type_node, nit);
3377 }
3378
3379 /* Determine whether the CHREC is always positive/negative. If the expression
3380 cannot be statically analyzed, return false; otherwise store the answer
3381 in *VALUE. */
3382
3383 static bool
3384 chrec_is_positive (tree chrec, bool *value)
3385 {
3386 bool value0, value1, value2;
3387 tree end_value, nb_iter;
3388
3389 switch (TREE_CODE (chrec))
3390 {
3391 case POLYNOMIAL_CHREC:
3392 if (!chrec_is_positive (CHREC_LEFT (chrec), &value0)
3393 || !chrec_is_positive (CHREC_RIGHT (chrec), &value1))
3394 return false;
3395
3396 /* FIXME -- overflows. */
3397 if (value0 == value1)
3398 {
3399 *value = value0;
3400 return true;
3401 }
3402
3403 /* Otherwise the chrec is of the form "{-197, +, 2}_1",
3404 and the proof consists in showing that the sign never
3405 changes during the execution of the loop, from 0 to
3406 loop->nb_iterations. */
3407 if (!evolution_function_is_affine_p (chrec))
3408 return false;
3409
3410 nb_iter = number_of_latch_executions (get_chrec_loop (chrec));
3411 if (chrec_contains_undetermined (nb_iter))
3412 return false;
3413
3414 #if 0
3415 /* TODO -- If the test is after the exit, we may decrease the number of
3416 iterations by one. */
3417 if (after_exit)
3418 nb_iter = chrec_fold_minus (type, nb_iter, build_int_cst (type, 1));
3419 #endif
3420
3421 end_value = chrec_apply (CHREC_VARIABLE (chrec), chrec, nb_iter);
3422
3423 if (!chrec_is_positive (end_value, &value2))
3424 return false;
3425
3426 *value = value0;
3427 return value0 == value2;
3428
3429 case INTEGER_CST:
3430 switch (tree_int_cst_sgn (chrec))
3431 {
3432 case -1:
3433 *value = false;
3434 break;
3435 case 1:
3436 *value = true;
3437 break;
3438 default:
3439 return false;
3440 }
3441 return true;
3442
3443 default:
3444 return false;
3445 }
3446 }
3447
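/* Worked example: for {-197, +, 2}_1, value0 is false (negative start)
   and value1 is true (positive step), so the end value is evaluated;
   if the loop latch runs 50 times, the end value -197 + 2 * 50 = -97 is
   still negative, value2 == value0 holds, and the function returns true
   with *VALUE set to false.  */
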
3448
3449 /* Analyze a SIV (Single Index Variable) subscript where CHREC_A is a
3450 constant, and CHREC_B is an affine function. *OVERLAPS_A and
3451 *OVERLAPS_B are initialized to the functions that describe the
3452 relation between the elements accessed twice by CHREC_A and
3453 CHREC_B. For k >= 0, the following property is verified:
3454
3455 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
3456
3457 static void
3458 analyze_siv_subscript_cst_affine (tree chrec_a,
3459 tree chrec_b,
3460 conflict_function **overlaps_a,
3461 conflict_function **overlaps_b,
3462 tree *last_conflicts)
3463 {
3464 bool value0, value1, value2;
3465 tree type, difference, tmp;
3466
3467 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
3468 chrec_a = chrec_convert (type, chrec_a, NULL);
3469 chrec_b = chrec_convert (type, chrec_b, NULL);
3470 difference = chrec_fold_minus (type, initial_condition (chrec_b), chrec_a);
3471
3472 /* Special case overlap in the first iteration. */
3473 if (integer_zerop (difference))
3474 {
3475 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3476 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
3477 *last_conflicts = integer_one_node;
3478 return;
3479 }
3480
3481 if (!chrec_is_positive (initial_condition (difference), &value0))
3482 {
3483 if (dump_file && (dump_flags & TDF_DETAILS))
3484 fprintf (dump_file, "siv test failed: chrec is not positive.\n");
3485
3486 dependence_stats.num_siv_unimplemented++;
3487 *overlaps_a = conflict_fn_not_known ();
3488 *overlaps_b = conflict_fn_not_known ();
3489 *last_conflicts = chrec_dont_know;
3490 return;
3491 }
3492 else
3493 {
3494 if (value0 == false)
3495 {
3496 if (TREE_CODE (chrec_b) != POLYNOMIAL_CHREC
3497 || !chrec_is_positive (CHREC_RIGHT (chrec_b), &value1))
3498 {
3499 if (dump_file && (dump_flags & TDF_DETAILS))
3500 fprintf (dump_file, "siv test failed: chrec not positive.\n");
3501
3502 *overlaps_a = conflict_fn_not_known ();
3503 *overlaps_b = conflict_fn_not_known ();
3504 *last_conflicts = chrec_dont_know;
3505 dependence_stats.num_siv_unimplemented++;
3506 return;
3507 }
3508 else
3509 {
3510 if (value1 == true)
3511 {
3512 /* Example:
3513 chrec_a = 12
3514 chrec_b = {10, +, 1}
3515 */
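/* Here difference = 10 - 12 = -2; the step 1 divides the absolute
difference 2, so the conflict is at iteration 2 / 1 = 2 of chrec_b,
where 10 + 2 * 1 = 12. */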
3516
3517 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
3518 {
3519 HOST_WIDE_INT numiter;
3520 class loop *loop = get_chrec_loop (chrec_b);
3521
3522 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3523 tmp = fold_build2 (EXACT_DIV_EXPR, type,
3524 fold_build1 (ABS_EXPR, type, difference),
3525 CHREC_RIGHT (chrec_b));
3526 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
3527 *last_conflicts = integer_one_node;
3528
3529
3530 /* Perform the weak-zero SIV test to see if the overlap
3531 is outside the loop bounds. */
3532 numiter = max_stmt_executions_int (loop);
3533
3534 if (numiter >= 0
3535 && compare_tree_int (tmp, numiter) > 0)
3536 {
3537 free_conflict_function (*overlaps_a);
3538 free_conflict_function (*overlaps_b);
3539 *overlaps_a = conflict_fn_no_dependence ();
3540 *overlaps_b = conflict_fn_no_dependence ();
3541 *last_conflicts = integer_zero_node;
3542 dependence_stats.num_siv_independent++;
3543 return;
3544 }
3545 dependence_stats.num_siv_dependent++;
3546 return;
3547 }
3548
3549 /* When the step does not divide the difference, there are
3550 no overlaps. */
3551 else
3552 {
3553 *overlaps_a = conflict_fn_no_dependence ();
3554 *overlaps_b = conflict_fn_no_dependence ();
3555 *last_conflicts = integer_zero_node;
3556 dependence_stats.num_siv_independent++;
3557 return;
3558 }
3559 }
3560
3561 else
3562 {
3563 /* Example:
3564 chrec_a = 12
3565 chrec_b = {10, +, -1}
3566
3567 In this case, chrec_a will not overlap with chrec_b. */
3568 *overlaps_a = conflict_fn_no_dependence ();
3569 *overlaps_b = conflict_fn_no_dependence ();
3570 *last_conflicts = integer_zero_node;
3571 dependence_stats.num_siv_independent++;
3572 return;
3573 }
3574 }
3575 }
3576 else
3577 {
3578 if (TREE_CODE (chrec_b) != POLYNOMIAL_CHREC
3579 || !chrec_is_positive (CHREC_RIGHT (chrec_b), &value2))
3580 {
3581 if (dump_file && (dump_flags & TDF_DETAILS))
3582 fprintf (dump_file, "siv test failed: chrec not positive.\n");
3583
3584 *overlaps_a = conflict_fn_not_known ();
3585 *overlaps_b = conflict_fn_not_known ();
3586 *last_conflicts = chrec_dont_know;
3587 dependence_stats.num_siv_unimplemented++;
3588 return;
3589 }
3590 else
3591 {
3592 if (value2 == false)
3593 {
3594 /* Example:
3595 chrec_a = 3
3596 chrec_b = {10, +, -1}
3597 */
3598 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
3599 {
3600 HOST_WIDE_INT numiter;
3601 class loop *loop = get_chrec_loop (chrec_b);
3602
3603 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3604 tmp = fold_build2 (EXACT_DIV_EXPR, type, difference,
3605 CHREC_RIGHT (chrec_b));
3606 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
3607 *last_conflicts = integer_one_node;
3608
3609 /* Perform the weak-zero SIV test to see if the overlap
3610 is outside the loop bounds. */
3611 numiter = max_stmt_executions_int (loop);
3612
3613 if (numiter >= 0
3614 && compare_tree_int (tmp, numiter) > 0)
3615 {
3616 free_conflict_function (*overlaps_a);
3617 free_conflict_function (*overlaps_b);
3618 *overlaps_a = conflict_fn_no_dependence ();
3619 *overlaps_b = conflict_fn_no_dependence ();
3620 *last_conflicts = integer_zero_node;
3621 dependence_stats.num_siv_independent++;
3622 return;
3623 }
3624 dependence_stats.num_siv_dependent++;
3625 return;
3626 }
3627
3628 /* When the step does not divide the difference, there
3629 are no overlaps. */
3630 else
3631 {
3632 *overlaps_a = conflict_fn_no_dependence ();
3633 *overlaps_b = conflict_fn_no_dependence ();
3634 *last_conflicts = integer_zero_node;
3635 dependence_stats.num_siv_independent++;
3636 return;
3637 }
3638 }
3639 else
3640 {
3641 /* Example:
3642 chrec_a = 3
3643 chrec_b = {4, +, 1}
3644
3645 In this case, chrec_a will not overlap with chrec_b. */
3646 *overlaps_a = conflict_fn_no_dependence ();
3647 *overlaps_b = conflict_fn_no_dependence ();
3648 *last_conflicts = integer_zero_node;
3649 dependence_stats.num_siv_independent++;
3650 return;
3651 }
3652 }
3653 }
3654 }
3655 }
3656
3657 /* Recursive helper function for initializing the matrix A. Returns
3658 the initial value of CHREC. */
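/* For example, for the chrec "{{3, +, 4}_1, +, 5}_2" called with
INDEX = 0 and MULT = 1, this stores A[0][0] = 5 and A[1][0] = 4 and
returns the initial value 3. */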
3659
3660 static tree
3661 initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult)
3662 {
3663 gcc_assert (chrec);
3664
3665 switch (TREE_CODE (chrec))
3666 {
3667 case POLYNOMIAL_CHREC:
3668 if (!cst_and_fits_in_hwi (CHREC_RIGHT (chrec)))
3669 return chrec_dont_know;
3670 A[index][0] = mult * int_cst_value (CHREC_RIGHT (chrec));
3671 return initialize_matrix_A (A, CHREC_LEFT (chrec), index + 1, mult);
3672
3673 case PLUS_EXPR:
3674 case MULT_EXPR:
3675 case MINUS_EXPR:
3676 {
3677 tree op0 = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
3678 tree op1 = initialize_matrix_A (A, TREE_OPERAND (chrec, 1), index, mult);
3679
3680 return chrec_fold_op (TREE_CODE (chrec), chrec_type (chrec), op0, op1);
3681 }
3682
3683 CASE_CONVERT:
3684 {
3685 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
3686 return chrec_convert (chrec_type (chrec), op, NULL);
3687 }
3688
3689 case BIT_NOT_EXPR:
3690 {
3691 /* Handle ~X as -1 - X. */
3692 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
3693 return chrec_fold_op (MINUS_EXPR, chrec_type (chrec),
3694 build_int_cst (TREE_TYPE (chrec), -1), op);
3695 }
3696
3697 case INTEGER_CST:
3698 return chrec;
3699
3700 default:
3701 gcc_unreachable ();
3702 return NULL_TREE;
3703 }
3704 }
3705
3706 #define FLOOR_DIV(x,y) ((x) / (y))
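/* Note that this is plain C integer division, which truncates toward
zero; it coincides with mathematical floor division for nonnegative
operands. */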
3707
3708 /* Solves the special case of the Diophantine equation:
3709 | {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B)
3710
3711 Computes the descriptions OVERLAPS_A and OVERLAPS_B. NITER is the
3712 number of iterations that loops X and Y run. The overlaps will be
3713 constructed as evolutions in dimension DIM. */
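/* For instance (an illustrative choice of values): with STEP_A = 2
and STEP_B = 6, the gcd is 2, so OVERLAPS_A evolves with step
6 / 2 = 3 and OVERLAPS_B with step 2 / 2 = 1:
| {0, +, 2} (3 * t) = 6 * t = {0, +, 6} (1 * t)
and with NITER = 100 the last conflict is MIN (100/3, 100/1) = 33. */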
3714
3715 static void
3716 compute_overlap_steps_for_affine_univar (HOST_WIDE_INT niter,
3717 HOST_WIDE_INT step_a,
3718 HOST_WIDE_INT step_b,
3719 affine_fn *overlaps_a,
3720 affine_fn *overlaps_b,
3721 tree *last_conflicts, int dim)
3722 {
3723 if (((step_a > 0 && step_b > 0)
3724 || (step_a < 0 && step_b < 0)))
3725 {
3726 HOST_WIDE_INT step_overlaps_a, step_overlaps_b;
3727 HOST_WIDE_INT gcd_steps_a_b, last_conflict, tau2;
3728
3729 gcd_steps_a_b = gcd (step_a, step_b);
3730 step_overlaps_a = step_b / gcd_steps_a_b;
3731 step_overlaps_b = step_a / gcd_steps_a_b;
3732
3733 if (niter > 0)
3734 {
3735 tau2 = FLOOR_DIV (niter, step_overlaps_a);
3736 tau2 = MIN (tau2, FLOOR_DIV (niter, step_overlaps_b));
3737 last_conflict = tau2;
3738 *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
3739 }
3740 else
3741 *last_conflicts = chrec_dont_know;
3742
3743 *overlaps_a = affine_fn_univar (integer_zero_node, dim,
3744 build_int_cst (NULL_TREE,
3745 step_overlaps_a));
3746 *overlaps_b = affine_fn_univar (integer_zero_node, dim,
3747 build_int_cst (NULL_TREE,
3748 step_overlaps_b));
3749 }
3750
3751 else
3752 {
3753 *overlaps_a = affine_fn_cst (integer_zero_node);
3754 *overlaps_b = affine_fn_cst (integer_zero_node);
3755 *last_conflicts = integer_zero_node;
3756 }
3757 }
3758
3759 /* Solves the special case of a Diophantine equation where CHREC_A is
3760 an affine bivariate function, and CHREC_B is an affine univariate
3761 function. For example,
3762
3763 | {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z
3764
3765 has the following overlapping functions:
3766
3767 | x (t, u, v) = {{0, +, 1336}_t, +, 1}_v
3768 | y (t, u, v) = {{0, +, 1336}_u, +, 1}_v
3769 | z (t, u, v) = {{{0, +, 1}_t, +, 1335}_u, +, 1}_v
3770
3771 FORNOW: This is a specialized implementation for a case occurring in
3772 a common benchmark. Implement the general algorithm. */
3773
3774 static void
3775 compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
3776 conflict_function **overlaps_a,
3777 conflict_function **overlaps_b,
3778 tree *last_conflicts)
3779 {
3780 bool xz_p, yz_p, xyz_p;
3781 HOST_WIDE_INT step_x, step_y, step_z;
3782 HOST_WIDE_INT niter_x, niter_y, niter_z, niter;
3783 affine_fn overlaps_a_xz, overlaps_b_xz;
3784 affine_fn overlaps_a_yz, overlaps_b_yz;
3785 affine_fn overlaps_a_xyz, overlaps_b_xyz;
3786 affine_fn ova1, ova2, ovb;
3787 tree last_conflicts_xz, last_conflicts_yz, last_conflicts_xyz;
3788
3789 step_x = int_cst_value (CHREC_RIGHT (CHREC_LEFT (chrec_a)));
3790 step_y = int_cst_value (CHREC_RIGHT (chrec_a));
3791 step_z = int_cst_value (CHREC_RIGHT (chrec_b));
3792
3793 niter_x = max_stmt_executions_int (get_chrec_loop (CHREC_LEFT (chrec_a)));
3794 niter_y = max_stmt_executions_int (get_chrec_loop (chrec_a));
3795 niter_z = max_stmt_executions_int (get_chrec_loop (chrec_b));
3796
3797 if (niter_x < 0 || niter_y < 0 || niter_z < 0)
3798 {
3799 if (dump_file && (dump_flags & TDF_DETAILS))
3800 fprintf (dump_file, "overlap steps test failed: no iteration counts.\n");
3801
3802 *overlaps_a = conflict_fn_not_known ();
3803 *overlaps_b = conflict_fn_not_known ();
3804 *last_conflicts = chrec_dont_know;
3805 return;
3806 }
3807
3808 niter = MIN (niter_x, niter_z);
3809 compute_overlap_steps_for_affine_univar (niter, step_x, step_z,
3810 &overlaps_a_xz,
3811 &overlaps_b_xz,
3812 &last_conflicts_xz, 1);
3813 niter = MIN (niter_y, niter_z);
3814 compute_overlap_steps_for_affine_univar (niter, step_y, step_z,
3815 &overlaps_a_yz,
3816 &overlaps_b_yz,
3817 &last_conflicts_yz, 2);
3818 niter = MIN (niter_x, niter_z);
3819 niter = MIN (niter_y, niter);
3820 compute_overlap_steps_for_affine_univar (niter, step_x + step_y, step_z,
3821 &overlaps_a_xyz,
3822 &overlaps_b_xyz,
3823 &last_conflicts_xyz, 3);
3824
3825 xz_p = !integer_zerop (last_conflicts_xz);
3826 yz_p = !integer_zerop (last_conflicts_yz);
3827 xyz_p = !integer_zerop (last_conflicts_xyz);
3828
3829 if (xz_p || yz_p || xyz_p)
3830 {
3831 ova1 = affine_fn_cst (integer_zero_node);
3832 ova2 = affine_fn_cst (integer_zero_node);
3833 ovb = affine_fn_cst (integer_zero_node);
3834 if (xz_p)
3835 {
3836 affine_fn t0 = ova1;
3837 affine_fn t2 = ovb;
3838
3839 ova1 = affine_fn_plus (ova1, overlaps_a_xz);
3840 ovb = affine_fn_plus (ovb, overlaps_b_xz);
3841 affine_fn_free (t0);
3842 affine_fn_free (t2);
3843 *last_conflicts = last_conflicts_xz;
3844 }
3845 if (yz_p)
3846 {
3847 affine_fn t0 = ova2;
3848 affine_fn t2 = ovb;
3849
3850 ova2 = affine_fn_plus (ova2, overlaps_a_yz);
3851 ovb = affine_fn_plus (ovb, overlaps_b_yz);
3852 affine_fn_free (t0);
3853 affine_fn_free (t2);
3854 *last_conflicts = last_conflicts_yz;
3855 }
3856 if (xyz_p)
3857 {
3858 affine_fn t0 = ova1;
3859 affine_fn t2 = ova2;
3860 affine_fn t4 = ovb;
3861
3862 ova1 = affine_fn_plus (ova1, overlaps_a_xyz);
3863 ova2 = affine_fn_plus (ova2, overlaps_a_xyz);
3864 ovb = affine_fn_plus (ovb, overlaps_b_xyz);
3865 affine_fn_free (t0);
3866 affine_fn_free (t2);
3867 affine_fn_free (t4);
3868 *last_conflicts = last_conflicts_xyz;
3869 }
3870 *overlaps_a = conflict_fn (2, ova1, ova2);
3871 *overlaps_b = conflict_fn (1, ovb);
3872 }
3873 else
3874 {
3875 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3876 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
3877 *last_conflicts = integer_zero_node;
3878 }
3879
3880 affine_fn_free (overlaps_a_xz);
3881 affine_fn_free (overlaps_b_xz);
3882 affine_fn_free (overlaps_a_yz);
3883 affine_fn_free (overlaps_b_yz);
3884 affine_fn_free (overlaps_a_xyz);
3885 affine_fn_free (overlaps_b_xyz);
3886 }
3887
3888 /* Copy the elements of vector VEC1 with length SIZE to VEC2. */
3889
3890 static void
3891 lambda_vector_copy (lambda_vector vec1, lambda_vector vec2,
3892 int size)
3893 {
3894 memcpy (vec2, vec1, size * sizeof (*vec1));
3895 }
3896
3897 /* Copy the elements of M x N matrix MAT1 to MAT2. */
3898
3899 static void
3900 lambda_matrix_copy (lambda_matrix mat1, lambda_matrix mat2,
3901 int m, int n)
3902 {
3903 int i;
3904
3905 for (i = 0; i < m; i++)
3906 lambda_vector_copy (mat1[i], mat2[i], n);
3907 }
3908
3909 /* Store the SIZE x SIZE identity matrix in MAT. */
3910
3911 static void
3912 lambda_matrix_id (lambda_matrix mat, int size)
3913 {
3914 int i, j;
3915
3916 for (i = 0; i < size; i++)
3917 for (j = 0; j < size; j++)
3918 mat[i][j] = (i == j) ? 1 : 0;
3919 }
3920
3921 /* Return the index of the first nonzero element of vector VEC1 between
3922 START and N. We must have START <= N.
3923 Returns N if there is no nonzero element in that range. */
3924
3925 static int
3926 lambda_vector_first_nz (lambda_vector vec1, int n, int start)
3927 {
3928 int j = start;
3929 while (j < n && vec1[j] == 0)
3930 j++;
3931 return j;
3932 }
3933
3934 /* Add a multiple of row R1 of matrix MAT with N columns to row R2:
3935 R2 = R2 + CONST1 * R1. */
3936
3937 static void
3938 lambda_matrix_row_add (lambda_matrix mat, int n, int r1, int r2,
3939 lambda_int const1)
3940 {
3941 int i;
3942
3943 if (const1 == 0)
3944 return;
3945
3946 for (i = 0; i < n; i++)
3947 mat[r2][i] += const1 * mat[r1][i];
3948 }
3949
3950 /* Multiply vector VEC1 of length SIZE by a constant CONST1,
3951 and store the result in VEC2. */
3952
3953 static void
3954 lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2,
3955 int size, lambda_int const1)
3956 {
3957 int i;
3958
3959 if (const1 == 0)
3960 lambda_vector_clear (vec2, size);
3961 else
3962 for (i = 0; i < size; i++)
3963 vec2[i] = const1 * vec1[i];
3964 }
3965
3966 /* Negate vector VEC1 with length SIZE and store it in VEC2. */
3967
3968 static void
3969 lambda_vector_negate (lambda_vector vec1, lambda_vector vec2,
3970 int size)
3971 {
3972 lambda_vector_mult_const (vec1, vec2, size, -1);
3973 }
3974
3975 /* Negate row R1 of matrix MAT which has N columns. */
3976
3977 static void
3978 lambda_matrix_row_negate (lambda_matrix mat, int n, int r1)
3979 {
3980 lambda_vector_negate (mat[r1], mat[r1], n);
3981 }
3982
3983 /* Return true if two vectors are equal. */
3984
3985 static bool
3986 lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size)
3987 {
3988 int i;
3989 for (i = 0; i < size; i++)
3990 if (vec1[i] != vec2[i])
3991 return false;
3992 return true;
3993 }
3994
3995 /* Given an M x N integer matrix A, this function determines an M x
3996 M unimodular matrix U, and an M x N echelon matrix S such that
3997 "U.A = S". This decomposition is also known as "right Hermite".
3998
3999 Ref: Algorithm 2.1 page 33 in "Loop Transformations for
4000 Restructuring Compilers" Utpal Banerjee. */
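/* A small instance worked by hand for illustration: for
A = [[4], [-6]] the elimination yields
| U = [[1, 1], [3, 2]], S = [[-2], [0]],
where U is unimodular (det U = -1) and U.A = S. The caller
normalizes the sign of S[0][0] afterwards. */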
4001
4002 static void
4003 lambda_matrix_right_hermite (lambda_matrix A, int m, int n,
4004 lambda_matrix S, lambda_matrix U)
4005 {
4006 int i, j, i0 = 0;
4007
4008 lambda_matrix_copy (A, S, m, n);
4009 lambda_matrix_id (U, m);
4010
4011 for (j = 0; j < n; j++)
4012 {
4013 if (lambda_vector_first_nz (S[j], m, i0) < m)
4014 {
4015 ++i0;
4016 for (i = m - 1; i >= i0; i--)
4017 {
4018 while (S[i][j] != 0)
4019 {
4020 lambda_int sigma, factor, a, b;
4021
4022 a = S[i-1][j];
4023 b = S[i][j];
4024 sigma = (a * b < 0) ? -1: 1;
4025 a = abs_hwi (a);
4026 b = abs_hwi (b);
4027 factor = sigma * (a / b);
4028
4029 lambda_matrix_row_add (S, n, i, i-1, -factor);
4030 std::swap (S[i], S[i-1]);
4031
4032 lambda_matrix_row_add (U, m, i, i-1, -factor);
4033 std::swap (U[i], U[i-1]);
4034 }
4035 }
4036 }
4037 }
4038 }
4039
4040 /* Determines the overlapping elements due to accesses CHREC_A and
4041 CHREC_B, which are affine functions. This function cannot handle
4042 symbolic evolution functions, i.e. when the initial conditions are
4043 parameters, because it uses lambda matrices of integers. */
4044
4045 static void
4046 analyze_subscript_affine_affine (tree chrec_a,
4047 tree chrec_b,
4048 conflict_function **overlaps_a,
4049 conflict_function **overlaps_b,
4050 tree *last_conflicts)
4051 {
4052 unsigned nb_vars_a, nb_vars_b, dim;
4053 HOST_WIDE_INT gamma, gcd_alpha_beta;
4054 lambda_matrix A, U, S;
4055 struct obstack scratch_obstack;
4056
4057 if (eq_evolutions_p (chrec_a, chrec_b))
4058 {
4059 /* The accessed index overlaps for each iteration in the
4060 loop. */
4061 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4062 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4063 *last_conflicts = chrec_dont_know;
4064 return;
4065 }
4066 if (dump_file && (dump_flags & TDF_DETAILS))
4067 fprintf (dump_file, "(analyze_subscript_affine_affine \n");
4068
4069 /* For determining the initial intersection, we have to solve a
4070 Diophantine equation. This is the most time consuming part.
4071
4072 To answer the question "Is there a dependence?" we have to
4073 prove that there exists a solution to the Diophantine equation,
4074 that the solution is in the iteration domain, i.e. the solution
4075 is nonnegative, and that the solution happens before the upper
4076 bound loop.nb_iterations. Otherwise there is no dependence.
4077 This function outputs a description of the iterations that hold
4078 the intersections. */
4079
4080 nb_vars_a = nb_vars_in_chrec (chrec_a);
4081 nb_vars_b = nb_vars_in_chrec (chrec_b);
4082
4083 gcc_obstack_init (&scratch_obstack);
4084
4085 dim = nb_vars_a + nb_vars_b;
4086 U = lambda_matrix_new (dim, dim, &scratch_obstack);
4087 A = lambda_matrix_new (dim, 1, &scratch_obstack);
4088 S = lambda_matrix_new (dim, 1, &scratch_obstack);
4089
4090 tree init_a = initialize_matrix_A (A, chrec_a, 0, 1);
4091 tree init_b = initialize_matrix_A (A, chrec_b, nb_vars_a, -1);
4092 if (init_a == chrec_dont_know
4093 || init_b == chrec_dont_know)
4094 {
4095 if (dump_file && (dump_flags & TDF_DETAILS))
4096 fprintf (dump_file, "affine-affine test failed: "
4097 "representation issue.\n");
4098 *overlaps_a = conflict_fn_not_known ();
4099 *overlaps_b = conflict_fn_not_known ();
4100 *last_conflicts = chrec_dont_know;
4101 goto end_analyze_subs_aa;
4102 }
4103 gamma = int_cst_value (init_b) - int_cst_value (init_a);
4104
4105 /* Don't do all the hard work of solving the Diophantine equation
4106 when we already know the solution: for example,
4107 | {3, +, 1}_1
4108 | {3, +, 4}_2
4109 | gamma = 3 - 3 = 0.
4110 Then the first overlap occurs during the first iterations:
4111 | {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x)
4112 */
4113 if (gamma == 0)
4114 {
4115 if (nb_vars_a == 1 && nb_vars_b == 1)
4116 {
4117 HOST_WIDE_INT step_a, step_b;
4118 HOST_WIDE_INT niter, niter_a, niter_b;
4119 affine_fn ova, ovb;
4120
4121 niter_a = max_stmt_executions_int (get_chrec_loop (chrec_a));
4122 niter_b = max_stmt_executions_int (get_chrec_loop (chrec_b));
4123 niter = MIN (niter_a, niter_b);
4124 step_a = int_cst_value (CHREC_RIGHT (chrec_a));
4125 step_b = int_cst_value (CHREC_RIGHT (chrec_b));
4126
4127 compute_overlap_steps_for_affine_univar (niter, step_a, step_b,
4128 &ova, &ovb,
4129 last_conflicts, 1);
4130 *overlaps_a = conflict_fn (1, ova);
4131 *overlaps_b = conflict_fn (1, ovb);
4132 }
4133
4134 else if (nb_vars_a == 2 && nb_vars_b == 1)
4135 compute_overlap_steps_for_affine_1_2
4136 (chrec_a, chrec_b, overlaps_a, overlaps_b, last_conflicts);
4137
4138 else if (nb_vars_a == 1 && nb_vars_b == 2)
4139 compute_overlap_steps_for_affine_1_2
4140 (chrec_b, chrec_a, overlaps_b, overlaps_a, last_conflicts);
4141
4142 else
4143 {
4144 if (dump_file && (dump_flags & TDF_DETAILS))
4145 fprintf (dump_file, "affine-affine test failed: too many variables.\n");
4146 *overlaps_a = conflict_fn_not_known ();
4147 *overlaps_b = conflict_fn_not_known ();
4148 *last_conflicts = chrec_dont_know;
4149 }
4150 goto end_analyze_subs_aa;
4151 }
4152
4153 /* U.A = S */
4154 lambda_matrix_right_hermite (A, dim, 1, S, U);
4155
4156 if (S[0][0] < 0)
4157 {
4158 S[0][0] *= -1;
4159 lambda_matrix_row_negate (U, dim, 0);
4160 }
4161 gcd_alpha_beta = S[0][0];
4162
4163 /* Something went wrong: for example in {1, +, 0}_5 vs. {0, +, 0}_5,
4164 but that is quite a strange case. Instead of ICEing, answer
4165 "don't know". */
4166 if (gcd_alpha_beta == 0)
4167 {
4168 *overlaps_a = conflict_fn_not_known ();
4169 *overlaps_b = conflict_fn_not_known ();
4170 *last_conflicts = chrec_dont_know;
4171 goto end_analyze_subs_aa;
4172 }
4173
4174 /* The classic "gcd-test". */
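/* For an illustrative pair, {0, +, 4}_1 vs. {1, +, 6}_1 gives
gamma = 1 and gcd_alpha_beta = 2; since 2 does not divide 1, the
equation 4x - 6y = 1 has no integer solution and the accesses are
independent. */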
4175 if (!int_divides_p (gcd_alpha_beta, gamma))
4176 {
4177 /* The "gcd-test" has determined that there is no integer
4178 solution, i.e. there is no dependence. */
4179 *overlaps_a = conflict_fn_no_dependence ();
4180 *overlaps_b = conflict_fn_no_dependence ();
4181 *last_conflicts = integer_zero_node;
4182 }
4183
4184 /* Both access functions are univariate. This includes SIV and MIV cases. */
4185 else if (nb_vars_a == 1 && nb_vars_b == 1)
4186 {
4187 /* Both functions should have the same evolution sign. */
4188 if (((A[0][0] > 0 && -A[1][0] > 0)
4189 || (A[0][0] < 0 && -A[1][0] < 0)))
4190 {
4191 /* The solutions are given by:
4192 |
4193 | [GAMMA/GCD_ALPHA_BETA t].[u11 u12] = [x0]
4194 | [u21 u22] [y0]
4195
4196 For a given integer t. Using the following variables,
4197
4198 | i0 = u11 * gamma / gcd_alpha_beta
4199 | j0 = u12 * gamma / gcd_alpha_beta
4200 | i1 = u21
4201 | j1 = u22
4202
4203 the solutions are:
4204
4205 | x0 = i0 + i1 * t,
4206 | y0 = j0 + j1 * t. */
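/* A hand-computed instance for illustration: for
chrec_a = {3, +, 4}_1 and chrec_b = {1, +, 6}_1, we have gamma = -2,
gcd_alpha_beta = 2 and, after the sign normalization above,
U = [[-1, -1], [3, 2]], hence
| i0 = 1, j0 = 1, i1 = 3, j1 = 2
and the conflicts are x0 = 1 + 3t, y0 = 1 + 2t:
| chrec_a (1 + 3t) = 7 + 12t = chrec_b (1 + 2t). */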
4207 HOST_WIDE_INT i0, j0, i1, j1;
4208
4209 i0 = U[0][0] * gamma / gcd_alpha_beta;
4210 j0 = U[0][1] * gamma / gcd_alpha_beta;
4211 i1 = U[1][0];
4212 j1 = U[1][1];
4213
4214 if ((i1 == 0 && i0 < 0)
4215 || (j1 == 0 && j0 < 0))
4216 {
4217 /* There is no solution.
4218 FIXME: The case "i0 > nb_iterations, j0 > nb_iterations"
4219 falls in here, but for the moment we don't look at the
4220 upper bound of the iteration domain. */
4221 *overlaps_a = conflict_fn_no_dependence ();
4222 *overlaps_b = conflict_fn_no_dependence ();
4223 *last_conflicts = integer_zero_node;
4224 goto end_analyze_subs_aa;
4225 }
4226
4227 if (i1 > 0 && j1 > 0)
4228 {
4229 HOST_WIDE_INT niter_a
4230 = max_stmt_executions_int (get_chrec_loop (chrec_a));
4231 HOST_WIDE_INT niter_b
4232 = max_stmt_executions_int (get_chrec_loop (chrec_b));
4233 HOST_WIDE_INT niter = MIN (niter_a, niter_b);
4234
4235 /* (X0, Y0) is a solution of the Diophantine equation:
4236 "chrec_a (X0) = chrec_b (Y0)". */
4237 HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1),
4238 CEIL (-j0, j1));
4239 HOST_WIDE_INT x0 = i1 * tau1 + i0;
4240 HOST_WIDE_INT y0 = j1 * tau1 + j0;
4241
4242 /* (X1, Y1) is the smallest positive solution of the eq
4243 "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the
4244 first conflict occurs. */
4245 HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1);
4246 HOST_WIDE_INT x1 = x0 - i1 * min_multiple;
4247 HOST_WIDE_INT y1 = y0 - j1 * min_multiple;
4248
4249 if (niter > 0)
4250 {
4251 /* If the overlap occurs outside of the bounds of the
4252 loop, there is no dependence. */
4253 if (x1 >= niter_a || y1 >= niter_b)
4254 {
4255 *overlaps_a = conflict_fn_no_dependence ();
4256 *overlaps_b = conflict_fn_no_dependence ();
4257 *last_conflicts = integer_zero_node;
4258 goto end_analyze_subs_aa;
4259 }
4260
4261 /* max stmt executions can get quite large, avoid
4262 overflows by using wide ints here. */
4263 widest_int tau2
4264 = wi::smin (wi::sdiv_floor (wi::sub (niter_a, i0), i1),
4265 wi::sdiv_floor (wi::sub (niter_b, j0), j1));
4266 widest_int last_conflict = wi::sub (tau2, (x1 - i0)/i1);
4267 if (wi::min_precision (last_conflict, SIGNED)
4268 <= TYPE_PRECISION (integer_type_node))
4269 *last_conflicts
4270 = build_int_cst (integer_type_node,
4271 last_conflict.to_shwi ());
4272 else
4273 *last_conflicts = chrec_dont_know;
4274 }
4275 else
4276 *last_conflicts = chrec_dont_know;
4277
4278 *overlaps_a
4279 = conflict_fn (1,
4280 affine_fn_univar (build_int_cst (NULL_TREE, x1),
4281 1,
4282 build_int_cst (NULL_TREE, i1)));
4283 *overlaps_b
4284 = conflict_fn (1,
4285 affine_fn_univar (build_int_cst (NULL_TREE, y1),
4286 1,
4287 build_int_cst (NULL_TREE, j1)));
4288 }
4289 else
4290 {
4291 /* FIXME: For the moment, the upper bound of the
4292 iteration domain for i and j is not checked. */
4293 if (dump_file && (dump_flags & TDF_DETAILS))
4294 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
4295 *overlaps_a = conflict_fn_not_known ();
4296 *overlaps_b = conflict_fn_not_known ();
4297 *last_conflicts = chrec_dont_know;
4298 }
4299 }
4300 else
4301 {
4302 if (dump_file && (dump_flags & TDF_DETAILS))
4303 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
4304 *overlaps_a = conflict_fn_not_known ();
4305 *overlaps_b = conflict_fn_not_known ();
4306 *last_conflicts = chrec_dont_know;
4307 }
4308 }
4309 else
4310 {
4311 if (dump_file && (dump_flags & TDF_DETAILS))
4312 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
4313 *overlaps_a = conflict_fn_not_known ();
4314 *overlaps_b = conflict_fn_not_known ();
4315 *last_conflicts = chrec_dont_know;
4316 }
4317
4318 end_analyze_subs_aa:
4319 obstack_free (&scratch_obstack, NULL);
4320 if (dump_file && (dump_flags & TDF_DETAILS))
4321 {
4322 fprintf (dump_file, " (overlaps_a = ");
4323 dump_conflict_function (dump_file, *overlaps_a);
4324 fprintf (dump_file, ")\n (overlaps_b = ");
4325 dump_conflict_function (dump_file, *overlaps_b);
4326 fprintf (dump_file, "))\n");
4327 }
4328 }
4329
4330 /* Returns true when analyze_subscript_affine_affine can be used for
4331 determining the dependence relation between chrec_a and chrec_b,
4332 which contain symbols. This function modifies chrec_a and chrec_b
4333 such that the analysis result is the same and they no longer
4334 contain symbols, so that they can safely be passed to the analyzer.
4335
4336 Example: The analysis of the following tuples of evolutions produce
4337 the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1
4338 vs. {0, +, 1}_1
4339
4340 {x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1)
4341 {-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1)
4342 */
4343
4344 static bool
4345 can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b)
4346 {
4347 tree diff, type, left_a, left_b, right_b;
4348
4349 if (chrec_contains_symbols (CHREC_RIGHT (*chrec_a))
4350 || chrec_contains_symbols (CHREC_RIGHT (*chrec_b)))
4351 /* FIXME: For the moment not handled. Might be refined later. */
4352 return false;
4353
4354 type = chrec_type (*chrec_a);
4355 left_a = CHREC_LEFT (*chrec_a);
4356 left_b = chrec_convert (type, CHREC_LEFT (*chrec_b), NULL);
4357 diff = chrec_fold_minus (type, left_a, left_b);
4358
4359 if (!evolution_function_is_constant_p (diff))
4360 return false;
4361
4362 if (dump_file && (dump_flags & TDF_DETAILS))
4363 fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n");
4364
4365 *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a),
4366 diff, CHREC_RIGHT (*chrec_a));
4367 right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL);
4368 *chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b),
4369 build_int_cst (type, 0),
4370 right_b);
4371 return true;
4372 }
4373
4374 /* Analyze a SIV (Single Index Variable) subscript. *OVERLAPS_A and
4375 *OVERLAPS_B are initialized to the functions that describe the
4376 relation between the elements accessed twice by CHREC_A and
4377 CHREC_B. For k >= 0, the following property is verified:
4378
4379 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
4380
4381 static void
4382 analyze_siv_subscript (tree chrec_a,
4383 tree chrec_b,
4384 conflict_function **overlaps_a,
4385 conflict_function **overlaps_b,
4386 tree *last_conflicts,
4387 int loop_nest_num)
4388 {
4389 dependence_stats.num_siv++;
4390
4391 if (dump_file && (dump_flags & TDF_DETAILS))
4392 fprintf (dump_file, "(analyze_siv_subscript \n");
4393
4394 if (evolution_function_is_constant_p (chrec_a)
4395 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
4396 analyze_siv_subscript_cst_affine (chrec_a, chrec_b,
4397 overlaps_a, overlaps_b, last_conflicts);
4398
4399 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
4400 && evolution_function_is_constant_p (chrec_b))
4401 analyze_siv_subscript_cst_affine (chrec_b, chrec_a,
4402 overlaps_b, overlaps_a, last_conflicts);
4403
4404 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
4405 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
4406 {
4407 if (!chrec_contains_symbols (chrec_a)
4408 && !chrec_contains_symbols (chrec_b))
4409 {
4410 analyze_subscript_affine_affine (chrec_a, chrec_b,
4411 overlaps_a, overlaps_b,
4412 last_conflicts);
4413
4414 if (CF_NOT_KNOWN_P (*overlaps_a)
4415 || CF_NOT_KNOWN_P (*overlaps_b))
4416 dependence_stats.num_siv_unimplemented++;
4417 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
4418 || CF_NO_DEPENDENCE_P (*overlaps_b))
4419 dependence_stats.num_siv_independent++;
4420 else
4421 dependence_stats.num_siv_dependent++;
4422 }
4423 else if (can_use_analyze_subscript_affine_affine (&chrec_a,
4424 &chrec_b))
4425 {
4426 analyze_subscript_affine_affine (chrec_a, chrec_b,
4427 overlaps_a, overlaps_b,
4428 last_conflicts);
4429
4430 if (CF_NOT_KNOWN_P (*overlaps_a)
4431 || CF_NOT_KNOWN_P (*overlaps_b))
4432 dependence_stats.num_siv_unimplemented++;
4433 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
4434 || CF_NO_DEPENDENCE_P (*overlaps_b))
4435 dependence_stats.num_siv_independent++;
4436 else
4437 dependence_stats.num_siv_dependent++;
4438 }
4439 else
4440 goto siv_subscript_dontknow;
4441 }
4442
4443 else
4444 {
4445 siv_subscript_dontknow:;
4446 if (dump_file && (dump_flags & TDF_DETAILS))
4447 fprintf (dump_file, " siv test failed: unimplemented");
4448 *overlaps_a = conflict_fn_not_known ();
4449 *overlaps_b = conflict_fn_not_known ();
4450 *last_conflicts = chrec_dont_know;
4451 dependence_stats.num_siv_unimplemented++;
4452 }
4453
4454 if (dump_file && (dump_flags & TDF_DETAILS))
4455 fprintf (dump_file, ")\n");
4456 }
4457
4458 /* Returns false if we can prove that the greatest common divisor of the steps
4459 of CHREC does not divide CST, true otherwise. */
4460
4461 static bool
4462 gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst)
4463 {
4464 HOST_WIDE_INT cd = 0, val;
4465 tree step;
4466
4467 if (!tree_fits_shwi_p (cst))
4468 return true;
4469 val = tree_to_shwi (cst);
4470
4471 while (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
4472 {
4473 step = CHREC_RIGHT (chrec);
4474 if (!tree_fits_shwi_p (step))
4475 return true;
4476 cd = gcd (cd, tree_to_shwi (step));
4477 chrec = CHREC_LEFT (chrec);
4478 }
4479
4480 return val % cd == 0;
4481 }
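
/* For example, for the chrec {{21, +, 2}_1, +, -2}_2 and CST = 1,
the gcd of the steps -2 and 2 is 2, which does not divide 1, so this
returns false and the caller can conclude that the accesses never
overlap (see the use in analyze_miv_subscript below). */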
4482
4483 /* Analyze a MIV (Multiple Index Variable) subscript with respect to
4484 LOOP_NEST. *OVERLAPS_A and *OVERLAPS_B are initialized to the
4485 functions that describe the relation between the elements accessed
4486 twice by CHREC_A and CHREC_B. For k >= 0, the following property
4487 is verified:
4488
4489 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
4490
4491 static void
4492 analyze_miv_subscript (tree chrec_a,
4493 tree chrec_b,
4494 conflict_function **overlaps_a,
4495 conflict_function **overlaps_b,
4496 tree *last_conflicts,
4497 class loop *loop_nest)
4498 {
4499 tree type, difference;
4500
4501 dependence_stats.num_miv++;
4502 if (dump_file && (dump_flags & TDF_DETAILS))
4503 fprintf (dump_file, "(analyze_miv_subscript \n");
4504
4505 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
4506 chrec_a = chrec_convert (type, chrec_a, NULL);
4507 chrec_b = chrec_convert (type, chrec_b, NULL);
4508 difference = chrec_fold_minus (type, chrec_a, chrec_b);
4509
4510 if (eq_evolutions_p (chrec_a, chrec_b))
4511 {
4512 /* Access functions are the same: all the elements are accessed
4513 in the same order. */
4514 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4515 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4516 *last_conflicts = max_stmt_executions_tree (get_chrec_loop (chrec_a));
4517 dependence_stats.num_miv_dependent++;
4518 }
4519
4520 else if (evolution_function_is_constant_p (difference)
4521 && evolution_function_is_affine_multivariate_p (chrec_a,
4522 loop_nest->num)
4523 && !gcd_of_steps_may_divide_p (chrec_a, difference))
4524 {
4525 /* testsuite/.../ssa-chrec-33.c
4526 {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2
4527
4528 The difference is 1, and all the evolution steps are multiples
4529 of 2, consequently there are no overlapping elements. */
4530 *overlaps_a = conflict_fn_no_dependence ();
4531 *overlaps_b = conflict_fn_no_dependence ();
4532 *last_conflicts = integer_zero_node;
4533 dependence_stats.num_miv_independent++;
4534 }
4535
4536 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest->num)
4537 && !chrec_contains_symbols (chrec_a, loop_nest)
4538 && evolution_function_is_affine_in_loop (chrec_b, loop_nest->num)
4539 && !chrec_contains_symbols (chrec_b, loop_nest))
4540 {
4541 /* testsuite/.../ssa-chrec-35.c
4542 {0, +, 1}_2 vs. {0, +, 1}_3
4543 the overlapping elements are respectively located at iterations:
4544 {0, +, 1}_x and {0, +, 1}_x,
4545 in other words, we have the equality:
4546 {0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x)
4547
4548 Other examples:
4549 {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) =
4550 {0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y)
4551
4552 {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) =
4553 {{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y)
4554 */
4555 analyze_subscript_affine_affine (chrec_a, chrec_b,
4556 overlaps_a, overlaps_b, last_conflicts);
4557
4558 if (CF_NOT_KNOWN_P (*overlaps_a)
4559 || CF_NOT_KNOWN_P (*overlaps_b))
4560 dependence_stats.num_miv_unimplemented++;
4561 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
4562 || CF_NO_DEPENDENCE_P (*overlaps_b))
4563 dependence_stats.num_miv_independent++;
4564 else
4565 dependence_stats.num_miv_dependent++;
4566 }
4567
4568 else
4569 {
4570 /* When the analysis is too difficult, answer "don't know". */
4571 if (dump_file && (dump_flags & TDF_DETAILS))
4572 fprintf (dump_file, "analyze_miv_subscript test failed: unimplemented.\n");
4573
4574 *overlaps_a = conflict_fn_not_known ();
4575 *overlaps_b = conflict_fn_not_known ();
4576 *last_conflicts = chrec_dont_know;
4577 dependence_stats.num_miv_unimplemented++;
4578 }
4579
4580 if (dump_file && (dump_flags & TDF_DETAILS))
4581 fprintf (dump_file, ")\n");
4582 }
4583
4584 /* Determines the iterations for which CHREC_A is equal to CHREC_B
4585 with respect to LOOP_NEST. OVERLAP_ITERATIONS_A and
4586 OVERLAP_ITERATIONS_B are initialized with two functions that
4587 describe the iterations that contain conflicting elements.
4588
4589 Remark: For an integer k >= 0, the following equality is true:
4590
4591 CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)).
4592 */
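/* The subscript pair is dispatched below using the classic
ZIV / SIV / MIV classification: for example, (3, 4) is ZIV,
({0, +, 1}_1, 3) is SIV, and ({0, +, 1}_1, {2, +, 2}_2) is MIV. */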
4593
4594 static void
4595 analyze_overlapping_iterations (tree chrec_a,
4596 tree chrec_b,
4597 conflict_function **overlap_iterations_a,
4598 conflict_function **overlap_iterations_b,
4599 tree *last_conflicts, class loop *loop_nest)
4600 {
4601 unsigned int lnn = loop_nest->num;
4602
4603 dependence_stats.num_subscript_tests++;
4604
4605 if (dump_file && (dump_flags & TDF_DETAILS))
4606 {
4607 fprintf (dump_file, "(analyze_overlapping_iterations \n");
4608 fprintf (dump_file, " (chrec_a = ");
4609 print_generic_expr (dump_file, chrec_a);
4610 fprintf (dump_file, ")\n (chrec_b = ");
4611 print_generic_expr (dump_file, chrec_b);
4612 fprintf (dump_file, ")\n");
4613 }
4614
4615 if (chrec_a == NULL_TREE
4616 || chrec_b == NULL_TREE
4617 || chrec_contains_undetermined (chrec_a)
4618 || chrec_contains_undetermined (chrec_b))
4619 {
4620 dependence_stats.num_subscript_undetermined++;
4621
4622 *overlap_iterations_a = conflict_fn_not_known ();
4623 *overlap_iterations_b = conflict_fn_not_known ();
4624 }
4625
4626 /* If they are the same chrec, and are affine, they overlap
4627 on every iteration. */
4628 else if (eq_evolutions_p (chrec_a, chrec_b)
4629 && (evolution_function_is_affine_multivariate_p (chrec_a, lnn)
4630 || operand_equal_p (chrec_a, chrec_b, 0)))
4631 {
4632 dependence_stats.num_same_subscript_function++;
4633 *overlap_iterations_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4634 *overlap_iterations_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4635 *last_conflicts = chrec_dont_know;
4636 }
4637
4638 /* If they aren't the same, and aren't affine, we can't do anything
4639 yet. */
4640 else if ((chrec_contains_symbols (chrec_a)
4641 || chrec_contains_symbols (chrec_b))
4642 && (!evolution_function_is_affine_multivariate_p (chrec_a, lnn)
4643 || !evolution_function_is_affine_multivariate_p (chrec_b, lnn)))
4644 {
4645 dependence_stats.num_subscript_undetermined++;
4646 *overlap_iterations_a = conflict_fn_not_known ();
4647 *overlap_iterations_b = conflict_fn_not_known ();
4648 }
4649
4650 else if (ziv_subscript_p (chrec_a, chrec_b))
4651 analyze_ziv_subscript (chrec_a, chrec_b,
4652 overlap_iterations_a, overlap_iterations_b,
4653 last_conflicts);
4654
4655 else if (siv_subscript_p (chrec_a, chrec_b))
4656 analyze_siv_subscript (chrec_a, chrec_b,
4657 overlap_iterations_a, overlap_iterations_b,
4658 last_conflicts, lnn);
4659
4660 else
4661 analyze_miv_subscript (chrec_a, chrec_b,
4662 overlap_iterations_a, overlap_iterations_b,
4663 last_conflicts, loop_nest);
4664
4665 if (dump_file && (dump_flags & TDF_DETAILS))
4666 {
4667 fprintf (dump_file, " (overlap_iterations_a = ");
4668 dump_conflict_function (dump_file, *overlap_iterations_a);
4669 fprintf (dump_file, ")\n (overlap_iterations_b = ");
4670 dump_conflict_function (dump_file, *overlap_iterations_b);
4671 fprintf (dump_file, "))\n");
4672 }
4673 }
4674
4675 /* Helper function for uniquely inserting distance vectors. */
4676
4677 static void
4678 save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v)
4679 {
4680 unsigned i;
4681 lambda_vector v;
4682
4683 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v)
4684 if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr)))
4685 return;
4686
4687 DDR_DIST_VECTS (ddr).safe_push (dist_v);
4688 }
4689
4690 /* Helper function for uniquely inserting direction vectors. */
4691
4692 static void
4693 save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v)
4694 {
4695 unsigned i;
4696 lambda_vector v;
4697
4698 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v)
4699 if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr)))
4700 return;
4701
4702 DDR_DIR_VECTS (ddr).safe_push (dir_v);
4703 }
4704
4705 /* Add a distance of 1 on all the loops that enclose the loop at INDEX. If we
4706 haven't yet determined a distance for this outer loop, push a new
4707 distance vector composed of the previous distance, and a distance
4708 of 1 for this outer loop. Example:
4709
4710 | loop_1
4711 | loop_2
4712 | A[10]
4713 | endloop_2
4714 | endloop_1
4715
4716 Saved vectors are of the form (dist_in_1, dist_in_2). First, we
4717 save (0, 1), then we have to save (1, 0). */
4718
4719 static void
4720 add_outer_distances (struct data_dependence_relation *ddr,
4721 lambda_vector dist_v, int index)
4722 {
4723 /* For each outer loop where init_v is not set, the accesses are
4724 in dependence of distance 1 in the loop. */
4725 while (--index >= 0)
4726 {
4727 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
4728 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
4729 save_v[index] = 1;
4730 save_dist_v (ddr, save_v);
4731 }
4732 }
4733
4734 /* Return false when we fail to represent the data dependence as a
4735 distance vector. A_INDEX is the index of the first reference
4736 (0 for DDR_A, 1 for DDR_B) and B_INDEX is the index of the
4737 second reference. INIT_B is set to true when a component has been
4738 added to the distance vector DIST_V. INDEX_CARRY is then set to
4739 the index in DIST_V that carries the dependence. */
4740
4741 static bool
4742 build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
4743 unsigned int a_index, unsigned int b_index,
4744 lambda_vector dist_v, bool *init_b,
4745 int *index_carry)
4746 {
4747 unsigned i;
4748 lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
4749 class loop *loop = DDR_LOOP_NEST (ddr)[0];
4750
4751 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
4752 {
4753 tree access_fn_a, access_fn_b;
4754 struct subscript *subscript = DDR_SUBSCRIPT (ddr, i);
4755
4756 if (chrec_contains_undetermined (SUB_DISTANCE (subscript)))
4757 {
4758 non_affine_dependence_relation (ddr);
4759 return false;
4760 }
4761
4762 access_fn_a = SUB_ACCESS_FN (subscript, a_index);
4763 access_fn_b = SUB_ACCESS_FN (subscript, b_index);
4764
4765 if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC
4766 && TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC)
4767 {
4768 HOST_WIDE_INT dist;
4769 int index;
4770 int var_a = CHREC_VARIABLE (access_fn_a);
4771 int var_b = CHREC_VARIABLE (access_fn_b);
4772
4773 if (var_a != var_b
4774 || chrec_contains_undetermined (SUB_DISTANCE (subscript)))
4775 {
4776 non_affine_dependence_relation (ddr);
4777 return false;
4778 }
4779
4780 /* When data references are collected in a loop while data
4781 dependences are analyzed in a loop nest nested in the loop,
4782 there may be more access functions than loops. Skip access
4783 functions of loops not in the loop nest.
4784
4785 See PR89725 for more information. */
4786 if (flow_loop_nested_p (get_loop (cfun, var_a), loop))
4787 continue;
4788
4789 dist = int_cst_value (SUB_DISTANCE (subscript));
4790 index = index_in_loop_nest (var_a, DDR_LOOP_NEST (ddr));
4791 *index_carry = MIN (index, *index_carry);
4792
4793 /* This is the subscript coupling test. If we have already
4794 recorded a distance for this loop (a distance coming from
4795 another subscript), it should be the same. For example,
4796 in the following code, there is no dependence:
4797
4798 | loop i = 0, N, 1
4799 | T[i+1][i] = ...
4800 | ... = T[i][i]
4801 | endloop
4802 */
4803 if (init_v[index] != 0 && dist_v[index] != dist)
4804 {
4805 finalize_ddr_dependent (ddr, chrec_known);
4806 return false;
4807 }
4808
4809 dist_v[index] = dist;
4810 init_v[index] = 1;
4811 *init_b = true;
4812 }
4813 else if (!operand_equal_p (access_fn_a, access_fn_b, 0))
4814 {
4815 /* This can be for example an affine vs. constant dependence
4816 (T[i] vs. T[3]) that is not an affine dependence and is
4817 not representable as a distance vector. */
4818 non_affine_dependence_relation (ddr);
4819 return false;
4820 }
4821 }
4822
4823 return true;
4824 }
4825
4826 /* Return true when the DDR contains only access functions that are
4827 invariant with respect to loop number LNUM. */
4828
4829 static bool
4830 invariant_access_functions (const struct data_dependence_relation *ddr,
4831 int lnum)
4832 {
4833 unsigned i;
4834 subscript *sub;
4835
4836 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
4837 if (!evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 0), lnum)
4838 || !evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 1), lnum))
4839 return false;
4840
4841 return true;
4842 }
4843
4844 /* Helper function for the case where DDR_A and DDR_B are the same
4845 multivariate access function with a constant step. For an example
4846 see pr34635-1.c. */
4847
4848 static void
4849 add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2)
4850 {
4851 int x_1, x_2;
4852 tree c_1 = CHREC_LEFT (c_2);
4853 tree c_0 = CHREC_LEFT (c_1);
4854 lambda_vector dist_v;
4855 HOST_WIDE_INT v1, v2, cd;
4856
4857 /* Polynomials with more than 2 variables are not handled yet. When
4858 the evolution steps are parameters, it is not possible to
4859 represent the dependence using classical distance vectors. */
4860 if (TREE_CODE (c_0) != INTEGER_CST
4861 || TREE_CODE (CHREC_RIGHT (c_1)) != INTEGER_CST
4862 || TREE_CODE (CHREC_RIGHT (c_2)) != INTEGER_CST)
4863 {
4864 DDR_AFFINE_P (ddr) = false;
4865 return;
4866 }
4867
4868 x_2 = index_in_loop_nest (CHREC_VARIABLE (c_2), DDR_LOOP_NEST (ddr));
4869 x_1 = index_in_loop_nest (CHREC_VARIABLE (c_1), DDR_LOOP_NEST (ddr));
4870
4871 /* For "{{0, +, 2}_1, +, 3}_2" the distance vector is (3, -2). */
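/* The vector (3, -2) is a solution because the steps satisfy
| 2 * 3 + 3 * (-2) = 0,
i.e. advancing 3 iterations in loop_1 and -2 iterations in loop_2
lands on the same element. */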
4872 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
4873 v1 = int_cst_value (CHREC_RIGHT (c_1));
4874 v2 = int_cst_value (CHREC_RIGHT (c_2));
4875 cd = gcd (v1, v2);
4876 v1 /= cd;
4877 v2 /= cd;
4878
4879 if (v2 < 0)
4880 {
4881 v2 = -v2;
4882 v1 = -v1;
4883 }
4884
4885 dist_v[x_1] = v2;
4886 dist_v[x_2] = -v1;
4887 save_dist_v (ddr, dist_v);
4888
4889 add_outer_distances (ddr, dist_v, x_1);
4890 }
4891
4892 /* Helper function for the case where DDR_A and DDR_B are the same
4893 access functions. */
4894
4895 static void
4896 add_other_self_distances (struct data_dependence_relation *ddr)
4897 {
4898 lambda_vector dist_v;
4899 unsigned i;
4900 int index_carry = DDR_NB_LOOPS (ddr);
4901 subscript *sub;
4902 class loop *loop = DDR_LOOP_NEST (ddr)[0];
4903
4904 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
4905 {
4906 tree access_fun = SUB_ACCESS_FN (sub, 0);
4907
4908 if (TREE_CODE (access_fun) == POLYNOMIAL_CHREC)
4909 {
4910 if (!evolution_function_is_univariate_p (access_fun, loop->num))
4911 {
4912 if (DDR_NUM_SUBSCRIPTS (ddr) != 1)
4913 {
4914 DDR_ARE_DEPENDENT (ddr) = chrec_dont_know;
4915 return;
4916 }
4917
4918 access_fun = SUB_ACCESS_FN (DDR_SUBSCRIPT (ddr, 0), 0);
4919
4920 if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC)
4921 add_multivariate_self_dist (ddr, access_fun);
4922 else
4923 /* The evolution step is not constant: it varies in
4924 the outer loop, so this cannot be represented by a
4925 distance vector. For example in pr34635.c the
4926 evolution is {0, +, {0, +, 4}_1}_2. */
4927 DDR_AFFINE_P (ddr) = false;
4928
4929 return;
4930 }
4931
4932 /* When data references are collected in a loop while data
4933 dependences are analyzed in a loop nest nested in the loop,
4934 there may be more access functions than loops. Skip access
4935 functions of loops not in the loop nest.
4936
4937 See PR89725 for more information. */
4938 if (flow_loop_nested_p (get_loop (cfun, CHREC_VARIABLE (access_fun)),
4939 loop))
4940 continue;
4941
4942 index_carry = MIN (index_carry,
4943 index_in_loop_nest (CHREC_VARIABLE (access_fun),
4944 DDR_LOOP_NEST (ddr)));
4945 }
4946 }
4947
4948 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
4949 add_outer_distances (ddr, dist_v, index_carry);
4950 }
4951
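/* Push into DDR a distance vector whose innermost component is 1 and
all other components are 0 (see the example below). */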
4952 static void
4953 insert_innermost_unit_dist_vector (struct data_dependence_relation *ddr)
4954 {
4955 lambda_vector dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
4956
4957 dist_v[0] = 1;
4958 save_dist_v (ddr, dist_v);
4959 }
4960
4961 /* Adds a unit distance vector to DDR when there is a 0 overlap. This
4962 is the case for example when access functions are the same and
4963 equal to a constant, as in:
4964
4965 | loop_1
4966 | A[3] = ...
4967 | ... = A[3]
4968 | endloop_1
4969
4970 in which case the distance vectors are (0) and (1). */
4971
4972 static void
4973 add_distance_for_zero_overlaps (struct data_dependence_relation *ddr)
4974 {
4975 unsigned i, j;
4976
4977 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
4978 {
4979 subscript_p sub = DDR_SUBSCRIPT (ddr, i);
4980 conflict_function *ca = SUB_CONFLICTS_IN_A (sub);
4981 conflict_function *cb = SUB_CONFLICTS_IN_B (sub);
4982
4983 for (j = 0; j < ca->n; j++)
4984 if (affine_function_zero_p (ca->fns[j]))
4985 {
4986 insert_innermost_unit_dist_vector (ddr);
4987 return;
4988 }
4989
4990 for (j = 0; j < cb->n; j++)
4991 if (affine_function_zero_p (cb->fns[j]))
4992 {
4993 insert_innermost_unit_dist_vector (ddr);
4994 return;
4995 }
4996 }
4997 }
4998
4999 /* Return true when the DDR contains two data references that have the
5000 same access functions. */
5001
5002 static inline bool
5003 same_access_functions (const struct data_dependence_relation *ddr)
5004 {
5005 unsigned i;
5006 subscript *sub;
5007
5008 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
5009 if (!eq_evolutions_p (SUB_ACCESS_FN (sub, 0),
5010 SUB_ACCESS_FN (sub, 1)))
5011 return false;
5012
5013 return true;
5014 }
5015
5016 /* Compute the classic per loop distance vector. DDR is the data
5017 dependence relation to build a vector from. Return false when we
5018 fail to represent the data dependence as a distance vector. */
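/* For instance, for the illustrative loop
| for (i = 0; i < N; i++)
| A[i + 1] = A[i];
the dependence between the write and the read is summarized by the
distance vector (1). */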
5019
5020 static bool
5021 build_classic_dist_vector (struct data_dependence_relation *ddr,
5022 class loop *loop_nest)
5023 {
5024 bool init_b = false;
5025 int index_carry = DDR_NB_LOOPS (ddr);
5026 lambda_vector dist_v;
5027
5028 if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE)
5029 return false;
5030
5031 if (same_access_functions (ddr))
5032 {
5033 /* Save the 0 vector. */
5034 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5035 save_dist_v (ddr, dist_v);
5036
5037 if (invariant_access_functions (ddr, loop_nest->num))
5038 add_distance_for_zero_overlaps (ddr);
5039
5040 if (DDR_NB_LOOPS (ddr) > 1)
5041 add_other_self_distances (ddr);
5042
5043 return true;
5044 }
5045
5046 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5047 if (!build_classic_dist_vector_1 (ddr, 0, 1, dist_v, &init_b, &index_carry))
5048 return false;
5049
5050 /* Save the distance vector if we initialized one. */
5051 if (init_b)
5052 {
5053 /* Verify a basic constraint: classic distance vectors should
5054 always be lexicographically positive.
5055
5056 Data references are collected in the order of execution of
5057 the program, thus for the following loop
5058
5059 | for (i = 1; i < 100; i++)
5060 | for (j = 1; j < 100; j++)
5061 | {
5062 | t = T[j+1][i-1]; // A
5063 | T[j][i] = t + 2; // B
5064 | }
5065
5066 references are collected following the direction of the wind:
5067 A then B. The data dependence tests are performed also
5068 following this order, such that we're looking at the distance
5069 separating the elements accessed by A from the elements later
5070 accessed by B. But in this example, the distance returned by
5071 test_dep (A, B) is lexicographically negative (-1, 1), which
5072 means that the access A occurs later than B with respect to
5073 the outer loop, i.e. we're actually looking upwind. In this
5074 case we solve test_dep (B, A) looking downwind to the
5075 lexicographically positive solution, that returns the
5076 distance vector (1, -1). */
5077 if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr)))
5078 {
5079 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5080 if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest))
5081 return false;
5082 compute_subscript_distance (ddr);
5083 if (!build_classic_dist_vector_1 (ddr, 1, 0, save_v, &init_b,
5084 &index_carry))
5085 return false;
5086 save_dist_v (ddr, save_v);
5087 DDR_REVERSED_P (ddr) = true;
5088
5089 /* In this case there is a dependence forward for all the
5090 outer loops:
5091
5092 | for (k = 1; k < 100; k++)
5093 | for (i = 1; i < 100; i++)
5094 | for (j = 1; j < 100; j++)
5095 | {
5096 | t = T[j+1][i-1]; // A
5097 | T[j][i] = t + 2; // B
5098 | }
5099
5100 the vectors are:
5101 (0, 1, -1)
5102 (1, 1, -1)
5103 (1, -1, 1)
5104 */
5105 if (DDR_NB_LOOPS (ddr) > 1)
5106 {
5107 add_outer_distances (ddr, save_v, index_carry);
5108 add_outer_distances (ddr, dist_v, index_carry);
5109 }
5110 }
5111 else
5112 {
5113 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5114 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
5115
5116 if (DDR_NB_LOOPS (ddr) > 1)
5117 {
5118 lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5119
5120 if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest))
5121 return false;
5122 compute_subscript_distance (ddr);
5123 if (!build_classic_dist_vector_1 (ddr, 1, 0, opposite_v, &init_b,
5124 &index_carry))
5125 return false;
5126
5127 save_dist_v (ddr, save_v);
5128 add_outer_distances (ddr, dist_v, index_carry);
5129 add_outer_distances (ddr, opposite_v, index_carry);
5130 }
5131 else
5132 save_dist_v (ddr, save_v);
5133 }
5134 }
5135 else
5136 {
5137 /* There is a distance of 1 on all the outer loops: Example:
5138 there is a dependence of distance 1 on loop_1 for the array A.
5139
5140 | loop_1
5141 | A[5] = ...
5142 | endloop
5143 */
5144 add_outer_distances (ddr, dist_v,
5145 lambda_vector_first_nz (dist_v,
5146 DDR_NB_LOOPS (ddr), 0));
5147 }
5148
5149 if (dump_file && (dump_flags & TDF_DETAILS))
5150 {
5151 unsigned i;
5152
5153 fprintf (dump_file, "(build_classic_dist_vector\n");
5154 for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
5155 {
5156 fprintf (dump_file, " dist_vector = (");
5157 print_lambda_vector (dump_file, DDR_DIST_VECT (ddr, i),
5158 DDR_NB_LOOPS (ddr));
5159 fprintf (dump_file, " )\n");
5160 }
5161 fprintf (dump_file, ")\n");
5162 }
5163
5164 return true;
5165 }
5166
5167 /* Return the direction for a given distance.
5168 FIXME: Computing dir this way is suboptimal, since dir can catch
5169 cases that dist is unable to represent. */
5170
5171 static inline enum data_dependence_direction
5172 dir_from_dist (int dist)
5173 {
5174 if (dist > 0)
5175 return dir_positive;
5176 else if (dist < 0)
5177 return dir_negative;
5178 else
5179 return dir_equal;
5180 }
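
/* For example, the distance vector (1, -1) maps to the direction
vector (dir_positive, dir_negative). */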
5181
5182 /* Compute the classic per loop direction vector. DDR is the data
5183 dependence relation to build a vector from. */
5184
5185 static void
5186 build_classic_dir_vector (struct data_dependence_relation *ddr)
5187 {
5188 unsigned i, j;
5189 lambda_vector dist_v;
5190
5191 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
5192 {
5193 lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5194
5195 for (j = 0; j < DDR_NB_LOOPS (ddr); j++)
5196 dir_v[j] = dir_from_dist (dist_v[j]);
5197
5198 save_dir_v (ddr, dir_v);
5199 }
5200 }
5201
5202 /* Helper function. Returns true when there is a dependence between the
5203 data references. A_INDEX is the index of the first reference (0 for
5204 DDR_A, 1 for DDR_B) and B_INDEX is the index of the second reference. */
5205
5206 static bool
5207 subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
5208 unsigned int a_index, unsigned int b_index,
5209 class loop *loop_nest)
5210 {
5211 unsigned int i;
5212 tree last_conflicts;
5213 struct subscript *subscript;
5214 tree res = NULL_TREE;
5215
5216 for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++)
5217 {
5218 conflict_function *overlaps_a, *overlaps_b;
5219
5220 analyze_overlapping_iterations (SUB_ACCESS_FN (subscript, a_index),
5221 SUB_ACCESS_FN (subscript, b_index),
5222 &overlaps_a, &overlaps_b,
5223 &last_conflicts, loop_nest);
5224
5225 if (SUB_CONFLICTS_IN_A (subscript))
5226 free_conflict_function (SUB_CONFLICTS_IN_A (subscript));
5227 if (SUB_CONFLICTS_IN_B (subscript))
5228 free_conflict_function (SUB_CONFLICTS_IN_B (subscript));
5229
5230 SUB_CONFLICTS_IN_A (subscript) = overlaps_a;
5231 SUB_CONFLICTS_IN_B (subscript) = overlaps_b;
5232 SUB_LAST_CONFLICT (subscript) = last_conflicts;
5233
5234 /* If there is any undetermined conflict function we have to
5235 give a conservative answer in case we cannot prove that
5236 no dependence exists when analyzing another subscript. */
5237 if (CF_NOT_KNOWN_P (overlaps_a)
5238 || CF_NOT_KNOWN_P (overlaps_b))
5239 {
5240 res = chrec_dont_know;
5241 continue;
5242 }
5243
5244 /* When there is a subscript with no dependence we can stop. */
5245 else if (CF_NO_DEPENDENCE_P (overlaps_a)
5246 || CF_NO_DEPENDENCE_P (overlaps_b))
5247 {
5248 res = chrec_known;
5249 break;
5250 }
5251 }
5252
5253 if (res == NULL_TREE)
5254 return true;
5255
5256 if (res == chrec_known)
5257 dependence_stats.num_dependence_independent++;
5258 else
5259 dependence_stats.num_dependence_undetermined++;
5260 finalize_ddr_dependent (ddr, res);
5261 return false;
5262 }
5263
5264 /* Computes the conflicting iterations in LOOP_NEST, and initializes DDR. */
5265
5266 static void
5267 subscript_dependence_tester (struct data_dependence_relation *ddr,
5268 class loop *loop_nest)
5269 {
5270 if (subscript_dependence_tester_1 (ddr, 0, 1, loop_nest))
5271 dependence_stats.num_dependence_dependent++;
5272
5273 compute_subscript_distance (ddr);
5274 if (build_classic_dist_vector (ddr, loop_nest))
5275 build_classic_dir_vector (ddr);
5276 }

/* Returns true when all the access functions of A are affine or
   constant with respect to LOOP_NEST.  */

static bool
access_functions_are_affine_or_constant_p (const struct data_reference *a,
                                           const class loop *loop_nest)
{
  unsigned int i;
  vec<tree> fns = DR_ACCESS_FNS (a);
  tree t;

  FOR_EACH_VEC_ELT (fns, i, t)
    if (!evolution_function_is_invariant_p (t, loop_nest->num)
        && !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
      return false;

  return true;
}
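
/* A rough sketch of the accepted shapes, in the chrec notation used
   throughout this file:

   | A[42]       -> access function 42           (constant, accepted)
   | A[i + 1]    -> access function {1, +, 1}_1  (affine, accepted)
   | A[i * i]    -> not affine                   (rejected)  */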

/* This computes the affine dependence relation between A and B with
   respect to LOOP_NEST.  CHREC_KNOWN is used for representing the
   independence between two accesses, while CHREC_DONT_KNOW is used
   for representing the unknown relation.

   Note that it is possible to stop the computation of the dependence
   relation the first time we detect a CHREC_KNOWN element for a given
   subscript.  */

void
compute_affine_dependence (struct data_dependence_relation *ddr,
                           class loop *loop_nest)
{
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "(compute_affine_dependence\n");
      fprintf (dump_file, "  stmt_a: ");
      print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM);
      fprintf (dump_file, "  stmt_b: ");
      print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM);
    }

  /* Analyze only when the dependence relation is not yet known.  */
  if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      dependence_stats.num_dependence_tests++;

      if (access_functions_are_affine_or_constant_p (dra, loop_nest)
          && access_functions_are_affine_or_constant_p (drb, loop_nest))
        subscript_dependence_tester (ddr, loop_nest);

      /* As a last case, if the dependence cannot be determined, or if
         the dependence is considered too difficult to determine, answer
         "don't know".  */
      else
        {
          dependence_stats.num_dependence_undetermined++;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Data ref a:\n");
              dump_data_reference (dump_file, dra);
              fprintf (dump_file, "Data ref b:\n");
              dump_data_reference (dump_file, drb);
              fprintf (dump_file,
                       "affine dependence test not usable: access function not affine or constant.\n");
            }
          finalize_ddr_dependent (ddr, chrec_dont_know);
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
        fprintf (dump_file, ") -> no dependence\n");
      else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
        fprintf (dump_file, ") -> dependence analysis failed\n");
      else
        fprintf (dump_file, ")\n");
    }
}
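
/* With TDF_DETAILS dumping enabled, the trace printed above comes out
   along these lines (a sketch for two conflicting accesses to A; the
   exact statement text depends on print_gimple_stmt):

   | (compute_affine_dependence
   |   stmt_a: A[i_1] = 1;
   |   stmt_b: _2 = A[i_1];
   | )  */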

/* Compute in DEPENDENCE_RELATIONS the data dependence graph for all
   the data references in DATAREFS, in the LOOP_NEST.  When
   COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self
   relations.  Return true when successful, i.e. when the number of
   data references is small enough to be handled.  */

bool
compute_all_dependences (vec<data_reference_p> datarefs,
                         vec<ddr_p> *dependence_relations,
                         vec<loop_p> loop_nest,
                         bool compute_self_and_rr)
{
  struct data_dependence_relation *ddr;
  struct data_reference *a, *b;
  unsigned int i, j;

  if ((int) datarefs.length ()
      > param_loop_max_datarefs_for_datadeps)
    {
      struct data_dependence_relation *ddr;

      /* Insert a single relation into dependence_relations:
         chrec_dont_know.  */
      ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest);
      dependence_relations->safe_push (ddr);
      return false;
    }

  FOR_EACH_VEC_ELT (datarefs, i, a)
    for (j = i + 1; datarefs.iterate (j, &b); j++)
      if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr)
        {
          ddr = initialize_data_dependence_relation (a, b, loop_nest);
          dependence_relations->safe_push (ddr);
          if (loop_nest.exists ())
            compute_affine_dependence (ddr, loop_nest[0]);
        }

  if (compute_self_and_rr)
    FOR_EACH_VEC_ELT (datarefs, i, a)
      {
        ddr = initialize_data_dependence_relation (a, a, loop_nest);
        dependence_relations->safe_push (ddr);
        if (loop_nest.exists ())
          compute_affine_dependence (ddr, loop_nest[0]);
      }

  return true;
}
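
/* A typical caller first builds the loop nest and collects the data
   references, then asks for the dependence graph; a minimal sketch
   (mirroring what compute_data_dependences_for_loop does further down,
   with hypothetical local names):

   | vec<loop_p> nest = vNULL;
   | vec<data_reference_p> refs = vNULL;
   | vec<ddr_p> ddrs = vNULL;
   | if (find_loop_nest (loop, &nest)
   |     && find_data_references_in_loop (loop, &refs) != chrec_dont_know)
   |   compute_all_dependences (refs, &ddrs, nest, false);  */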

/* Describes a location of a memory reference.  */

struct data_ref_loc
{
  /* The memory reference.  */
  tree ref;

  /* True if the memory reference is read.  */
  bool is_read;

  /* True if the data reference is conditional within the containing
     statement, i.e. if it might not occur even when the statement
     is executed and runs to completion.  */
  bool is_conditional_in_stmt;
};


/* Stores the locations of memory references in STMT to REFERENCES.  Returns
   true if STMT clobbers memory, false otherwise.  */

static bool
get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references)
{
  bool clobbers_memory = false;
  data_ref_loc ref;
  tree op0, op1;
  enum gimple_code stmt_code = gimple_code (stmt);

  /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
     As we cannot model data references to accesses that are not
     spelled out, give up if they may occur.  */
  if (stmt_code == GIMPLE_CALL
      && !(gimple_call_flags (stmt) & ECF_CONST))
    {
      /* Allow IFN_GOMP_SIMD_LANE in its own loop.  */
      if (gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_GOMP_SIMD_LANE:
            {
              class loop *loop = gimple_bb (stmt)->loop_father;
              tree uid = gimple_call_arg (stmt, 0);
              gcc_assert (TREE_CODE (uid) == SSA_NAME);
              if (loop == NULL
                  || loop->simduid != SSA_NAME_VAR (uid))
                clobbers_memory = true;
              break;
            }
          case IFN_MASK_LOAD:
          case IFN_MASK_STORE:
            break;
          default:
            clobbers_memory = true;
            break;
          }
      else
        clobbers_memory = true;
    }
  else if (stmt_code == GIMPLE_ASM
           && (gimple_asm_volatile_p (as_a <gasm *> (stmt))
               || gimple_vuse (stmt)))
    clobbers_memory = true;

  if (!gimple_vuse (stmt))
    return clobbers_memory;

  if (stmt_code == GIMPLE_ASSIGN)
    {
      tree base;
      op0 = gimple_assign_lhs (stmt);
      op1 = gimple_assign_rhs1 (stmt);

      if (DECL_P (op1)
          || (REFERENCE_CLASS_P (op1)
              && (base = get_base_address (op1))
              && TREE_CODE (base) != SSA_NAME
              && !is_gimple_min_invariant (base)))
        {
          ref.ref = op1;
          ref.is_read = true;
          ref.is_conditional_in_stmt = false;
          references->safe_push (ref);
        }
    }
  else if (stmt_code == GIMPLE_CALL)
    {
      unsigned i, n;
      tree ptr, type;
      unsigned int align;

      ref.is_read = false;
      if (gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_MASK_LOAD:
            if (gimple_call_lhs (stmt) == NULL_TREE)
              break;
            ref.is_read = true;
            /* FALLTHRU */
          case IFN_MASK_STORE:
            ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
            align = tree_to_shwi (gimple_call_arg (stmt, 1));
            if (ref.is_read)
              type = TREE_TYPE (gimple_call_lhs (stmt));
            else
              type = TREE_TYPE (gimple_call_arg (stmt, 3));
            if (TYPE_ALIGN (type) != align)
              type = build_aligned_type (type, align);
            ref.is_conditional_in_stmt = true;
            ref.ref = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                                   ptr);
            references->safe_push (ref);
            return false;
          default:
            break;
          }

      op0 = gimple_call_lhs (stmt);
      n = gimple_call_num_args (stmt);
      for (i = 0; i < n; i++)
        {
          op1 = gimple_call_arg (stmt, i);

          if (DECL_P (op1)
              || (REFERENCE_CLASS_P (op1) && get_base_address (op1)))
            {
              ref.ref = op1;
              ref.is_read = true;
              ref.is_conditional_in_stmt = false;
              references->safe_push (ref);
            }
        }
    }
  else
    return clobbers_memory;

  if (op0
      && (DECL_P (op0)
          || (REFERENCE_CLASS_P (op0) && get_base_address (op0))))
    {
      ref.ref = op0;
      ref.is_read = false;
      ref.is_conditional_in_stmt = false;
      references->safe_push (ref);
    }
  return clobbers_memory;
}
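
/* For example, for the assignment "a[i_1] = b[i_1] + 1" this pushes a
   read reference for b[i_1] and a write reference for a[i_1], neither
   of them conditional in the statement, while a masked store such as
   .MASK_STORE (&a[i_1], align, mask_2, val_3) yields a single write
   reference that is conditional in the statement.  A sketch of typical
   inputs.  */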


/* Returns true if the loop-nest has any data reference.  */

bool
loop_nest_has_data_refs (loop_p loop)
{
  basic_block *bbs = get_loop_body (loop);
  auto_vec<data_ref_loc, 3> references;

  for (unsigned i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator bsi;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple *stmt = gsi_stmt (bsi);
          get_references_in_stmt (stmt, &references);
          if (references.length ())
            {
              free (bbs);
              return true;
            }
        }
    }
  free (bbs);
  return false;
}

/* Stores the data references in STMT to DATAREFS.  If there is an
   unanalyzable reference, returns an opt_result failure, otherwise
   returns success.  NEST is the outermost loop of the loop nest in
   which the references should be analyzed.  */

opt_result
find_data_references_in_stmt (class loop *nest, gimple *stmt,
                              vec<data_reference_p> *datarefs)
{
  unsigned i;
  auto_vec<data_ref_loc, 2> references;
  data_ref_loc *ref;
  data_reference_p dr;

  if (get_references_in_stmt (stmt, &references))
    return opt_result::failure_at (stmt, "statement clobbers memory: %G",
                                   stmt);

  FOR_EACH_VEC_ELT (references, i, ref)
    {
      dr = create_data_ref (nest ? loop_preheader_edge (nest) : NULL,
                            loop_containing_stmt (stmt), ref->ref,
                            stmt, ref->is_read, ref->is_conditional_in_stmt);
      gcc_assert (dr != NULL);
      datarefs->safe_push (dr);
    }

  return opt_result::success ();
}

/* Stores the data references in STMT to DATAREFS.  If there is an
   unanalyzable reference, returns false, otherwise returns true.
   NEST is the outermost loop of the loop nest in which the references
   should be instantiated, LOOP is the loop in which the references
   should be analyzed.  */

bool
graphite_find_data_references_in_stmt (edge nest, loop_p loop, gimple *stmt,
                                       vec<data_reference_p> *datarefs)
{
  unsigned i;
  auto_vec<data_ref_loc, 2> references;
  data_ref_loc *ref;
  bool ret = true;
  data_reference_p dr;

  if (get_references_in_stmt (stmt, &references))
    return false;

  FOR_EACH_VEC_ELT (references, i, ref)
    {
      dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read,
                            ref->is_conditional_in_stmt);
      gcc_assert (dr != NULL);
      datarefs->safe_push (dr);
    }

  return ret;
}

/* Search the data references in BB, and record the information into
   DATAREFS.  Returns chrec_dont_know when failing to analyze a
   difficult case, returns NULL_TREE otherwise.  */

tree
find_data_references_in_bb (class loop *loop, basic_block bb,
                            vec<data_reference_p> *datarefs)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      gimple *stmt = gsi_stmt (bsi);

      if (!find_data_references_in_stmt (loop, stmt, datarefs))
        {
          struct data_reference *res;
          res = XCNEW (struct data_reference);
          datarefs->safe_push (res);

          return chrec_dont_know;
        }
    }

  return NULL_TREE;
}

/* Search the data references in LOOP, and record the information into
   DATAREFS.  Returns chrec_dont_know when failing to analyze a
   difficult case, returns NULL_TREE otherwise.

   TODO: This function should be made smarter so that it can handle
   address arithmetic as if it were array accesses, etc.  */

tree
find_data_references_in_loop (class loop *loop,
                              vec<data_reference_p> *datarefs)
{
  basic_block bb, *bbs;
  unsigned int i;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      if (find_data_references_in_bb (loop, bb, datarefs) == chrec_dont_know)
        {
          free (bbs);
          return chrec_dont_know;
        }
    }
  free (bbs);

  return NULL_TREE;
}

/* Return the alignment in bytes that DRB is guaranteed to have at all
   times.  */

unsigned int
dr_alignment (innermost_loop_behavior *drb)
{
  /* Get the alignment of BASE_ADDRESS + INIT.  */
  unsigned int alignment = drb->base_alignment;
  unsigned int misalignment = (drb->base_misalignment
                               + TREE_INT_CST_LOW (drb->init));
  if (misalignment != 0)
    alignment = MIN (alignment, misalignment & -misalignment);

  /* Cap it to the alignment of OFFSET.  */
  if (!integer_zerop (drb->offset))
    alignment = MIN (alignment, drb->offset_alignment);

  /* Cap it to the alignment of STEP.  */
  if (!integer_zerop (drb->step))
    alignment = MIN (alignment, drb->step_alignment);

  return alignment;
}
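
/* As a worked example with made-up numbers: base_alignment 16,
   base_misalignment 4 and init 4 give misalignment 8, whose lowest set
   bit (8 & -8 == 8) caps the result at MIN (16, 8) == 8 bytes; a
   nonzero offset or step can then only lower it further.  */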

/* If BASE is a pointer-typed SSA name, try to find the object that it
   is based on.  Return this object X on success and store the alignment
   in bytes of BASE - &X in *ALIGNMENT_OUT.  */

static tree
get_base_for_alignment_1 (tree base, unsigned int *alignment_out)
{
  if (TREE_CODE (base) != SSA_NAME || !POINTER_TYPE_P (TREE_TYPE (base)))
    return NULL_TREE;

  gimple *def = SSA_NAME_DEF_STMT (base);
  base = analyze_scalar_evolution (loop_containing_stmt (def), base);

  /* Peel chrecs and record the minimum alignment preserved by
     all steps.  */
  unsigned int alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT;
  while (TREE_CODE (base) == POLYNOMIAL_CHREC)
    {
      unsigned int step_alignment = highest_pow2_factor (CHREC_RIGHT (base));
      alignment = MIN (alignment, step_alignment);
      base = CHREC_LEFT (base);
    }

  /* Punt if the expression is too complicated to handle.  */
  if (tree_contains_chrecs (base, NULL) || !POINTER_TYPE_P (TREE_TYPE (base)))
    return NULL_TREE;

  /* The only useful cases are those for which a dereference folds to something
     other than an INDIRECT_REF.  */
  tree ref_type = TREE_TYPE (TREE_TYPE (base));
  tree ref = fold_indirect_ref_1 (UNKNOWN_LOCATION, ref_type, base);
  if (!ref)
    return NULL_TREE;

  /* Analyze the base to which the steps we peeled were applied.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset;
  base = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
                              &unsignedp, &reversep, &volatilep);
  if (!base || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return NULL_TREE;

  /* Restrict the alignment to that guaranteed by the offsets.  */
  unsigned int bytepos_alignment = known_alignment (bytepos);
  if (bytepos_alignment != 0)
    alignment = MIN (alignment, bytepos_alignment);
  if (offset)
    {
      unsigned int offset_alignment = highest_pow2_factor (offset);
      alignment = MIN (alignment, offset_alignment);
    }

  *alignment_out = alignment;
  return base;
}
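
/* For instance, if BASE has the scalar evolution {&x.a[4], +, 8}_1,
   peeling the chrec records a step alignment of 8 and leaves &x.a[4];
   the dereference folds to x.a[4], so the function returns x with
   *ALIGNMENT_OUT capped by both the step (8) and the byte position of
   a[4] within x.  A sketch assuming 4-byte elements, so the byte
   position 16 does not lower the cap below 8.  */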

/* Return the object whose alignment would need to be changed in order
   to increase the alignment of ADDR.  Store the maximum achievable
   alignment in *MAX_ALIGNMENT.  */

tree
get_base_for_alignment (tree addr, unsigned int *max_alignment)
{
  tree base = get_base_for_alignment_1 (addr, max_alignment);
  if (base)
    return base;

  if (TREE_CODE (addr) == ADDR_EXPR)
    addr = TREE_OPERAND (addr, 0);
  *max_alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT;
  return addr;
}

/* Recursive helper function.  */

static bool
find_loop_nest_1 (class loop *loop, vec<loop_p> *loop_nest)
{
  /* Inner loops of the nest should not contain siblings.  Example:
     when there are two consecutive loops,

     | loop_0
     |   loop_1
     |     A[{0, +, 1}_1]
     |   endloop_1
     |   loop_2
     |     A[{0, +, 1}_2]
     |   endloop_2
     | endloop_0

     the dependence relation cannot be captured by the distance
     abstraction.  */
  if (loop->next)
    return false;

  loop_nest->safe_push (loop);
  if (loop->inner)
    return find_loop_nest_1 (loop->inner, loop_nest);
  return true;
}

/* Return false when the LOOP is not well nested.  Otherwise return
   true and insert in LOOP_NEST the loops of the nest.  LOOP_NEST will
   contain the loops from the outermost to the innermost, as they will
   appear in the classic distance vector.  */

bool
find_loop_nest (class loop *loop, vec<loop_p> *loop_nest)
{
  loop_nest->safe_push (loop);
  if (loop->inner)
    return find_loop_nest_1 (loop->inner, loop_nest);
  return true;
}

/* Returns true when the data dependences have been computed, false otherwise.
   Given a loop nest LOOP, the following vectors are returned:
   DATAREFS is initialized to all the data references contained in this loop,
   DEPENDENCE_RELATIONS contains the relations between the data references.
   Compute read-read and self relations if
   COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE.  */

bool
compute_data_dependences_for_loop (class loop *loop,
                                   bool compute_self_and_read_read_dependences,
                                   vec<loop_p> *loop_nest,
                                   vec<data_reference_p> *datarefs,
                                   vec<ddr_p> *dependence_relations)
{
  bool res = true;

  memset (&dependence_stats, 0, sizeof (dependence_stats));

  /* If the loop nest is not well formed, or one of the data references
     is not computable, give up without spending time to compute other
     dependences.  */
  if (!loop
      || !find_loop_nest (loop, loop_nest)
      || find_data_references_in_loop (loop, datarefs) == chrec_dont_know
      || !compute_all_dependences (*datarefs, dependence_relations, *loop_nest,
                                   compute_self_and_read_read_dependences))
    res = false;

  if (dump_file && (dump_flags & TDF_STATS))
    {
      fprintf (dump_file, "Dependence tester statistics:\n");

      fprintf (dump_file, "Number of dependence tests: %d\n",
               dependence_stats.num_dependence_tests);
      fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
               dependence_stats.num_dependence_dependent);
      fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
               dependence_stats.num_dependence_independent);
      fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
               dependence_stats.num_dependence_undetermined);

      fprintf (dump_file, "Number of subscript tests: %d\n",
               dependence_stats.num_subscript_tests);
      fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
               dependence_stats.num_subscript_undetermined);
      fprintf (dump_file, "Number of same subscript function: %d\n",
               dependence_stats.num_same_subscript_function);

      fprintf (dump_file, "Number of ziv tests: %d\n",
               dependence_stats.num_ziv);
      fprintf (dump_file, "Number of ziv tests returning dependent: %d\n",
               dependence_stats.num_ziv_dependent);
      fprintf (dump_file, "Number of ziv tests returning independent: %d\n",
               dependence_stats.num_ziv_independent);
      fprintf (dump_file, "Number of ziv tests unimplemented: %d\n",
               dependence_stats.num_ziv_unimplemented);

      fprintf (dump_file, "Number of siv tests: %d\n",
               dependence_stats.num_siv);
      fprintf (dump_file, "Number of siv tests returning dependent: %d\n",
               dependence_stats.num_siv_dependent);
      fprintf (dump_file, "Number of siv tests returning independent: %d\n",
               dependence_stats.num_siv_independent);
      fprintf (dump_file, "Number of siv tests unimplemented: %d\n",
               dependence_stats.num_siv_unimplemented);

      fprintf (dump_file, "Number of miv tests: %d\n",
               dependence_stats.num_miv);
      fprintf (dump_file, "Number of miv tests returning dependent: %d\n",
               dependence_stats.num_miv_dependent);
      fprintf (dump_file, "Number of miv tests returning independent: %d\n",
               dependence_stats.num_miv_independent);
      fprintf (dump_file, "Number of miv tests unimplemented: %d\n",
               dependence_stats.num_miv_unimplemented);
    }

  return res;
}

/* Free the memory used by a data dependence relation DDR.  */

void
free_dependence_relation (struct data_dependence_relation *ddr)
{
  if (ddr == NULL)
    return;

  if (DDR_SUBSCRIPTS (ddr).exists ())
    free_subscripts (DDR_SUBSCRIPTS (ddr));
  DDR_DIST_VECTS (ddr).release ();
  DDR_DIR_VECTS (ddr).release ();

  free (ddr);
}

/* Free the memory used by the data dependence relations from
   DEPENDENCE_RELATIONS.  */

void
free_dependence_relations (vec<ddr_p> dependence_relations)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
    if (ddr)
      free_dependence_relation (ddr);

  dependence_relations.release ();
}

/* Free the memory used by the data references from DATAREFS.  */

void
free_data_refs (vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    free_data_ref (dr);
  datarefs.release ();
}

/* Common routine implementing both dr_direction_indicator and
   dr_zero_step_indicator.  Return USEFUL_MIN if the indicator is known
   to be >= USEFUL_MIN and -1 if the indicator is known to be negative.
   Return the step as the indicator otherwise.  */

static tree
dr_step_indicator (struct data_reference *dr, int useful_min)
{
  tree step = DR_STEP (dr);
  if (!step)
    return NULL_TREE;
  STRIP_NOPS (step);
  /* Look for cases where the step is scaled by a positive constant
     integer, which will often be the access size.  If the multiplication
     doesn't change the sign (due to overflow effects) then we can
     test the unscaled value instead.  */
  if (TREE_CODE (step) == MULT_EXPR
      && TREE_CODE (TREE_OPERAND (step, 1)) == INTEGER_CST
      && tree_int_cst_sgn (TREE_OPERAND (step, 1)) > 0)
    {
      tree factor = TREE_OPERAND (step, 1);
      step = TREE_OPERAND (step, 0);

      /* Strip widening and truncating conversions as well as nops.  */
      if (CONVERT_EXPR_P (step)
          && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (step, 0))))
        step = TREE_OPERAND (step, 0);
      tree type = TREE_TYPE (step);

      /* Get the range of step values that would not cause overflow.  */
      widest_int minv = (wi::to_widest (TYPE_MIN_VALUE (ssizetype))
                         / wi::to_widest (factor));
      widest_int maxv = (wi::to_widest (TYPE_MAX_VALUE (ssizetype))
                         / wi::to_widest (factor));

      /* Get the range of values that the unconverted step actually has.  */
      wide_int step_min, step_max;
      if (TREE_CODE (step) != SSA_NAME
          || get_range_info (step, &step_min, &step_max) != VR_RANGE)
        {
          step_min = wi::to_wide (TYPE_MIN_VALUE (type));
          step_max = wi::to_wide (TYPE_MAX_VALUE (type));
        }

      /* Check whether the unconverted step has an acceptable range.  */
      signop sgn = TYPE_SIGN (type);
      if (wi::les_p (minv, widest_int::from (step_min, sgn))
          && wi::ges_p (maxv, widest_int::from (step_max, sgn)))
        {
          if (wi::ge_p (step_min, useful_min, sgn))
            return ssize_int (useful_min);
          else if (wi::lt_p (step_max, 0, sgn))
            return ssize_int (-1);
          else
            return fold_convert (ssizetype, step);
        }
    }
  return DR_STEP (dr);
}
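
/* For instance, with DR_STEP equal to i_1 * 4 and range info giving
   i_1 a VR_RANGE of [1, 100], the scaled range cannot overflow
   ssizetype, step_min >= USEFUL_MIN holds for both callers below, and
   the function returns ssize_int (USEFUL_MIN); for i_1 in [-100, -1]
   it would return ssize_int (-1) instead.  A sketch with made-up
   ranges.  */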

/* Return a value that is negative iff DR has a negative step.  */

tree
dr_direction_indicator (struct data_reference *dr)
{
  return dr_step_indicator (dr, 0);
}

/* Return a value that is zero iff DR has a zero step.  */

tree
dr_zero_step_indicator (struct data_reference *dr)
{
  return dr_step_indicator (dr, 1);
}

/* Return true if DR is known to have a nonnegative (but possibly zero)
   step.  */

bool
dr_known_forward_stride_p (struct data_reference *dr)
{
  tree indicator = dr_direction_indicator (dr);
  tree neg_step_val = fold_binary (LT_EXPR, boolean_type_node,
                                   fold_convert (ssizetype, indicator),
                                   ssize_int (0));
  return neg_step_val && integer_zerop (neg_step_val);
}