1 /* Operations with affine combinations of trees.
2 Copyright (C) 2005, 2007, 2008, 2010 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tree.h"
24 #include "tree-pretty-print.h"
25 #include "tree-dump.h"
26 #include "pointer-set.h"
27 #include "tree-affine.h"
28 #include "gimple.h"
29 #include "flags.h"
30
 31 /* Extends CST as appropriate for the affine combination COMB. */
32
33 double_int
34 double_int_ext_for_comb (double_int cst, aff_tree *comb)
35 {
36 return double_int_sext (cst, TYPE_PRECISION (comb->type));
37 }
38
39 /* Initializes affine combination COMB so that its value is zero in TYPE. */
40
41 static void
42 aff_combination_zero (aff_tree *comb, tree type)
43 {
44 comb->type = type;
45 comb->offset = double_int_zero;
46 comb->n = 0;
47 comb->rest = NULL_TREE;
48 }
49
50 /* Sets COMB to CST. */
51
52 void
53 aff_combination_const (aff_tree *comb, tree type, double_int cst)
54 {
55 aff_combination_zero (comb, type);
56 comb->offset = double_int_ext_for_comb (cst, comb);
57 }
58
 59 /* Sets COMB to the single element ELT. */
60
61 void
62 aff_combination_elt (aff_tree *comb, tree type, tree elt)
63 {
64 aff_combination_zero (comb, type);
65
66 comb->n = 1;
67 comb->elts[0].val = elt;
68 comb->elts[0].coef = double_int_one;
69 }
70
71 /* Scales COMB by SCALE. */
72
73 void
74 aff_combination_scale (aff_tree *comb, double_int scale)
75 {
76 unsigned i, j;
77
78 scale = double_int_ext_for_comb (scale, comb);
79 if (double_int_one_p (scale))
80 return;
81
82 if (double_int_zero_p (scale))
83 {
84 aff_combination_zero (comb, comb->type);
85 return;
86 }
87
88 comb->offset
89 = double_int_ext_for_comb (double_int_mul (scale, comb->offset), comb);
90 for (i = 0, j = 0; i < comb->n; i++)
91 {
92 double_int new_coef;
93
94 new_coef
95 = double_int_ext_for_comb (double_int_mul (scale, comb->elts[i].coef),
96 comb);
97 /* A coefficient may become zero due to overflow. Remove the zero
98 elements. */
99 if (double_int_zero_p (new_coef))
100 continue;
101 comb->elts[j].coef = new_coef;
102 comb->elts[j].val = comb->elts[i].val;
103 j++;
104 }
105 comb->n = j;
106
107 if (comb->rest)
108 {
109 tree type = comb->type;
110 if (POINTER_TYPE_P (type))
111 type = sizetype;
112 if (comb->n < MAX_AFF_ELTS)
113 {
114 comb->elts[comb->n].coef = scale;
115 comb->elts[comb->n].val = comb->rest;
116 comb->rest = NULL_TREE;
117 comb->n++;
118 }
119 else
120 comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
121 double_int_to_tree (type, scale));
122 }
123 }
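/* Editor's illustrative sketch (not part of the original source): scaling
   the combination 3*x + 5 by 4 in a sufficiently wide type yields
   12*x + 20.  Every product is re-extended to the precision of COMB->type
   by double_int_ext_for_comb, so a coefficient that wraps around to zero
   is dropped from the element array; a non-NULL COMB->rest is either
   promoted into a free array slot with coefficient SCALE or folded into
   the tree rest * SCALE.  */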
124
125 /* Adds ELT * SCALE to COMB. */
126
127 void
128 aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
129 {
130 unsigned i;
131 tree type;
132
133 scale = double_int_ext_for_comb (scale, comb);
134 if (double_int_zero_p (scale))
135 return;
136
137 for (i = 0; i < comb->n; i++)
138 if (operand_equal_p (comb->elts[i].val, elt, 0))
139 {
140 double_int new_coef;
141
142 new_coef = double_int_add (comb->elts[i].coef, scale);
143 new_coef = double_int_ext_for_comb (new_coef, comb);
144 if (!double_int_zero_p (new_coef))
145 {
146 comb->elts[i].coef = new_coef;
147 return;
148 }
149
150 comb->n--;
151 comb->elts[i] = comb->elts[comb->n];
152
153 if (comb->rest)
154 {
155 gcc_assert (comb->n == MAX_AFF_ELTS - 1);
156 comb->elts[comb->n].coef = double_int_one;
157 comb->elts[comb->n].val = comb->rest;
158 comb->rest = NULL_TREE;
159 comb->n++;
160 }
161 return;
162 }
163 if (comb->n < MAX_AFF_ELTS)
164 {
165 comb->elts[comb->n].coef = scale;
166 comb->elts[comb->n].val = elt;
167 comb->n++;
168 return;
169 }
170
171 type = comb->type;
172 if (POINTER_TYPE_P (type))
173 type = sizetype;
174
175 if (double_int_one_p (scale))
176 elt = fold_convert (type, elt);
177 else
178 elt = fold_build2 (MULT_EXPR, type,
179 fold_convert (type, elt),
180 double_int_to_tree (type, scale));
181
182 if (comb->rest)
183 comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
184 elt);
185 else
186 comb->rest = elt;
187 }
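/* Editor's illustrative sketch: adding x * -3 to a combination that
   already contains x * 3 cancels the element: the extended coefficient
   becomes zero, the last array element is moved into the freed slot, and
   a pending COMB->rest (possible only when the array was full) is pulled
   back into the array with coefficient one.  If ELT is new and the array
   is already full, ELT * SCALE is instead folded into COMB->rest.  */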
188
189 /* Adds CST to C. */
190
191 static void
192 aff_combination_add_cst (aff_tree *c, double_int cst)
193 {
194 c->offset = double_int_ext_for_comb (double_int_add (c->offset, cst), c);
195 }
196
197 /* Adds COMB2 to COMB1. */
198
199 void
200 aff_combination_add (aff_tree *comb1, aff_tree *comb2)
201 {
202 unsigned i;
203
204 aff_combination_add_cst (comb1, comb2->offset);
205 for (i = 0; i < comb2->n; i++)
206 aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
207 if (comb2->rest)
208 aff_combination_add_elt (comb1, comb2->rest, double_int_one);
209 }
210
211 /* Converts affine combination COMB to TYPE. */
212
213 void
214 aff_combination_convert (aff_tree *comb, tree type)
215 {
216 unsigned i, j;
217 tree comb_type = comb->type;
218
219 if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
220 {
221 tree val = fold_convert (type, aff_combination_to_tree (comb));
222 tree_to_aff_combination (val, type, comb);
223 return;
224 }
225
226 comb->type = type;
227 if (comb->rest && !POINTER_TYPE_P (type))
228 comb->rest = fold_convert (type, comb->rest);
229
230 if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
231 return;
232
233 comb->offset = double_int_ext_for_comb (comb->offset, comb);
234 for (i = j = 0; i < comb->n; i++)
235 {
236 double_int new_coef = double_int_ext_for_comb (comb->elts[i].coef, comb);
237 if (double_int_zero_p (new_coef))
238 continue;
239 comb->elts[j].coef = new_coef;
240 comb->elts[j].val = fold_convert (type, comb->elts[i].val);
241 j++;
242 }
243
244 comb->n = j;
245 if (comb->n < MAX_AFF_ELTS && comb->rest)
246 {
247 comb->elts[comb->n].coef = double_int_one;
248 comb->elts[comb->n].val = comb->rest;
249 comb->rest = NULL_TREE;
250 comb->n++;
251 }
252 }
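/* Editor's illustrative sketch: narrowing a combination from a 64-bit to
   a 32-bit type only re-extends the offset and the coefficients to the
   new precision, dropping elements whose coefficients become zero.
   Widening cannot be done element-wise, because the truncated pieces no
   longer carry enough information, so the combination is rebuilt as a
   tree, converted, and re-parsed by tree_to_aff_combination.  */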
253
254 /* Splits EXPR into an affine combination of parts. */
255
256 void
257 tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
258 {
259 aff_tree tmp;
260 enum tree_code code;
261 tree cst, core, toffset;
262 HOST_WIDE_INT bitpos, bitsize;
263 enum machine_mode mode;
264 int unsignedp, volatilep;
265
266 STRIP_NOPS (expr);
267
268 code = TREE_CODE (expr);
269 switch (code)
270 {
271 case INTEGER_CST:
272 aff_combination_const (comb, type, tree_to_double_int (expr));
273 return;
274
275 case POINTER_PLUS_EXPR:
276 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
277 tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
278 aff_combination_add (comb, &tmp);
279 return;
280
281 case PLUS_EXPR:
282 case MINUS_EXPR:
283 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
284 tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
285 if (code == MINUS_EXPR)
286 aff_combination_scale (&tmp, double_int_minus_one);
287 aff_combination_add (comb, &tmp);
288 return;
289
290 case MULT_EXPR:
291 cst = TREE_OPERAND (expr, 1);
292 if (TREE_CODE (cst) != INTEGER_CST)
293 break;
294 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
295 aff_combination_scale (comb, tree_to_double_int (cst));
296 return;
297
298 case NEGATE_EXPR:
299 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
300 aff_combination_scale (comb, double_int_minus_one);
301 return;
302
303 case BIT_NOT_EXPR:
304 /* ~x = -x - 1 */
305 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
306 aff_combination_scale (comb, double_int_minus_one);
307 aff_combination_add_cst (comb, double_int_minus_one);
308 return;
309
310 case ADDR_EXPR:
311 /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR. */
312 if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
313 {
314 expr = TREE_OPERAND (expr, 0);
315 tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
316 tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
317 aff_combination_add (comb, &tmp);
318 return;
319 }
320 core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
321 &toffset, &mode, &unsignedp, &volatilep,
322 false);
323 if (bitpos % BITS_PER_UNIT != 0)
324 break;
325 aff_combination_const (comb, type,
326 uhwi_to_double_int (bitpos / BITS_PER_UNIT));
327 core = build_fold_addr_expr (core);
328 if (TREE_CODE (core) == ADDR_EXPR)
329 aff_combination_add_elt (comb, core, double_int_one);
330 else
331 {
332 tree_to_aff_combination (core, type, &tmp);
333 aff_combination_add (comb, &tmp);
334 }
335 if (toffset)
336 {
337 tree_to_aff_combination (toffset, type, &tmp);
338 aff_combination_add (comb, &tmp);
339 }
340 return;
341
342 case MEM_REF:
343 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
344 tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
345 type, comb);
346 else if (integer_zerop (TREE_OPERAND (expr, 1)))
347 {
348 aff_combination_elt (comb, type, expr);
349 return;
350 }
351 else
352 aff_combination_elt (comb, type,
353 build2 (MEM_REF, TREE_TYPE (expr),
354 TREE_OPERAND (expr, 0),
355 build_int_cst
356 (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
357 tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
358 aff_combination_add (comb, &tmp);
359 return;
360
361 default:
362 break;
363 }
364
365 aff_combination_elt (comb, type, expr);
366 }
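/* Editor's illustrative sketch: for an expression such as a + (b - 7) * 4
   in a 32-bit type the recursion produces
       offset = -28, elts = { {a, 1}, {b, 4} }, rest = NULL_TREE:
   PLUS_EXPR splits into two sub-combinations, MULT_EXPR by the constant 4
   scales the inner one, and the constants accumulate into the offset.
   Anything the switch does not handle becomes a single element with
   coefficient one.  */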
367
368 /* Creates EXPR + ELT * SCALE in TYPE. EXPR is taken from affine
369 combination COMB. */
370
371 static tree
372 add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
373 aff_tree *comb)
374 {
375 enum tree_code code;
376 tree type1 = type;
377 if (POINTER_TYPE_P (type))
378 type1 = sizetype;
379
380 scale = double_int_ext_for_comb (scale, comb);
381 elt = fold_convert (type1, elt);
382
383 if (double_int_one_p (scale))
384 {
385 if (!expr)
386 return fold_convert (type, elt);
387
388 if (POINTER_TYPE_P (type))
389 return fold_build_pointer_plus (expr, elt);
390 return fold_build2 (PLUS_EXPR, type, expr, elt);
391 }
392
393 if (double_int_minus_one_p (scale))
394 {
395 if (!expr)
396 return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));
397
398 if (POINTER_TYPE_P (type))
399 {
400 elt = fold_build1 (NEGATE_EXPR, type1, elt);
401 return fold_build_pointer_plus (expr, elt);
402 }
403 return fold_build2 (MINUS_EXPR, type, expr, elt);
404 }
405
406 if (!expr)
407 return fold_convert (type,
408 fold_build2 (MULT_EXPR, type1, elt,
409 double_int_to_tree (type1, scale)));
410
411 if (double_int_negative_p (scale))
412 {
413 code = MINUS_EXPR;
414 scale = double_int_neg (scale);
415 }
416 else
417 code = PLUS_EXPR;
418
419 elt = fold_build2 (MULT_EXPR, type1, elt,
420 double_int_to_tree (type1, scale));
421 if (POINTER_TYPE_P (type))
422 {
423 if (code == MINUS_EXPR)
424 elt = fold_build1 (NEGATE_EXPR, type1, elt);
425 return fold_build_pointer_plus (expr, elt);
426 }
427 return fold_build2 (code, type, expr, elt);
428 }
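/* Editor's illustrative sketch: with SCALE = -2 this helper emits
   EXPR - ELT * 2 rather than EXPR + ELT * -2, and for pointer-typed EXPR
   every addition goes through fold_build_pointer_plus so that the offset
   operand is kept in sizetype.  */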
429
430 /* Makes a tree from the affine combination COMB. */
431
432 tree
433 aff_combination_to_tree (aff_tree *comb)
434 {
435 tree type = comb->type;
436 tree expr = NULL_TREE;
437 unsigned i;
438 double_int off, sgn;
439 tree type1 = type;
440 if (POINTER_TYPE_P (type))
441 type1 = sizetype;
442
443 gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);
444
445 for (i = 0; i < comb->n; i++)
446 expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef,
447 comb);
448
449 if (comb->rest)
450 expr = add_elt_to_tree (expr, type, comb->rest, double_int_one, comb);
451
452 /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
453 unsigned. */
454 if (double_int_negative_p (comb->offset))
455 {
456 off = double_int_neg (comb->offset);
457 sgn = double_int_minus_one;
458 }
459 else
460 {
461 off = comb->offset;
462 sgn = double_int_one;
463 }
464 return add_elt_to_tree (expr, type, double_int_to_tree (type1, off), sgn,
465 comb);
466 }
467
468 /* Copies the tree elements of COMB to ensure that they are not shared. */
469
470 void
471 unshare_aff_combination (aff_tree *comb)
472 {
473 unsigned i;
474
475 for (i = 0; i < comb->n; i++)
476 comb->elts[i].val = unshare_expr (comb->elts[i].val);
477 if (comb->rest)
478 comb->rest = unshare_expr (comb->rest);
479 }
480
481 /* Removes the M-th element from COMB, moving the last element into its place. */
482
483 void
484 aff_combination_remove_elt (aff_tree *comb, unsigned m)
485 {
486 comb->n--;
487 if (m <= comb->n)
488 comb->elts[m] = comb->elts[comb->n];
489 if (comb->rest)
490 {
491 comb->elts[comb->n].coef = double_int_one;
492 comb->elts[comb->n].val = comb->rest;
493 comb->rest = NULL_TREE;
494 comb->n++;
495 }
496 }
497
498 /* Adds C * COEF * VAL to R. VAL may be NULL, in that case only
499 C * COEF is added to R. */
500
501
502 static void
503 aff_combination_add_product (aff_tree *c, double_int coef, tree val,
504 aff_tree *r)
505 {
506 unsigned i;
507 tree aval, type;
508
509 for (i = 0; i < c->n; i++)
510 {
511 aval = c->elts[i].val;
512 if (val)
513 {
514 type = TREE_TYPE (aval);
515 aval = fold_build2 (MULT_EXPR, type, aval,
516 fold_convert (type, val));
517 }
518
519 aff_combination_add_elt (r, aval,
520 double_int_mul (coef, c->elts[i].coef));
521 }
522
523 if (c->rest)
524 {
525 aval = c->rest;
526 if (val)
527 {
528 type = TREE_TYPE (aval);
529 aval = fold_build2 (MULT_EXPR, type, aval,
530 fold_convert (type, val));
531 }
532
533 aff_combination_add_elt (r, aval, coef);
534 }
535
536 if (val)
537 aff_combination_add_elt (r, val,
538 double_int_mul (coef, c->offset));
539 else
540 aff_combination_add_cst (r, double_int_mul (coef, c->offset));
541 }
542
543 /* Multiplies C1 by C2, storing the result in R. */
544
545 void
546 aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
547 {
548 unsigned i;
549 gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));
550
551 aff_combination_zero (r, c1->type);
552
553 for (i = 0; i < c2->n; i++)
554 aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
555 if (c2->rest)
556 aff_combination_add_product (c1, double_int_one, c2->rest, r);
557 aff_combination_add_product (c1, c2->offset, NULL, r);
558 }
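/* Editor's illustrative sketch: multiplying 2*x + 1 by 3*y + 4
   distributes every term of C2 over C1, giving
       6 * (x*y) + 8*x + 3*y + 4,
   where the x*y element is built with fold_build2 (MULT_EXPR, ...).
   Both operands must have the same precision, and R is zeroed before the
   products are accumulated into it.  */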
559
560 /* Returns the element of COMB whose value is VAL, or NULL if no such
561 element exists. If IDX is not NULL, it is set to the index of VAL in
562 COMB. */
563
564 static struct aff_comb_elt *
565 aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
566 {
567 unsigned i;
568
569 for (i = 0; i < comb->n; i++)
570 if (operand_equal_p (comb->elts[i].val, val, 0))
571 {
572 if (idx)
573 *idx = i;
574
575 return &comb->elts[i];
576 }
577
578 return NULL;
579 }
580
581 /* Element of the cache that maps ssa name NAME to its expanded form
582 as an affine expression EXPANSION. */
583
584 struct name_expansion
585 {
586 aff_tree expansion;
587
588 /* True if the expansion for the name is just being generated. */
589 unsigned in_progress : 1;
590 };
591
592 /* Expands SSA names in COMB recursively. CACHE is used to cache the
593 results. */
594
595 void
596 aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
597 struct pointer_map_t **cache ATTRIBUTE_UNUSED)
598 {
599 unsigned i;
600 aff_tree to_add, current, curre;
601 tree e, rhs;
602 gimple def;
603 double_int scale;
604 void **slot;
605 struct name_expansion *exp;
606
607 aff_combination_zero (&to_add, comb->type);
608 for (i = 0; i < comb->n; i++)
609 {
610 tree type, name;
611 enum tree_code code;
612
613 e = comb->elts[i].val;
614 type = TREE_TYPE (e);
615 name = e;
616 /* Look through some conversions. */
617 if (TREE_CODE (e) == NOP_EXPR
618 && (TYPE_PRECISION (type)
619 >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
620 name = TREE_OPERAND (e, 0);
621 if (TREE_CODE (name) != SSA_NAME)
622 continue;
623 def = SSA_NAME_DEF_STMT (name);
624 if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
625 continue;
626
627 code = gimple_assign_rhs_code (def);
628 if (code != SSA_NAME
629 && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
630 && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
631 || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
632 continue;
633
634 /* We do not know whether the reference retains its value at the
635 place where the expansion is used. */
636 if (TREE_CODE_CLASS (code) == tcc_reference)
637 continue;
638
639 if (!*cache)
640 *cache = pointer_map_create ();
641 slot = pointer_map_insert (*cache, e);
642 exp = (struct name_expansion *) *slot;
643
644 if (!exp)
645 {
646 exp = XNEW (struct name_expansion);
647 exp->in_progress = 1;
648 *slot = exp;
649 /* In principle this is a generally valid folding, but
650 it is not unconditionally an optimization, so do it
651 here and not in fold_unary. */
652 /* Convert (T1)(X *+- CST) into (T1)X *+- (T1)CST if T1 is wider
653 than the type of X and overflow for the type of X is
654 undefined. */
655 if (e != name
656 && INTEGRAL_TYPE_P (type)
657 && INTEGRAL_TYPE_P (TREE_TYPE (name))
658 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (name))
659 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (name))
660 && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR)
661 && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
662 rhs = fold_build2 (code, type,
663 fold_convert (type, gimple_assign_rhs1 (def)),
664 fold_convert (type, gimple_assign_rhs2 (def)));
665 else
666 {
667 rhs = gimple_assign_rhs_to_tree (def);
668 if (e != name)
669 rhs = fold_convert (type, rhs);
670 }
671 tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
672 exp->expansion = current;
673 exp->in_progress = 0;
674 }
675 else
676 {
677 /* Since we follow the definitions in the SSA form, we should not
678 enter a cycle unless we pass through a phi node. */
679 gcc_assert (!exp->in_progress);
680 current = exp->expansion;
681 }
682
683 /* Accumulate the new terms to TO_ADD, so that we do not modify
684 COMB while traversing it; include the term -coef * E, to remove
685 it from COMB. */
686 scale = comb->elts[i].coef;
687 aff_combination_zero (&curre, comb->type);
688 aff_combination_add_elt (&curre, e, double_int_neg (scale));
689 aff_combination_scale (&current, scale);
690 aff_combination_add (&to_add, &current);
691 aff_combination_add (&to_add, &curre);
692 }
693 aff_combination_add (comb, &to_add);
694 }
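/* Editor's illustrative sketch: if COMB contains the element a2 * 3 and
   the defining statements are a2 = a1 + a1 and a1 = b + 2, the recursive
   expansion turns a2 into b * 2 + 4 (and caches that result under a2),
   so COMB ends up containing b * 6 with its offset increased by 12.  The
   -coef * E term queued in TO_ADD is what removes the original a2
   element from COMB.  */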
695
696 /* Similar to tree_to_aff_combination, but follows SSA name definitions
697 and expands them recursively. CACHE is used to cache the expansions
698 of the ssa names, to avoid exponential time complexity for cases
699 like
700
701 a1 = a0 + a0;
702 a2 = a1 + a1;
703 a3 = a2 + a2;
704 ... */
705
706 void
707 tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
708 struct pointer_map_t **cache)
709 {
710 tree_to_aff_combination (expr, type, comb);
711 aff_combination_expand (comb, cache);
712 }
713
714 /* Frees memory occupied by struct name_expansion in *VALUE. Callback for
715 pointer_map_traverse. */
716
717 static bool
718 free_name_expansion (const void *key ATTRIBUTE_UNUSED, void **value,
719 void *data ATTRIBUTE_UNUSED)
720 {
721 struct name_expansion *const exp = (struct name_expansion *) *value;
722
723 free (exp);
724 return true;
725 }
726
727 /* Frees memory allocated for the CACHE used by
728 tree_to_aff_combination_expand. */
729
730 void
731 free_affine_expand_cache (struct pointer_map_t **cache)
732 {
733 if (!*cache)
734 return;
735
736 pointer_map_traverse (*cache, free_name_expansion, NULL);
737 pointer_map_destroy (*cache);
738 *cache = NULL;
739 }
740
741 /* Returns false if VAL is not a constant multiple of DIV, i.e. if there
742    is no constant CST such that VAL == CST * DIV.  Otherwise, if VAL != 0
743    (and hence CST != 0) and *MULT_SET is true, additionally compares CST
744    with *MULT and returns false if they differ.  In all remaining cases
745    returns true, storing CST in *MULT and setting *MULT_SET whenever
746    VAL != 0. */
747
748 static bool
749 double_int_constant_multiple_p (double_int val, double_int div,
750 bool *mult_set, double_int *mult)
751 {
752 double_int rem, cst;
753
754 if (double_int_zero_p (val))
755 return true;
756
757 if (double_int_zero_p (div))
758 return false;
759
760 cst = double_int_sdivmod (val, div, FLOOR_DIV_EXPR, &rem);
761 if (!double_int_zero_p (rem))
762 return false;
763
764 if (*mult_set && !double_int_equal_p (*mult, cst))
765 return false;
766
767 *mult_set = true;
768 *mult = cst;
769 return true;
770 }
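/* Editor's illustrative sketch: with *MULT_SET initially false, a call
   with VAL = 12 and DIV = 4 stores 3 in *MULT and sets *MULT_SET; a
   subsequent call with VAL = 8 and DIV = 4 then fails because the
   quotient 2 disagrees with the recorded multiple, and VAL = 10 with
   DIV = 4 fails already on the nonzero remainder.  A zero VAL always
   succeeds without touching *MULT.  */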
771
772 /* Returns true if VAL = X * DIV for some constant X. If this is the case,
773 X is stored to MULT. */
774
775 bool
776 aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
777 double_int *mult)
778 {
779 bool mult_set = false;
780 unsigned i;
781
782 if (val->n == 0 && double_int_zero_p (val->offset))
783 {
784 *mult = double_int_zero;
785 return true;
786 }
787 if (val->n != div->n)
788 return false;
789
790 if (val->rest || div->rest)
791 return false;
792
793 if (!double_int_constant_multiple_p (val->offset, div->offset,
794 &mult_set, mult))
795 return false;
796
797 for (i = 0; i < div->n; i++)
798 {
799 struct aff_comb_elt *elt
800 = aff_combination_find_elt (val, div->elts[i].val, NULL);
801 if (!elt)
802 return false;
803 if (!double_int_constant_multiple_p (elt->coef, div->elts[i].coef,
804 &mult_set, mult))
805 return false;
806 }
807
808 gcc_assert (mult_set);
809 return true;
810 }
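/* Editor's illustrative sketch: VAL = 6*x + 9*y + 3 is a constant
   multiple of DIV = 2*x + 3*y + 1 with *MULT = 3; every element-wise
   division has to agree on the same quotient, and combinations with a
   nonzero rest are conservatively rejected.  */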
811
812 /* Prints the affine combination VAL to FILE. */
813
814 static void
815 print_aff (FILE *file, aff_tree *val)
816 {
817 unsigned i;
818 bool uns = TYPE_UNSIGNED (val->type);
819 if (POINTER_TYPE_P (val->type))
820 uns = false;
821 fprintf (file, "{\n type = ");
822 print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
823 fprintf (file, "\n offset = ");
824 dump_double_int (file, val->offset, uns);
825 if (val->n > 0)
826 {
827 fprintf (file, "\n elements = {\n");
828 for (i = 0; i < val->n; i++)
829 {
830 fprintf (file, " [%d] = ", i);
831 print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);
832
833 fprintf (file, " * ");
834 dump_double_int (file, val->elts[i].coef, uns);
835 if (i != val->n - 1)
836 fprintf (file, ", \n");
837 }
838 fprintf (file, "\n }");
839 }
840 if (val->rest)
841 {
842 fprintf (file, "\n rest = ");
843 print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
844 }
845 fprintf (file, "\n}");
846 }
847
848 /* Prints the affine combination VAL to standard error; used for debugging. */
849
850 DEBUG_FUNCTION void
851 debug_aff (aff_tree *val)
852 {
853 print_aff (stderr, val);
854 fprintf (stderr, "\n");
855 }
856
857 /* Returns the address of the reference REF in ADDR.  The size of the
858    accessed location is stored in SIZE. */
859
860 void
861 get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size)
862 {
863 HOST_WIDE_INT bitsize, bitpos;
864 tree toff;
865 enum machine_mode mode;
866 int uns, vol;
867 aff_tree tmp;
868 tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
869 &uns, &vol, false);
870 tree base_addr = build_fold_addr_expr (base);
871
872 /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT. */
873
874 tree_to_aff_combination (base_addr, sizetype, addr);
875
876 if (toff)
877 {
878 tree_to_aff_combination (toff, sizetype, &tmp);
879 aff_combination_add (addr, &tmp);
880 }
881
882 aff_combination_const (&tmp, sizetype,
883 shwi_to_double_int (bitpos / BITS_PER_UNIT));
884 aff_combination_add (addr, &tmp);
885
886 *size = shwi_to_double_int ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
887 }
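/* Editor's illustrative sketch: for a reference such as a.b[i],
   get_inner_reference typically yields the base object, a variable byte
   offset for the index and a constant bit position for the field; the
   pieces are summed in sizetype following the formula above, and *SIZE
   receives the access size rounded up to whole bytes.  */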
888
889 /* Returns true if a region of size SIZE1 at position 0 and a region of
890 size SIZE2 at position DIFF cannot overlap. */
891
892 bool
893 aff_comb_cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2)
894 {
895 double_int d, bound;
896
897 /* Unless the difference is a constant, we fail. */
898 if (diff->n != 0)
899 return false;
900
901 d = diff->offset;
902 if (double_int_negative_p (d))
903 {
904 /* The second object is before the first one; we succeed if the last
905    byte of the second object is before the start of the first one. */
906 bound = double_int_add (d, double_int_add (size2, double_int_minus_one));
907 return double_int_negative_p (bound);
908 }
909 else
910 {
911 /* We succeed if the second object starts after the first one ends. */
912 return double_int_scmp (size1, d) <= 0;
913 }
914 }
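/* Editor's illustrative sketch: with SIZE1 = 4 and SIZE2 = 4, a constant
   difference of 4 means the second region starts exactly where the first
   one ends, so they cannot overlap (size1 <= d holds); a difference of -4
   likewise places the last byte of the second region at offset -1, before
   the first region.  A difference of 3 or -3 fails both tests, so overlap
   cannot be excluded.  */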
915