/* Operations with affine combinations of trees.
   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "expr.h"
#include "tree-pretty-print.h"
#include "pointer-set.h"
#include "tree-affine.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "flags.h"
#include "dumpfile.h"
#include "cfgexpand.h"
#include "wide-int-print.h"

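/* An affine combination COMB represents a value of type COMB->type of
   the form

     COMB->offset + sum_i (COMB->elts[i].coef * COMB->elts[i].val)
                  + COMB->rest

   where the offset and the coefficients are integer constants, the VALs
   are trees, and REST (counted with an implicit coefficient of one)
   collects whatever does not fit into the MAX_AFF_ELTS element slots.
   All constants are interpreted modulo the precision of COMB->type and
   kept sign-extended; see tree-affine.h for the definition of aff_tree.  */
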
/* Extends CST as appropriate for the affine combination COMB.  */

widest_int
wide_int_ext_for_comb (const widest_int &cst, aff_tree *comb)
{
  return wi::sext (cst, TYPE_PRECISION (comb->type));
}

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}

/* Scales COMB by SCALE.  */
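/* The offset and every coefficient are multiplied by SCALE and reduced
   to the precision of COMB->type, so e.g. scaling 3 + 2*x by 5 yields
   15 + 10*x; elements whose coefficient overflows to zero are dropped,
   and a pending REST is either moved into a free element slot with
   coefficient SCALE or multiplied explicitly.  */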

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
        = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (new_coef == 0)
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  wide_int_to_tree (type, scale));
    }
}

/* Adds ELT * SCALE to COMB.  */
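/* If ELT already occurs in COMB, its coefficient is merged with SCALE,
   and the element is removed when the merged coefficient becomes zero.
   Otherwise the term goes into a free element slot, or, when all
   MAX_AFF_ELTS slots are taken, is folded into COMB->rest as an
   explicit tree.  */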

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        widest_int new_coef
          = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb);
        if (new_coef != 0)
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = 1;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
                              elt);
  else
    comb->rest = elt;
}

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}

/* Converts affine combination COMB to TYPE.  */
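/* Narrowing (or equal-precision) conversions simply retag the type and
   re-extend the parts; widening conversions must round-trip through a
   tree, since constants reduced modulo the narrow precision cannot be
   reinterpreted directly in the wider type.  */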

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
        continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Splits EXPR into an affine combination of parts.  */
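/* For example, the expression (x + 3) * 4 - y is split into the
   combination { offset = 12, elts = { 4 * x, -1 * y } }; anything that
   cannot be decomposed further (a call, a load, ...) becomes a single
   element with coefficient one.  */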

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  HOST_WIDE_INT bitpos, bitsize;
  enum machine_mode mode;
  int unsignedp, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case INTEGER_CST:
      aff_combination_const (comb, type, wi::to_widest (expr));
      return;

    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
        aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
        break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, wi::to_widest (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
        {
          expr = TREE_OPERAND (expr, 0);
          tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
          tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
          aff_combination_add (comb, &tmp);
          return;
        }
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
                                  &toffset, &mode, &unsignedp, &volatilep,
                                  false);
      if (bitpos % BITS_PER_UNIT != 0)
        break;
      aff_combination_const (comb, type, bitpos / BITS_PER_UNIT);
      core = build_fold_addr_expr (core);
      if (TREE_CODE (core) == ADDR_EXPR)
        aff_combination_add_elt (comb, core, 1);
      else
        {
          tree_to_aff_combination (core, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      if (toffset)
        {
          tree_to_aff_combination (toffset, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
        tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
                                 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
        {
          aff_combination_elt (comb, type, expr);
          return;
        }
      else
        aff_combination_elt (comb, type,
                             build2 (MEM_REF, TREE_TYPE (expr),
                                     TREE_OPERAND (expr, 0),
                                     build_int_cst
                                     (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    default:
      break;
    }

  aff_combination_elt (comb, type, expr);
}

/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */
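/* Pointer-typed operands are combined with POINTER_PLUS_EXPR (via
   fold_build_pointer_plus) rather than PLUS_EXPR, and a scale of -1 is
   emitted as negation or subtraction so that the generated tree reads
   EXPR - ELT instead of EXPR + (-1) * ELT.  */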

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in,
                 aff_tree *comb ATTRIBUTE_UNUSED)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);

  if (scale == -1
      && POINTER_TYPE_P (TREE_TYPE (elt)))
    {
      elt = convert_to_ptrofftype (elt);
      elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
      scale = 1;
    }

  if (scale == 1)
    {
      if (!expr)
        {
          if (POINTER_TYPE_P (TREE_TYPE (elt)))
            return elt;
          else
            return fold_convert (type1, elt);
        }

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        return fold_build_pointer_plus (expr, elt);
      if (POINTER_TYPE_P (TREE_TYPE (elt)))
        return fold_build_pointer_plus (elt, expr);
      return fold_build2 (PLUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  if (scale == -1)
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type1,
                            fold_convert (type1, elt));

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        {
          elt = convert_to_ptrofftype (elt);
          elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  elt = fold_convert (type1, elt);
  if (!expr)
    return fold_build2 (MULT_EXPR, type1, elt,
                        wide_int_to_tree (type1, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     wide_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (TREE_TYPE (expr)))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type1, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = NULL_TREE;
  unsigned i;
  widest_int off, sgn;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef,
                            comb);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (wi::neg_p (comb->offset))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  return add_elt_to_tree (expr, type, wide_int_to_tree (type1, off), sgn,
                          comb);
}

/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in which case only
   C * COEF is added to R.  */
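/* The product distributes over C: each element C->elts[i].val picks up
   the factor VAL (becoming a new tree VAL' * VAL), C->rest contributes
   COEF * C->rest * VAL, and the constant part contributes
   COEF * C->offset * VAL as a new element (or as a constant when VAL
   is NULL).  */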
static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
                             aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    aff_combination_add_elt (r, val, coef * c->offset);
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  aff_combination_add_product (c1, c2->offset, NULL, r);
}

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
        if (idx)
          *idx = i;

        return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */
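/* Each element that is (a conversion of) an SSA name defined by a
   suitable assignment is replaced by the affine expansion of the
   definition's right-hand side.  The replacements are accumulated in
   TO_ADD together with a -COEF * E term cancelling the original
   element, so COMB itself is not modified while it is traversed;
   expansions are memoized in CACHE via struct name_expansion.  */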

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
                        struct pointer_map_t **cache ATTRIBUTE_UNUSED)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple def;
  widest_int scale;
  void **slot;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (TREE_CODE (e) == NOP_EXPR
          && (TYPE_PRECISION (type)
              >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
        name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
        continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
        continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
          && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
          && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
              || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
        continue;

      /* We do not know whether the reference retains its value at the
         place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
        continue;

      if (!*cache)
        *cache = pointer_map_create ();
      slot = pointer_map_insert (*cache, e);
      exp = (struct name_expansion *) *slot;

      if (!exp)
        {
          exp = XNEW (struct name_expansion);
          exp->in_progress = 1;
          *slot = exp;
          /* In principle this is a generally valid folding, but
             it is not unconditionally an optimization, so do it
             here and not in fold_unary.  */
          /* Convert (T1)(X *+- CST) into (T1)X *+- (T1)CST if T1 is wider
             than the type of X and overflow for the type of X is
             undefined.  */
          if (e != name
              && INTEGRAL_TYPE_P (type)
              && INTEGRAL_TYPE_P (TREE_TYPE (name))
              && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (name))
              && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (name))
              && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR)
              && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
            rhs = fold_build2 (code, type,
                               fold_convert (type, gimple_assign_rhs1 (def)),
                               fold_convert (type, gimple_assign_rhs2 (def)));
          else
            {
              rhs = gimple_assign_rhs_to_tree (def);
              if (e != name)
                rhs = fold_convert (type, rhs);
            }
          tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
          exp->expansion = current;
          exp->in_progress = 0;
        }
      else
        {
          /* Since we follow the definitions in the SSA form, we should not
             enter a cycle unless we pass through a phi node.  */
          gcc_assert (!exp->in_progress);
          current = exp->expansion;
        }

      /* Accumulate the new terms to TO_ADD, so that we do not modify
         COMB while traversing it; include the term -coef * E, to remove
         it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
                                struct pointer_map_t **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   pointer_map_traverse.  */

static bool
free_name_expansion (const void *key ATTRIBUTE_UNUSED, void **value,
                     void *data ATTRIBUTE_UNUSED)
{
  struct name_expansion *const exp = (struct name_expansion *) *value;

  free (exp);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (struct pointer_map_t **cache)
{
  if (!*cache)
    return;

  pointer_map_traverse (*cache, free_name_expansion, NULL);
  pointer_map_destroy (*cache);
  *cache = NULL;
}

/* Returns false if VAL is not CST * DIV for any constant CST.
   Otherwise, if *MULT_SET is true, additionally compares CST and *MULT,
   and if they are different, returns false.  Finally, if neither of
   these two cases occurs, true is returned, CST is stored to *MULT and
   *MULT_SET is set to true.  */
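/* For example, with VAL = 12 and DIV = 4 this computes CST = 3; a
   second call with VAL = 6 and DIV = 2 then succeeds (CST is again 3),
   whereas VAL = 8 and DIV = 2 would fail because CST would differ.  */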

static bool
wide_int_constant_multiple_p (const widest_int &val, const widest_int &div,
                              bool *mult_set, widest_int *mult)
{
  widest_int cst;

  if (val == 0)
    {
      if (*mult_set && *mult != 0)
        return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (div == 0)
    return false;

  if (!wi::multiple_of_p (val, div, SIGNED, &cst))
    return false;

  if (*mult_set && *mult != cst)
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */
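/* For instance, VAL = { 6 + 4*x } and DIV = { 3 + 2*x } match with
   X = 2: the combinations must have the same elements, and the offset
   and every coefficient of VAL must be the same constant multiple of
   the corresponding part of DIV.  */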

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
                                     widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && val->offset == 0)
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
                                     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
        = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
        return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
                                         &mult_set, mult))
        return false;
    }

  gcc_assert (mult_set);
  return true;
}

/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
        {
          fprintf (file, "    [%d] = ", i);
          print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

          fprintf (file, " * ");
          print_dec (val->elts[i].coef, file, sgn);
          if (i != val->n - 1)
            fprintf (file, ", \n");
        }
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, widest_int *size)
{
  HOST_WIDE_INT bitsize, bitpos;
  tree toff;
  enum machine_mode mode;
  int uns, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
                                   &uns, &vol, false);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bitpos / BITS_PER_UNIT);
  aff_combination_add (addr, &tmp);

  *size = (bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;

  return base;
}

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */
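/* DIFF is expected to be the affine difference of the two starting
   positions, i.e. the regions are [0, SIZE1) and [DIFF, DIFF + SIZE2);
   a symbolic (non-constant) difference makes the test fail
   conservatively.  */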

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1,
                           const widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (wi::neg_p (diff->offset))
    {
      /* The second object is before the first one, we succeed if the last
         element of the second object is before the start of the first one.  */
      return wi::neg_p (diff->offset + size2 - 1);
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return wi::les_p (size1, diff->offset);
    }
}