/* Operations with affine combinations of trees.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "tree-affine.h"
#include "gimplify.h"
#include "dumpfile.h"
#include "cfgexpand.h"

/* Extends CST as appropriate for the affine combinations COMB.  */

static widest_int
wide_int_ext_for_comb (const widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}

/* Likewise for polynomial offsets.  */

static poly_widest_int
wide_int_ext_for_comb (const poly_widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}

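/* For instance, with an 8-bit TYPE the value 0xff sign-extends to -1
   and 0x100 to 0, so all arithmetic on offsets and coefficients below
   is implicitly modulo 2^TYPE_PRECISION.  */
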
/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb->type);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
	= wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type);
      /* A coefficient may become zero due to overflow.  Remove the zero
	 elements.  */
      if (new_coef == 0)
	continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
	type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
	{
	  comb->elts[comb->n].coef = scale;
	  comb->elts[comb->n].val = comb->rest;
	  comb->rest = NULL_TREE;
	  comb->n++;
	}
      else
	comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
				  wide_int_to_tree (type, scale));
    }
}

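/* A worked instance of the overflow handling above: in an unsigned
   8-bit TYPE, scaling the combination 64*x by 4 gives a coefficient of
   256, which is 0 modulo 2^8, so the x element is dropped and the
   result is the zero combination.  */
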
/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	widest_int new_coef
	  = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
	if (new_coef != 0)
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = 1;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
			      elt);
  else
    comb->rest = elt;
}

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c->type);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}

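/* A minimal usage sketch (assuming an integral type TYPE and trees X
   and Y of that type), building the combination 3*X + 2*Y + 5:

     aff_tree c, tmp;
     aff_combination_elt (&c, type, x);
     aff_combination_scale (&c, 3);
     aff_combination_elt (&tmp, type, y);
     aff_combination_scale (&tmp, 2);
     aff_combination_add (&c, &tmp);
     aff_combination_add_cst (&c, 5);

   Afterwards C has n == 2, elements { x * 3, y * 2 } and offset 5;
   aff_combination_add_cst is the static helper defined above.  */
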
/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb->type);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
	continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

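/* Note the asymmetry above: a widening conversion cannot be done
   element-wise (the combination is only meaningful modulo
   2^TYPE_PRECISION of its type), so the combination is rebuilt as a
   tree and re-parsed in the wider type, while a narrowing conversion
   simply truncates the offset and converts the elements in place.  */
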
/* Tries to handle OP0 CODE OP1 as affine combination of parts.  Returns
   true when that was successful and returns the combination in COMB.  */

static bool
expr_to_aff_combination (aff_tree *comb, tree_code code, tree type,
			 tree op0, tree op1 = NULL_TREE)
{
  aff_tree tmp;
  poly_int64 bitpos, bitsize, bytepos;

  switch (code)
    {
    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, type, &tmp);
      if (code == MINUS_EXPR)
	aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return true;

    case MULT_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST)
	break;
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, wi::to_widest (op1));
      return true;

    case NEGATE_EXPR:
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      return true;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1  */
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return true;

    CASE_CONVERT:
      {
	tree otype = type;
	tree inner = op0;
	tree itype = TREE_TYPE (inner);
	enum tree_code icode = TREE_CODE (inner);

	/* STRIP_NOPS  */
	if (tree_nop_conversion_p (otype, itype))
	  {
	    tree_to_aff_combination (op0, type, comb);
	    return true;
	  }

	/* In principle this is a valid folding, but it isn't necessarily
	   an optimization, so do it here and not in fold_unary.  */
	if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
	    && TREE_CODE (itype) == INTEGER_TYPE
	    && TREE_CODE (otype) == INTEGER_TYPE
	    && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
	  {
	    tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);

	    /* If inner type has undefined overflow behavior, fold conversion
	       for below two cases:
		 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
		 (T1)(X + X)     -> (T1)X + (T1)X.  */
	    if (TYPE_OVERFLOW_UNDEFINED (itype)
		&& (TREE_CODE (op1) == INTEGER_CST
		    || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
	      {
		op0 = fold_convert (otype, op0);
		op1 = fold_convert (otype, op1);
		return expr_to_aff_combination (comb, icode, otype, op0, op1);
	      }
	    wide_int minv, maxv;
	    /* If inner type has wrapping overflow behavior, fold conversion
	       for below case:
		 (T1)(X - CST) -> (T1)X - (T1)CST
	       if X - CST doesn't overflow by range information.  Also handle
	       (T1)(X + CST) as (T1)(X - (-CST)).  */
	    if (TYPE_UNSIGNED (itype)
		&& TYPE_OVERFLOW_WRAPS (itype)
		&& TREE_CODE (op0) == SSA_NAME
		&& TREE_CODE (op1) == INTEGER_CST
		&& icode != MULT_EXPR
		&& get_range_info (op0, &minv, &maxv) == VR_RANGE)
	      {
		if (icode == PLUS_EXPR)
		  op1 = wide_int_to_tree (itype, -wi::to_wide (op1));
		if (wi::geu_p (minv, wi::to_wide (op1)))
		  {
		    op0 = fold_convert (otype, op0);
		    op1 = fold_convert (otype, op1);
		    return expr_to_aff_combination (comb, MINUS_EXPR, otype,
						    op0, op1);
		  }
	      }
	  }
      }
      break;

    default:;
    }

  return false;
}

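/* A worked instance of the wrapping-overflow case above: for an
   unsigned char X known to lie in [10, 20], (int)(X - 3) can be folded
   to (int)X - 3, because minv = 10 >= 3 guarantees X - 3 does not
   wrap.  Likewise (int)(X + 250) is first rewritten as (int)(X - 6)
   and folded the same way, since 10 >= 6.  */
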
/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0),
				   TREE_OPERAND (expr, 1)))
	return;
      break;

    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0)))
	return;
      break;

    CASE_CONVERT:
      /* ??? TREE_TYPE (expr) should be equal to type here, but IVOPTS
	 calls this with not showing an outer widening cast.  */
      if (expr_to_aff_combination (comb, code,
				   TREE_TYPE (expr), TREE_OPERAND (expr, 0)))
	{
	  aff_combination_convert (comb, type);
	  return;
	}
      break;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
	{
	  expr = TREE_OPERAND (expr, 0);
	  tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
	  tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
	  aff_combination_add (comb, &tmp);
	  return;
	}
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
				  &toffset, &mode, &unsignedp, &reversep,
				  &volatilep);
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
	break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
	{
	  tree mem_offset = TREE_OPERAND (core, 1);
	  aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
	  core = TREE_OPERAND (core, 0);
	}
      else
	core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
	aff_combination_add_elt (comb, core, 1);
      else
	{
	  tree_to_aff_combination (core, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      if (toffset)
	{
	  tree_to_aff_combination (toffset, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      return;

    default:
      {
	if (poly_int_tree_p (expr))
	  {
	    aff_combination_const (comb, type, wi::to_poly_widest (expr));
	    return;
	  }
	break;
      }
    }

  aff_combination_elt (comb, type, expr);
}

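/* For example (a sketch; the exact trees depend on the front end), the
   address computation p + (sizetype) i * 8 decomposes into the
   combination with offset 0 and elements { p * 1, i * 8 }, and &a[2]
   for an array of 4-byte elements into offset 8 plus the element
   &a * 1.  */
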
/* Creates EXPR + ELT * SCALE in TYPE.  EXPR may be NULL, in which case
   just ELT * SCALE is built.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
{
  enum tree_code code;

  widest_int scale = wide_int_ext_for_comb (scale_in, type);

  elt = fold_convert (type, elt);
  if (scale == 1)
    {
      if (!expr)
	return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale == -1)
    {
      if (!expr)
	return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}

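/* E.g. with a non-null EXPR and SCALE = -3 this builds EXPR - ELT * 3
   rather than EXPR + ELT * -3, keeping the result in the canonical
   subtraction form; with EXPR = NULL and SCALE = -1 it builds -ELT.  */
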
/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
  unsigned i;
  poly_widest_int off;
  int sgn;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  i = 0;
  if (POINTER_TYPE_P (type))
    {
      type = sizetype;
      if (comb->n > 0 && comb->elts[0].coef == 1
	  && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val)))
	{
	  base = comb->elts[0].val;
	  ++i;
	}
    }

  for (; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (known_lt (comb->offset, 0))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn);

  if (base)
    return fold_build_pointer_plus (base, expr);
  else
    return fold_convert (comb->type, expr);
}

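/* tree_to_aff_combination and aff_combination_to_tree are (up to
   folding) inverses: parsing 3*x + 2*y + 5 and rebuilding it yields an
   equivalent tree.  The sign handling above means an unsigned 8-bit
   combination whose offset is 255 (i.e. -1 after sign-extension) is
   emitted as x - 1 rather than x + 255.  */
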
/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in that case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
			     aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    {
      if (c->offset.is_constant ())
	/* Access coeffs[0] directly, for efficiency.  */
	aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
      else
	{
	  /* c->offset is polynomial, so multiply VAL rather than COEF
	     by it.  */
	  tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
	  val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
	  aff_combination_add_elt (r, val, coef);
	}
    }
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  if (c2->offset.is_constant ())
    /* Access coeffs[0] directly, for efficiency.  */
    aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
  else
    {
      /* c2->offset is polynomial, so do the multiplication in tree form.  */
      tree offset = wide_int_to_tree (c2->type, c2->offset);
      aff_combination_add_product (c1, 1, offset, r);
    }
}

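/* The multiplication distributes term by term; e.g.

     (a0 + a1*x) * (b0 + b1*y)
       = a0*b0 + a1*b0*x + a0*b1*y + a1*b1*(x*y),

   where the x*y term becomes a new element whose value is the folded
   tree x * y, built by aff_combination_add_product above.  */
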
/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
	if (idx)
	  *idx = i;

	return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
			hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e;
  gimple *def;
  widest_int scale;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (CONVERT_EXPR_P (e)
	  && (TYPE_PRECISION (type)
	      >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
	name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
	continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
	continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
	  && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
	  && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
	      || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
	continue;

      /* We do not know whether the reference retains its value at the
	 place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
	continue;

      name_expansion **slot = NULL;
      if (*cache)
	slot = (*cache)->get (name);
      exp = slot ? *slot : NULL;
      if (!exp)
	{
	  /* Only bother to handle cases tree_to_aff_combination will.  */
	  switch (code)
	    {
	    case POINTER_PLUS_EXPR:
	    case PLUS_EXPR:
	    case MINUS_EXPR:
	    case MULT_EXPR:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def),
					    gimple_assign_rhs2 (def)))
		continue;
	      break;
	    case NEGATE_EXPR:
	    case BIT_NOT_EXPR:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def)))
		continue;
	      break;
	    CASE_CONVERT:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def)))
		/* This makes us always expand conversions which we did
		   in the past and makes gcc.dg/tree-ssa/ivopts-lt-2.c
		   PASS, eliminating one induction variable in IVOPTs.
		   ??? But it is really excessive and we should try
		   harder to do without it.  */
		aff_combination_elt (&current, TREE_TYPE (name),
				     fold_convert (TREE_TYPE (name),
						   gimple_assign_rhs1 (def)));
	      break;
	    case ADDR_EXPR:
	    case INTEGER_CST:
	    case POLY_INT_CST:
	      tree_to_aff_combination (gimple_assign_rhs1 (def),
				       TREE_TYPE (name), &current);
	      break;
	    default:
	      continue;
	    }
	  exp = XNEW (struct name_expansion);
	  exp->in_progress = 1;
	  if (!*cache)
	    *cache = new hash_map<tree, name_expansion *>;
	  (*cache)->put (name, exp);
	  aff_combination_expand (&current, cache);
	  exp->expansion = current;
	  exp->in_progress = 0;
	}
      else
	{
	  /* Since we follow the definitions in the SSA form, we should not
	     enter a cycle unless we pass through a phi node.  */
	  gcc_assert (!exp->in_progress);
	  current = exp->expansion;
	}
      if (!useless_type_conversion_p (comb->type, current.type))
	aff_combination_convert (&current, comb->type);

      /* Accumulate the new terms to TO_ADD, so that we do not modify
	 COMB while traversing it; include the term -coef * E, to remove
	 it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
				hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete (*cache);
  *cache = NULL;
}

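/* A typical caller manages the cache like this (a sketch; EXPR and
   TYPE stand for whatever expression and type the caller works with):

     hash_map<tree, name_expansion *> *cache = NULL;
     aff_tree comb;
     tree_to_aff_combination_expand (expr, type, &comb, &cache);
     ... possibly expand more expressions with the same cache ...
     free_affine_expand_cache (&cache);

   Reusing the cache across calls ensures shared SSA names are only
   expanded once.  */
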
/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false.  Finally, if neither of these
   two cases occur, true is returned, and CST is stored to MULT and MULT_SET
   is set to true.  */

static bool
wide_int_constant_multiple_p (const poly_widest_int &val,
			      const poly_widest_int &div,
			      bool *mult_set, poly_widest_int *mult)
{
  poly_widest_int rem, cst;

  if (known_eq (val, 0))
    {
      if (*mult_set && maybe_ne (*mult, 0))
	return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (maybe_eq (div, 0))
    return false;

  if (!multiple_p (val, div, &cst))
    return false;

  if (*mult_set && maybe_ne (*mult, cst))
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
				     poly_widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && known_eq (val->offset, 0))
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
				     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
	= aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
	return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
					 &mult_set, mult))
	return false;
    }

  gcc_assert (mult_set);
  return true;
}

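/* For example, with VAL = 6*x + 9 and DIV = 2*x + 3 this returns true
   and stores 3 to MULT, because the coefficient of x and the offset
   are the same constant multiple; with VAL = 6*x + 8 it fails, since
   6 = 3 * 2 but 8 is not a multiple of 3.  */
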
/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
	{
	  fprintf (file, "    [%d] = ", i);
	  print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

	  fprintf (file, " * ");
	  print_dec (val->elts[i].coef, file, sgn);
	  if (i != val->n - 1)
	    fprintf (file, ", \n");
	}
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, poly_widest_int *size)
{
  poly_int64 bitsize, bitpos;
  tree toff;
  machine_mode mode;
  int uns, rev, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
				   &uns, &rev, &vol);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bits_to_bytes_round_down (bitpos));
  aff_combination_add (addr, &tmp);

  *size = bits_to_bytes_round_up (bitsize);

  return base;
}

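/* For a reference like s.f, where field F sits at byte offset 8 in S,
   this yields ADDR = &s + 8, SIZE = the size of F in bytes, and
   returns s as the containing object.  */
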
/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1,
			   const poly_widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (!ordered_p (diff->offset, 0))
    return false;

  if (maybe_lt (diff->offset, 0))
    {
      /* The second object is before the first one, we succeed if the last
	 element of the second object is before the start of the first one.  */
      return known_le (diff->offset + size2, 0);
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return known_le (size1, diff->offset);
    }
}
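
/* E.g. two 4-byte regions with DIFF = 4 cannot overlap (the second
   starts exactly where the first ends, since size1 == 4 <= 4), while
   with DIFF = 3 they may overlap and the function returns false.  */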