gcc/tree-affine.c
/* Operations with affine combinations of trees.
   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "expr.h"
#include "tree-pretty-print.h"
#include "tree-affine.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "flags.h"
#include "dumpfile.h"
#include "cfgexpand.h"
#include "wide-int-print.h"
/* Extends CST as appropriate for the affine combination COMB.  */

widest_int
wide_int_ext_for_comb (const widest_int &cst, aff_tree *comb)
{
  return wi::sext (cst, TYPE_PRECISION (comb->type));
}
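
/* For instance, if COMB->type has 8-bit precision, the constant 0xff
   sign-extends to -1; offsets and coefficients are thus always kept in
   the canonical signed form for the combination's precision.  */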

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
        = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (new_coef == 0)
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  wide_int_to_tree (type, scale));
    }
}
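
/* Worked example (assuming a 32-bit COMB->type): scaling 2 + 3*x by 5
   yields 10 + 15*x.  The overflow case above matters for a coefficient
   such as 0x80000000 scaled by 2: the product truncates to 0 at 32-bit
   precision, so the element is dropped instead of being kept with a zero
   coefficient.  */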

/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        widest_int new_coef
          = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb);
        if (new_coef != 0)
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = 1;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, elt);
  else
    comb->rest = elt;
}
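
/* Worked example: adding x * -3 to the combination 7 + 3*x cancels the
   existing element, so x disappears and the result is just 7.  Once
   MAX_AFF_ELTS distinct elements are in use, any further element is
   folded into COMB->rest as an ordinary tree expression.  */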

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}

/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
        continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}
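
/* Note that the widening case cannot simply reuse the coefficients: an
   8-bit combination x + 255 (offset -1 in canonical signed form) is not
   x - 1 in a 16-bit type, because the original arithmetic wrapped at
   8 bits.  Hence the round trip through aff_combination_to_tree and
   tree_to_aff_combination above.  */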

/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  HOST_WIDE_INT bitpos, bitsize;
  machine_mode mode;
  int unsignedp, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case INTEGER_CST:
      aff_combination_const (comb, type, wi::to_widest (expr));
      return;

    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
        aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
        break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, wi::to_widest (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
        {
          expr = TREE_OPERAND (expr, 0);
          tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
          tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
          aff_combination_add (comb, &tmp);
          return;
        }
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
                                  &toffset, &mode, &unsignedp, &volatilep,
                                  false);
      if (bitpos % BITS_PER_UNIT != 0)
        break;
      aff_combination_const (comb, type, bitpos / BITS_PER_UNIT);
      if (TREE_CODE (core) == MEM_REF)
        {
          aff_combination_add_cst (comb, wi::to_widest (TREE_OPERAND (core, 1)));
          core = TREE_OPERAND (core, 0);
        }
      else
        core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
        aff_combination_add_elt (comb, core, 1);
      else
        {
          tree_to_aff_combination (core, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      if (toffset)
        {
          tree_to_aff_combination (toffset, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
        tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
                                 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
        {
          aff_combination_elt (comb, type, expr);
          return;
        }
      else
        aff_combination_elt (comb, type,
                             build2 (MEM_REF, TREE_TYPE (expr),
                                     TREE_OPERAND (expr, 0),
                                     build_int_cst
                                       (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    default:
      break;
    }

  aff_combination_elt (comb, type, expr);
}
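
/* Worked example: in a 32-bit integer type, the expression a + 4*b + 7
   decomposes into { offset = 7; elts = { a * 1, b * 4 }; rest = NULL }.
   A subexpression the switch above cannot decompose further, such as
   a * b, simply becomes a single element with coefficient 1.  */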

/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in,
                 aff_tree *comb ATTRIBUTE_UNUSED)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);

  if (scale == -1
      && POINTER_TYPE_P (TREE_TYPE (elt)))
    {
      elt = convert_to_ptrofftype (elt);
      elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
      scale = 1;
    }

  if (scale == 1)
    {
      if (!expr)
        {
          if (POINTER_TYPE_P (TREE_TYPE (elt)))
            return elt;
          else
            return fold_convert (type1, elt);
        }

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        return fold_build_pointer_plus (expr, elt);
      if (POINTER_TYPE_P (TREE_TYPE (elt)))
        return fold_build_pointer_plus (elt, expr);
      return fold_build2 (PLUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  if (scale == -1)
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type1,
                            fold_convert (type1, elt));

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        {
          elt = convert_to_ptrofftype (elt);
          elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  elt = fold_convert (type1, elt);
  if (!expr)
    return fold_build2 (MULT_EXPR, type1, elt,
                        wide_int_to_tree (type1, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     wide_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (TREE_TYPE (expr)))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type1, expr, elt);
}
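
/* Note the special case above for SCALE == -1 with a pointer-typed ELT:
   ELT is converted to the pointer offset type and negated first, so the
   sum can still be built with fold_build_pointer_plus rather than a
   MINUS_EXPR involving a pointer operand.  */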

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = NULL_TREE;
  unsigned i;
  widest_int off, sgn;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef,
                            comb);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (wi::neg_p (comb->offset))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  return add_elt_to_tree (expr, type, wide_int_to_tree (type1, off), sgn,
                          comb);
}

/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in which case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
                             aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    aff_combination_add_elt (r, val, coef * c->offset);
  else
    aff_combination_add_cst (r, coef * c->offset);
}
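
/* In effect this distributes the multiplication over C:
   (offset + sum coef_i * val_i) * (COEF * VAL)
     = sum (val_i * VAL) * (coef_i * COEF) + VAL * (COEF * offset),
   where each product val_i * VAL is built as an ordinary tree and becomes
   a new element of R.  */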

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  aff_combination_add_product (c1, c2->offset, NULL, r);
}
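
/* Worked example: multiplying a + 1 by b + 2 produces
   a*b + 2*a + b + 2, i.e. { offset = 2; elts = { (a * b) * 1, b * 1,
   a * 2 } } -- the nonlinear term a * b survives only as an opaque
   element with coefficient 1.  */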

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
        if (idx)
          *idx = i;

        return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
                        hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple def;
  widest_int scale;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (CONVERT_EXPR_P (e)
          && (TYPE_PRECISION (type)
              >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
        name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
        continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
        continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
          && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
          && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
              || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
        continue;

      /* We do not know whether the reference retains its value at the
         place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
        continue;

      if (!*cache)
        *cache = new hash_map<tree, name_expansion *>;
      name_expansion **slot = &(*cache)->get_or_insert (e);
      exp = *slot;

      if (!exp)
        {
          exp = XNEW (struct name_expansion);
          exp->in_progress = 1;
          *slot = exp;
          /* In principle this is a generally valid folding, but
             it is not unconditionally an optimization, so do it
             here and not in fold_unary.  */
          /* Convert (T1)(X *+- CST) into (T1)X *+- (T1)CST if T1 is wider
             than the type of X and overflow for the type of X is
             undefined.  */
          if (e != name
              && INTEGRAL_TYPE_P (type)
              && INTEGRAL_TYPE_P (TREE_TYPE (name))
              && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (name))
              && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (name))
              && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR)
              && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
            rhs = fold_build2 (code, type,
                               fold_convert (type, gimple_assign_rhs1 (def)),
                               fold_convert (type, gimple_assign_rhs2 (def)));
          else
            {
              rhs = gimple_assign_rhs_to_tree (def);
              if (e != name)
                rhs = fold_convert (type, rhs);
            }
          tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
          exp->expansion = current;
          exp->in_progress = 0;
        }
      else
        {
          /* Since we follow the definitions in the SSA form, we should not
             enter a cycle unless we pass through a phi node.  */
          gcc_assert (!exp->in_progress);
          current = exp->expansion;
        }

      /* Accumulate the new terms to TO_ADD, so that we do not modify
         COMB while traversing it; include the term -coef * E, to remove
         it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}
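
/* For instance, if COMB contains the element n * 2 and n is defined by
   n = m + 1, CURRENT holds the expansion m + 1 scaled by 2 and CURRE
   holds n * -2 to cancel the old element, so the element is replaced by
   m * 2 plus an offset of 2.  */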

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
                                hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete (*cache);
  *cache = NULL;
}

/* Returns false if VAL is not CST * DIV for any constant CST.
   Otherwise, if *MULT_SET is true, additionally compares CST and *MULT,
   and if they are different, returns false.  If neither of these two
   cases occurs, returns true, stores CST to *MULT and sets *MULT_SET
   to true.  */

static bool
wide_int_constant_multiple_p (const widest_int &val, const widest_int &div,
                              bool *mult_set, widest_int *mult)
{
  widest_int cst;

  if (val == 0)
    {
      if (*mult_set && *mult != 0)
        return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (div == 0)
    return false;

  if (!wi::multiple_of_p (val, div, SIGNED, &cst))
    return false;

  if (*mult_set && *mult != cst)
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
                                     widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && val->offset == 0)
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
                                     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
        = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
        return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
                                         &mult_set, mult))
        return false;
    }

  gcc_assert (mult_set);
  return true;
}
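
/* Worked example: for VAL = 8 + 4*x and DIV = 2 + x, every component of
   VAL is 4 times the corresponding component of DIV, so 4 is stored to
   MULT and true is returned.  VAL = 8 + 6*x fails: the offsets require
   X = 4 but the coefficients require X = 6.  */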

/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
        {
          fprintf (file, "    [%u] = ", i);
          print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

          fprintf (file, " * ");
          print_dec (val->elts[i].coef, file, sgn);
          if (i != val->n - 1)
            fprintf (file, ", \n");
        }
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, widest_int *size)
{
  HOST_WIDE_INT bitsize, bitpos;
  tree toff;
  machine_mode mode;
  int uns, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
                                   &uns, &vol, false);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bitpos / BITS_PER_UNIT);
  aff_combination_add (addr, &tmp);

  *size = (bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;

  return base;
}

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1,
                           const widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (wi::neg_p (diff->offset))
    {
      /* The second object is before the first one, we succeed if the last
         element of the second object is before the start of the first one.  */
      return wi::neg_p (diff->offset + size2 - 1);
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return wi::les_p (size1, diff->offset);
    }
}
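
/* Worked example: with DIFF = -10, SIZE1 = 4 and SIZE2 = 8, the second
   region occupies [-10, -3] relative to the first, and -10 + 8 - 1 = -3
   is still negative, so the regions cannot overlap.  With DIFF = 2 and
   SIZE1 = 4 they may overlap, since the second region starts before the
   first one ends (4 > 2).  */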
949