/* Operations with affine combinations of trees.
   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "expr.h"
#include "tree-pretty-print.h"
#include "tree-affine.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "flags.h"
#include "dumpfile.h"
#include "cfgexpand.h"
#include "wide-int-print.h"

/* Sign-extends CST to the precision of the affine combination COMB.  */

widest_int
wide_int_ext_for_comb (const widest_int &cst, aff_tree *comb)
{
  return wi::sext (cst, TYPE_PRECISION (comb->type));
}
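
/* A minimal sketch of the semantics above (illustrative only, kept under
   #if 0 so it stays out of the build): for a combination over an 8-bit
   type, the value 255 sign-extends to -1.  TYPE8 is a hypothetical 8-bit
   integer type supplied by the caller.  */
#if 0
static void
example_ext_for_comb (tree type8)
{
  aff_tree comb;
  comb.type = type8;		/* Only the type is read here.  */
  widest_int v = wide_int_ext_for_comb (255, &comb);
  gcc_assert (v == -1);		/* Reduced to 8 bits, 255 is -1.  */
}
#endif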

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}
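
/* A short usage sketch for the two constructors above (illustrative only,
   kept under #if 0): X is assumed to be an SSA name of integer TYPE.  */
#if 0
static void
example_build (tree type, tree x)
{
  aff_tree c1, c2;
  aff_combination_const (&c1, type, 42);	/* c1 = 42 */
  aff_combination_elt (&c2, type, x);		/* c2 = 1 * x */
}
#endif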

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
        = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (new_coef == 0)
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  wide_int_to_tree (type, scale));
    }
}
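
/* Sketch of scaling (illustrative only, kept under #if 0): starting from
   x + 3 and scaling by 4 yields 4*x + 12; both the coefficient and the
   offset are multiplied.  X is assumed to be an SSA name of integer
   TYPE.  */
#if 0
static void
example_scale (tree type, tree x)
{
  aff_tree c;
  aff_combination_elt (&c, type, x);	/* c = x */
  c.offset = 3;				/* c = x + 3 */
  aff_combination_scale (&c, 4);	/* c = 4*x + 12 */
}
#endif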

/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        widest_int new_coef
          = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb);
        if (new_coef != 0)
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = 1;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
                              elt);
  else
    comb->rest = elt;
}

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}
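
/* Sketch of building X - Y by scaling and adding, mirroring the
   MINUS_EXPR handling further below (illustrative only, kept under
   #if 0; X and Y are assumed SSA names of integer TYPE).  */
#if 0
static void
example_subtract (tree type, tree x, tree y)
{
  aff_tree cx, cy;
  aff_combination_elt (&cx, type, x);	/* cx = x */
  aff_combination_elt (&cy, type, y);	/* cy = y */
  aff_combination_scale (&cy, -1);	/* cy = -y */
  aff_combination_add (&cx, &cy);	/* cx = x - y */
}
#endif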

/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
        continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  HOST_WIDE_INT bitpos, bitsize;
  enum machine_mode mode;
  int unsignedp, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case INTEGER_CST:
      aff_combination_const (comb, type, wi::to_widest (expr));
      return;

    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
        aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
        break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, wi::to_widest (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
        {
          expr = TREE_OPERAND (expr, 0);
          tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
          tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
          aff_combination_add (comb, &tmp);
          return;
        }
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
                                  &toffset, &mode, &unsignedp, &volatilep,
                                  false);
      if (bitpos % BITS_PER_UNIT != 0)
        break;
      aff_combination_const (comb, type, bitpos / BITS_PER_UNIT);
      if (TREE_CODE (core) == MEM_REF)
        {
          aff_combination_add_cst (comb, wi::to_widest (TREE_OPERAND (core, 1)));
          core = TREE_OPERAND (core, 0);
        }
      else
        core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
        aff_combination_add_elt (comb, core, 1);
      else
        {
          tree_to_aff_combination (core, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      if (toffset)
        {
          tree_to_aff_combination (toffset, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
        tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
                                 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
        {
          aff_combination_elt (comb, type, expr);
          return;
        }
      else
        aff_combination_elt (comb, type,
                             build2 (MEM_REF, TREE_TYPE (expr),
                                     TREE_OPERAND (expr, 0),
                                     build_int_cst
                                       (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    default:
      break;
    }

  aff_combination_elt (comb, type, expr);
}
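
/* Sketch of the resulting representation (illustrative only, kept under
   #if 0): for EXPR equivalent to a + 4*b + 7, COMB ends up with offset 7
   and the elements {a, 1} and {b, 4}; anything that does not decompose
   further is kept as an opaque element, or in COMB->rest.  */
#if 0
static void
example_decompose (tree expr, tree type)
{
  aff_tree comb;
  tree_to_aff_combination (expr, type, &comb);
  /* Invariant: EXPR == comb.offset
       + sum of comb.elts[i].coef * comb.elts[i].val
       + comb.rest (counted with coefficient 1, if non-NULL).  */
}
#endif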

/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in,
                 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);

  if (scale == -1
      && POINTER_TYPE_P (TREE_TYPE (elt)))
    {
      elt = convert_to_ptrofftype (elt);
      elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
      scale = 1;
    }

  if (scale == 1)
    {
      if (!expr)
        {
          if (POINTER_TYPE_P (TREE_TYPE (elt)))
            return elt;
          else
            return fold_convert (type1, elt);
        }

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        return fold_build_pointer_plus (expr, elt);
      if (POINTER_TYPE_P (TREE_TYPE (elt)))
        return fold_build_pointer_plus (elt, expr);
      return fold_build2 (PLUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  if (scale == -1)
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type1,
                            fold_convert (type1, elt));

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        {
          elt = convert_to_ptrofftype (elt);
          elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  elt = fold_convert (type1, elt);
  if (!expr)
    return fold_build2 (MULT_EXPR, type1, elt,
                        wide_int_to_tree (type1, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     wide_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (TREE_TYPE (expr)))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type1, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = NULL_TREE;
  unsigned i;
  widest_int off, sgn;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef,
                            comb);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (wi::neg_p (comb->offset))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  return add_elt_to_tree (expr, type, wide_int_to_tree (type1, off), sgn,
                          comb);
}
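
/* Sketch of a decompose/rebuild round trip (illustrative only, kept
   under #if 0): the rebuilt tree is equivalent to EXPR, though not
   necessarily structurally identical to it.  */
#if 0
static tree
example_roundtrip (tree expr, tree type)
{
  aff_tree comb;
  tree_to_aff_combination (expr, type, &comb);
  return aff_combination_to_tree (&comb);
}
#endif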

/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in which case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
                             aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    aff_combination_add_elt (r, val, coef * c->offset);
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  aff_combination_add_product (c1, c2->offset, NULL, r);
}
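
/* Sketch of multiplication (illustrative only, kept under #if 0):
   multiplying x + 1 by x + 2 gives x*x + 3*x + 2; since x*x is not
   affine in x, it is carried as a new element whose value is the folded
   tree x * x.  X is assumed to be an SSA name of integer TYPE.  */
#if 0
static void
example_mult (tree type, tree x)
{
  aff_tree c1, c2, r;
  aff_combination_elt (&c1, type, x);
  c1.offset = 1;			/* c1 = x + 1 */
  aff_combination_elt (&c2, type, x);
  c2.offset = 2;			/* c2 = x + 2 */
  aff_combination_mult (&c1, &c2, &r);	/* r = x*x + 3*x + 2 */
}
#endif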

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
        if (idx)
          *idx = i;

        return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb,
                        hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple def;
  widest_int scale;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (TREE_CODE (e) == NOP_EXPR
          && (TYPE_PRECISION (type)
              >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
        name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
        continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
        continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
          && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
          && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
              || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
        continue;

      /* We do not know whether the reference retains its value at the
         place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
        continue;

      if (!*cache)
        *cache = new hash_map<tree, name_expansion *>;
      name_expansion **slot = &(*cache)->get_or_insert (e);
      exp = *slot;

      if (!exp)
        {
          exp = XNEW (struct name_expansion);
          exp->in_progress = 1;
          *slot = exp;
          /* In principle this is a generally valid folding, but
             it is not unconditionally an optimization, so do it
             here and not in fold_unary.  */
          /* Convert (T1)(X *+- CST) into (T1)X *+- (T1)CST if T1 is wider
             than the type of X and overflow for the type of X is
             undefined.  */
          if (e != name
              && INTEGRAL_TYPE_P (type)
              && INTEGRAL_TYPE_P (TREE_TYPE (name))
              && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (name))
              && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (name))
              && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR)
              && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
            rhs = fold_build2 (code, type,
                               fold_convert (type, gimple_assign_rhs1 (def)),
                               fold_convert (type, gimple_assign_rhs2 (def)));
          else
            {
              rhs = gimple_assign_rhs_to_tree (def);
              if (e != name)
                rhs = fold_convert (type, rhs);
            }
          tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
          exp->expansion = current;
          exp->in_progress = 0;
        }
      else
        {
          /* Since we follow the definitions in the SSA form, we should not
             enter a cycle unless we pass through a phi node.  */
          gcc_assert (!exp->in_progress);
          current = exp->expansion;
        }

      /* Accumulate the new terms to TO_ADD, so that we do not modify
         COMB while traversing it; include the term -coef * E, to remove
         it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
                                hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete (*cache);
  *cache = NULL;
}
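
/* Sketch of the expansion-cache lifecycle (illustrative only, kept under
   #if 0): one cache is threaded through all expansions that should share
   work, and freed once at the end.  EXPR1 and EXPR2 are assumed to be
   integer-typed trees.  */
#if 0
static void
example_expand (tree expr1, tree expr2, tree type)
{
  hash_map<tree, name_expansion *> *cache = NULL;
  aff_tree c1, c2;
  tree_to_aff_combination_expand (expr1, type, &c1, &cache);
  tree_to_aff_combination_expand (expr2, type, &c2, &cache);
  /* ... compare or combine C1 and C2 ...  */
  free_affine_expand_cache (&cache);
}
#endif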

/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false.  Finally, if neither of these
   two cases occur, true is returned, and CST is stored to MULT and MULT_SET
   is set to true.  */

static bool
wide_int_constant_multiple_p (const widest_int &val, const widest_int &div,
                              bool *mult_set, widest_int *mult)
{
  widest_int cst;

  if (val == 0)
    {
      if (*mult_set && *mult != 0)
        return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (div == 0)
    return false;

  if (!wi::multiple_of_p (val, div, SIGNED, &cst))
    return false;

  if (*mult_set && *mult != cst)
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
                                     widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && val->offset == 0)
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
                                     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
        = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
        return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
                                         &mult_set, mult))
        return false;
    }

  gcc_assert (mult_set);
  return true;
}
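
/* Sketch (illustrative only, kept under #if 0): 4*x + 8 is twice
   2*x + 4, so the call below succeeds and stores 2 in MULT.  X is
   assumed to be an SSA name of integer TYPE.  */
#if 0
static void
example_constant_multiple (tree type, tree x)
{
  aff_tree val, div;
  widest_int mult;
  aff_combination_elt (&val, type, x);
  aff_combination_scale (&val, 4);
  val.offset = 8;			/* val = 4*x + 8 */
  aff_combination_elt (&div, type, x);
  aff_combination_scale (&div, 2);
  div.offset = 4;			/* div = 2*x + 4 */
  if (aff_combination_constant_multiple_p (&val, &div, &mult))
    gcc_assert (mult == 2);
}
#endif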

/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
        {
          fprintf (file, "    [%d] = ", i);
          print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

          fprintf (file, " * ");
          print_dec (val->elts[i].coef, file, sgn);
          if (i != val->n - 1)
            fprintf (file, ", \n");
        }
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, widest_int *size)
{
  HOST_WIDE_INT bitsize, bitpos;
  tree toff;
  enum machine_mode mode;
  int uns, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
                                   &uns, &vol, false);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bitpos / BITS_PER_UNIT);
  aff_combination_add (addr, &tmp);

  *size = (bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;

  return base;
}

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1,
                           const widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (wi::neg_p (diff->offset))
    {
      /* The second object is before the first one, we succeed if the last
         element of the second object is before the start of the first one.  */
      return wi::neg_p (diff->offset + size2 - 1);
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return wi::les_p (size1, diff->offset);
    }
}
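
/* Sketch (illustrative only, kept under #if 0): a 4-byte access at some
   base and a 4-byte access 8 bytes later cannot overlap.  DIFF is the
   affine difference of the two start addresses, here the constant 8.  */
#if 0
static bool
example_no_overlap (void)
{
  aff_tree diff;
  aff_combination_const (&diff, sizetype, 8);
  /* Region 1 occupies [0, 4), region 2 occupies [8, 12); 4 <= 8.  */
  return aff_comb_cannot_overlap_p (&diff, 4, 4);
}
#endif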