/* Operations with affine combinations of trees.
   Copyright (C) 2005-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "expr.h"
#include "tree-pretty-print.h"
#include "pointer-set.h"
#include "tree-affine.h"
#include "gimple.h"
#include "gimplify.h"
#include "flags.h"
#include "dumpfile.h"
#include "cfgexpand.h"

/* Extends CST as appropriate for the affine combination COMB.  */

double_int
double_int_ext_for_comb (double_int cst, aff_tree *comb)
{
  return cst.sext (TYPE_PRECISION (comb->type));
}
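
/* Because of the sign extension above, every offset and coefficient of a
   combination is kept in canonical signed form for the precision of
   COMB->type; e.g. with an 8-bit type the value 255 is stored as -1.  */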

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  comb->type = type;
  comb->offset = double_int_zero;
  comb->n = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, double_int cst)
{
  aff_combination_zero (comb, type);
  comb->offset = double_int_ext_for_comb (cst, comb);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = double_int_one;
}

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, double_int scale)
{
  unsigned i, j;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_one ())
    return;

  if (scale.is_zero ())
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset
    = double_int_ext_for_comb (scale * comb->offset, comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      double_int new_coef;

      new_coef
        = double_int_ext_for_comb (scale * comb->elts[i].coef, comb);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (new_coef.is_zero ())
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  double_int_to_tree (type, scale));
    }
}
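
/* For instance, scaling the combination  4 * x + 7  (offset 7 and a single
   element with coefficient 4) by 3 yields  12 * x + 21.  The arithmetic is
   performed in the precision of COMB->type, so a coefficient that wraps
   around to zero makes its element disappear, as handled above.  */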

/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
{
  unsigned i;
  tree type;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_zero ())
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        double_int new_coef;

        new_coef = comb->elts[i].coef + scale;
        new_coef = double_int_ext_for_comb (new_coef, comb);
        if (!new_coef.is_zero ())
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = double_int_one;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale.is_one ())
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       double_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
                              elt);
  else
    comb->rest = elt;
}
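
/* For instance, adding the element X with scale -3 to a combination that
   already contains 3 * X cancels the coefficient and removes the element,
   while adding it with scale 2 simply updates the coefficient to 5.  */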

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, double_int cst)
{
  c->offset = double_int_ext_for_comb (c->offset + cst, c);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, double_int_one);
}

/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = double_int_ext_for_comb (comb->offset, comb);
  for (i = j = 0; i < comb->n; i++)
    {
      double_int new_coef = double_int_ext_for_comb (comb->elts[i].coef, comb);
      if (new_coef.is_zero ())
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = double_int_one;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  HOST_WIDE_INT bitpos, bitsize;
  enum machine_mode mode;
  int unsignedp, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case INTEGER_CST:
      aff_combination_const (comb, type, tree_to_double_int (expr));
      return;

    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
        aff_combination_scale (&tmp, double_int_minus_one);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
        break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, tree_to_double_int (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, double_int_minus_one);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, double_int_minus_one);
      aff_combination_add_cst (comb, double_int_minus_one);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
        {
          expr = TREE_OPERAND (expr, 0);
          tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
          tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
          aff_combination_add (comb, &tmp);
          return;
        }
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
                                  &toffset, &mode, &unsignedp, &volatilep,
                                  false);
      if (bitpos % BITS_PER_UNIT != 0)
        break;
      aff_combination_const (comb, type,
                             double_int::from_uhwi (bitpos / BITS_PER_UNIT));
      core = build_fold_addr_expr (core);
      if (TREE_CODE (core) == ADDR_EXPR)
        aff_combination_add_elt (comb, core, double_int_one);
      else
        {
          tree_to_aff_combination (core, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      if (toffset)
        {
          tree_to_aff_combination (toffset, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
        tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
                                 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
        {
          aff_combination_elt (comb, type, expr);
          return;
        }
      else
        aff_combination_elt (comb, type,
                             build2 (MEM_REF, TREE_TYPE (expr),
                                     TREE_OPERAND (expr, 0),
                                     build_int_cst
                                     (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    default:
      break;
    }

  aff_combination_elt (comb, type, expr);
}
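
/* As an example of the decomposition, the expression  a + 4 * (b - 3)
   splits into the combination with offset -12 and elements
   { 1 * a, 4 * b }; expressions the switch above does not handle become
   single elements with coefficient one.  */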

/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  scale = double_int_ext_for_comb (scale, comb);

  if (scale.is_minus_one ()
      && POINTER_TYPE_P (TREE_TYPE (elt)))
    {
      elt = convert_to_ptrofftype (elt);
      elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
      scale = double_int_one;
    }

  if (scale.is_one ())
    {
      if (!expr)
        {
          if (POINTER_TYPE_P (TREE_TYPE (elt)))
            return elt;
          else
            return fold_convert (type1, elt);
        }

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        return fold_build_pointer_plus (expr, elt);
      if (POINTER_TYPE_P (TREE_TYPE (elt)))
        return fold_build_pointer_plus (elt, expr);
      return fold_build2 (PLUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  if (scale.is_minus_one ())
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type1,
                            fold_convert (type1, elt));

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        {
          elt = convert_to_ptrofftype (elt);
          elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  elt = fold_convert (type1, elt);
  if (!expr)
    return fold_build2 (MULT_EXPR, type1, elt,
                        double_int_to_tree (type1, scale));

  if (scale.is_negative ())
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     double_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (TREE_TYPE (expr)))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type1, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = NULL_TREE;
  unsigned i;
  double_int off, sgn;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef,
                            comb);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, double_int_one, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (comb->offset.is_negative ())
    {
      off = -comb->offset;
      sgn = double_int_minus_one;
    }
  else
    {
      off = comb->offset;
      sgn = double_int_one;
    }
  return add_elt_to_tree (expr, type, double_int_to_tree (type1, off), sgn,
                          comb);
}

/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Removes the M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = double_int_one;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in which case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, double_int coef, tree val,
                             aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    aff_combination_add_elt (r, val, coef * c->offset);
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, double_int_one, c2->rest, r);
  aff_combination_add_product (c1, c2->offset, NULL, r);
}
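
/* For example, multiplying the combinations  x + 1  and  y + 2  yields
   offset 2 and the elements { 1 * (x * y), 1 * y, 2 * x }: the product of
   two non-constant parts is folded into a single MULT_EXPR tree that then
   acts as a new opaque element of the result.  */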

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
        if (idx)
          *idx = i;

        return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
                        struct pointer_map_t **cache ATTRIBUTE_UNUSED)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple def;
  double_int scale;
  void **slot;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (TREE_CODE (e) == NOP_EXPR
          && (TYPE_PRECISION (type)
              >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
        name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
        continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
        continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
          && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
          && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
              || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
        continue;

      /* We do not know whether the reference retains its value at the
         place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
        continue;

      if (!*cache)
        *cache = pointer_map_create ();
      slot = pointer_map_insert (*cache, e);
      exp = (struct name_expansion *) *slot;

      if (!exp)
        {
          exp = XNEW (struct name_expansion);
          exp->in_progress = 1;
          *slot = exp;
          /* In principle this is a generally valid folding, but
             it is not unconditionally an optimization, so do it
             here and not in fold_unary.  */
          /* Convert (T1)(X *+- CST) into (T1)X *+- (T1)CST if T1 is wider
             than the type of X and overflow for the type of X is
             undefined.  */
          if (e != name
              && INTEGRAL_TYPE_P (type)
              && INTEGRAL_TYPE_P (TREE_TYPE (name))
              && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (name))
              && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (name))
              && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR)
              && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
            rhs = fold_build2 (code, type,
                               fold_convert (type, gimple_assign_rhs1 (def)),
                               fold_convert (type, gimple_assign_rhs2 (def)));
          else
            {
              rhs = gimple_assign_rhs_to_tree (def);
              if (e != name)
                rhs = fold_convert (type, rhs);
            }
          tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
          exp->expansion = current;
          exp->in_progress = 0;
        }
      else
        {
          /* Since we follow the definitions in the SSA form, we should not
             enter a cycle unless we pass through a phi node.  */
          gcc_assert (!exp->in_progress);
          current = exp->expansion;
        }

      /* Accumulate the new terms to TO_ADD, so that we do not modify
         COMB while traversing it; include the term -coef * E, to remove
         it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
                                struct pointer_map_t **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   pointer_map_traverse.  */

static bool
free_name_expansion (const void *key ATTRIBUTE_UNUSED, void **value,
                     void *data ATTRIBUTE_UNUSED)
{
  struct name_expansion *const exp = (struct name_expansion *) *value;

  free (exp);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (struct pointer_map_t **cache)
{
  if (!*cache)
    return;

  pointer_map_traverse (*cache, free_name_expansion, NULL);
  pointer_map_destroy (*cache);
  *cache = NULL;
}
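
/* A minimal usage sketch (not taken from any particular pass): the same
   CACHE is threaded through all expansions and released once at the end.

     struct pointer_map_t *cache = NULL;
     aff_tree a, b;

     tree_to_aff_combination_expand (addr1, ptr_type_node, &a, &cache);
     tree_to_aff_combination_expand (addr2, ptr_type_node, &b, &cache);
     ...
     free_affine_expand_cache (&cache);

   Sharing the cache between calls is what keeps repeated expansion of the
   same SSA names from becoming exponential.  */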

/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false.  Finally, if neither of these
   two cases occurs, true is returned, and CST is stored to MULT and
   MULT_SET is set to true.  */

static bool
double_int_constant_multiple_p (double_int val, double_int div,
                                bool *mult_set, double_int *mult)
{
  double_int rem, cst;

  if (val.is_zero ())
    {
      if (*mult_set && !mult->is_zero ())
        return false;
      *mult_set = true;
      *mult = double_int_zero;
      return true;
    }

  if (div.is_zero ())
    return false;

  cst = val.sdivmod (div, FLOOR_DIV_EXPR, &rem);
  if (!rem.is_zero ())
    return false;

  if (*mult_set && *mult != cst)
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
                                     double_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && val->offset.is_zero ())
    {
      *mult = double_int_zero;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!double_int_constant_multiple_p (val->offset, div->offset,
                                       &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
        = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
        return false;
      if (!double_int_constant_multiple_p (elt->coef, div->elts[i].coef,
                                           &mult_set, mult))
        return false;
    }

  gcc_assert (mult_set);
  return true;
}
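
/* For instance, VAL = 8 * x + 4 is a constant multiple of DIV = 2 * x + 1
   with MULT = 4, whereas VAL = 8 * x + 3 is not, because the offsets and
   the coefficients must all yield the same exact quotient.  */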

/* Prints the affine combination VAL to FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  bool uns = TYPE_UNSIGNED (val->type);
  if (POINTER_TYPE_P (val->type))
    uns = false;
  fprintf (file, "{\n type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n offset = ");
  dump_double_int (file, val->offset, uns);
  if (val->n > 0)
    {
      fprintf (file, "\n elements = {\n");
      for (i = 0; i < val->n; i++)
        {
          fprintf (file, " [%d] = ", i);
          print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

          fprintf (file, " * ");
          dump_double_int (file, val->elts[i].coef, uns);
          if (i != val->n - 1)
            fprintf (file, ", \n");
        }
      fprintf (file, "\n }");
    }
  if (val->rest)
    {
      fprintf (file, "\n rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine combination VAL to the standard error, used for
   debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size)
{
  HOST_WIDE_INT bitsize, bitpos;
  tree toff;
  enum machine_mode mode;
  int uns, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
                                   &uns, &vol, false);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype,
                         double_int::from_shwi (bitpos / BITS_PER_UNIT));
  aff_combination_add (addr, &tmp);

  *size = double_int::from_shwi ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT);

  return base;
}

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2)
{
  double_int d, bound;

  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  d = diff->offset;
  if (d.is_negative ())
    {
      /* The second object is before the first one; we succeed if the last
         element of the second object is before the start of the first one.  */
      bound = d + size2 + double_int_minus_one;
      return bound.is_negative ();
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return size1.sle (d);
    }
}