/* Alias analysis for GNU C
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
   2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by John Carr (jfc@mit.edu).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "function.h"
#include "alias.h"
#include "emit-rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "flags.h"
#include "output.h"
#include "toplev.h"
#include "cselib.h"
#include "splay-tree.h"
#include "ggc.h"
#include "langhooks.h"
#include "timevar.h"
#include "target.h"
#include "cgraph.h"
#include "varray.h"
#include "tree-pass.h"
#include "ipa-type-escape.h"
#include "df.h"

/* The aliasing API provided here solves related but different problems:

   Say there exists (in C)

   struct X {
     struct Y y1;
     struct Z z2;
   } x1, *px1, *px2;

   struct Y y2, *py;
   struct Z z2, *pz;


   py = &px1->y1;
   px2 = &x1;

   Consider the four questions:

   Can a store to x1 interfere with px2->y1?
   Can a store to x1 interfere with px2->z2 (i.e. (*px2).z2)?
   Can a store to x1 change the value pointed to by py?
   Can a store to x1 change the value pointed to by pz?

   The answer to these questions can be yes, yes, yes, and maybe.

   The first two questions can be answered with a simple examination
   of the type system.  If structure X contains a field of type Y then
   a store through a pointer to an X can overwrite any field that is
   contained (recursively) in an X (unless we know that px1 != px2).

   The last two questions can be solved in the same way as the first
   two questions, but this is too conservative.  The observation is
   that in some cases we can know which (if any) fields are addressed
   and whether those addresses are used in bad ways.  This analysis
   may be language specific.  In C, arbitrary operations may be
   applied to pointers.  However, there is some indication that this
   may be too conservative for some C++ types.

   The pass ipa-type-escape does this analysis for the types whose
   instances do not escape across the compilation boundary.

   Historically in GCC, these two problems were combined and a single
   data structure was used to represent the solution to these
   problems.  We now have two similar but different data structures;
   the data structure to solve the last two questions is similar to
   the first, but does not contain the fields whose addresses are
   never taken.  For types that do escape the compilation unit, the
   data structures will have identical information.  */


/* The alias sets assigned to MEMs assist the back-end in determining
   which MEMs can alias which other MEMs.  In general, two MEMs in
   different alias sets cannot alias each other, with one important
   exception.  Consider something like:

     struct S { int i; double d; };

   a store to an `S' can alias something of either type `int' or type
   `double'.  (However, a store to an `int' cannot alias a `double'
   and vice versa.)  We indicate this via a tree structure that looks
   like:

	     struct S
	      /   \
	     /     \
	   |/_     _\|
	   int    double

   (The arrows are directed and point downwards.)
   In this situation we say the alias set for `struct S' is the
   `superset' and that those for `int' and `double' are `subsets'.

   To see whether two alias sets can point to the same memory, we must
   see if either alias set is a subset of the other.  We need not trace
   past immediate descendants, however, since we propagate all
   grandchildren up one level.

   Alias set zero is implicitly a superset of all other alias sets.
   However, there is no actual entry for alias set zero.  It is an
   error to attempt to explicitly construct a subset of zero.  */
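
/* As an illustrative sketch (not part of GCC itself) of what the
   subset relation above means at the source level:

     struct S { int i; double d; };

     int
     f (struct S *s, int *p, double *q)
     {
       *p = 1;      // alias set of `int'
       s->d = 2.0;  // subset `double' of superset `struct S'
       *q = 3.0;    // alias set of `double'; may alias s->d,
		    // but not *p
       return *p;   // safe to treat as 1: neither store above
		    // can alias an `int'
     }

   Here `*q' and `s->d' may conflict because `double' is a child of
   `struct S', while `int' and `double' live in disjoint alias sets
   under -fstrict-aliasing.  */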

struct GTY(()) alias_set_entry_d {
  /* The alias set number, as stored in MEM_ALIAS_SET.  */
  alias_set_type alias_set;

  /* Nonzero if this alias set would have a child of zero: this
     effectively makes it the same as alias set zero.  */
  int has_zero_child;

  /* The children of the alias set.  These are not just the immediate
     children, but, in fact, all descendants.  So, if we have:

       struct T { struct S s; float f; }

     continuing our example above, the children here will be all of
     `int', `double', `float', and `struct S'.  */
  splay_tree GTY((param1_is (int), param2_is (int))) children;
};
typedef struct alias_set_entry_d *alias_set_entry;

static int rtx_equal_for_memref_p (const_rtx, const_rtx);
static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
static void record_set (rtx, const_rtx, void *);
static int base_alias_check (rtx, rtx, enum machine_mode,
			     enum machine_mode);
static rtx find_base_value (rtx);
static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
static int insert_subset_children (splay_tree_node, void*);
static tree find_base_decl (tree);
static alias_set_entry get_alias_set_entry (alias_set_type);
static const_rtx fixed_scalar_and_varying_struct_p (const_rtx, const_rtx,
						    rtx, rtx,
						    bool (*) (const_rtx, bool));
static int aliases_everything_p (const_rtx);
static bool nonoverlapping_component_refs_p (const_tree, const_tree);
static tree decl_for_component_ref (tree);
static rtx adjust_offset_for_component_ref (tree, rtx);
static int write_dependence_p (const_rtx, const_rtx, int);

static void memory_modified_1 (rtx, const_rtx, void *);

/* Set up all info needed to perform alias analysis on memory references.  */

/* Returns the size in bytes of the mode of X.  */
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Returns nonzero if MEM1 and MEM2 do not alias because they are in
   different alias sets.  We ignore alias sets in functions making use
   of variable arguments because the va_arg macros on some systems are
   not legal ANSI C.  */
#define DIFFERENT_ALIAS_SETS_P(MEM1, MEM2) \
  mems_in_disjoint_alias_sets_p (MEM1, MEM2)

/* Cap the number of passes we make over the insns propagating alias
   information through set chains.  10 is a completely arbitrary choice.  */
#define MAX_ALIAS_LOOP_PASSES 10

/* reg_base_value[N] gives an address to which register N is related.
   If all sets after the first add or subtract to the current value
   or otherwise modify it so it does not point to a different top level
   object, reg_base_value[N] is equal to the address part of the source
   of the first set.

   A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
   expressions represent certain special values: function arguments and
   the stack, frame, and argument pointers.

   The contents of an ADDRESS are not normally used; the mode of the
   ADDRESS determines whether the ADDRESS is a function argument or some
   other special value.  Pointer equality, not rtx_equal_p, determines whether
   two ADDRESS expressions refer to the same base address.

   The only use of the contents of an ADDRESS is for determining if the
   current function performs nonlocal memory references for the
   purposes of marking the function as a constant function.  */

static GTY(()) VEC(rtx,gc) *reg_base_value;
static rtx *new_reg_base_value;

/* We preserve a copy of the old array around to reduce the amount of
   garbage produced; about 8% of the garbage produced was attributed
   to this array.  */
static GTY((deletable)) VEC(rtx,gc) *old_reg_base_value;

/* Static hunks of RTL used by the aliasing code; these are initialized
   once per function to avoid unnecessary RTL allocations.  */
static GTY (()) rtx static_reg_base_value[FIRST_PSEUDO_REGISTER];

#define REG_BASE_VALUE(X)				\
  (REGNO (X) < VEC_length (rtx, reg_base_value)		\
   ? VEC_index (rtx, reg_base_value, REGNO (X)) : 0)

/* Vector indexed by N giving the initial (unchanging) value known for
   pseudo-register N.  This array is initialized in init_alias_analysis,
   and does not change until end_alias_analysis is called.  */
static GTY((length("reg_known_value_size"))) rtx *reg_known_value;

/* Indicates the number of valid entries in reg_known_value.  */
static GTY(()) unsigned int reg_known_value_size;

/* Vector recording for each reg_known_value whether it is due to a
   REG_EQUIV note.  Future passes (viz., reload) may replace the
   pseudo with the equivalent expression and so we account for the
   dependences that would be introduced if that happens.

   The REG_EQUIV notes created in assign_parms may mention the arg
   pointer, and there are explicit insns in the RTL that modify the
   arg pointer.  Thus we must ensure that such insns don't get
   scheduled across each other because that would invalidate the
   REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
   wrong, but solving the problem in the scheduler will likely give
   better code, so we do it here.  */
static bool *reg_known_equiv_p;

/* True when scanning insns from the start of the rtl to the
   NOTE_INSN_FUNCTION_BEG note.  */
static bool copying_arguments;

DEF_VEC_P(alias_set_entry);
DEF_VEC_ALLOC_P(alias_set_entry,gc);

/* The splay-tree used to store the various alias set entries.  */
static GTY (()) VEC(alias_set_entry,gc) *alias_sets;
\f
/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
   such an entry, or NULL otherwise.  */

static inline alias_set_entry
get_alias_set_entry (alias_set_type alias_set)
{
  return VEC_index (alias_set_entry, alias_sets, alias_set);
}

/* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
   the two MEMs cannot alias each other.  */

static inline int
mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
{
  /* Perform a basic sanity check.  Namely, that there are no alias sets
     if we're not using strict aliasing.  This helps to catch bugs
     whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
     where a MEM is allocated in some way other than by the use of
     gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared.  If we begin to
     use alias sets to indicate that spilled registers cannot alias each
     other, we might need to remove this check.  */
  gcc_assert (flag_strict_aliasing
	      || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));

  return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
}

/* Insert the NODE into the splay tree given by DATA.  Used by
   record_alias_subset via splay_tree_foreach.  */

static int
insert_subset_children (splay_tree_node node, void *data)
{
  splay_tree_insert ((splay_tree) data, node->key, node->value);

  return 0;
}

/* Return true if the first alias set is a subset of the second.  */

bool
alias_set_subset_of (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* Everything is a subset of the "aliases everything" set.  */
  if (set2 == 0)
    return true;

  /* Otherwise, check if set1 is a subset of set2.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && ((ase->has_zero_child && set1 == 0)
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set1)))
    return true;
  return false;
}

/* Return 1 if the two specified alias sets may conflict.  */

int
alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* The easy case.  */
  if (alias_sets_must_conflict_p (set1, set2))
    return 1;

  /* See if the first alias set is a subset of the second.  */
  ase = get_alias_set_entry (set1);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set2)))
    return 1;

  /* Now do the same, but with the alias sets reversed.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set1)))
    return 1;

  /* The two alias sets are distinct and neither one is the
     child of the other.  Therefore, they cannot conflict.  */
  return 0;
}
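
/* A worked example (illustrative, using the `struct S' from the
   overview comment above): once record_component_aliases has run,
   the entry for `struct S' has `int' and `double' as children, so

     alias_sets_conflict_p (S, int)      == 1  (int is in S's children)
     alias_sets_conflict_p (int, double) == 0  (neither is a child of
						the other)
     alias_sets_conflict_p (0, double)   == 1  (alias set zero must
						conflict with everything)

   where S, int and double stand for the alias set numbers of the
   respective types.  */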

static int
walk_mems_2 (rtx *x, rtx mem)
{
  if (MEM_P (*x))
    {
      if (alias_sets_conflict_p (MEM_ALIAS_SET (*x), MEM_ALIAS_SET (mem)))
	return 1;

      return -1;
    }
  return 0;
}

static int
walk_mems_1 (rtx *x, rtx *pat)
{
  if (MEM_P (*x))
    {
      /* Visit all MEMs in *PAT and check independence.  */
      if (for_each_rtx (pat, (rtx_function) walk_mems_2, *x))
	/* Indicate that dependence was determined and stop traversal.  */
	return 1;

      return -1;
    }
  return 0;
}

/* Return 1 if the two specified instructions have MEM exprs with
   conflicting alias sets.  */
bool
insn_alias_sets_conflict_p (rtx insn1, rtx insn2)
{
  /* For each pair of MEMs in INSN1 and INSN2 check their independence.  */
  return for_each_rtx (&PATTERN (insn1), (rtx_function) walk_mems_1,
		       &PATTERN (insn2));
}

/* Return 1 if the two specified alias sets will always conflict.  */

int
alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
{
  if (set1 == 0 || set2 == 0 || set1 == set2)
    return 1;

  return 0;
}

/* Return 1 if any MEM object of type T1 will always conflict (using the
   dependency routines in this file) with any MEM object of type T2.
   This is used when allocating temporary storage.  If T1 and/or T2 are
   NULL_TREE, it means we know nothing about the storage.  */

int
objects_must_conflict_p (tree t1, tree t2)
{
  alias_set_type set1, set2;

  /* If neither has a type specified, we don't know if they'll conflict
     because we may be using them to store objects of various types, for
     example the argument and local variables areas of inlined functions.  */
  if (t1 == 0 && t2 == 0)
    return 0;

  /* If they are the same type, they must conflict.  */
  if (t1 == t2
      /* Likewise if both are volatile.  */
      || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
    return 1;

  set1 = t1 ? get_alias_set (t1) : 0;
  set2 = t2 ? get_alias_set (t2) : 0;

  /* We can't use alias_sets_conflict_p because we must make sure
     that every subtype of t1 will conflict with every subtype of
     t2 for which a pair of subobjects of these respective subtypes
     overlaps on the stack.  */
  return alias_sets_must_conflict_p (set1, set2);
}
\f
/* T is an expression with pointer type.  Find the DECL on which this
   expression is based.  (For example, in `a[i]' this would be `a'.)
   If there is no such DECL, or a unique decl cannot be determined,
   NULL_TREE is returned.  */

static tree
find_base_decl (tree t)
{
  tree d0, d1;

  if (t == 0 || t == error_mark_node || ! POINTER_TYPE_P (TREE_TYPE (t)))
    return 0;

  if (TREE_CODE (t) == SSA_NAME)
    t = SSA_NAME_VAR (t);

  /* If this is a declaration, return it.  If T is based on a restrict
     qualified decl, return that decl.  */
  if (DECL_P (t))
    {
      if (TREE_CODE (t) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (t))
	t = DECL_GET_RESTRICT_BASE (t);
      return t;
    }

  /* Handle general expressions.  It would be nice to deal with
     COMPONENT_REFs here.  If we could tell that `a' and `b' were the
     same, then `a->f' and `b->f' are also the same.  */
  switch (TREE_CODE_CLASS (TREE_CODE (t)))
    {
    case tcc_unary:
      return find_base_decl (TREE_OPERAND (t, 0));

    case tcc_binary:
      /* Return 0 if no decl is found in either operand or if the two
	 decls found disagree; otherwise return the unique decl.  */
      d0 = find_base_decl (TREE_OPERAND (t, 0));
      d1 = find_base_decl (TREE_OPERAND (t, 1));
      if (d0 == d1)
	return d0;
      else if (d0 == 0)
	return d1;
      else if (d1 == 0)
	return d0;
      else
	return 0;

    default:
      return 0;
    }
}

/* Return true if all nested component references handled by
   get_inner_reference in T are such that we should use the alias set
   provided by the object at the heart of T.

   This is true for non-addressable components (which don't have their
   own alias set), as well as components of objects in alias set zero.
   This latter point is a special case wherein we wish to override the
   alias set used by the component, but we don't have per-FIELD_DECL
   assignable alias sets.  */

bool
component_uses_parent_alias_set (const_tree t)
{
  while (1)
    {
      /* If we're at the end, it vacuously uses its own alias set.  */
      if (!handled_component_p (t))
	return false;

      switch (TREE_CODE (t))
	{
	case COMPONENT_REF:
	  if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
	    return true;
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	  if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
	    return true;
	  break;

	case REALPART_EXPR:
	case IMAGPART_EXPR:
	  break;

	default:
	  /* Bitfields and casts are never addressable.  */
	  return true;
	}

      t = TREE_OPERAND (t, 0);
      if (get_alias_set (TREE_TYPE (t)) == 0)
	return true;
    }
}
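
/* As an illustrative example: an access represented as a
   BIT_FIELD_REF or a VIEW_CONVERT_EXPR hits the default case above
   and returns true, so the caller (get_alias_set) keeps stripping
   components and ends up using the alias set of the underlying
   object rather than one for the component; there is no alias set
   that could describe a bit-field on its own.  */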

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  Return -1 if there is nothing
   special about dereferencing T.  */

static alias_set_type
get_deref_alias_set_1 (tree t)
{
  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  */
  if (!flag_strict_aliasing)
    return 0;

  if (! TYPE_P (t))
    {
      tree decl = find_base_decl (t);

      if (decl && DECL_POINTER_ALIAS_SET_KNOWN_P (decl))
	{
	  /* If we haven't computed the actual alias set, do it now.  */
	  if (DECL_POINTER_ALIAS_SET (decl) == -2)
	    {
	      tree pointed_to_type = TREE_TYPE (TREE_TYPE (decl));

	      /* No two restricted pointers can point at the same thing.
		 However, a restricted pointer can point at the same thing
		 as an unrestricted pointer, if that unrestricted pointer
		 is based on the restricted pointer.  So, we make the
		 alias set for the restricted pointer a subset of the
		 alias set for the type pointed to by the type of the
		 decl.  */
	      alias_set_type pointed_to_alias_set
		= get_alias_set (pointed_to_type);

	      if (pointed_to_alias_set == 0)
		/* It's not legal to make a subset of alias set zero.  */
		DECL_POINTER_ALIAS_SET (decl) = 0;
	      else if (AGGREGATE_TYPE_P (pointed_to_type))
		/* For an aggregate, we must treat the restricted
		   pointer the same as an ordinary pointer.  If we
		   were to make the type pointed to by the
		   restricted pointer a subset of the pointed-to
		   type, then we would believe that other subsets
		   of the pointed-to type (such as fields of that
		   type) do not conflict with the type pointed to
		   by the restricted pointer.  */
		DECL_POINTER_ALIAS_SET (decl)
		  = pointed_to_alias_set;
	      else
		{
		  DECL_POINTER_ALIAS_SET (decl) = new_alias_set ();
		  record_alias_subset (pointed_to_alias_set,
				       DECL_POINTER_ALIAS_SET (decl));
		}
	    }

	  /* We use the alias set indicated in the declaration.  */
	  return DECL_POINTER_ALIAS_SET (decl);
	}

      /* Now all we care about is the type.  */
      t = TREE_TYPE (t);
    }

  /* If we have an INDIRECT_REF via a void pointer, we don't
     know anything about what that might alias.  Likewise if the
     pointer is marked that way.  */
  if (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
      || TYPE_REF_CAN_ALIAS_ALL (t))
    return 0;

  return -1;
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  */

alias_set_type
get_deref_alias_set (tree t)
{
  alias_set_type set = get_deref_alias_set_1 (t);

  /* Fall back to the alias-set of the pointed-to type.  */
  if (set == -1)
    {
      if (! TYPE_P (t))
	t = TREE_TYPE (t);
      set = get_alias_set (TREE_TYPE (t));
    }

  return set;
}
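
/* As an illustrative example of the restrict handling above: given

     void f (int *restrict p, int *q);

   the dereference of `p' gets a fresh alias set recorded as a subset
   of the alias set of `int'.  A store through `q' (plain `int', the
   superset) still conflicts with `*p', but two distinct restricted
   pointers each get their own subset, and two sibling subsets do not
   conflict with each other.  */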

/* Return the alias set for T, which may be either a type or an
   expression.  Call language-specific routine for help, if needed.  */

alias_set_type
get_alias_set (tree t)
{
  alias_set_type set;

  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  Also return 0 if this or its type is
     an error.  */
  if (! flag_strict_aliasing || t == error_mark_node
      || (! TYPE_P (t)
	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
    return 0;

  /* We can be passed either an expression or a type.  This and the
     language-specific routine may make mutually-recursive calls to each other
     to figure out what to do.  At each juncture, we see if this is a tree
     that the language may need to handle specially.  First handle things that
     aren't types.  */
  if (! TYPE_P (t))
    {
      tree inner = t;

      /* Remove any nops, then give the language a chance to do
	 something with this tree before we look at it.  */
      STRIP_NOPS (t);
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;

      /* First see if the actual object referenced is an INDIRECT_REF from a
	 restrict-qualified pointer or a "void *".  */
      while (handled_component_p (inner))
	{
	  inner = TREE_OPERAND (inner, 0);
	  STRIP_NOPS (inner);
	}

      if (INDIRECT_REF_P (inner))
	{
	  set = get_deref_alias_set_1 (TREE_OPERAND (inner, 0));
	  if (set != -1)
	    return set;
	}

      /* Otherwise, pick up the outermost object that we could have a pointer
	 to, processing conversions as above.  */
      while (component_uses_parent_alias_set (t))
	{
	  t = TREE_OPERAND (t, 0);
	  STRIP_NOPS (t);
	}

      /* If we've already determined the alias set for a decl, just return
	 it.  This is necessary for C++ anonymous unions, whose component
	 variables don't look like union members (boo!).  */
      if (TREE_CODE (t) == VAR_DECL
	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
	return MEM_ALIAS_SET (DECL_RTL (t));

      /* Now all we care about is the type.  */
      t = TREE_TYPE (t);
    }

  /* Variant qualifiers don't affect the alias set, so get the main
     variant.  Always use the canonical type as well.
     If this is a type with a known alias set, return it.  */
  t = TYPE_MAIN_VARIANT (t);
  if (TYPE_CANONICAL (t))
    t = TYPE_CANONICAL (t);
  if (TYPE_ALIAS_SET_KNOWN_P (t))
    return TYPE_ALIAS_SET (t);

  /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
  if (!COMPLETE_TYPE_P (t))
    {
      /* For arrays with unknown size the conservative answer is the
	 alias set of the element type.  */
      if (TREE_CODE (t) == ARRAY_TYPE)
	return get_alias_set (TREE_TYPE (t));

      /* But return zero as a conservative answer for incomplete types.  */
      return 0;
    }

  /* See if the language has special handling for this type.  */
  set = lang_hooks.get_alias_set (t);
  if (set != -1)
    return set;

  /* There are no objects of FUNCTION_TYPE, so there's no point in
     using up an alias set for them.  (There are, of course, pointers
     and references to functions, but that's different.)  */
  else if (TREE_CODE (t) == FUNCTION_TYPE
	   || TREE_CODE (t) == METHOD_TYPE)
    set = 0;

  /* Unless the language specifies otherwise, let vector types alias
     their components.  This avoids some nasty type punning issues in
     normal usage.  And indeed lets vectors be treated more like an
     array slice.  */
  else if (TREE_CODE (t) == VECTOR_TYPE)
    set = get_alias_set (TREE_TYPE (t));

  /* Unless the language specifies otherwise, treat array types the
     same as their components.  This avoids the asymmetry we get
     through recording the components.  Consider accessing a
     character(kind=1) through a reference to a character(kind=1)[1:1].
     Or consider if we want to assign integer(kind=4)[0:D.1387] and
     integer(kind=4)[4] the same alias set or not.
     Just be pragmatic here and make sure the array and its element
     type get the same alias set assigned.  */
  else if (TREE_CODE (t) == ARRAY_TYPE
	   && !TYPE_NONALIASED_COMPONENT (t))
    set = get_alias_set (TREE_TYPE (t));

  else
    /* Otherwise make a new alias set for this type.  */
    set = new_alias_set ();

  TYPE_ALIAS_SET (t) = set;

  /* If this is an aggregate type, we must record any component aliasing
     information.  */
  if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
    record_component_aliases (t);

  return set;
}
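
/* As an illustrative example of the array rule above: given

     float a[8];
     float s;

   both the access `a[i]' and the access `s' end up in the alias set
   of `float', since an array type shares the alias set of its
   element type; a store to `s' is therefore assumed to possibly
   conflict with loads from `a' unless other disambiguators (base
   terms, offsets) prove otherwise.  */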

/* Return a brand-new alias set.  */

alias_set_type
new_alias_set (void)
{
  if (flag_strict_aliasing)
    {
      if (alias_sets == 0)
	VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
      VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
      return VEC_length (alias_set_entry, alias_sets) - 1;
    }
  else
    return 0;
}

/* Indicate that things in SUBSET can alias things in SUPERSET, but that
   not everything that aliases SUPERSET also aliases SUBSET.  For example,
   in C, a store to an `int' can alias a load of a structure containing an
   `int', and vice versa.  But it can't alias a load of a `double' member
   of the same structure.  Here, the structure would be the SUPERSET and
   `int' the SUBSET.  This relationship is also described in the comment at
   the beginning of this file.

   This function should be called only once per SUPERSET/SUBSET pair.

   It is illegal for SUPERSET to be zero; everything is implicitly a
   subset of alias set zero.  */

void
record_alias_subset (alias_set_type superset, alias_set_type subset)
{
  alias_set_entry superset_entry;
  alias_set_entry subset_entry;

  /* It is possible in complex type situations for both sets to be the same,
     in which case we can ignore this operation.  */
  if (superset == subset)
    return;

  gcc_assert (superset);

  superset_entry = get_alias_set_entry (superset);
  if (superset_entry == 0)
    {
      /* Create an entry for the SUPERSET, so that we have a place to
	 attach the SUBSET.  */
      superset_entry = GGC_NEW (struct alias_set_entry_d);
      superset_entry->alias_set = superset;
      superset_entry->children
	= splay_tree_new_ggc (splay_tree_compare_ints);
      superset_entry->has_zero_child = 0;
      VEC_replace (alias_set_entry, alias_sets, superset, superset_entry);
    }

  if (subset == 0)
    superset_entry->has_zero_child = 1;
  else
    {
      subset_entry = get_alias_set_entry (subset);
      /* If there is an entry for the subset, enter all of its children
	 (if they are not already present) as children of the SUPERSET.  */
      if (subset_entry)
	{
	  if (subset_entry->has_zero_child)
	    superset_entry->has_zero_child = 1;

	  splay_tree_foreach (subset_entry->children, insert_subset_children,
			      superset_entry->children);
	}

      /* Enter the SUBSET itself as a child of the SUPERSET.  */
      splay_tree_insert (superset_entry->children,
			 (splay_tree_key) subset, 0);
    }
}

/* Record that component types of TYPE, if any, are part of that type for
   aliasing purposes.  For record types, we only record component types
   for fields that are not marked non-addressable.  For array types, we
   only record the component type if it is not marked non-aliased.  */

void
record_component_aliases (tree type)
{
  alias_set_type superset = get_alias_set (type);
  tree field;

  if (superset == 0)
    return;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Recursively record aliases for the base classes, if there are any.  */
      if (TYPE_BINFO (type))
	{
	  int i;
	  tree binfo, base_binfo;

	  for (binfo = TYPE_BINFO (type), i = 0;
	       BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
	    record_alias_subset (superset,
				 get_alias_set (BINFO_TYPE (base_binfo)));
	}
      for (field = TYPE_FIELDS (type); field != 0; field = TREE_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
	  record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
      break;

    case COMPLEX_TYPE:
      record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
      break;

      /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
	 element type.  */

    default:
      break;
    }
}
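
/* A worked trace (illustrative): for

     struct S { int i; double d; };
     struct T { struct S s; float f; };

   record_component_aliases for `struct T' calls record_alias_subset
   with the alias sets of `struct S' and `float'.  Since `struct S'
   already has `int' and `double' as children, insert_subset_children
   copies them into the entry for `struct T' as well, so its children
   end up as {struct S, int, double, float} and queries never need to
   trace past immediate descendants.  */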

/* Allocate an alias set for use in storing and reading from the varargs
   spill area.  */

static GTY(()) alias_set_type varargs_set = -1;

alias_set_type
get_varargs_alias_set (void)
{
#if 1
  /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
     varargs alias set to an INDIRECT_REF (FIXME!), so we can't
     consistently use the varargs alias set for loads from the varargs
     area.  So don't use it anywhere.  */
  return 0;
#else
  if (varargs_set == -1)
    varargs_set = new_alias_set ();

  return varargs_set;
#endif
}

/* Likewise, but used for the fixed portions of the frame, e.g., register
   save areas.  */

static GTY(()) alias_set_type frame_set = -1;

alias_set_type
get_frame_alias_set (void)
{
  if (frame_set == -1)
    frame_set = new_alias_set ();

  return frame_set;
}

/* Inside SRC, the source of a SET, find a base address.  */

static rtx
find_base_value (rtx src)
{
  unsigned int regno;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  src = FIND_BASE_TERM (src);
#endif

  switch (GET_CODE (src))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return src;

    case REG:
      regno = REGNO (src);
      /* At the start of a function, argument registers have known base
	 values which may be lost later.  Returning an ADDRESS
	 expression here allows optimization based on argument values
	 even when the argument registers are used for other purposes.  */
      if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
	return new_reg_base_value[regno];

      /* If a pseudo has a known base value, return it.  Do not do this
	 for non-fixed hard regs since it can result in a circular
	 dependency chain for registers which have values at function entry.

	 The test above is not sufficient because the scheduler may move
	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
      if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
	  && regno < VEC_length (rtx, reg_base_value))
	{
	  /* If we're inside init_alias_analysis, use new_reg_base_value
	     to reduce the number of relaxation iterations.  */
	  if (new_reg_base_value && new_reg_base_value[regno]
	      && DF_REG_DEF_COUNT (regno) == 1)
	    return new_reg_base_value[regno];

	  if (VEC_index (rtx, reg_base_value, regno))
	    return VEC_index (rtx, reg_base_value, regno);
	}

      return 0;

    case MEM:
      /* Check for an argument passed in memory.  Only record in the
	 copying-arguments block; it is too hard to track changes
	 otherwise.  */
      if (copying_arguments
	  && (XEXP (src, 0) == arg_pointer_rtx
	      || (GET_CODE (XEXP (src, 0)) == PLUS
		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
	return gen_rtx_ADDRESS (VOIDmode, src);
      return 0;

    case CONST:
      src = XEXP (src, 0);
      if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
	break;

      /* ... fall through ...  */

    case PLUS:
    case MINUS:
      {
	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);

	/* If either operand is a REG that is a known pointer, then it
	   is the base.  */
	if (REG_P (src_0) && REG_POINTER (src_0))
	  return find_base_value (src_0);
	if (REG_P (src_1) && REG_POINTER (src_1))
	  return find_base_value (src_1);

	/* If either operand is a REG, then see if we already have
	   a known value for it.  */
	if (REG_P (src_0))
	  {
	    temp = find_base_value (src_0);
	    if (temp != 0)
	      src_0 = temp;
	  }

	if (REG_P (src_1))
	  {
	    temp = find_base_value (src_1);
	    if (temp != 0)
	      src_1 = temp;
	  }

	/* If either base is a named object or a special address
	   (like an argument or stack reference), then use it for the
	   base term.  */
	if (src_0 != 0
	    && (GET_CODE (src_0) == SYMBOL_REF
		|| GET_CODE (src_0) == LABEL_REF
		|| (GET_CODE (src_0) == ADDRESS
		    && GET_MODE (src_0) != VOIDmode)))
	  return src_0;

	if (src_1 != 0
	    && (GET_CODE (src_1) == SYMBOL_REF
		|| GET_CODE (src_1) == LABEL_REF
		|| (GET_CODE (src_1) == ADDRESS
		    && GET_MODE (src_1) != VOIDmode)))
	  return src_1;

	/* Guess which operand is the base address:
	   If either operand is a symbol, then it is the base.  If
	   either operand is a CONST_INT, then the other is the base.  */
	if (GET_CODE (src_1) == CONST_INT || CONSTANT_P (src_0))
	  return find_base_value (src_0);
	else if (GET_CODE (src_0) == CONST_INT || CONSTANT_P (src_1))
	  return find_base_value (src_1);

	return 0;
      }

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
	 second operand.  */
      return find_base_value (XEXP (src, 1));

    case AND:
      /* If the second operand is constant, set the base
	 address to the first operand.  */
      if (GET_CODE (XEXP (src, 1)) == CONST_INT && INTVAL (XEXP (src, 1)) != 0)
	return find_base_value (XEXP (src, 0));
      return 0;

    case TRUNCATE:
      if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
	break;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_value (XEXP (src, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* Used for NT/Alpha pointers.  */
      {
	rtx temp = find_base_value (XEXP (src, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    default:
      break;
    }

  return 0;
}
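
/* As an illustrative example: for a set such as

     (set (reg 100) (plus (reg 101) (const_int 8)))

   find_base_value looks through the PLUS: the CONST_INT cannot be a
   base, so whatever base value is already known for (reg 101), if
   any, is returned and becomes the base recorded for (reg 100).  For

     (set (reg 102) (symbol_ref "x"))

   the SYMBOL_REF itself is the base value.  */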

/* Called from init_alias_analysis indirectly through note_stores.  */

/* While scanning insns to find base values, reg_seen[N] is nonzero if
   register N has been set in this function.  */
static char *reg_seen;

/* Addresses which are known not to alias anything else are identified
   by a unique integer.  */
static int unique_id;

static void
record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
{
  unsigned regno;
  rtx src;
  int n;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);

  gcc_assert (regno < VEC_length (rtx, reg_base_value));

  /* If this spans multiple hard registers, then we must indicate that every
     register has an unusable value.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    n = hard_regno_nregs[regno][GET_MODE (dest)];
  else
    n = 1;
  if (n != 1)
    {
      while (--n >= 0)
	{
	  reg_seen[regno + n] = 1;
	  new_reg_base_value[regno + n] = 0;
	}
      return;
    }

  if (set)
    {
      /* A CLOBBER wipes out any old value but does not prevent a previously
	 unset register from acquiring a base address (i.e. reg_seen is not
	 set).  */
      if (GET_CODE (set) == CLOBBER)
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      src = SET_SRC (set);
    }
  else
    {
      if (reg_seen[regno])
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      reg_seen[regno] = 1;
      new_reg_base_value[regno] = gen_rtx_ADDRESS (Pmode,
						   GEN_INT (unique_id++));
      return;
    }

  /* If this is not the first set of REGNO, see whether the new value
     is related to the old one.  There are two cases of interest:

	(1) The register might be assigned an entirely new value
	    that has the same base term as the original set.

	(2) The set might be a simple self-modification that
	    cannot change REGNO's base value.

     If neither case holds, reject the original base value as invalid.
     Note that the following situation is not detected:

	 extern int x, y;  int *p = &x;  p += (&y-&x);

     ANSI C does not allow computing the difference of addresses
     of distinct top level objects.  */
  if (new_reg_base_value[regno] != 0
      && find_base_value (src) != new_reg_base_value[regno])
    switch (GET_CODE (src))
      {
      case LO_SUM:
      case MINUS:
	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
	  new_reg_base_value[regno] = 0;
	break;
      case PLUS:
	/* If the value we add in the PLUS is also a valid base value,
	   this might be the actual base value, and the original value
	   an index.  */
	{
	  rtx other = NULL_RTX;

	  if (XEXP (src, 0) == dest)
	    other = XEXP (src, 1);
	  else if (XEXP (src, 1) == dest)
	    other = XEXP (src, 0);

	  if (! other || find_base_value (other))
	    new_reg_base_value[regno] = 0;
	  break;
	}
      case AND:
	if (XEXP (src, 0) != dest || GET_CODE (XEXP (src, 1)) != CONST_INT)
	  new_reg_base_value[regno] = 0;
	break;
      default:
	new_reg_base_value[regno] = 0;
	break;
      }
  /* If this is the first set of a register, record the value.  */
  else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
	   && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
    new_reg_base_value[regno] = find_base_value (src);

  reg_seen[regno] = 1;
}

/* If a value is known for REGNO, return it.  */

rtx
get_reg_known_value (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	return reg_known_value[regno];
    }
  return NULL;
}

/* Set it.  */

static void
set_reg_known_value (unsigned int regno, rtx val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	reg_known_value[regno] = val;
    }
}

/* Similarly for reg_known_equiv_p.  */

bool
get_reg_known_equiv_p (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	return reg_known_equiv_p[regno];
    }
  return false;
}

static void
set_reg_known_equiv_p (unsigned int regno, bool val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	reg_known_equiv_p[regno] = val;
    }
}

/* Returns a canonical version of X, from the point of view of alias
   analysis.  (For example, if X is a MEM whose address is a register,
   and the register has a known value (say a SYMBOL_REF), then a MEM
   whose address is the SYMBOL_REF is returned.)  */

rtx
canon_rtx (rtx x)
{
  /* Recursively look for equivalences.  */
  if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (x));
      if (t == x)
	return x;
      if (t)
	return canon_rtx (t);
    }

  if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
	{
	  if (GET_CODE (x0) == CONST_INT)
	    return plus_constant (x1, INTVAL (x0));
	  else if (GET_CODE (x1) == CONST_INT)
	    return plus_constant (x0, INTVAL (x1));
	  return gen_rtx_PLUS (GET_MODE (x), x0, x1);
	}
    }

  /* This gives us much better alias analysis when called from
     the loop optimizer.  Note we want to leave the original
     MEM alone, but need to return the canonicalized MEM with
     all the flags with their original values.  */
  else if (MEM_P (x))
    x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));

  return x;
}
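
/* As an illustrative example: if (reg 100) has the known value
   (symbol_ref "buf"), then

     canon_rtx ((plus (reg 100) (const_int 4)))

   first canonicalizes the operands to (symbol_ref "buf") and
   (const_int 4), and then folds them with plus_constant so that
   later address comparisons see the symbol plus a constant offset
   directly instead of the pseudo register.  */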

/* Return 1 if X and Y are identical-looking rtx's.
   Expect that X and Y have already been canonicalized.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (const_rtx x, const_rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Some RTL can be compared without a recursive examination.  */
  switch (code)
    {
    case REG:
      return REGNO (x) == REGNO (y);

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case VALUE:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
      /* There's no need to compare the contents of CONST_DOUBLEs or
	 CONST_INTs because pointer equality is a good enough
	 comparison for these nodes.  */
      return 0;

    default:
      break;
    }

  /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
  if (code == PLUS)
    return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
  /* For commutative operations, the RTXs match if the operands match in
     either order.  Also handle the simple binary and unary cases without
     a loop.  */
  if (COMMUTATIVE_P (x))
    {
      rtx xop0 = canon_rtx (XEXP (x, 0));
      rtx yop0 = canon_rtx (XEXP (y, 0));
      rtx yop1 = canon_rtx (XEXP (y, 1));

      return ((rtx_equal_for_memref_p (xop0, yop0)
	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
	      || (rtx_equal_for_memref_p (xop0, yop1)
		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
    }
  else if (NON_COMMUTATIVE_P (x))
    {
      return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				      canon_rtx (XEXP (y, 0)))
	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
					 canon_rtx (XEXP (y, 1))));
    }
  else if (UNARY_P (x))
    return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				   canon_rtx (XEXP (y, 0)));

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.

     Limit cases to types which actually appear in addresses.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
					canon_rtx (XVECEXP (y, i, j))) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
				      canon_rtx (XEXP (y, i))) == 0)
	    return 0;
	  break;

	  /* This can happen for asm operands.  */
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	/* This can happen for an asm which clobbers memory.  */
	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return 1;
}

rtx
find_base_term (rtx x)
{
  cselib_val *val;
  struct elt_loc_list *l;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  x = FIND_BASE_TERM (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
      return REG_BASE_VALUE (x);

    case TRUNCATE:
      if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
	return 0;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_term (XEXP (x, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* Used for Alpha/NT pointers.  */
      {
	rtx temp = find_base_term (XEXP (x, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    case VALUE:
      val = CSELIB_VAL_PTR (x);
      if (!val)
	return 0;
      for (l = val->locs; l; l = l->next)
	if ((x = find_base_term (l->loc)) != 0)
	  return x;
      return 0;

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
	 second operand.  */
      return find_base_term (XEXP (x, 1));

    case CONST:
      x = XEXP (x, 0);
      if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
	return 0;
      /* Fall through.  */
    case PLUS:
    case MINUS:
      {
	rtx tmp1 = XEXP (x, 0);
	rtx tmp2 = XEXP (x, 1);

	/* This is a little bit tricky since we have to determine which of
	   the two operands represents the real base address.  Otherwise this
	   routine may return the index register instead of the base register.

	   That may cause us to believe no aliasing was possible, when in
	   fact aliasing is possible.

	   We use a few simple tests to guess the base register.  Additional
	   tests can certainly be added.  For example, if one of the operands
	   is a shift or multiply, then it must be the index register and the
	   other operand is the base register.  */

	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
	  return find_base_term (tmp2);

	/* If either operand is known to be a pointer, then use it
	   to determine the base term.  */
	if (REG_P (tmp1) && REG_POINTER (tmp1))
	  return find_base_term (tmp1);

	if (REG_P (tmp2) && REG_POINTER (tmp2))
	  return find_base_term (tmp2);

	/* Neither operand was known to be a pointer.  Go ahead and find the
	   base term for both operands.  */
	tmp1 = find_base_term (tmp1);
	tmp2 = find_base_term (tmp2);

	/* If either base term is a named object or a special address
	   (like an argument or stack reference), then use it for the
	   base term.  */
	if (tmp1 != 0
	    && (GET_CODE (tmp1) == SYMBOL_REF
		|| GET_CODE (tmp1) == LABEL_REF
		|| (GET_CODE (tmp1) == ADDRESS
		    && GET_MODE (tmp1) != VOIDmode)))
	  return tmp1;

	if (tmp2 != 0
	    && (GET_CODE (tmp2) == SYMBOL_REF
		|| GET_CODE (tmp2) == LABEL_REF
		|| (GET_CODE (tmp2) == ADDRESS
		    && GET_MODE (tmp2) != VOIDmode)))
	  return tmp2;

	/* We could not determine which of the two operands was the
	   base register and which was the index.  So we can determine
	   nothing from the base alias check.  */
	return 0;
      }

    case AND:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) != 0)
	return find_base_term (XEXP (x, 0));
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return x;

    default:
      return 0;
    }
}

/* Return 0 if the addresses X and Y are known to point to different
   objects, 1 if they might be pointers to the same object.  */

static int
base_alias_check (rtx x, rtx y, enum machine_mode x_mode,
		  enum machine_mode y_mode)
{
  rtx x_base = find_base_term (x);
  rtx y_base = find_base_term (y);

  /* If the address itself has no known base see if a known equivalent
     value has one.  If either address still has no known base, nothing
     is known about aliasing.  */
  if (x_base == 0)
    {
      rtx x_c;

      if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
	return 1;

      x_base = find_base_term (x_c);
      if (x_base == 0)
	return 1;
    }

  if (y_base == 0)
    {
      rtx y_c;
      if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
	return 1;

      y_base = find_base_term (y_c);
      if (y_base == 0)
	return 1;
    }

  /* If the base addresses are equal nothing is known about aliasing.  */
  if (rtx_equal_p (x_base, y_base))
    return 1;

  /* The base addresses are different expressions.  If they are not accessed
     via AND, there is no conflict.  We can bring knowledge of object
     alignment into play here.  For example, on alpha, "char a, b;" can
     alias one another, though "char a; long b;" cannot.  AND addresses may
     implicitly alias surrounding objects; i.e. unaligned access in DImode
     via AND address can alias all surrounding object types except those
     with alignment 8 or higher.  */
  if (GET_CODE (x) == AND && GET_CODE (y) == AND)
    return 1;
  if (GET_CODE (x) == AND
      && (GET_CODE (XEXP (x, 1)) != CONST_INT
	  || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
    return 1;
  if (GET_CODE (y) == AND
      && (GET_CODE (XEXP (y, 1)) != CONST_INT
	  || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
    return 1;

  /* Differing symbols not accessed via AND never alias.  */
  if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
    return 0;

  /* If one address is a stack reference there can be no alias:
     stack references using different base registers do not alias,
     a stack reference can not alias a parameter, and a stack reference
     can not alias a global.  */
  if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode)
      || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
    return 0;

  if (! flag_argument_noalias)
    return 1;

  if (flag_argument_noalias > 1)
    return 0;

  /* Weak noalias assertion (arguments are distinct, but may match globals).  */
  return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode);
}
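
/* As an illustrative example: if X's base term is (symbol_ref "a")
   and Y's base term is (symbol_ref "b"), the bases are different
   non-ADDRESS expressions and neither address uses AND, so
   base_alias_check returns 0: the references cannot alias.  If
   instead Y's base is the ADDRESS recorded for the stack pointer
   (mode Pmode), the stack reference likewise cannot alias the
   global "a".  */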

/* Convert the address X into something we can use.  This is done by returning
   it unchanged unless it is a value; in the latter case we call cselib to get
   a more useful rtx.  */

rtx
get_addr (rtx x)
{
  cselib_val *v;
  struct elt_loc_list *l;

  if (GET_CODE (x) != VALUE)
    return x;
  v = CSELIB_VAL_PTR (x);
  if (v)
    {
      for (l = v->locs; l; l = l->next)
	if (CONSTANT_P (l->loc))
	  return l->loc;
      for (l = v->locs; l; l = l->next)
	if (!REG_P (l->loc) && !MEM_P (l->loc))
	  return l->loc;
      if (v->locs)
	return v->locs->loc;
    }
  return x;
}

/* Return the address of the (N_REFS + 1)th memory reference to ADDR
   where SIZE is the size in bytes of the memory reference.  If ADDR
   is not modified by the memory reference then ADDR is returned.  */

static rtx
addr_side_effect_eval (rtx addr, int size, int n_refs)
{
  int offset = 0;

  switch (GET_CODE (addr))
    {
    case PRE_INC:
      offset = (n_refs + 1) * size;
      break;
    case PRE_DEC:
      offset = -(n_refs + 1) * size;
      break;
    case POST_INC:
      offset = n_refs * size;
      break;
    case POST_DEC:
      offset = -n_refs * size;
      break;

    default:
      return addr;
    }

  if (offset)
    addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
			 GEN_INT (offset));
  else
    addr = XEXP (addr, 0);
  addr = canon_rtx (addr);

  return addr;
}
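
/* A worked example of the offsets above: with SIZE == 4 and
   N_REFS == 0, a (pre_inc (reg R)) address evaluates to
   (plus (reg R) (const_int 4)), since the increment happens before
   the first reference, while a (post_inc (reg R)) address evaluates
   to (reg R) itself: offset 0 * 4, so only the inner register is
   returned.  For the second reference (N_REFS == 1) the post_inc
   form becomes (plus (reg R) (const_int 4)) as well.  */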
1705
1706 /* Return nonzero if X and Y (memory addresses) could reference the
1707 same location in memory. C is an offset accumulator. When
1708 C is nonzero, we are testing aliases between X and Y + C.
1709 XSIZE is the size in bytes of the X reference,
1710 similarly YSIZE is the size in bytes for Y.
1711 Expect that canon_rtx has been already called for X and Y.
1712
1713 If XSIZE or YSIZE is zero, we do not know the amount of memory being
1714 referenced (the reference was BLKmode), so make the most pessimistic
1715 assumptions.
1716
1717 If XSIZE or YSIZE is negative, we may access memory outside the object
1718 being referenced as a side effect. This can happen when using AND to
1719 align memory references, as is done on the Alpha.
1720
1721 Nice to notice that varying addresses cannot conflict with fp if no
1722 local variables had their addresses taken, but that's too hard now. */
1723
1724 static int
1725 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
1726 {
1727 if (GET_CODE (x) == VALUE)
1728 x = get_addr (x);
1729 if (GET_CODE (y) == VALUE)
1730 y = get_addr (y);
1731 if (GET_CODE (x) == HIGH)
1732 x = XEXP (x, 0);
1733 else if (GET_CODE (x) == LO_SUM)
1734 x = XEXP (x, 1);
1735 else
1736 x = addr_side_effect_eval (x, xsize, 0);
1737 if (GET_CODE (y) == HIGH)
1738 y = XEXP (y, 0);
1739 else if (GET_CODE (y) == LO_SUM)
1740 y = XEXP (y, 1);
1741 else
1742 y = addr_side_effect_eval (y, ysize, 0);
1743
1744 if (rtx_equal_for_memref_p (x, y))
1745 {
1746 if (xsize <= 0 || ysize <= 0)
1747 return 1;
1748 if (c >= 0 && xsize > c)
1749 return 1;
1750 if (c < 0 && ysize+c > 0)
1751 return 1;
1752 return 0;
1753 }
1754
1755 /* This code used to check for conflicts involving stack references and
1756 globals but the base address alias code now handles these cases. */
1757
1758 if (GET_CODE (x) == PLUS)
1759 {
1760 /* The fact that X is canonicalized means that this
1761 PLUS rtx is canonicalized. */
1762 rtx x0 = XEXP (x, 0);
1763 rtx x1 = XEXP (x, 1);
1764
1765 if (GET_CODE (y) == PLUS)
1766 {
1767 /* The fact that Y is canonicalized means that this
1768 PLUS rtx is canonicalized. */
1769 rtx y0 = XEXP (y, 0);
1770 rtx y1 = XEXP (y, 1);
1771
1772 if (rtx_equal_for_memref_p (x1, y1))
1773 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
1774 if (rtx_equal_for_memref_p (x0, y0))
1775 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
1776 if (GET_CODE (x1) == CONST_INT)
1777 {
1778 if (GET_CODE (y1) == CONST_INT)
1779 return memrefs_conflict_p (xsize, x0, ysize, y0,
1780 c - INTVAL (x1) + INTVAL (y1));
1781 else
1782 return memrefs_conflict_p (xsize, x0, ysize, y,
1783 c - INTVAL (x1));
1784 }
1785 else if (GET_CODE (y1) == CONST_INT)
1786 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
1787
1788 return 1;
1789 }
1790 else if (GET_CODE (x1) == CONST_INT)
1791 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
1792 }
1793 else if (GET_CODE (y) == PLUS)
1794 {
1795 /* The fact that Y is canonicalized means that this
1796 PLUS rtx is canonicalized. */
1797 rtx y0 = XEXP (y, 0);
1798 rtx y1 = XEXP (y, 1);
1799
1800 if (GET_CODE (y1) == CONST_INT)
1801 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
1802 else
1803 return 1;
1804 }
1805
1806 if (GET_CODE (x) == GET_CODE (y))
1807 switch (GET_CODE (x))
1808 {
1809 case MULT:
1810 {
1811 /* Handle cases where we expect the second operands to be the
1812 same, and check only whether the first operands would conflict
1813 or not. */
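/* For instance (an illustrative, target-neutral case): with
   X == (mult (reg i) (const_int 4)) and Y == (mult (reg j) (const_int 4)),
   the equal scale is stripped and XSIZE, YSIZE and C are each divided
   by 4 before recursing on (reg i) and (reg j), so the conflict test
   continues in units of the scaled element.  */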
1814 rtx x0, y0;
1815 rtx x1 = canon_rtx (XEXP (x, 1));
1816 rtx y1 = canon_rtx (XEXP (y, 1));
1817 if (! rtx_equal_for_memref_p (x1, y1))
1818 return 1;
1819 x0 = canon_rtx (XEXP (x, 0));
1820 y0 = canon_rtx (XEXP (y, 0));
1821 if (rtx_equal_for_memref_p (x0, y0))
1822 return (xsize == 0 || ysize == 0
1823 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
1824
1825 /* Can't properly adjust our sizes. */
1826 if (GET_CODE (x1) != CONST_INT)
1827 return 1;
1828 xsize /= INTVAL (x1);
1829 ysize /= INTVAL (x1);
1830 c /= INTVAL (x1);
1831 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
1832 }
1833
1834 default:
1835 break;
1836 }
1837
1838 /* Treat an access through an AND (e.g. a subword access on an Alpha)
1839 as an access with indeterminate size. Assume that references
1840 besides AND are aligned, so if the size of the other reference is
1841 at least as large as the alignment, assume no other overlap. */
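/* For example (a sketch): X == (and (reg p) (const_int -8)) describes an
   8-byte-aligned access; -INTVAL of the mask recovers the alignment, 8.
   If the other reference is itself an AND, or is narrower than 8 bytes,
   XSIZE becomes -1, meaning "may access memory outside the object";
   otherwise XSIZE is kept as is before recursing on (reg p).  */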
1842 if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT)
1843 {
1844 if (GET_CODE (y) == AND || ysize < -INTVAL (XEXP (x, 1)))
1845 xsize = -1;
1846 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, y, c);
1847 }
1848 if (GET_CODE (y) == AND && GET_CODE (XEXP (y, 1)) == CONST_INT)
1849 {
1850 /* ??? If we are indexing far enough into the array/structure, we
1851 may yet be able to determine that we cannot overlap. But we
1852 also need to know that we are far enough from the end not to overlap
1853 a following reference, so we do nothing with that for now. */
1854 if (GET_CODE (x) == AND || xsize < -INTVAL (XEXP (y, 1)))
1855 ysize = -1;
1856 return memrefs_conflict_p (xsize, x, ysize, canon_rtx (XEXP (y, 0)), c);
1857 }
1858
1859 if (CONSTANT_P (x))
1860 {
1861 if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
1862 {
1863 c += (INTVAL (y) - INTVAL (x));
1864 return (xsize <= 0 || ysize <= 0
1865 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
1866 }
1867
1868 if (GET_CODE (x) == CONST)
1869 {
1870 if (GET_CODE (y) == CONST)
1871 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
1872 ysize, canon_rtx (XEXP (y, 0)), c);
1873 else
1874 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
1875 ysize, y, c);
1876 }
1877 if (GET_CODE (y) == CONST)
1878 return memrefs_conflict_p (xsize, x, ysize,
1879 canon_rtx (XEXP (y, 0)), c);
1880
1881 if (CONSTANT_P (y))
1882 return (xsize <= 0 || ysize <= 0
1883 || (rtx_equal_for_memref_p (x, y)
1884 && ((c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))));
1885
1886 return 1;
1887 }
1888 return 1;
1889 }
1890
1891 /* Functions to compute memory dependencies.
1892
1893 Since we process the insns in execution order, we can build tables
1894 to keep track of what registers are fixed (and not aliased), what registers
1895 are varying in known ways, and what registers are varying in unknown
1896 ways.
1897
1898 If both memory references are volatile, then there must always be a
1899 dependence between the two references, since their order cannot be
1900 changed. A volatile and a non-volatile reference can be
1901 interchanged, though.
1902
1903 A MEM_IN_STRUCT reference at a non-AND varying address can never
1904 conflict with a non-MEM_IN_STRUCT reference at a fixed address. We
1905 also must allow AND addresses, because they may generate accesses
1906 outside the object being referenced. This is used to generate
1907 aligned addresses from unaligned addresses, for instance, the alpha
1908 storeqi_unaligned pattern. */
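
/* For example, two volatile reads of the same device register must not
   be reordered, so read_dependence below reports a dependence in the
   volatile/volatile case even though no data flows between the reads.  */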
1909
1910 /* Read dependence: X is read after read in MEM takes place. There can
1911 only be a dependence here if both reads are volatile. */
1912
1913 int
1914 read_dependence (const_rtx mem, const_rtx x)
1915 {
1916 return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
1917 }
1918
1919 /* Returns MEM1 if and only if MEM1 is a scalar at a fixed address and
1920 MEM2 is a reference to a structure at a varying address, or returns
1921 MEM2 if vice versa. Otherwise, returns NULL_RTX. If a non-NULL
1922 value is returned, MEM1 and MEM2 can never alias. VARIES_P is used
1923 to decide whether or not an address may vary; it should return
1924 nonzero whenever variation is possible.
1925 MEM1_ADDR and MEM2_ADDR are the addresses of MEM1 and MEM2. */
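
/* For example (illustrative only): if MEM1 is a scalar `int' in a frame
   slot, so that MEM1_ADDR is a fixed (plus (reg fp) (const_int ...)),
   and MEM2 reaches a structure field through a pointer, so that
   MEM2_ADDR varies, then under -fstrict-aliasing MEM1 is returned and
   the two references may be treated as non-aliasing.  */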
1926
1927 static const_rtx
1928 fixed_scalar_and_varying_struct_p (const_rtx mem1, const_rtx mem2, rtx mem1_addr,
1929 rtx mem2_addr,
1930 bool (*varies_p) (const_rtx, bool))
1931 {
1932 if (! flag_strict_aliasing)
1933 return NULL_RTX;
1934
1935 if (MEM_ALIAS_SET (mem2)
1936 && MEM_SCALAR_P (mem1) && MEM_IN_STRUCT_P (mem2)
1937 && !varies_p (mem1_addr, 1) && varies_p (mem2_addr, 1))
1938 /* MEM1 is a scalar at a fixed address; MEM2 is a struct at a
1939 varying address. */
1940 return mem1;
1941
1942 if (MEM_ALIAS_SET (mem1)
1943 && MEM_IN_STRUCT_P (mem1) && MEM_SCALAR_P (mem2)
1944 && varies_p (mem1_addr, 1) && !varies_p (mem2_addr, 1))
1945 /* MEM2 is a scalar at a fixed address; MEM1 is a struct at a
1946 varying address. */
1947 return mem2;
1948
1949 return NULL_RTX;
1950 }
1951
1952 /* Returns nonzero if something about the mode or address format of MEM
1953 indicates that it might well alias *anything*. */
1954
1955 static int
1956 aliases_everything_p (const_rtx mem)
1957 {
1958 if (GET_CODE (XEXP (mem, 0)) == AND)
1959 /* If the address is an AND, it's very hard to know what it is
1960 actually pointing at. */
1961 return 1;
1962
1963 return 0;
1964 }
1965
1966 /* Return true if we can determine that the fields referenced cannot
1967 overlap for any pair of objects. */
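
/* For example (a sketch): given `struct S { int a; int b; } *p, *q', the
   accesses p->a and q->b reach the common RECORD_TYPE `struct S' with
   different FIELD_DECLs, so they cannot overlap even if p == q.  If the
   common type is a union its fields may overlap, so that level proves
   nothing; if no common type is found at all, we return false.  */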
1968
1969 static bool
1970 nonoverlapping_component_refs_p (const_tree x, const_tree y)
1971 {
1972 const_tree fieldx, fieldy, typex, typey, orig_y;
1973
1974 do
1975 {
1976 /* The comparison has to be done at a common type, since we don't
1977 know how the inheritance hierarchy works. */
1978 orig_y = y;
1979 do
1980 {
1981 fieldx = TREE_OPERAND (x, 1);
1982 typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));
1983
1984 y = orig_y;
1985 do
1986 {
1987 fieldy = TREE_OPERAND (y, 1);
1988 typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));
1989
1990 if (typex == typey)
1991 goto found;
1992
1993 y = TREE_OPERAND (y, 0);
1994 }
1995 while (y && TREE_CODE (y) == COMPONENT_REF);
1996
1997 x = TREE_OPERAND (x, 0);
1998 }
1999 while (x && TREE_CODE (x) == COMPONENT_REF);
2000 /* Never found a common type. */
2001 return false;
2002
2003 found:
2004 /* If we're left with accessing different fields of a structure,
2005 then no overlap. */
2006 if (TREE_CODE (typex) == RECORD_TYPE
2007 && fieldx != fieldy)
2008 return true;
2009
2010 /* The comparison on the current field failed. If we're accessing
2011 a deeply nested structure, look at the next outer level. */
2012 x = TREE_OPERAND (x, 0);
2013 y = TREE_OPERAND (y, 0);
2014 }
2015 while (x && y
2016 && TREE_CODE (x) == COMPONENT_REF
2017 && TREE_CODE (y) == COMPONENT_REF);
2018
2019 return false;
2020 }
2021
2022 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */
2023
2024 static tree
2025 decl_for_component_ref (tree x)
2026 {
2027 do
2028 {
2029 x = TREE_OPERAND (x, 0);
2030 }
2031 while (x && TREE_CODE (x) == COMPONENT_REF);
2032
2033 return x && DECL_P (x) ? x : NULL_TREE;
2034 }
2035
2036 /* Walk up the COMPONENT_REF list and adjust OFFSET to compensate for the
2037 offset of the field reference. */
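
/* For example (a sketch): for `struct S { int a; struct T { int b; int c; }
   t; } x' and the reference x.t.c, the walk adds the offset of `c'
   within `struct T' and then the offset of `t' within `struct S' to
   IOFFSET, yielding the offset of the access from the start of `x'.  */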
2038
2039 static rtx
2040 adjust_offset_for_component_ref (tree x, rtx offset)
2041 {
2042 HOST_WIDE_INT ioffset;
2043
2044 if (! offset)
2045 return NULL_RTX;
2046
2047 ioffset = INTVAL (offset);
2048 do
2049 {
2050 tree offset = component_ref_field_offset (x);
2051 tree field = TREE_OPERAND (x, 1);
2052
2053 if (! host_integerp (offset, 1))
2054 return NULL_RTX;
2055 ioffset += (tree_low_cst (offset, 1)
2056 + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
2057 / BITS_PER_UNIT));
2058
2059 x = TREE_OPERAND (x, 0);
2060 }
2061 while (x && TREE_CODE (x) == COMPONENT_REF);
2062
2063 return GEN_INT (ioffset);
2064 }
2065
2066 /* Return nonzero if we can determine the exprs corresponding to memrefs
2067 X and Y and they do not overlap. */
2068
2069 int
2070 nonoverlapping_memrefs_p (const_rtx x, const_rtx y)
2071 {
2072 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2073 rtx rtlx, rtly;
2074 rtx basex, basey;
2075 rtx moffsetx, moffsety;
2076 HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2077
2078 /* Unless both have exprs, we can't tell anything. */
2079 if (exprx == 0 || expry == 0)
2080 return 0;
2081
2082 /* If both are field references, we may be able to determine something. */
2083 if (TREE_CODE (exprx) == COMPONENT_REF
2084 && TREE_CODE (expry) == COMPONENT_REF
2085 && nonoverlapping_component_refs_p (exprx, expry))
2086 return 1;
2087
2088
2089 /* If the field reference test failed, look at the DECLs involved. */
2090 moffsetx = MEM_OFFSET (x);
2091 if (TREE_CODE (exprx) == COMPONENT_REF)
2092 {
2093 if (TREE_CODE (expry) == VAR_DECL
2094 && POINTER_TYPE_P (TREE_TYPE (expry)))
2095 {
2096 tree field = TREE_OPERAND (exprx, 1);
2097 tree fieldcontext = DECL_FIELD_CONTEXT (field);
2098 if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
2099 TREE_TYPE (field)))
2100 return 1;
2101 }
2102 {
2103 tree t = decl_for_component_ref (exprx);
2104 if (! t)
2105 return 0;
2106 moffsetx = adjust_offset_for_component_ref (exprx, moffsetx);
2107 exprx = t;
2108 }
2109 }
2110 else if (INDIRECT_REF_P (exprx))
2111 {
2112 exprx = TREE_OPERAND (exprx, 0);
2113 if (flag_argument_noalias < 2
2114 || TREE_CODE (exprx) != PARM_DECL)
2115 return 0;
2116 }
2117
2118 moffsety = MEM_OFFSET (y);
2119 if (TREE_CODE (expry) == COMPONENT_REF)
2120 {
2121 if (TREE_CODE (exprx) == VAR_DECL
2122 && POINTER_TYPE_P (TREE_TYPE (exprx)))
2123 {
2124 tree field = TREE_OPERAND (expry, 1);
2125 tree fieldcontext = DECL_FIELD_CONTEXT (field);
2126 if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
2127 TREE_TYPE (field)))
2128 return 1;
2129 }
2130 {
2131 tree t = decl_for_component_ref (expry);
2132 if (! t)
2133 return 0;
2134 moffsety = adjust_offset_for_component_ref (expry, moffsety);
2135 expry = t;
2136 }
2137 }
2138 else if (INDIRECT_REF_P (expry))
2139 {
2140 expry = TREE_OPERAND (expry, 0);
2141 if (flag_argument_noalias < 2
2142 || TREE_CODE (expry) != PARM_DECL)
2143 return 0;
2144 }
2145
2146 if (! DECL_P (exprx) || ! DECL_P (expry))
2147 return 0;
2148
2149 rtlx = DECL_RTL (exprx);
2150 rtly = DECL_RTL (expry);
2151
2152 /* If either RTL is not a MEM, it must be a REG or CONCAT, in which case
2153 they can't overlap unless they are the same, because we never reuse the
2154 part of the stack frame used for locals for spilled pseudos. */
2155 if ((!MEM_P (rtlx) || !MEM_P (rtly))
2156 && ! rtx_equal_p (rtlx, rtly))
2157 return 1;
2158
2159 /* Get the base and offsets of both decls. If either is a register, we
2160 know both are and are the same, so use that as the base. The only
2161 way we can avoid overlap is if we can deduce that they are
2162 nonoverlapping pieces of that decl, which is very rare. */
2163 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2164 if (GET_CODE (basex) == PLUS && GET_CODE (XEXP (basex, 1)) == CONST_INT)
2165 offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2166
2167 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2168 if (GET_CODE (basey) == PLUS && GET_CODE (XEXP (basey, 1)) == CONST_INT)
2169 offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2170
2171 /* If the bases are different, we know they do not overlap if both
2172 are constants or if one is a constant and the other a pointer into the
2173 stack frame. Otherwise a different base means we can't tell if they
2174 overlap or not. */
2175 if (! rtx_equal_p (basex, basey))
2176 return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2177 || (CONSTANT_P (basex) && REG_P (basey)
2178 && REGNO_PTR_FRAME_P (REGNO (basey)))
2179 || (CONSTANT_P (basey) && REG_P (basex)
2180 && REGNO_PTR_FRAME_P (REGNO (basex))));
2181
2182 sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2183 : MEM_SIZE (rtlx) ? INTVAL (MEM_SIZE (rtlx))
2184 : -1);
2185 sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2186 : MEM_SIZE (rtly) ? INTVAL (MEM_SIZE (rtly))
2187 : -1);
2188
2189 /* If we have an offset for either memref, use it to adjust the values
2190 computed above. */
2191 if (moffsetx)
2192 offsetx += INTVAL (moffsetx), sizex -= INTVAL (moffsetx);
2193 if (moffsety)
2194 offsety += INTVAL (moffsety), sizey -= INTVAL (moffsety);
2195
2196 /* If a memref has both a size and an offset, we can use the smaller size.
2197 We can't do this if the offset isn't known because we must view this
2198 memref as being anywhere inside the DECL's MEM. */
2199 if (MEM_SIZE (x) && moffsetx)
2200 sizex = INTVAL (MEM_SIZE (x));
2201 if (MEM_SIZE (y) && moffsety)
2202 sizey = INTVAL (MEM_SIZE (y));
2203
2204 /* Put the values of the memref with the lower offset in X's values. */
2205 if (offsetx > offsety)
2206 {
2207 tem = offsetx, offsetx = offsety, offsety = tem;
2208 tem = sizex, sizex = sizey, sizey = tem;
2209 }
2210
2211 /* If we don't know the size of the lower-offset value, we can't tell
2212 if they conflict. Otherwise, we do the test. */
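/* E.g. with offsetx == 0, sizex == 4 and offsety == 4 we have
   4 >= 0 + 4, so the two references are provably disjoint.  */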
2213 return sizex >= 0 && offsety >= offsetx + sizex;
2214 }
2215
2216 /* True dependence: X is read after store in MEM takes place. */
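
/* A typical use (a sketch, not drawn from a specific caller): to decide
   whether a load X may be moved above a store MEM, a pass could test

     if (true_dependence (mem, GET_MODE (mem), x, rtx_addr_varies_p))
       ... the load might read what the store wrote: keep the order ...

   where rtx_addr_varies_p serves as the VARIES callback, as it does in
   write_dependence_p below.  */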
2217
2218 int
2219 true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x,
2220 bool (*varies) (const_rtx, bool))
2221 {
2222 rtx x_addr, mem_addr;
2223 rtx base;
2224
2225 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2226 return 1;
2227
2228 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2229 This is used in epilogue deallocation functions, and in cselib. */
2230 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2231 return 1;
2232 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2233 return 1;
2234 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2235 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2236 return 1;
2237
2238 if (DIFFERENT_ALIAS_SETS_P (x, mem))
2239 return 0;
2240
2241 /* Read-only memory is by definition never modified, and therefore can't
2242 conflict with anything. We don't expect to find read-only set on MEM,
2243 but stupid user tricks can produce them, so don't die. */
2244 if (MEM_READONLY_P (x))
2245 return 0;
2246
2247 if (nonoverlapping_memrefs_p (mem, x))
2248 return 0;
2249
2250 if (mem_mode == VOIDmode)
2251 mem_mode = GET_MODE (mem);
2252
2253 x_addr = get_addr (XEXP (x, 0));
2254 mem_addr = get_addr (XEXP (mem, 0));
2255
2256 base = find_base_term (x_addr);
2257 if (base && (GET_CODE (base) == LABEL_REF
2258 || (GET_CODE (base) == SYMBOL_REF
2259 && CONSTANT_POOL_ADDRESS_P (base))))
2260 return 0;
2261
2262 if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
2263 return 0;
2264
2265 x_addr = canon_rtx (x_addr);
2266 mem_addr = canon_rtx (mem_addr);
2267
2268 if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2269 SIZE_FOR_MODE (x), x_addr, 0))
2270 return 0;
2271
2272 if (aliases_everything_p (x))
2273 return 1;
2274
2275 /* We cannot use aliases_everything_p to test MEM, since we must look
2276 at MEM_MODE, rather than GET_MODE (MEM). */
2277 if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
2278 return 1;
2279
2280 /* In true_dependence we also allow BLKmode to alias anything. Why
2281 don't we do this in anti_dependence and output_dependence? */
2282 if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
2283 return 1;
2284
2285 return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
2286 varies);
2287 }
2288
2289 /* Canonical true dependence: X is read after store in MEM takes place.
2290 Variant of true_dependence which assumes MEM has already been
2291 canonicalized (hence we no longer do that here).
2292 The mem_addr argument has been added, since true_dependence computed
2293 this value prior to canonicalizing.
2294 If x_addr is non-NULL, it is used in preference to XEXP (x, 0). */
2295
2296 int
2297 canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2298 const_rtx x, rtx x_addr, bool (*varies) (const_rtx, bool))
2299 {
2300 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2301 return 1;
2302
2303 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2304 This is used in epilogue deallocation functions. */
2305 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2306 return 1;
2307 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2308 return 1;
2309 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2310 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2311 return 1;
2312
2313 if (DIFFERENT_ALIAS_SETS_P (x, mem))
2314 return 0;
2315
2316 /* Read-only memory is by definition never modified, and therefore can't
2317 conflict with anything. We don't expect to find read-only set on MEM,
2318 but stupid user tricks can produce them, so don't die. */
2319 if (MEM_READONLY_P (x))
2320 return 0;
2321
2322 if (nonoverlapping_memrefs_p (x, mem))
2323 return 0;
2324
2325 if (! x_addr)
2326 x_addr = get_addr (XEXP (x, 0));
2327
2328 if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
2329 return 0;
2330
2331 x_addr = canon_rtx (x_addr);
2332 if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2333 SIZE_FOR_MODE (x), x_addr, 0))
2334 return 0;
2335
2336 if (aliases_everything_p (x))
2337 return 1;
2338
2339 /* We cannot use aliases_everything_p to test MEM, since we must look
2340 at MEM_MODE, rather than GET_MODE (MEM). */
2341 if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
2342 return 1;
2343
2344 /* In true_dependence we also allow BLKmode to alias anything. Why
2345 don't we do this in anti_dependence and output_dependence? */
2346 if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
2347 return 1;
2348
2349 return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
2350 varies);
2351 }
2352
2353 /* Returns nonzero if a write to X might alias a previous read from
2354 (or, if WRITEP is nonzero, a write to) MEM. */
2355
2356 static int
2357 write_dependence_p (const_rtx mem, const_rtx x, int writep)
2358 {
2359 rtx x_addr, mem_addr;
2360 const_rtx fixed_scalar;
2361 rtx base;
2362
2363 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2364 return 1;
2365
2366 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2367 This is used in epilogue deallocation functions. */
2368 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2369 return 1;
2370 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2371 return 1;
2372 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2373 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2374 return 1;
2375
2376 /* A read from read-only memory can't conflict with read-write memory. */
2377 if (!writep && MEM_READONLY_P (mem))
2378 return 0;
2379
2380 if (nonoverlapping_memrefs_p (x, mem))
2381 return 0;
2382
2383 x_addr = get_addr (XEXP (x, 0));
2384 mem_addr = get_addr (XEXP (mem, 0));
2385
2386 if (! writep)
2387 {
2388 base = find_base_term (mem_addr);
2389 if (base && (GET_CODE (base) == LABEL_REF
2390 || (GET_CODE (base) == SYMBOL_REF
2391 && CONSTANT_POOL_ADDRESS_P (base))))
2392 return 0;
2393 }
2394
2395 if (! base_alias_check (x_addr, mem_addr, GET_MODE (x),
2396 GET_MODE (mem)))
2397 return 0;
2398
2399 x_addr = canon_rtx (x_addr);
2400 mem_addr = canon_rtx (mem_addr);
2401
2402 if (!memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2403 SIZE_FOR_MODE (x), x_addr, 0))
2404 return 0;
2405
2406 fixed_scalar
2407 = fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
2408 rtx_addr_varies_p);
2409
2410 return (!(fixed_scalar == mem && !aliases_everything_p (x))
2411 && !(fixed_scalar == x && !aliases_everything_p (mem)));
2412 }
2413
2414 /* Anti dependence: X is written after read in MEM takes place. */
2415
2416 int
2417 anti_dependence (const_rtx mem, const_rtx x)
2418 {
2419 return write_dependence_p (mem, x, /*writep=*/0);
2420 }
2421
2422 /* Output dependence: X is written after store in MEM takes place. */
2423
2424 int
2425 output_dependence (const_rtx mem, const_rtx x)
2426 {
2427 return write_dependence_p (mem, x, /*writep=*/1);
2428 }
2429 \f
2430
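/* Set up the static, target-dependent part of the base value tables:
   record an ADDRESS base value for every hard register that may hold a
   pointer, i.e. the stack, frame and argument pointers and any register
   in which a Pmode incoming argument can arrive.  */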
2431 void
2432 init_alias_target (void)
2433 {
2434 int i;
2435
2436 memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2437
2438 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2439 /* Check whether this register can hold an incoming pointer
2440 argument. FUNCTION_ARG_REGNO_P tests outgoing register
2441 numbers, so translate if necessary due to register windows. */
2442 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2443 && HARD_REGNO_MODE_OK (i, Pmode))
2444 static_reg_base_value[i]
2445 = gen_rtx_ADDRESS (VOIDmode, gen_rtx_REG (Pmode, i));
2446
2447 static_reg_base_value[STACK_POINTER_REGNUM]
2448 = gen_rtx_ADDRESS (Pmode, stack_pointer_rtx);
2449 static_reg_base_value[ARG_POINTER_REGNUM]
2450 = gen_rtx_ADDRESS (Pmode, arg_pointer_rtx);
2451 static_reg_base_value[FRAME_POINTER_REGNUM]
2452 = gen_rtx_ADDRESS (Pmode, frame_pointer_rtx);
2453 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2454 static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2455 = gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx);
2456 #endif
2457 }
2458
2459 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
2460 to be a memory reference). */
2461 static bool memory_modified;
2462 static void
2463 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
2464 {
2465 if (MEM_P (x))
2466 {
2467 if (anti_dependence (x, (const_rtx) data) || output_dependence (x, (const_rtx) data))
2468 memory_modified = true;
2469 }
2470 }
2471
2472
2473 /* Return true when INSN possibly modifies the memory contents of MEM
2474 (i.e. the address can be modified). */
2475 bool
2476 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
2477 {
2478 if (!INSN_P (insn))
2479 return false;
2480 memory_modified = false;
2481 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX (mem));
2482 return memory_modified;
2483 }
2484
2485 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
2486 array. */
2487
2488 void
2489 init_alias_analysis (void)
2490 {
2491 unsigned int maxreg = max_reg_num ();
2492 int changed, pass;
2493 int i;
2494 unsigned int ui;
2495 rtx insn;
2496
2497 timevar_push (TV_ALIAS_ANALYSIS);
2498
2499 reg_known_value_size = maxreg - FIRST_PSEUDO_REGISTER;
2500 reg_known_value = GGC_CNEWVEC (rtx, reg_known_value_size);
2501 reg_known_equiv_p = XCNEWVEC (bool, reg_known_value_size);
2502
2503 /* If we have memory allocated from the previous run, use it. */
2504 if (old_reg_base_value)
2505 reg_base_value = old_reg_base_value;
2506
2507 if (reg_base_value)
2508 VEC_truncate (rtx, reg_base_value, 0);
2509
2510 VEC_safe_grow_cleared (rtx, gc, reg_base_value, maxreg);
2511
2512 new_reg_base_value = XNEWVEC (rtx, maxreg);
2513 reg_seen = XNEWVEC (char, maxreg);
2514
2515 /* The basic idea is that each pass through this loop will use the
2516 "constant" information from the previous pass to propagate alias
2517 information through another level of assignments.
2518
2519 This could get expensive if the assignment chains are long. Maybe
2520 we should throttle the number of iterations, possibly based on
2521 the optimization level or flag_expensive_optimizations.
2522
2523 We could propagate more information in the first pass by making use
2524 of DF_REG_DEF_COUNT to determine immediately that the alias information
2525 for a pseudo is "constant".
2526
2527 A program with an uninitialized variable can cause an infinite loop
2528 here. Instead of doing a full dataflow analysis to detect such problems
2529 we just cap the number of iterations for the loop.
2530
2531 The state of the arrays for the set chain in question does not matter
2532 since the program has undefined behavior. */
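
/* For example (an illustrative chain): if one insn sets r100 to `&x' and
   a later insn sets r101 to `r100 + 4', the first pass records a base
   value only for r100; the second pass, now knowing r100's base,
   propagates it to r101.  The loop stops once a pass changes nothing,
   or when MAX_ALIAS_LOOP_PASSES is reached.  */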
2533
2534 pass = 0;
2535 do
2536 {
2537 /* Assume nothing will change this iteration of the loop. */
2538 changed = 0;
2539
2540 /* We want to assign the same IDs on each iteration of this loop,
2541 so start counting from zero each time. */
2542 unique_id = 0;
2543
2544 /* We're at the start of the function each iteration through the
2545 loop, so we're copying arguments. */
2546 copying_arguments = true;
2547
2548 /* Wipe the potential alias information clean for this pass. */
2549 memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2550
2551 /* Wipe the reg_seen array clean. */
2552 memset (reg_seen, 0, maxreg);
2553
2554 /* Mark all hard registers which may contain an address.
2555 The stack, frame and argument pointers may contain an address.
2556 An argument register which can hold a Pmode value may contain
2557 an address even if it is not in BASE_REGS.
2558
2559 The address expression is VOIDmode for an argument and
2560 Pmode for other registers. */
2561
2562 memcpy (new_reg_base_value, static_reg_base_value,
2563 FIRST_PSEUDO_REGISTER * sizeof (rtx));
2564
2565 /* Walk the insns adding values to the new_reg_base_value array. */
2566 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2567 {
2568 if (INSN_P (insn))
2569 {
2570 rtx note, set;
2571
2572 #if defined (HAVE_prologue) || defined (HAVE_epilogue)
2573 /* The prologue/epilogue insns are not threaded onto the
2574 insn chain until after reload has completed. Thus,
2575 there is no sense wasting time checking if INSN is in
2576 the prologue/epilogue until after reload has completed. */
2577 if (reload_completed
2578 && prologue_epilogue_contains (insn))
2579 continue;
2580 #endif
2581
2582 /* If this insn has a noalias note, process it. Otherwise,
2583 scan for sets. A simple set will have no side effects
2584 which could change the base value of any other register. */
2585
2586 if (GET_CODE (PATTERN (insn)) == SET
2587 && REG_NOTES (insn) != 0
2588 && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
2589 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
2590 else
2591 note_stores (PATTERN (insn), record_set, NULL);
2592
2593 set = single_set (insn);
2594
2595 if (set != 0
2596 && REG_P (SET_DEST (set))
2597 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
2598 {
2599 unsigned int regno = REGNO (SET_DEST (set));
2600 rtx src = SET_SRC (set);
2601 rtx t;
2602
2603 note = find_reg_equal_equiv_note (insn);
2604 if (note && REG_NOTE_KIND (note) == REG_EQUAL
2605 && DF_REG_DEF_COUNT (regno) != 1)
2606 note = NULL_RTX;
2607
2608 if (note != NULL_RTX
2609 && GET_CODE (XEXP (note, 0)) != EXPR_LIST
2610 && ! rtx_varies_p (XEXP (note, 0), 1)
2611 && ! reg_overlap_mentioned_p (SET_DEST (set),
2612 XEXP (note, 0)))
2613 {
2614 set_reg_known_value (regno, XEXP (note, 0));
2615 set_reg_known_equiv_p (regno,
2616 REG_NOTE_KIND (note) == REG_EQUIV);
2617 }
2618 else if (DF_REG_DEF_COUNT (regno) == 1
2619 && GET_CODE (src) == PLUS
2620 && REG_P (XEXP (src, 0))
2621 && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
2622 && GET_CODE (XEXP (src, 1)) == CONST_INT)
2623 {
2624 t = plus_constant (t, INTVAL (XEXP (src, 1)));
2625 set_reg_known_value (regno, t);
2626 set_reg_known_equiv_p (regno, 0);
2627 }
2628 else if (DF_REG_DEF_COUNT (regno) == 1
2629 && ! rtx_varies_p (src, 1))
2630 {
2631 set_reg_known_value (regno, src);
2632 set_reg_known_equiv_p (regno, 0);
2633 }
2634 }
2635 }
2636 else if (NOTE_P (insn)
2637 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
2638 copying_arguments = false;
2639 }
2640
2641 /* Now propagate values from new_reg_base_value to reg_base_value. */
2642 gcc_assert (maxreg == (unsigned int) max_reg_num ());
2643
2644 for (ui = 0; ui < maxreg; ui++)
2645 {
2646 if (new_reg_base_value[ui]
2647 && new_reg_base_value[ui] != VEC_index (rtx, reg_base_value, ui)
2648 && ! rtx_equal_p (new_reg_base_value[ui],
2649 VEC_index (rtx, reg_base_value, ui)))
2650 {
2651 VEC_replace (rtx, reg_base_value, ui, new_reg_base_value[ui]);
2652 changed = 1;
2653 }
2654 }
2655 }
2656 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
2657
2658 /* Fill in the remaining entries. */
2659 for (i = 0; i < (int)reg_known_value_size; i++)
2660 if (reg_known_value[i] == 0)
2661 reg_known_value[i] = regno_reg_rtx[i + FIRST_PSEUDO_REGISTER];
2662
2663 /* Clean up. */
2664 free (new_reg_base_value);
2665 new_reg_base_value = 0;
2666 free (reg_seen);
2667 reg_seen = 0;
2668 timevar_pop (TV_ALIAS_ANALYSIS);
2669 }
2670
2671 void
2672 end_alias_analysis (void)
2673 {
2674 old_reg_base_value = reg_base_value;
2675 ggc_free (reg_known_value);
2676 reg_known_value = 0;
2677 reg_known_value_size = 0;
2678 free (reg_known_equiv_p);
2679 reg_known_equiv_p = 0;
2680 }
2681
2682 #include "gt-alias.h"