[gcc.git] / gcc / alias.c
1 /* Alias analysis for GNU C
2 Copyright (C) 1997-2015 Free Software Foundation, Inc.
3 Contributed by John Carr (jfc@mit.edu).
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "df.h"
30 #include "tm_p.h"
31 #include "gimple-ssa.h"
32 #include "emit-rtl.h"
33 #include "alias.h"
34 #include "fold-const.h"
35 #include "varasm.h"
36 #include "cselib.h"
37 #include "langhooks.h"
38 #include "cfganal.h"
39 #include "rtl-iter.h"
40
41 /* The aliasing API provided here solves related but different problems:
42
43 Say there exists (in C)
44
45 struct X {
46 struct Y y1;
47 struct Z z2;
48 } x1, *px1, *px2;
49
50 struct Y y2, *py;
51 struct Z z2, *pz;
52
53
54 py = &x1.y1;
55 px2 = &x1;
56
57 Consider the four questions:
58
59 Can a store to x1 interfere with px2->y1?
60 Can a store to x1 interfere with px2->z2?
61 Can a store to x1 change the value pointed to by py?
62 Can a store to x1 change the value pointed to by pz?
63
64 The answer to these questions can be yes, yes, yes, and maybe.
65
66 The first two questions can be answered with a simple examination
67 of the type system. If structure X contains a field of type Y then
68 a store through a pointer to an X can overwrite any field that is
69 contained (recursively) in an X (unless we know that px1 != px2).
70
71 The last two questions can be solved in the same way as the first
72 two questions but this is too conservative. The observation is
73 that in some cases we can know which (if any) fields are addressed
74 and if those addresses are used in bad ways. This analysis may be
75 language specific. In C, arbitrary operations may be applied to
76 pointers. However, there is some indication that this may be too
77 conservative for some C++ types.
78
79 The pass ipa-type-escape does this analysis for the types whose
80 instances do not escape across the compilation boundary.
81
82 Historically in GCC, these two problems were combined and a single
83 data structure was used to represent the solution to these
84 problems. We now have two similar but different data structures.
85 The data structure to solve the last two questions is similar to
86 the first, but does not contain the fields whose addresses are never
87 taken. For types that do escape the compilation unit, the data
88 structures will have identical information.
89 */
90
91 /* The alias sets assigned to MEMs assist the back-end in determining
92 which MEMs can alias which other MEMs. In general, two MEMs in
93 different alias sets cannot alias each other, with one important
94 exception. Consider something like:
95
96 struct S { int i; double d; };
97
98 a store to an `S' can alias something of either type `int' or type
99 `double'. (However, a store to an `int' cannot alias a `double'
100 and vice versa.) We indicate this via a tree structure that looks
101 like:
102 struct S
103 / \
104 / \
105 |/_ _\|
106 int double
107
108 (The arrows are directed and point downwards.)
109 In this situation we say the alias set for `struct S' is the
110 `superset' and that those for `int' and `double' are `subsets'.
111
112 To see whether two alias sets can point to the same memory, we must
113 see if either alias set is a subset of the other. We need not trace
114 past immediate descendants, however, since we propagate all
115 grandchildren up one level.
116
117 Alias set zero is implicitly a superset of all other alias sets.
118 However, there is no actual entry for alias set zero. It is an
119 error to attempt to explicitly construct a subset of zero. */
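
/* As an illustrative sketch of the subset relation just described (a
   hypothetical user-level example): with strict aliasing, a store
   through an `int *' may modify the `i' member of an `S', while a store
   through a `double *' cannot alias an `int' object, because `int' and
   `double' are in disjoint alias sets even though both are subsets of
   the alias set of `struct S'.

     struct S { int i; double d; };

     int
     f (struct S *s, int *pi, double *pd)
     {
       s->i = 1;
       *pi = 2;     // may alias s->i; s->i must be reloaded below
       *pd = 3.0;   // cannot alias s->i under strict aliasing
       return s->i;
     }
*/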
120
121 struct alias_set_hash : int_hash <int, INT_MIN, INT_MIN + 1> {};
122
123 struct GTY(()) alias_set_entry {
124 /* The alias set number, as stored in MEM_ALIAS_SET. */
125 alias_set_type alias_set;
126
127 /* The children of the alias set. These are not just the immediate
128 children, but, in fact, all descendants. So, if we have:
129
130 struct T { struct S s; float f; }
131
132 continuing our example above, the children here will be all of
133 `int', `double', `float', and `struct S'. */
134 hash_map<alias_set_hash, int> *children;
135
136 /* Nonzero if the set would have a child of zero: this effectively makes this
137 alias set the same as alias set zero. */
138 bool has_zero_child;
139 /* Nonzero if the alias set corresponds to a pointer type itself (i.e. not to
140 an aggregate containing a pointer).
141 This is used for a special case where we need a universal pointer type
142 compatible with all other pointer types. */
143 bool is_pointer;
144 /* Nonzero if is_pointer is set or if one of the children has has_pointer set. */
145 bool has_pointer;
146 };
147
148 static int rtx_equal_for_memref_p (const_rtx, const_rtx);
149 static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
150 static void record_set (rtx, const_rtx, void *);
151 static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode,
152 machine_mode);
153 static rtx find_base_value (rtx);
154 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
155 static alias_set_entry *get_alias_set_entry (alias_set_type);
156 static tree decl_for_component_ref (tree);
157 static int write_dependence_p (const_rtx,
158 const_rtx, machine_mode, rtx,
159 bool, bool, bool);
160
161 static void memory_modified_1 (rtx, const_rtx, void *);
162
163 /* Query statistics for the different low-level disambiguators.
164 A high-level query may trigger multiple of them. */
165
166 static struct {
167 unsigned long long num_alias_zero;
168 unsigned long long num_same_alias_set;
169 unsigned long long num_same_objects;
170 unsigned long long num_volatile;
171 unsigned long long num_dag;
172 unsigned long long num_universal;
173 unsigned long long num_disambiguated;
174 } alias_stats;
175
176
177 /* Set up all info needed to perform alias analysis on memory references. */
178
179 /* Returns the size in bytes of the mode of X. */
180 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
181
182 /* Cap the number of passes we make over the insns propagating alias
183 information through set chains.
184 ??? 10 is a completely arbitrary choice. This should be based on the
185 maximum loop depth in the CFG, but we do not have this information
186 available (even if current_loops _is_ available). */
187 #define MAX_ALIAS_LOOP_PASSES 10
188
189 /* reg_base_value[N] gives an address to which register N is related.
190 If all sets after the first add or subtract to the current value
191 or otherwise modify it so it does not point to a different top level
192 object, reg_base_value[N] is equal to the address part of the source
193 of the first set.
194
195 A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF. ADDRESS
196 expressions represent three types of base:
197
198 1. incoming arguments. There is just one ADDRESS to represent all
199 arguments, since we do not know at this level whether accesses
200 based on different arguments can alias. The ADDRESS has id 0.
201
202 2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
203 (if distinct from frame_pointer_rtx) and arg_pointer_rtx.
204 Each of these rtxes has a separate ADDRESS associated with it,
205 each with a negative id.
206
207 GCC is (and is required to be) precise in which register it
208 chooses to access a particular region of stack. We can therefore
209 assume that accesses based on one of these rtxes do not alias
210 accesses based on another of these rtxes.
211
212 3. bases that are derived from malloc()ed memory (REG_NOALIAS).
213 Each such piece of memory has a separate ADDRESS associated
214 with it, each with an id greater than 0.
215
216 Accesses based on one ADDRESS do not alias accesses based on other
217 ADDRESSes. Accesses based on ADDRESSes in groups (2) and (3) do not
218 alias globals either; the ADDRESSes have Pmode to indicate this.
219 The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
220 indicate this. */
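
/* A rough illustration of how base values propagate (a hypothetical
   sketch; the RTL below is schematic): if pseudo 101 is first set from
   a SYMBOL_REF, a pseudo derived from it by pointer arithmetic shares
   the same base:

     (set (reg 101) (symbol_ref "x"))                ; base of 101: (symbol_ref "x")
     (set (reg 102) (plus (reg 101) (const_int 8)))  ; base of 102: (symbol_ref "x")

   whereas a pseudo holding the result of malloc (marked with a
   REG_NOALIAS note) gets its own unique ADDRESS base, so accesses based
   on it are known not to alias accesses based on "x".  */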
221
222 static GTY(()) vec<rtx, va_gc> *reg_base_value;
223 static rtx *new_reg_base_value;
224
225 /* The single VOIDmode ADDRESS that represents all argument bases.
226 It has id 0. */
227 static GTY(()) rtx arg_base_value;
228
229 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS. */
230 static int unique_id;
231
232 /* We preserve a copy of the old array around to reduce the amount of garbage
233 produced. About 8% of the garbage produced was attributed to this
234 array. */
235 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;
236
237 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
238 registers. */
239 #define UNIQUE_BASE_VALUE_SP -1
240 #define UNIQUE_BASE_VALUE_ARGP -2
241 #define UNIQUE_BASE_VALUE_FP -3
242 #define UNIQUE_BASE_VALUE_HFP -4
243
244 #define static_reg_base_value \
245 (this_target_rtl->x_static_reg_base_value)
246
247 #define REG_BASE_VALUE(X) \
248 (REGNO (X) < vec_safe_length (reg_base_value) \
249 ? (*reg_base_value)[REGNO (X)] : 0)
250
251 /* Vector indexed by N giving the initial (unchanging) value known for
252 pseudo-register N. This vector is initialized in init_alias_analysis,
253 and does not change until end_alias_analysis is called. */
254 static GTY(()) vec<rtx, va_gc> *reg_known_value;
255
256 /* Vector recording for each reg_known_value whether it is due to a
257 REG_EQUIV note. Future passes (viz., reload) may replace the
258 pseudo with the equivalent expression and so we account for the
259 dependences that would be introduced if that happens.
260
261 The REG_EQUIV notes created in assign_parms may mention the arg
262 pointer, and there are explicit insns in the RTL that modify the
263 arg pointer. Thus we must ensure that such insns don't get
264 scheduled across each other because that would invalidate the
265 REG_EQUIV notes. One could argue that the REG_EQUIV notes are
266 wrong, but solving the problem in the scheduler will likely give
267 better code, so we do it here. */
268 static sbitmap reg_known_equiv_p;
269
270 /* True when scanning insns from the start of the rtl to the
271 NOTE_INSN_FUNCTION_BEG note. */
272 static bool copying_arguments;
273
274
275 /* The splay-tree used to store the various alias set entries. */
276 static GTY (()) vec<alias_set_entry *, va_gc> *alias_sets;
277 \f
278 /* Build a decomposed reference object for querying the alias-oracle
279 from the MEM rtx and store it in *REF.
280 Returns false if MEM is not suitable for the alias-oracle. */
281
282 static bool
283 ao_ref_from_mem (ao_ref *ref, const_rtx mem)
284 {
285 tree expr = MEM_EXPR (mem);
286 tree base;
287
288 if (!expr)
289 return false;
290
291 ao_ref_init (ref, expr);
292
293 /* Get the base of the reference and see if we have to reject or
294 adjust it. */
295 base = ao_ref_base (ref);
296 if (base == NULL_TREE)
297 return false;
298
299 /* The tree oracle doesn't like bases that are neither decls
300 nor indirect references of SSA names. */
301 if (!(DECL_P (base)
302 || (TREE_CODE (base) == MEM_REF
303 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
304 || (TREE_CODE (base) == TARGET_MEM_REF
305 && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
306 return false;
307
308 /* If this is a reference based on a partitioned decl replace the
309 base with a MEM_REF of the pointer representative we
310 created during stack slot partitioning. */
311 if (TREE_CODE (base) == VAR_DECL
312 && ! is_global_var (base)
313 && cfun->gimple_df->decls_to_pointers != NULL)
314 {
315 tree *namep = cfun->gimple_df->decls_to_pointers->get (base);
316 if (namep)
317 ref->base = build_simple_mem_ref (*namep);
318 }
319
320 ref->ref_alias_set = MEM_ALIAS_SET (mem);
321
322 /* If MEM_OFFSET or MEM_SIZE are unknown what we got from MEM_EXPR
323 is conservative, so trust it. */
324 if (!MEM_OFFSET_KNOWN_P (mem)
325 || !MEM_SIZE_KNOWN_P (mem))
326 return true;
327
328 /* If MEM_OFFSET/MEM_SIZE get us outside of ref->offset/ref->max_size
329 drop ref->ref. */
330 if (MEM_OFFSET (mem) < 0
331 || (ref->max_size != -1
332 && ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT
333 > ref->max_size)))
334 ref->ref = NULL_TREE;
335
336 /* Refine size and offset we got from analyzing MEM_EXPR by using
337 MEM_SIZE and MEM_OFFSET. */
338
339 ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
340 ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;
341
342 /* The MEM may extend into adjacent fields, so adjust max_size if
343 necessary. */
344 if (ref->max_size != -1
345 && ref->size > ref->max_size)
346 ref->max_size = ref->size;
347
348 /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
349 the MEM_EXPR, punt. This happens a lot for STRICT_ALIGNMENT targets. */
350 if (MEM_EXPR (mem) != get_spill_slot_decl (false)
351 && (ref->offset < 0
352 || (DECL_P (ref->base)
353 && (DECL_SIZE (ref->base) == NULL_TREE
354 || TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST
355 || wi::ltu_p (wi::to_offset (DECL_SIZE (ref->base)),
356 ref->offset + ref->size)))))
357 return false;
358
359 return true;
360 }
361
362 /* Query the alias-oracle on whether the two memory rtx X and MEM may
363 alias. If TBAA_P is set also apply TBAA. Returns true if the
364 two rtxen may alias, false otherwise. */
365
366 static bool
367 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
368 {
369 ao_ref ref1, ref2;
370
371 if (!ao_ref_from_mem (&ref1, x)
372 || !ao_ref_from_mem (&ref2, mem))
373 return true;
374
375 return refs_may_alias_p_1 (&ref1, &ref2,
376 tbaa_p
377 && MEM_ALIAS_SET (x) != 0
378 && MEM_ALIAS_SET (mem) != 0);
379 }
380
381 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
382 such an entry, or NULL otherwise. */
383
384 static inline alias_set_entry *
385 get_alias_set_entry (alias_set_type alias_set)
386 {
387 return (*alias_sets)[alias_set];
388 }
389
390 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
391 the two MEMs cannot alias each other. */
392
393 static inline int
394 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
395 {
396 return (flag_strict_aliasing
397 && ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1),
398 MEM_ALIAS_SET (mem2)));
399 }
400
401 /* Return true if the first alias set is a subset of the second. */
402
403 bool
404 alias_set_subset_of (alias_set_type set1, alias_set_type set2)
405 {
406 alias_set_entry *ase2;
407
408 /* Disable TBAA oracle with !flag_strict_aliasing. */
409 if (!flag_strict_aliasing)
410 return true;
411
412 /* Everything is a subset of the "aliases everything" set. */
413 if (set2 == 0)
414 return true;
415
416 /* Check if set1 is a subset of set2. */
417 ase2 = get_alias_set_entry (set2);
418 if (ase2 != 0
419 && (ase2->has_zero_child
420 || (ase2->children && ase2->children->get (set1))))
421 return true;
422
423 /* As a special case we consider the alias set of "void *" to be both a subset
424 and a superset of every alias set of a pointer. This extra symmetry does
425 not matter for alias_sets_conflict_p but it makes aliasing_component_refs_p
426 return true on the following testcase:
427
428 void *ptr;
429 char **ptr2=(char **)&ptr;
430 *ptr2 = ...
431
432 Additionally, if a set contains a universal pointer, we consider every pointer
433 to be a subset of it, but we do not represent this explicitly - doing so
434 would require us to update the transitive closure each time we introduce a
435 new pointer type. This makes aliasing_component_refs_p return true
436 on the following testcase:
437
438 struct a {void *ptr;}
439 char **ptr = (char **)&a.ptr;
440 ptr = ...
441
442 This makes void * a truly universal pointer type. See pointer handling in
443 get_alias_set for more details. */
444 if (ase2 && ase2->has_pointer)
445 {
446 alias_set_entry *ase1 = get_alias_set_entry (set1);
447
448 if (ase1 && ase1->is_pointer)
449 {
450 alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node);
451 /* If one is ptr_type_node and the other is a pointer, we consider
452 them subsets of each other. */
453 if (set1 == voidptr_set || set2 == voidptr_set)
454 return true;
455 /* If SET2 contains the universal pointer's alias set, then we consider
456 every (non-universal) pointer to be a subset of SET2. */
457 if (ase2->children && set1 != voidptr_set
458 && ase2->children->get (voidptr_set))
459 return true;
460 }
461 }
462 return false;
463 }
464
465 /* Return 1 if the two specified alias sets may conflict. */
466
467 int
468 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
469 {
470 alias_set_entry *ase1;
471 alias_set_entry *ase2;
472
473 /* The easy case. */
474 if (alias_sets_must_conflict_p (set1, set2))
475 return 1;
476
477 /* See if the first alias set is a subset of the second. */
478 ase1 = get_alias_set_entry (set1);
479 if (ase1 != 0
480 && ase1->children && ase1->children->get (set2))
481 {
482 ++alias_stats.num_dag;
483 return 1;
484 }
485
486 /* Now do the same, but with the alias sets reversed. */
487 ase2 = get_alias_set_entry (set2);
488 if (ase2 != 0
489 && ase2->children && ase2->children->get (set1))
490 {
491 ++alias_stats.num_dag;
492 return 1;
493 }
494
495 /* We want void * to be compatible with any other pointer without
496 really dropping it to alias set 0. Doing so would make it
497 compatible with all non-pointer types too.
498
499 This is not strictly necessary by the C/C++ language
500 standards, but avoids common type punning mistakes. In
501 addition to that, we need the existence of such a universal
502 pointer to implement Fortran's C_PTR type (which is defined as a
503 type compatible with all C pointers). */
504 if (ase1 && ase2 && ase1->has_pointer && ase2->has_pointer)
505 {
506 alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node);
507
508 /* If one of the sets corresponds to universal pointer,
509 we consider it to conflict with anything that is
510 or contains pointer. */
511 if (set1 == voidptr_set || set2 == voidptr_set)
512 {
513 ++alias_stats.num_universal;
514 return true;
515 }
516 /* If one of the sets is a (non-universal) pointer and the other
517 contains the universal pointer, we also get a conflict. */
518 if (ase1->is_pointer && set2 != voidptr_set
519 && ase2->children && ase2->children->get (voidptr_set))
520 {
521 ++alias_stats.num_universal;
522 return true;
523 }
524 if (ase2->is_pointer && set1 != voidptr_set
525 && ase1->children && ase1->children->get (voidptr_set))
526 {
527 ++alias_stats.num_universal;
528 return true;
529 }
530 }
531
532 ++alias_stats.num_disambiguated;
533
534 /* The two alias sets are distinct and neither one is the
535 child of the other. Therefore, they cannot conflict. */
536 return 0;
537 }
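
/* A user-level sketch of the universal-pointer rule above (a
   hypothetical example, mirroring the testcase in alias_set_subset_of):
   storage declared as `void *' is commonly accessed through a
   `char **' lvalue, and the special casing of the `void *' alias set
   keeps such code working without dropping all pointers to alias set 0.

     void *ptr;

     void
     store_through_char_pp (char **p)
     {
       *p = 0;    // conflicts with `ptr', since void * is the universal pointer
     }

     void
     g (void)
     {
       store_through_char_pp ((char **) &ptr);
     }
*/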
538
539 /* Return 1 if the two specified alias sets will always conflict. */
540
541 int
542 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
543 {
544 /* Disable TBAA oracle with !flag_strict_aliasing. */
545 if (!flag_strict_aliasing)
546 return 1;
547 if (set1 == 0 || set2 == 0)
548 {
549 ++alias_stats.num_alias_zero;
550 return 1;
551 }
552 if (set1 == set2)
553 {
554 ++alias_stats.num_same_alias_set;
555 return 1;
556 }
557
558 return 0;
559 }
560
561 /* Return 1 if any MEM object of type T1 will always conflict (using the
562 dependency routines in this file) with any MEM object of type T2.
563 This is used when allocating temporary storage. If T1 and/or T2 are
564 NULL_TREE, it means we know nothing about the storage. */
565
566 int
567 objects_must_conflict_p (tree t1, tree t2)
568 {
569 alias_set_type set1, set2;
570
571 /* If neither has a type specified, we don't know if they'll conflict
572 because we may be using them to store objects of various types, for
573 example the argument and local variables areas of inlined functions. */
574 if (t1 == 0 && t2 == 0)
575 return 0;
576
577 /* If they are the same type, they must conflict. */
578 if (t1 == t2)
579 {
580 ++alias_stats.num_same_objects;
581 return 1;
582 }
583 /* Likewise if both are volatile. */
584 if (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2))
585 {
586 ++alias_stats.num_volatile;
587 return 1;
588 }
589
590 set1 = t1 ? get_alias_set (t1) : 0;
591 set2 = t2 ? get_alias_set (t2) : 0;
592
593 /* We can't use alias_sets_conflict_p because we must make sure
594 that every subtype of t1 will conflict with every subtype of
595 t2 for which a pair of subobjects of these respective subtypes
596 overlaps on the stack. */
597 return alias_sets_must_conflict_p (set1, set2);
598 }
599 \f
600 /* Return the outermost parent of a component present in the chain of
601 component references handled by get_inner_reference in T with the
602 following property:
603 - the component is non-addressable, or
604 - the parent has alias set zero,
605 or NULL_TREE if no such parent exists. In the former cases, the alias
606 set of this parent is the alias set that must be used for T itself. */
607
608 tree
609 component_uses_parent_alias_set_from (const_tree t)
610 {
611 const_tree found = NULL_TREE;
612
613 while (handled_component_p (t))
614 {
615 switch (TREE_CODE (t))
616 {
617 case COMPONENT_REF:
618 if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
619 found = t;
620 break;
621
622 case ARRAY_REF:
623 case ARRAY_RANGE_REF:
624 if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
625 found = t;
626 break;
627
628 case REALPART_EXPR:
629 case IMAGPART_EXPR:
630 break;
631
632 case BIT_FIELD_REF:
633 case VIEW_CONVERT_EXPR:
634 /* Bitfields and casts are never addressable. */
635 found = t;
636 break;
637
638 default:
639 gcc_unreachable ();
640 }
641
642 if (get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) == 0)
643 found = t;
644
645 t = TREE_OPERAND (t, 0);
646 }
647
648 if (found)
649 return TREE_OPERAND (found, 0);
650
651 return NULL_TREE;
652 }
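
/* A brief illustration of the rule above (a hypothetical example): a
   bit-field can never have its address taken, so a reference to it has
   to be disambiguated using the alias set of the enclosing structure
   rather than an alias set of its own.

     struct B { int x : 3; int y : 5; };

     int
     read_x (struct B *b)
     {
       return b->x;   // uses the alias set of struct B, not of the bit-field
     }
*/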
653
654
655 /* Return whether the pointer-type T effective for aliasing may
656 access everything and thus the reference has to be assigned
657 alias-set zero. */
658
659 static bool
660 ref_all_alias_ptr_type_p (const_tree t)
661 {
662 return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
663 || TYPE_REF_CAN_ALIAS_ALL (t));
664 }
665
666 /* Return the alias set for the memory pointed to by T, which may be
667 either a type or an expression. Return -1 if there is nothing
668 special about dereferencing T. */
669
670 static alias_set_type
671 get_deref_alias_set_1 (tree t)
672 {
673 /* All we care about is the type. */
674 if (! TYPE_P (t))
675 t = TREE_TYPE (t);
676
677 /* If we have an INDIRECT_REF via a void pointer, we don't
678 know anything about what that might alias. Likewise if the
679 pointer is marked that way. */
680 if (ref_all_alias_ptr_type_p (t))
681 return 0;
682
683 return -1;
684 }
685
686 /* Return the alias set for the memory pointed to by T, which may be
687 either a type or an expression. */
688
689 alias_set_type
690 get_deref_alias_set (tree t)
691 {
692 /* If we're not doing any alias analysis, just assume everything
693 aliases everything else. */
694 if (!flag_strict_aliasing)
695 return 0;
696
697 alias_set_type set = get_deref_alias_set_1 (t);
698
699 /* Fall back to the alias-set of the pointed-to type. */
700 if (set == -1)
701 {
702 if (! TYPE_P (t))
703 t = TREE_TYPE (t);
704 set = get_alias_set (TREE_TYPE (t));
705 }
706
707 return set;
708 }
709
710 /* Return the pointer-type relevant for TBAA purposes from the
711 memory reference tree *T or NULL_TREE in which case *T is
712 adjusted to point to the outermost component reference that
713 can be used for assigning an alias set. */
714
715 static tree
716 reference_alias_ptr_type_1 (tree *t)
717 {
718 tree inner;
719
720 /* Get the base object of the reference. */
721 inner = *t;
722 while (handled_component_p (inner))
723 {
724 /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
725 the type of any component references that wrap it to
726 determine the alias-set. */
727 if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
728 *t = TREE_OPERAND (inner, 0);
729 inner = TREE_OPERAND (inner, 0);
730 }
731
732 /* Handle pointer dereferences here, they can override the
733 alias-set. */
734 if (INDIRECT_REF_P (inner)
735 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0))))
736 return TREE_TYPE (TREE_OPERAND (inner, 0));
737 else if (TREE_CODE (inner) == TARGET_MEM_REF)
738 return TREE_TYPE (TMR_OFFSET (inner));
739 else if (TREE_CODE (inner) == MEM_REF
740 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1))))
741 return TREE_TYPE (TREE_OPERAND (inner, 1));
742
743 /* If the innermost reference is a MEM_REF that has a
744 conversion embedded treat it like a VIEW_CONVERT_EXPR above,
745 using the memory access type for determining the alias-set. */
746 if (TREE_CODE (inner) == MEM_REF
747 && (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
748 != TYPE_MAIN_VARIANT
749 (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1))))))
750 return TREE_TYPE (TREE_OPERAND (inner, 1));
751
752 /* Otherwise, pick up the outermost object that we could have
753 a pointer to. */
754 tree tem = component_uses_parent_alias_set_from (*t);
755 if (tem)
756 *t = tem;
757
758 return NULL_TREE;
759 }
760
761 /* Return the pointer-type relevant for TBAA purposes from the
762 gimple memory reference tree T. This is the type to be used for
763 the offset operand of MEM_REF or TARGET_MEM_REF replacements of T
764 and guarantees that get_alias_set will return the same alias
765 set for T and the replacement. */
766
767 tree
768 reference_alias_ptr_type (tree t)
769 {
770 tree ptype = reference_alias_ptr_type_1 (&t);
771 /* If there is a given pointer type for aliasing purposes, return it. */
772 if (ptype != NULL_TREE)
773 return ptype;
774
775 /* Otherwise build one from the outermost component reference we
776 may use. */
777 if (TREE_CODE (t) == MEM_REF
778 || TREE_CODE (t) == TARGET_MEM_REF)
779 return TREE_TYPE (TREE_OPERAND (t, 1));
780 else
781 return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t)));
782 }
783
784 /* Return whether the pointer-types T1 and T2 used to determine
785 two alias sets of two references will yield the same answer
786 from get_deref_alias_set. */
787
788 bool
789 alias_ptr_types_compatible_p (tree t1, tree t2)
790 {
791 if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
792 return true;
793
794 if (ref_all_alias_ptr_type_p (t1)
795 || ref_all_alias_ptr_type_p (t2))
796 return false;
797
798 return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
799 == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
800 }
801
802 /* Create an empty alias set entry. */
803
804 alias_set_entry *
805 init_alias_set_entry (alias_set_type set)
806 {
807 alias_set_entry *ase = ggc_alloc<alias_set_entry> ();
808 ase->alias_set = set;
809 ase->children = NULL;
810 ase->has_zero_child = false;
811 ase->is_pointer = false;
812 ase->has_pointer = false;
813 gcc_checking_assert (!get_alias_set_entry (set));
814 (*alias_sets)[set] = ase;
815 return ase;
816 }
817
818 /* Return the alias set for T, which may be either a type or an
819 expression. Call language-specific routine for help, if needed. */
820
821 alias_set_type
822 get_alias_set (tree t)
823 {
824 alias_set_type set;
825
826 /* We cannot give up with -fno-strict-aliasing because we need to build a
827 proper type representation for functions which may be built with
828 -fstrict-aliasing. */
829
830 /* Return 0 if this or its type is an error. */
831 if (t == error_mark_node
832 || (! TYPE_P (t)
833 && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
834 return 0;
835
836 /* We can be passed either an expression or a type. This and the
837 language-specific routine may make mutually-recursive calls to each other
838 to figure out what to do. At each juncture, we see if this is a tree
839 that the language may need to handle specially. First handle things that
840 aren't types. */
841 if (! TYPE_P (t))
842 {
843 /* Give the language a chance to do something with this tree
844 before we look at it. */
845 STRIP_NOPS (t);
846 set = lang_hooks.get_alias_set (t);
847 if (set != -1)
848 return set;
849
850 /* Get the alias pointer-type to use or the outermost object
851 that we could have a pointer to. */
852 tree ptype = reference_alias_ptr_type_1 (&t);
853 if (ptype != NULL)
854 return get_deref_alias_set (ptype);
855
856 /* If we've already determined the alias set for a decl, just return
857 it. This is necessary for C++ anonymous unions, whose component
858 variables don't look like union members (boo!). */
859 if (TREE_CODE (t) == VAR_DECL
860 && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
861 return MEM_ALIAS_SET (DECL_RTL (t));
862
863 /* Now all we care about is the type. */
864 t = TREE_TYPE (t);
865 }
866
867 /* Variant qualifiers don't affect the alias set, so get the main
868 variant. */
869 t = TYPE_MAIN_VARIANT (t);
870
871 /* Always use the canonical type as well. If this is a type that
872 requires structural comparisons to identify compatible types
873 use alias set zero. */
874 if (TYPE_STRUCTURAL_EQUALITY_P (t))
875 {
876 /* Allow the language to specify another alias set for this
877 type. */
878 set = lang_hooks.get_alias_set (t);
879 if (set != -1)
880 return set;
881 /* Handle structural type equality for pointer types, arrays and vectors.
882 This is easy to do, because the code below ignores canonical types on
883 these anyway. This is important for LTO, where TYPE_CANONICAL for
884 pointers cannot be meaningfully computed by the frontend. */
885 if (canonical_type_used_p (t))
886 {
887 /* In LTO we set canonical types for all types where it makes
888 sense to do so. Double check we did not miss some type. */
889 gcc_checking_assert (!in_lto_p || !type_with_alias_set_p (t));
890 return 0;
891 }
892 }
893 else
894 {
895 t = TYPE_CANONICAL (t);
896 gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));
897 }
898
899 /* If this is a type with a known alias set, return it. */
900 gcc_checking_assert (t == TYPE_MAIN_VARIANT (t));
901 if (TYPE_ALIAS_SET_KNOWN_P (t))
902 return TYPE_ALIAS_SET (t);
903
904 /* We don't want to set TYPE_ALIAS_SET for incomplete types. */
905 if (!COMPLETE_TYPE_P (t))
906 {
907 /* For arrays with unknown size the conservative answer is the
908 alias set of the element type. */
909 if (TREE_CODE (t) == ARRAY_TYPE)
910 return get_alias_set (TREE_TYPE (t));
911
912 /* But return zero as a conservative answer for incomplete types. */
913 return 0;
914 }
915
916 /* See if the language has special handling for this type. */
917 set = lang_hooks.get_alias_set (t);
918 if (set != -1)
919 return set;
920
921 /* There are no objects of FUNCTION_TYPE, so there's no point in
922 using up an alias set for them. (There are, of course, pointers
923 and references to functions, but that's different.) */
924 else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
925 set = 0;
926
927 /* Unless the language specifies otherwise, let vector types alias
928 their components. This avoids some nasty type punning issues in
929 normal usage. And indeed lets vectors be treated more like an
930 array slice. */
931 else if (TREE_CODE (t) == VECTOR_TYPE)
932 set = get_alias_set (TREE_TYPE (t));
933
934 /* Unless the language specifies otherwise, treat array types the
935 same as their components. This avoids the asymmetry we get
936 through recording the components. Consider accessing a
937 character(kind=1) through a reference to a character(kind=1)[1:1].
938 Or consider if we want to assign integer(kind=4)[0:D.1387] and
939 integer(kind=4)[4] the same alias set or not.
940 Just be pragmatic here and make sure the array and its element
941 type get the same alias set assigned. */
942 else if (TREE_CODE (t) == ARRAY_TYPE
943 && (!TYPE_NONALIASED_COMPONENT (t)
944 || TYPE_STRUCTURAL_EQUALITY_P (t)))
945 set = get_alias_set (TREE_TYPE (t));
946
947 /* From the former common C and C++ langhook implementation:
948
949 Unfortunately, there is no canonical form of a pointer type.
950 In particular, if we have `typedef int I', then `int *', and
951 `I *' are different types. So, we have to pick a canonical
952 representative. We do this below.
953
954 Technically, this approach is actually more conservative than
955 it needs to be. In particular, `const int *' and `int *'
956 should be in different alias sets, according to the C and C++
957 standard, since their types are not the same, and so,
958 technically, an `int **' and `const int **' cannot point at
959 the same thing.
960
961 But, the standard is wrong. In particular, this code is
962 legal C++:
963
964 int *ip;
965 int **ipp = &ip;
966 const int* const* cipp = ipp;
967 And, it doesn't make sense for that to be legal unless you
968 can dereference IPP and CIPP. So, we ignore cv-qualifiers on
969 the pointed-to types. This issue has been reported to the
970 C++ committee.
971
972 For this reason we go to the canonical type of the unqualified pointer type.
973 Until GCC 6 this code gave all pointer types the alias set of
974 ptr_type_node, but that is a bad idea, because it prevents disambiguations
975 between pointers. For Firefox this accounts for about 20% of all
976 disambiguations in the program. */
977 else if (POINTER_TYPE_P (t) && t != ptr_type_node)
978 {
979 tree p;
980 auto_vec <bool, 8> reference;
981
982 /* Unnest all pointers and references.
983 We also want to make pointer to array/vector equivalent to pointer to
984 its element (see the reasoning above). Skip all those types, too. */
985 for (p = t; POINTER_TYPE_P (p)
986 || (TREE_CODE (p) == ARRAY_TYPE
987 && (!TYPE_NONALIASED_COMPONENT (p)
988 || !COMPLETE_TYPE_P (p)
989 || TYPE_STRUCTURAL_EQUALITY_P (p)))
990 || TREE_CODE (p) == VECTOR_TYPE;
991 p = TREE_TYPE (p))
992 {
993 if (TREE_CODE (p) == REFERENCE_TYPE)
994 /* In LTO we want languages that use references to be compatible
995 with languages that use pointers. */
996 reference.safe_push (true && !in_lto_p);
997 if (TREE_CODE (p) == POINTER_TYPE)
998 reference.safe_push (false);
999 }
1000 p = TYPE_MAIN_VARIANT (p);
1001
1002 /* Make void * compatible with char * and also void **.
1003 Programs commonly violate TBAA by doing this.
1004
1005 We also make void * conflict with every pointer
1006 (see record_component_aliases) and thus it is safe to use it for
1007 pointers to types with TYPE_STRUCTURAL_EQUALITY_P. */
1008 if (TREE_CODE (p) == VOID_TYPE || TYPE_STRUCTURAL_EQUALITY_P (p))
1009 set = get_alias_set (ptr_type_node);
1010 else
1011 {
1012 /* Rebuild pointer type starting from canonical types using
1013 unqualified pointers and references only. This way all such
1014 pointers will have the same alias set and will conflict with
1015 each other.
1016
1017 Most of the time we already have pointers or references of a given type.
1018 If not, we build a new one just to be sure that if someone later
1019 (probably only the middle-end can, as we should assign all alias
1020 classes only after finishing the translation unit) builds the pointer
1021 type, the canonical type will match. */
1022 p = TYPE_CANONICAL (p);
1023 while (!reference.is_empty ())
1024 {
1025 if (reference.pop ())
1026 p = build_reference_type (p);
1027 else
1028 p = build_pointer_type (p);
1029 gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
1030 /* build_pointer_type should always return the canonical type.
1031 For LTO TYPE_CANONICAL may be NULL, because we do not compute
1032 it. Be sure that frontends do not glob canonical types of
1033 pointers in an unexpected way and that p == TYPE_CANONICAL (p)
1034 in all other cases. */
1035 gcc_checking_assert (!TYPE_CANONICAL (p)
1036 || p == TYPE_CANONICAL (p));
1037 }
1038
1039 /* Assign the alias set to both p and t.
1040 We cannot call get_alias_set (p) here as that would trigger
1041 infinite recursion when p == t. In other cases it would just
1042 trigger the unnecessary legwork of rebuilding the pointer again. */
1043 gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
1044 if (TYPE_ALIAS_SET_KNOWN_P (p))
1045 set = TYPE_ALIAS_SET (p);
1046 else
1047 {
1048 set = new_alias_set ();
1049 TYPE_ALIAS_SET (p) = set;
1050 }
1051 }
1052 }
1053 /* The alias set of ptr_type_node is special and serves as a universal pointer
1054 which is TBAA-compatible with every other pointer type. Be sure we have the
1055 alias set built even for LTO which otherwise keeps all TYPE_CANONICAL
1056 of pointer types NULL. */
1057 else if (t == ptr_type_node)
1058 set = new_alias_set ();
1059
1060 /* Otherwise make a new alias set for this type. */
1061 else
1062 {
1063 /* Each canonical type gets its own alias set, so canonical types
1064 shouldn't form a tree. It doesn't really matter for types
1065 we handle specially above, so only check it where it possibly
1066 would result in a bogus alias set. */
1067 gcc_checking_assert (TYPE_CANONICAL (t) == t);
1068
1069 set = new_alias_set ();
1070 }
1071
1072 TYPE_ALIAS_SET (t) = set;
1073
1074 /* If this is an aggregate type or a complex type, we must record any
1075 component aliasing information. */
1076 if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
1077 record_component_aliases (t);
1078
1079 /* We treat pointer types specially in alias_set_subset_of. */
1080 if (POINTER_TYPE_P (t) && set)
1081 {
1082 alias_set_entry *ase = get_alias_set_entry (set);
1083 if (!ase)
1084 ase = init_alias_set_entry (set);
1085 ase->is_pointer = true;
1086 ase->has_pointer = true;
1087 }
1088
1089 return set;
1090 }
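
/* A user-level sketch of the pointer handling above (a hypothetical
   example): because the canonical, unqualified pointer type is used,
   `int *', `I *' for `typedef int I', and `const int *' all share one
   alias set, while `int *' and `float *' keep distinct alias sets and
   can be disambiguated.

     typedef int I;

     int
     f (int **p, I **q, float **r)
     {
       *p = 0;    // may alias *q; both lvalues have the alias set of int *
       *r = 0;    // does not alias *p or *q under strict aliasing
       return *q == 0;
     }
*/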
1091
1092 /* Return a brand-new alias set. */
1093
1094 alias_set_type
1095 new_alias_set (void)
1096 {
1097 if (alias_sets == 0)
1098 vec_safe_push (alias_sets, (alias_set_entry *) NULL);
1099 vec_safe_push (alias_sets, (alias_set_entry *) NULL);
1100 return alias_sets->length () - 1;
1101 }
1102
1103 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
1104 not everything that aliases SUPERSET also aliases SUBSET. For example,
1105 in C, a store to an `int' can alias a load of a structure containing an
1106 `int', and vice versa. But it can't alias a load of a 'double' member
1107 of the same structure. Here, the structure would be the SUPERSET and
1108 `int' the SUBSET. This relationship is also described in the comment at
1109 the beginning of this file.
1110
1111 This function should be called only once per SUPERSET/SUBSET pair.
1112
1113 It is illegal for SUPERSET to be zero; everything is implicitly a
1114 subset of alias set zero. */
1115
1116 void
1117 record_alias_subset (alias_set_type superset, alias_set_type subset)
1118 {
1119 alias_set_entry *superset_entry;
1120 alias_set_entry *subset_entry;
1121
1122 /* It is possible in complex type situations for both sets to be the same,
1123 in which case we can ignore this operation. */
1124 if (superset == subset)
1125 return;
1126
1127 gcc_assert (superset);
1128
1129 superset_entry = get_alias_set_entry (superset);
1130 if (superset_entry == 0)
1131 {
1132 /* Create an entry for the SUPERSET, so that we have a place to
1133 attach the SUBSET. */
1134 superset_entry = init_alias_set_entry (superset);
1135 }
1136
1137 if (subset == 0)
1138 superset_entry->has_zero_child = 1;
1139 else
1140 {
1141 subset_entry = get_alias_set_entry (subset);
1142 if (!superset_entry->children)
1143 superset_entry->children
1144 = hash_map<alias_set_hash, int>::create_ggc (64);
1145 /* If there is an entry for the subset, enter all of its children
1146 (if they are not already present) as children of the SUPERSET. */
1147 if (subset_entry)
1148 {
1149 if (subset_entry->has_zero_child)
1150 superset_entry->has_zero_child = true;
1151 if (subset_entry->has_pointer)
1152 superset_entry->has_pointer = true;
1153
1154 if (subset_entry->children)
1155 {
1156 hash_map<alias_set_hash, int>::iterator iter
1157 = subset_entry->children->begin ();
1158 for (; iter != subset_entry->children->end (); ++iter)
1159 superset_entry->children->put ((*iter).first, (*iter).second);
1160 }
1161 }
1162
1163 /* Enter the SUBSET itself as a child of the SUPERSET. */
1164 superset_entry->children->put (subset, 0);
1165 }
1166 }
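
/* A sketch of how the subset DAG from the head-of-file comment is built
   (schematic; the calls are really made from record_component_aliases
   and take alias set numbers, not types).  For

     struct S { int i; double d; };
     struct T { struct S s; float f; };

   one gets, in effect,

     record_alias_subset (<struct S>, <int>);
     record_alias_subset (<struct S>, <double>);
     record_alias_subset (<struct T>, <struct S>);
     record_alias_subset (<struct T>, <float>);

   and because the children of struct S are copied into the entry of
   struct T, the entry for struct T ends up listing the alias sets of
   struct S, float, int and double as children.  */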
1167
1168 /* Record that component types of TYPE, if any, are part of that type for
1169 aliasing purposes. For record types, we only record component types
1170 for fields that are not marked non-addressable. For array types, we
1171 only record the component type if it is not marked non-aliased. */
1172
1173 void
1174 record_component_aliases (tree type)
1175 {
1176 alias_set_type superset = get_alias_set (type);
1177 tree field;
1178
1179 if (superset == 0)
1180 return;
1181
1182 switch (TREE_CODE (type))
1183 {
1184 case RECORD_TYPE:
1185 case UNION_TYPE:
1186 case QUAL_UNION_TYPE:
1187 for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
1188 if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
1189 {
1190 /* LTO type merging does not distinguish between
1191 component pointer types. We may have
1192
1193 struct foo {int *a;};
1194
1195 as TYPE_CANONICAL of
1196
1197 struct bar {float *a;};
1198
1199 Because accesses to int * and float * do not alias, we would get
1200 a false negative when accessing the same memory location by
1201 float ** and bar *. We thus record the canonical type as:
1202
1203 struct {void *a;};
1204
1205 void * is special cased and works as a universal pointer type.
1206 Accesses to it conflict with accesses to any other pointer
1207 type. */
1208 tree t = TREE_TYPE (field);
1209 if (in_lto_p)
1210 {
1211 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1212 element type and that type has to be normalized to void *,
1213 too, in the case it is a pointer. */
1214 while (!canonical_type_used_p (t) && !POINTER_TYPE_P (t))
1215 {
1216 gcc_checking_assert (TYPE_STRUCTURAL_EQUALITY_P (t));
1217 t = TREE_TYPE (t);
1218 }
1219 if (POINTER_TYPE_P (t))
1220 t = ptr_type_node;
1221 else if (flag_checking)
1222 gcc_checking_assert (get_alias_set (t)
1223 == get_alias_set (TREE_TYPE (field)));
1224 }
1225
1226 record_alias_subset (superset, get_alias_set (t));
1227 }
1228 break;
1229
1230 case COMPLEX_TYPE:
1231 record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
1232 break;
1233
1234 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1235 element type. */
1236
1237 default:
1238 break;
1239 }
1240 }
1241
1242 /* Allocate an alias set for use in storing and reading from the varargs
1243 spill area. */
1244
1245 static GTY(()) alias_set_type varargs_set = -1;
1246
1247 alias_set_type
1248 get_varargs_alias_set (void)
1249 {
1250 #if 1
1251 /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
1252 varargs alias set to an INDIRECT_REF (FIXME!), so we can't
1253 consistently use the varargs alias set for loads from the varargs
1254 area. So don't use it anywhere. */
1255 return 0;
1256 #else
1257 if (varargs_set == -1)
1258 varargs_set = new_alias_set ();
1259
1260 return varargs_set;
1261 #endif
1262 }
1263
1264 /* Likewise, but used for the fixed portions of the frame, e.g., register
1265 save areas. */
1266
1267 static GTY(()) alias_set_type frame_set = -1;
1268
1269 alias_set_type
1270 get_frame_alias_set (void)
1271 {
1272 if (frame_set == -1)
1273 frame_set = new_alias_set ();
1274
1275 return frame_set;
1276 }
1277
1278 /* Create a new, unique base with id ID. */
1279
1280 static rtx
1281 unique_base_value (HOST_WIDE_INT id)
1282 {
1283 return gen_rtx_ADDRESS (Pmode, id);
1284 }
1285
1286 /* Return true if accesses based on any other base value cannot alias
1287 those based on X. */
1288
1289 static bool
1290 unique_base_value_p (rtx x)
1291 {
1292 return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
1293 }
1294
1295 /* Return true if X is known to be a base value. */
1296
1297 static bool
1298 known_base_value_p (rtx x)
1299 {
1300 switch (GET_CODE (x))
1301 {
1302 case LABEL_REF:
1303 case SYMBOL_REF:
1304 return true;
1305
1306 case ADDRESS:
1307 /* Arguments may or may not be bases; we don't know for sure. */
1308 return GET_MODE (x) != VOIDmode;
1309
1310 default:
1311 return false;
1312 }
1313 }
1314
1315 /* Inside SRC, the source of a SET, find a base address. */
1316
1317 static rtx
1318 find_base_value (rtx src)
1319 {
1320 unsigned int regno;
1321
1322 #if defined (FIND_BASE_TERM)
1323 /* Try machine-dependent ways to find the base term. */
1324 src = FIND_BASE_TERM (src);
1325 #endif
1326
1327 switch (GET_CODE (src))
1328 {
1329 case SYMBOL_REF:
1330 case LABEL_REF:
1331 return src;
1332
1333 case REG:
1334 regno = REGNO (src);
1335 /* At the start of a function, argument registers have known base
1336 values which may be lost later. Returning an ADDRESS
1337 expression here allows optimization based on argument values
1338 even when the argument registers are used for other purposes. */
1339 if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
1340 return new_reg_base_value[regno];
1341
1342 /* If a pseudo has a known base value, return it. Do not do this
1343 for non-fixed hard regs since it can result in a circular
1344 dependency chain for registers which have values at function entry.
1345
1346 The test above is not sufficient because the scheduler may move
1347 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */
1348 if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
1349 && regno < vec_safe_length (reg_base_value))
1350 {
1351 /* If we're inside init_alias_analysis, use new_reg_base_value
1352 to reduce the number of relaxation iterations. */
1353 if (new_reg_base_value && new_reg_base_value[regno]
1354 && DF_REG_DEF_COUNT (regno) == 1)
1355 return new_reg_base_value[regno];
1356
1357 if ((*reg_base_value)[regno])
1358 return (*reg_base_value)[regno];
1359 }
1360
1361 return 0;
1362
1363 case MEM:
1364 /* Check for an argument passed in memory. Only record in the
1365 copying-arguments block; it is too hard to track changes
1366 otherwise. */
1367 if (copying_arguments
1368 && (XEXP (src, 0) == arg_pointer_rtx
1369 || (GET_CODE (XEXP (src, 0)) == PLUS
1370 && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
1371 return arg_base_value;
1372 return 0;
1373
1374 case CONST:
1375 src = XEXP (src, 0);
1376 if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
1377 break;
1378
1379 /* ... fall through ... */
1380
1381 case PLUS:
1382 case MINUS:
1383 {
1384 rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
1385
1386 /* If either operand is a REG that is a known pointer, then it
1387 is the base. */
1388 if (REG_P (src_0) && REG_POINTER (src_0))
1389 return find_base_value (src_0);
1390 if (REG_P (src_1) && REG_POINTER (src_1))
1391 return find_base_value (src_1);
1392
1393 /* If either operand is a REG, then see if we already have
1394 a known value for it. */
1395 if (REG_P (src_0))
1396 {
1397 temp = find_base_value (src_0);
1398 if (temp != 0)
1399 src_0 = temp;
1400 }
1401
1402 if (REG_P (src_1))
1403 {
1404 temp = find_base_value (src_1);
1405 if (temp != 0)
1406 src_1 = temp;
1407 }
1408
1409 /* If either base is a named object or a special address
1410 (like an argument or stack reference), then use it for the
1411 base term. */
1412 if (src_0 != 0 && known_base_value_p (src_0))
1413 return src_0;
1414
1415 if (src_1 != 0 && known_base_value_p (src_1))
1416 return src_1;
1417
1418 /* Guess which operand is the base address:
1419 If either operand is a symbol, then it is the base. If
1420 either operand is a CONST_INT, then the other is the base. */
1421 if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
1422 return find_base_value (src_0);
1423 else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
1424 return find_base_value (src_1);
1425
1426 return 0;
1427 }
1428
1429 case LO_SUM:
1430 /* The standard form is (lo_sum reg sym) so look only at the
1431 second operand. */
1432 return find_base_value (XEXP (src, 1));
1433
1434 case AND:
1435 /* If the second operand is constant set the base
1436 address to the first operand. */
1437 if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
1438 return find_base_value (XEXP (src, 0));
1439 return 0;
1440
1441 case TRUNCATE:
1442 /* As we do not know which address space the pointer is referring to, we can
1443 handle this only if the target does not support different pointer or
1444 address modes depending on the address space. */
1445 if (!target_default_pointer_address_modes_p ())
1446 break;
1447 if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
1448 break;
1449 /* Fall through. */
1450 case HIGH:
1451 case PRE_INC:
1452 case PRE_DEC:
1453 case POST_INC:
1454 case POST_DEC:
1455 case PRE_MODIFY:
1456 case POST_MODIFY:
1457 return find_base_value (XEXP (src, 0));
1458
1459 case ZERO_EXTEND:
1460 case SIGN_EXTEND: /* used for NT/Alpha pointers */
1461 /* As we do not know which address space the pointer is referring to, we can
1462 handle this only if the target does not support different pointer or
1463 address modes depending on the address space. */
1464 if (!target_default_pointer_address_modes_p ())
1465 break;
1466
1467 {
1468 rtx temp = find_base_value (XEXP (src, 0));
1469
1470 if (temp != 0 && CONSTANT_P (temp))
1471 temp = convert_memory_address (Pmode, temp);
1472
1473 return temp;
1474 }
1475
1476 default:
1477 break;
1478 }
1479
1480 return 0;
1481 }
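
/* A few schematic examples of what find_base_value computes (a
   hypothetical sketch; the exact result depends on the information
   gathered so far):

     find_base_value ((plus (reg 90) (const_int 16)))
       -> whatever base is already known for pseudo 90 (possibly 0)

     find_base_value ((const (plus (symbol_ref "tab") (const_int 4))))
       -> (symbol_ref "tab")

     find_base_value ((plus (reg 90) (reg 91)))
       -> the base of whichever operand is a known pointer or has a
          known base value; 0 if neither can be identified.  */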
1482
1483 /* Called from init_alias_analysis indirectly through note_stores,
1484 or directly if DEST is a register with a REG_NOALIAS note attached.
1485 SET is null in the latter case. */
1486
1487 /* While scanning insns to find base values, reg_seen[N] is nonzero if
1488 register N has been set in this function. */
1489 static sbitmap reg_seen;
1490
1491 static void
1492 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
1493 {
1494 unsigned regno;
1495 rtx src;
1496 int n;
1497
1498 if (!REG_P (dest))
1499 return;
1500
1501 regno = REGNO (dest);
1502
1503 gcc_checking_assert (regno < reg_base_value->length ());
1504
1505 n = REG_NREGS (dest);
1506 if (n != 1)
1507 {
1508 while (--n >= 0)
1509 {
1510 bitmap_set_bit (reg_seen, regno + n);
1511 new_reg_base_value[regno + n] = 0;
1512 }
1513 return;
1514 }
1515
1516 if (set)
1517 {
1518 /* A CLOBBER wipes out any old value but does not prevent a previously
1519 unset register from acquiring a base address (i.e. reg_seen is not
1520 set). */
1521 if (GET_CODE (set) == CLOBBER)
1522 {
1523 new_reg_base_value[regno] = 0;
1524 return;
1525 }
1526 src = SET_SRC (set);
1527 }
1528 else
1529 {
1530 /* There's a REG_NOALIAS note against DEST. */
1531 if (bitmap_bit_p (reg_seen, regno))
1532 {
1533 new_reg_base_value[regno] = 0;
1534 return;
1535 }
1536 bitmap_set_bit (reg_seen, regno);
1537 new_reg_base_value[regno] = unique_base_value (unique_id++);
1538 return;
1539 }
1540
1541 /* If this is not the first set of REGNO, see whether the new value
1542 is related to the old one. There are two cases of interest:
1543
1544 (1) The register might be assigned an entirely new value
1545 that has the same base term as the original set.
1546
1547 (2) The set might be a simple self-modification that
1548 cannot change REGNO's base value.
1549
1550 If neither case holds, reject the original base value as invalid.
1551 Note that the following situation is not detected:
1552
1553 extern int x, y; int *p = &x; p += (&y-&x);
1554
1555 ANSI C does not allow computing the difference of addresses
1556 of distinct top level objects. */
1557 if (new_reg_base_value[regno] != 0
1558 && find_base_value (src) != new_reg_base_value[regno])
1559 switch (GET_CODE (src))
1560 {
1561 case LO_SUM:
1562 case MINUS:
1563 if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1564 new_reg_base_value[regno] = 0;
1565 break;
1566 case PLUS:
1567 /* If the value we add in the PLUS is also a valid base value,
1568 this might be the actual base value, and the original value
1569 an index. */
1570 {
1571 rtx other = NULL_RTX;
1572
1573 if (XEXP (src, 0) == dest)
1574 other = XEXP (src, 1);
1575 else if (XEXP (src, 1) == dest)
1576 other = XEXP (src, 0);
1577
1578 if (! other || find_base_value (other))
1579 new_reg_base_value[regno] = 0;
1580 break;
1581 }
1582 case AND:
1583 if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
1584 new_reg_base_value[regno] = 0;
1585 break;
1586 default:
1587 new_reg_base_value[regno] = 0;
1588 break;
1589 }
1590 /* If this is the first set of a register, record the value. */
1591 else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1592 && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
1593 new_reg_base_value[regno] = find_base_value (src);
1594
1595 bitmap_set_bit (reg_seen, regno);
1596 }
1597
1598 /* Return REG_BASE_VALUE for REGNO. Selective scheduler uses this to avoid
1599 using hard registers with non-null REG_BASE_VALUE for renaming. */
1600 rtx
1601 get_reg_base_value (unsigned int regno)
1602 {
1603 return (*reg_base_value)[regno];
1604 }
1605
1606 /* If a value is known for REGNO, return it. */
1607
1608 rtx
1609 get_reg_known_value (unsigned int regno)
1610 {
1611 if (regno >= FIRST_PSEUDO_REGISTER)
1612 {
1613 regno -= FIRST_PSEUDO_REGISTER;
1614 if (regno < vec_safe_length (reg_known_value))
1615 return (*reg_known_value)[regno];
1616 }
1617 return NULL;
1618 }
1619
1620 /* Set it. */
1621
1622 static void
1623 set_reg_known_value (unsigned int regno, rtx val)
1624 {
1625 if (regno >= FIRST_PSEUDO_REGISTER)
1626 {
1627 regno -= FIRST_PSEUDO_REGISTER;
1628 if (regno < vec_safe_length (reg_known_value))
1629 (*reg_known_value)[regno] = val;
1630 }
1631 }
1632
1633 /* Similarly for reg_known_equiv_p. */
1634
1635 bool
1636 get_reg_known_equiv_p (unsigned int regno)
1637 {
1638 if (regno >= FIRST_PSEUDO_REGISTER)
1639 {
1640 regno -= FIRST_PSEUDO_REGISTER;
1641 if (regno < vec_safe_length (reg_known_value))
1642 return bitmap_bit_p (reg_known_equiv_p, regno);
1643 }
1644 return false;
1645 }
1646
1647 static void
1648 set_reg_known_equiv_p (unsigned int regno, bool val)
1649 {
1650 if (regno >= FIRST_PSEUDO_REGISTER)
1651 {
1652 regno -= FIRST_PSEUDO_REGISTER;
1653 if (regno < vec_safe_length (reg_known_value))
1654 {
1655 if (val)
1656 bitmap_set_bit (reg_known_equiv_p, regno);
1657 else
1658 bitmap_clear_bit (reg_known_equiv_p, regno);
1659 }
1660 }
1661 }
1662
1663
1664 /* Returns a canonical version of X, from the point of view of alias
1665 analysis. (For example, if X is a MEM whose address is a register,
1666 and the register has a known value (say a SYMBOL_REF), then a MEM
1667 whose address is the SYMBOL_REF is returned.) */
1668
1669 rtx
1670 canon_rtx (rtx x)
1671 {
1672 /* Recursively look for equivalences. */
1673 if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1674 {
1675 rtx t = get_reg_known_value (REGNO (x));
1676 if (t == x)
1677 return x;
1678 if (t)
1679 return canon_rtx (t);
1680 }
1681
1682 if (GET_CODE (x) == PLUS)
1683 {
1684 rtx x0 = canon_rtx (XEXP (x, 0));
1685 rtx x1 = canon_rtx (XEXP (x, 1));
1686
1687 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1688 {
1689 if (CONST_INT_P (x0))
1690 return plus_constant (GET_MODE (x), x1, INTVAL (x0));
1691 else if (CONST_INT_P (x1))
1692 return plus_constant (GET_MODE (x), x0, INTVAL (x1));
1693 return gen_rtx_PLUS (GET_MODE (x), x0, x1);
1694 }
1695 }
1696
1697 /* This gives us much better alias analysis when called from
1698 the loop optimizer. Note we want to leave the original
1699 MEM alone, but need to return the canonicalized MEM with
1700 all the flags keeping their original values. */
1701 else if (MEM_P (x))
1702 x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1703
1704 return x;
1705 }
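
/* An illustration of canonicalization (a hypothetical sketch): if
   pseudo 120 has the known value (symbol_ref "buf") recorded in
   reg_known_value (e.g. from a REG_EQUIV note), then

     canon_rtx ((plus (reg 120) (const_int 4)))

   yields the symbolic form of buf + 4, and canon_rtx of a MEM using
   that address returns a copy of the MEM (built non-destructively via
   replace_equiv_address_nv) with the canonical address, so that
   memrefs_conflict_p can compare offsets from the same symbol
   directly.  */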
1706
1707 /* Return 1 if X and Y are identical-looking rtx's.
1708 Expect that X and Y have already been canonicalized.
1709
1710 We use the data in reg_known_value above to see if two registers with
1711 different numbers are, in fact, equivalent. */
1712
1713 static int
1714 rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1715 {
1716 int i;
1717 int j;
1718 enum rtx_code code;
1719 const char *fmt;
1720
1721 if (x == 0 && y == 0)
1722 return 1;
1723 if (x == 0 || y == 0)
1724 return 0;
1725
1726 if (x == y)
1727 return 1;
1728
1729 code = GET_CODE (x);
1730 /* Rtx's of different codes cannot be equal. */
1731 if (code != GET_CODE (y))
1732 return 0;
1733
1734 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1735 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1736
1737 if (GET_MODE (x) != GET_MODE (y))
1738 return 0;
1739
1740 /* Some RTL can be compared without a recursive examination. */
1741 switch (code)
1742 {
1743 case REG:
1744 return REGNO (x) == REGNO (y);
1745
1746 case LABEL_REF:
1747 return LABEL_REF_LABEL (x) == LABEL_REF_LABEL (y);
1748
1749 case SYMBOL_REF:
1750 return XSTR (x, 0) == XSTR (y, 0);
1751
1752 case ENTRY_VALUE:
1753 /* This is magic; don't go through canonicalization et al. */
1754 return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));
1755
1756 case VALUE:
1757 CASE_CONST_UNIQUE:
1758 /* Pointer equality guarantees equality for these nodes. */
1759 return 0;
1760
1761 default:
1762 break;
1763 }
1764
1765 /* canon_rtx knows how to handle plus. No need to canonicalize. */
1766 if (code == PLUS)
1767 return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1768 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1769 || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1770 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
1771 /* For commutative operations, the RTXs match if the operands match in any
1772 order. Also handle the simple binary and unary cases without a loop. */
1773 if (COMMUTATIVE_P (x))
1774 {
1775 rtx xop0 = canon_rtx (XEXP (x, 0));
1776 rtx yop0 = canon_rtx (XEXP (y, 0));
1777 rtx yop1 = canon_rtx (XEXP (y, 1));
1778
1779 return ((rtx_equal_for_memref_p (xop0, yop0)
1780 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1781 || (rtx_equal_for_memref_p (xop0, yop1)
1782 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1783 }
1784 else if (NON_COMMUTATIVE_P (x))
1785 {
1786 return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1787 canon_rtx (XEXP (y, 0)))
1788 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1789 canon_rtx (XEXP (y, 1))));
1790 }
1791 else if (UNARY_P (x))
1792 return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1793 canon_rtx (XEXP (y, 0)));
1794
1795 /* Compare the elements. If any pair of corresponding elements
1796 fails to match, return 0 for the whole thing.
1797
1798 Limit cases to types which actually appear in addresses. */
1799
1800 fmt = GET_RTX_FORMAT (code);
1801 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1802 {
1803 switch (fmt[i])
1804 {
1805 case 'i':
1806 if (XINT (x, i) != XINT (y, i))
1807 return 0;
1808 break;
1809
1810 case 'E':
1811 /* Two vectors must have the same length. */
1812 if (XVECLEN (x, i) != XVECLEN (y, i))
1813 return 0;
1814
1815 /* And the corresponding elements must match. */
1816 for (j = 0; j < XVECLEN (x, i); j++)
1817 if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1818 canon_rtx (XVECEXP (y, i, j))) == 0)
1819 return 0;
1820 break;
1821
1822 case 'e':
1823 if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1824 canon_rtx (XEXP (y, i))) == 0)
1825 return 0;
1826 break;
1827
1828 /* This can happen for asm operands. */
1829 case 's':
1830 if (strcmp (XSTR (x, i), XSTR (y, i)))
1831 return 0;
1832 break;
1833
1834 /* This can happen for an asm which clobbers memory. */
1835 case '0':
1836 break;
1837
1838 /* It is believed that rtx's at this level will never
1839 contain anything but integers and other rtx's,
1840 except for within LABEL_REFs and SYMBOL_REFs. */
1841 default:
1842 gcc_unreachable ();
1843 }
1844 }
1845 return 1;
1846 }
1847
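/* Return the base term of address X: the REG_BASE_VALUE of a register,
   the SYMBOL_REF or LABEL_REF itself for a symbolic address, or an
   ADDRESS expression for special bases such as the stack, frame or
   argument pointer. Return 0 if no base term can be determined. */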
1848 static rtx
1849 find_base_term (rtx x)
1850 {
1851 cselib_val *val;
1852 struct elt_loc_list *l, *f;
1853 rtx ret;
1854
1855 #if defined (FIND_BASE_TERM)
1856 /* Try machine-dependent ways to find the base term. */
1857 x = FIND_BASE_TERM (x);
1858 #endif
1859
1860 switch (GET_CODE (x))
1861 {
1862 case REG:
1863 return REG_BASE_VALUE (x);
1864
1865 case TRUNCATE:
1866 /* As we do not know which address space the pointer is referring to, we can
1867 handle this only if the target does not support different pointer or
1868 address modes depending on the address space. */
1869 if (!target_default_pointer_address_modes_p ())
1870 return 0;
1871 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
1872 return 0;
1873 /* Fall through. */
1874 case HIGH:
1875 case PRE_INC:
1876 case PRE_DEC:
1877 case POST_INC:
1878 case POST_DEC:
1879 case PRE_MODIFY:
1880 case POST_MODIFY:
1881 return find_base_term (XEXP (x, 0));
1882
1883 case ZERO_EXTEND:
1884 case SIGN_EXTEND: /* Used for Alpha/NT pointers */
1885 /* As we do not know which address space the pointer is referring to, we can
1886 handle this only if the target does not support different pointer or
1887 address modes depending on the address space. */
1888 if (!target_default_pointer_address_modes_p ())
1889 return 0;
1890
1891 {
1892 rtx temp = find_base_term (XEXP (x, 0));
1893
1894 if (temp != 0 && CONSTANT_P (temp))
1895 temp = convert_memory_address (Pmode, temp);
1896
1897 return temp;
1898 }
1899
1900 case VALUE:
1901 val = CSELIB_VAL_PTR (x);
1902 ret = NULL_RTX;
1903
1904 if (!val)
1905 return ret;
1906
1907 if (cselib_sp_based_value_p (val))
1908 return static_reg_base_value[STACK_POINTER_REGNUM];
1909
1910 f = val->locs;
1911 /* Temporarily reset val->locs to avoid infinite recursion. */
1912 val->locs = NULL;
1913
1914 for (l = f; l; l = l->next)
1915 if (GET_CODE (l->loc) == VALUE
1916 && CSELIB_VAL_PTR (l->loc)->locs
1917 && !CSELIB_VAL_PTR (l->loc)->locs->next
1918 && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
1919 continue;
1920 else if ((ret = find_base_term (l->loc)) != 0)
1921 break;
1922
1923 val->locs = f;
1924 return ret;
1925
1926 case LO_SUM:
1927 /* The standard form is (lo_sum reg sym) so look only at the
1928 second operand. */
1929 return find_base_term (XEXP (x, 1));
1930
1931 case CONST:
1932 x = XEXP (x, 0);
1933 if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
1934 return 0;
1935 /* Fall through. */
1936 case PLUS:
1937 case MINUS:
1938 {
1939 rtx tmp1 = XEXP (x, 0);
1940 rtx tmp2 = XEXP (x, 1);
1941
1942 /* This is a little bit tricky since we have to determine which of
1943 the two operands represents the real base address. Otherwise this
1944 routine may return the index register instead of the base register.
1945
1946 That may cause us to believe no aliasing was possible, when in
1947 fact aliasing is possible.
1948
1949 We use a few simple tests to guess the base register. Additional
1950 tests can certainly be added. For example, if one of the operands
1951 is a shift or multiply, then it must be the index register and the
1952 other operand is the base register. */
1953
1954 if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
1955 return find_base_term (tmp2);
1956
1957 /* If either operand is known to be a pointer, then prefer it
1958 to determine the base term. */
1959 if (REG_P (tmp1) && REG_POINTER (tmp1))
1960 ;
1961 else if (REG_P (tmp2) && REG_POINTER (tmp2))
1962 std::swap (tmp1, tmp2);
1963 /* If the second argument is a constant that has a base term, prefer it
1964 over the variable tmp1. See PR64025. */
1965 else if (CONSTANT_P (tmp2) && !CONST_INT_P (tmp2))
1966 std::swap (tmp1, tmp2);
1967
1968 /* Go ahead and find the base term for both operands. If either base
1969 term is from a pointer or is a named object or a special address
1970 (like an argument or stack reference), then use it for the
1971 base term. */
1972 rtx base = find_base_term (tmp1);
1973 if (base != NULL_RTX
1974 && ((REG_P (tmp1) && REG_POINTER (tmp1))
1975 || known_base_value_p (base)))
1976 return base;
1977 base = find_base_term (tmp2);
1978 if (base != NULL_RTX
1979 && ((REG_P (tmp2) && REG_POINTER (tmp2))
1980 || known_base_value_p (base)))
1981 return base;
1982
1983 /* We could not determine which of the two operands was the
1984 base register and which was the index. So we can determine
1985 nothing from the base alias check. */
1986 return 0;
1987 }
1988
1989 case AND:
1990 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
1991 return find_base_term (XEXP (x, 0));
1992 return 0;
1993
1994 case SYMBOL_REF:
1995 case LABEL_REF:
1996 return x;
1997
1998 default:
1999 return 0;
2000 }
2001 }
2002
2003 /* Return true if accesses to address X may alias accesses based
2004 on the stack pointer. */
2005
2006 bool
2007 may_be_sp_based_p (rtx x)
2008 {
2009 rtx base = find_base_term (x);
2010 return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
2011 }
2012
2013 /* Return 0 if the addresses X and Y are known to point to different
2014 objects, 1 if they might be pointers to the same object. */
2015
2016 static int
2017 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
2018 machine_mode x_mode, machine_mode y_mode)
2019 {
2020 /* If the address itself has no known base, see if a known equivalent
2021 value has one. If either address still has no known base, nothing
2022 is known about aliasing. */
2023 if (x_base == 0)
2024 {
2025 rtx x_c;
2026
2027 if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
2028 return 1;
2029
2030 x_base = find_base_term (x_c);
2031 if (x_base == 0)
2032 return 1;
2033 }
2034
2035 if (y_base == 0)
2036 {
2037 rtx y_c;
2038 if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
2039 return 1;
2040
2041 y_base = find_base_term (y_c);
2042 if (y_base == 0)
2043 return 1;
2044 }
2045
2046 /* If the base addresses are equal nothing is known about aliasing. */
2047 if (rtx_equal_p (x_base, y_base))
2048 return 1;
2049
2050 /* The base addresses are different expressions. If they are not accessed
2051 via AND, there is no conflict. We can bring knowledge of object
2052 alignment into play here. For example, on alpha, "char a, b;" can
2053 alias one another, though "char a; long b;" cannot. AND addresses may
2054 implicitly alias surrounding objects; i.e. an unaligned access in DImode
2055 via an AND address can alias all surrounding object types except those
2056 with alignment 8 or higher. */
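/* For instance (illustrative): an unaligned DImode access whose address
   is (and:DI (reg) (const_int -8)) may also touch bytes of a neighbouring
   object whose alignment is smaller than 8, which is why the checks below
   compare the mode size of the other reference against -INTVAL of the
   AND mask. */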
2057 if (GET_CODE (x) == AND && GET_CODE (y) == AND)
2058 return 1;
2059 if (GET_CODE (x) == AND
2060 && (!CONST_INT_P (XEXP (x, 1))
2061 || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
2062 return 1;
2063 if (GET_CODE (y) == AND
2064 && (!CONST_INT_P (XEXP (y, 1))
2065 || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
2066 return 1;
2067
2068 /* Differing symbols not accessed via AND never alias. */
2069 if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
2070 return 0;
2071
2072 if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
2073 return 0;
2074
2075 return 1;
2076 }
2077
2078 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
2079 that of V. */
2080
2081 static bool
2082 refs_newer_value_p (const_rtx expr, rtx v)
2083 {
2084 int minuid = CSELIB_VAL_PTR (v)->uid;
2085 subrtx_iterator::array_type array;
2086 FOR_EACH_SUBRTX (iter, array, expr, NONCONST)
2087 if (GET_CODE (*iter) == VALUE && CSELIB_VAL_PTR (*iter)->uid > minuid)
2088 return true;
2089 return false;
2090 }
2091
2092 /* Convert the address X into something we can use. This is done by returning
2093 it unchanged unless it is a VALUE; in the latter case we call cselib to get
2094 a more useful rtx. */
2095
2096 rtx
2097 get_addr (rtx x)
2098 {
2099 cselib_val *v;
2100 struct elt_loc_list *l;
2101
2102 if (GET_CODE (x) != VALUE)
2103 return x;
2104 v = CSELIB_VAL_PTR (x);
2105 if (v)
2106 {
2107 bool have_equivs = cselib_have_permanent_equivalences ();
2108 if (have_equivs)
2109 v = canonical_cselib_val (v);
2110 for (l = v->locs; l; l = l->next)
2111 if (CONSTANT_P (l->loc))
2112 return l->loc;
2113 for (l = v->locs; l; l = l->next)
2114 if (!REG_P (l->loc) && !MEM_P (l->loc)
2115 /* Avoid infinite recursion when potentially dealing with
2116 var-tracking artificial equivalences, by skipping the
2117 equivalences themselves, and not choosing expressions
2118 that refer to newer VALUEs. */
2119 && (!have_equivs
2120 || (GET_CODE (l->loc) != VALUE
2121 && !refs_newer_value_p (l->loc, x))))
2122 return l->loc;
2123 if (have_equivs)
2124 {
2125 for (l = v->locs; l; l = l->next)
2126 if (REG_P (l->loc)
2127 || (GET_CODE (l->loc) != VALUE
2128 && !refs_newer_value_p (l->loc, x)))
2129 return l->loc;
2130 /* Return the canonical value. */
2131 return v->val_rtx;
2132 }
2133 if (v->locs)
2134 return v->locs->loc;
2135 }
2136 return x;
2137 }
2138
2139 /* Return the address of the (N_REFS + 1)th memory reference to ADDR
2140 where SIZE is the size in bytes of the memory reference. If ADDR
2141 is not modified by the memory reference then ADDR is returned. */
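/* For example (illustrative): with SIZE == 4 and N_REFS == 0,
   (pre_inc (reg)) evaluates to (plus (reg) (const_int 4)),
   (pre_dec (reg)) to (plus (reg) (const_int -4)), and (post_inc (reg))
   to plain (reg), matching the offset computation below. */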
2142
2143 static rtx
2144 addr_side_effect_eval (rtx addr, int size, int n_refs)
2145 {
2146 int offset = 0;
2147
2148 switch (GET_CODE (addr))
2149 {
2150 case PRE_INC:
2151 offset = (n_refs + 1) * size;
2152 break;
2153 case PRE_DEC:
2154 offset = -(n_refs + 1) * size;
2155 break;
2156 case POST_INC:
2157 offset = n_refs * size;
2158 break;
2159 case POST_DEC:
2160 offset = -n_refs * size;
2161 break;
2162
2163 default:
2164 return addr;
2165 }
2166
2167 if (offset)
2168 addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
2169 gen_int_mode (offset, GET_MODE (addr)));
2170 else
2171 addr = XEXP (addr, 0);
2172 addr = canon_rtx (addr);
2173
2174 return addr;
2175 }
2176
2177 /* Return TRUE if an object X sized at XSIZE bytes and another object
2178 Y sized at YSIZE bytes, starting C bytes after X, may overlap. If
2179 either size is zero, assume an overlap; otherwise use the
2180 absolute value of the sizes as the actual sizes. */
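/* A worked example (illustrative): with XSIZE == 4, YSIZE == 4 and
   C == 2, Y starts two bytes into X and abs (XSIZE) > C holds, so the
   objects overlap; with C == 4 they are exactly adjacent and do not. */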
2181
2182 static inline bool
2183 offset_overlap_p (HOST_WIDE_INT c, int xsize, int ysize)
2184 {
2185 return (xsize == 0 || ysize == 0
2186 || (c >= 0
2187 ? (abs (xsize) > c)
2188 : (abs (ysize) > -c)));
2189 }
2190
2191 /* Return one if X and Y (memory addresses) reference the
2192 same location in memory or if the references overlap.
2193 Return zero if they do not overlap; otherwise return
2194 minus one, in which case they still might reference the same location.
2195
2196 C is an offset accumulator. When
2197 C is nonzero, we are testing aliases between X and Y + C.
2198 XSIZE is the size in bytes of the X reference,
2199 similarly YSIZE is the size in bytes for Y.
2200 Expect that canon_rtx has already been called for X and Y.
2201
2202 If XSIZE or YSIZE is zero, we do not know the amount of memory being
2203 referenced (the reference was BLKmode), so make the most pessimistic
2204 assumptions.
2205
2206 If XSIZE or YSIZE is negative, we may access memory outside the object
2207 being referenced as a side effect. This can happen when using AND to
2208 align memory references, as is done on the Alpha.
2209
2210 It would be nice to notice that varying addresses cannot conflict with fp
2211 if no local variables have had their addresses taken, but that's too hard now.
2212
2213 ??? Contrary to the tree alias oracle this does not return
2214 one for X + non-constant and Y + non-constant when X and Y are equal.
2215 If that is fixed the TBAA hack for union type-punning can be removed. */
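/* A small illustrative trace: comparing a 4-byte reference at
   (plus (reg) (const_int 4)) with a 4-byte reference at
   (plus (reg) (const_int 12)) recognizes the equal base registers,
   folds the constant operands into C (giving C == 8), and
   offset_overlap_p (8, 4, 4) then reports no overlap. */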
2216
2217 static int
2218 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
2219 {
2220 if (GET_CODE (x) == VALUE)
2221 {
2222 if (REG_P (y))
2223 {
2224 struct elt_loc_list *l = NULL;
2225 if (CSELIB_VAL_PTR (x))
2226 for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
2227 l; l = l->next)
2228 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
2229 break;
2230 if (l)
2231 x = y;
2232 else
2233 x = get_addr (x);
2234 }
2235 /* Don't call get_addr if y is the same VALUE. */
2236 else if (x != y)
2237 x = get_addr (x);
2238 }
2239 if (GET_CODE (y) == VALUE)
2240 {
2241 if (REG_P (x))
2242 {
2243 struct elt_loc_list *l = NULL;
2244 if (CSELIB_VAL_PTR (y))
2245 for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
2246 l; l = l->next)
2247 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
2248 break;
2249 if (l)
2250 y = x;
2251 else
2252 y = get_addr (y);
2253 }
2254 /* Don't call get_addr if x is the same VALUE. */
2255 else if (y != x)
2256 y = get_addr (y);
2257 }
2258 if (GET_CODE (x) == HIGH)
2259 x = XEXP (x, 0);
2260 else if (GET_CODE (x) == LO_SUM)
2261 x = XEXP (x, 1);
2262 else
2263 x = addr_side_effect_eval (x, abs (xsize), 0);
2264 if (GET_CODE (y) == HIGH)
2265 y = XEXP (y, 0);
2266 else if (GET_CODE (y) == LO_SUM)
2267 y = XEXP (y, 1);
2268 else
2269 y = addr_side_effect_eval (y, abs (ysize), 0);
2270
2271 if (rtx_equal_for_memref_p (x, y))
2272 {
2273 return offset_overlap_p (c, xsize, ysize);
2274 }
2275
2276 /* This code used to check for conflicts involving stack references and
2277 globals but the base address alias code now handles these cases. */
2278
2279 if (GET_CODE (x) == PLUS)
2280 {
2281 /* The fact that X is canonicalized means that this
2282 PLUS rtx is canonicalized. */
2283 rtx x0 = XEXP (x, 0);
2284 rtx x1 = XEXP (x, 1);
2285
2286 /* However, VALUEs might end up in different positions even in
2287 canonical PLUSes. Comparing their addresses is enough. */
2288 if (x0 == y)
2289 return memrefs_conflict_p (xsize, x1, ysize, const0_rtx, c);
2290 else if (x1 == y)
2291 return memrefs_conflict_p (xsize, x0, ysize, const0_rtx, c);
2292
2293 if (GET_CODE (y) == PLUS)
2294 {
2295 /* The fact that Y is canonicalized means that this
2296 PLUS rtx is canonicalized. */
2297 rtx y0 = XEXP (y, 0);
2298 rtx y1 = XEXP (y, 1);
2299
2300 if (x0 == y1)
2301 return memrefs_conflict_p (xsize, x1, ysize, y0, c);
2302 if (x1 == y0)
2303 return memrefs_conflict_p (xsize, x0, ysize, y1, c);
2304
2305 if (rtx_equal_for_memref_p (x1, y1))
2306 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2307 if (rtx_equal_for_memref_p (x0, y0))
2308 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2309 if (CONST_INT_P (x1))
2310 {
2311 if (CONST_INT_P (y1))
2312 return memrefs_conflict_p (xsize, x0, ysize, y0,
2313 c - INTVAL (x1) + INTVAL (y1));
2314 else
2315 return memrefs_conflict_p (xsize, x0, ysize, y,
2316 c - INTVAL (x1));
2317 }
2318 else if (CONST_INT_P (y1))
2319 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2320
2321 return -1;
2322 }
2323 else if (CONST_INT_P (x1))
2324 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
2325 }
2326 else if (GET_CODE (y) == PLUS)
2327 {
2328 /* The fact that Y is canonicalized means that this
2329 PLUS rtx is canonicalized. */
2330 rtx y0 = XEXP (y, 0);
2331 rtx y1 = XEXP (y, 1);
2332
2333 if (x == y0)
2334 return memrefs_conflict_p (xsize, const0_rtx, ysize, y1, c);
2335 if (x == y1)
2336 return memrefs_conflict_p (xsize, const0_rtx, ysize, y0, c);
2337
2338 if (CONST_INT_P (y1))
2339 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2340 else
2341 return -1;
2342 }
2343
2344 if (GET_CODE (x) == GET_CODE (y))
2345 switch (GET_CODE (x))
2346 {
2347 case MULT:
2348 {
2349 /* Handle cases where we expect the second operands to be the
2350 same, and check only whether the first operands would conflict
2351 or not. */
2352 rtx x0, y0;
2353 rtx x1 = canon_rtx (XEXP (x, 1));
2354 rtx y1 = canon_rtx (XEXP (y, 1));
2355 if (! rtx_equal_for_memref_p (x1, y1))
2356 return -1;
2357 x0 = canon_rtx (XEXP (x, 0));
2358 y0 = canon_rtx (XEXP (y, 0));
2359 if (rtx_equal_for_memref_p (x0, y0))
2360 return offset_overlap_p (c, xsize, ysize);
2361
2362 /* Can't properly adjust our sizes. */
2363 if (!CONST_INT_P (x1))
2364 return -1;
2365 xsize /= INTVAL (x1);
2366 ysize /= INTVAL (x1);
2367 c /= INTVAL (x1);
2368 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2369 }
2370
2371 default:
2372 break;
2373 }
2374
2375 /* Deal with alignment ANDs by adjusting offset and size so as to
2376 cover the maximum range, without taking any previously known
2377 alignment into account. Make a size negative after such an
2378 adjustment, so that, if we end up with e.g. two SYMBOL_REFs, we
2379 assume a potential overlap, because they may end up in contiguous
2380 memory locations and the stricter-alignment access may span over
2381 part of both. */
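/* Illustrative numbers: for (and (reg) (const_int -8)) with XSIZE == 4,
   SC == -8, so XSIZE becomes -4 + (-8 + 1) == -11 and C grows by 7;
   the adjusted reference conservatively covers every byte the masked
   access could touch, starting up to 7 bytes below the original
   address. */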
2382 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2383 {
2384 HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2385 unsigned HOST_WIDE_INT uc = sc;
2386 if (sc < 0 && -uc == (uc & -uc))
2387 {
2388 if (xsize > 0)
2389 xsize = -xsize;
2390 if (xsize)
2391 xsize += sc + 1;
2392 c -= sc + 1;
2393 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2394 ysize, y, c);
2395 }
2396 }
2397 if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2398 {
2399 HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2400 unsigned HOST_WIDE_INT uc = sc;
2401 if (sc < 0 && -uc == (uc & -uc))
2402 {
2403 if (ysize > 0)
2404 ysize = -ysize;
2405 if (ysize)
2406 ysize += sc + 1;
2407 c += sc + 1;
2408 return memrefs_conflict_p (xsize, x,
2409 ysize, canon_rtx (XEXP (y, 0)), c);
2410 }
2411 }
2412
2413 if (CONSTANT_P (x))
2414 {
2415 if (CONST_INT_P (x) && CONST_INT_P (y))
2416 {
2417 c += (INTVAL (y) - INTVAL (x));
2418 return offset_overlap_p (c, xsize, ysize);
2419 }
2420
2421 if (GET_CODE (x) == CONST)
2422 {
2423 if (GET_CODE (y) == CONST)
2424 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2425 ysize, canon_rtx (XEXP (y, 0)), c);
2426 else
2427 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2428 ysize, y, c);
2429 }
2430 if (GET_CODE (y) == CONST)
2431 return memrefs_conflict_p (xsize, x, ysize,
2432 canon_rtx (XEXP (y, 0)), c);
2433
2434 /* Assume a potential overlap for symbolic addresses that went
2435 through alignment adjustments (i.e., that have negative
2436 sizes), because we can't know how far they are from each
2437 other. */
2438 if (CONSTANT_P (y))
2439 return (xsize < 0 || ysize < 0 || offset_overlap_p (c, xsize, ysize));
2440
2441 return -1;
2442 }
2443
2444 return -1;
2445 }
2446
2447 /* Functions to compute memory dependencies.
2448
2449 Since we process the insns in execution order, we can build tables
2450 to keep track of what registers are fixed (and not aliased), what registers
2451 are varying in known ways, and what registers are varying in unknown
2452 ways.
2453
2454 If both memory references are volatile, then there must always be a
2455 dependence between the two references, since their order cannot be
2456 changed. A volatile and a non-volatile reference can be interchanged,
2457 though.
2458
2459 We also must allow AND addresses, because they may generate accesses
2460 outside the object being referenced. This is used to generate aligned
2461 addresses from unaligned addresses, for instance, the alpha
2462 storeqi_unaligned pattern. */
2463
2464 /* Read dependence: X is read after read in MEM takes place. There can
2465 only be a dependence here if both reads are volatile, or if either is
2466 an explicit barrier. */
2467
2468 int
2469 read_dependence (const_rtx mem, const_rtx x)
2470 {
2471 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2472 return true;
2473 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2474 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2475 return true;
2476 return false;
2477 }
2478
2479 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */
2480
2481 static tree
2482 decl_for_component_ref (tree x)
2483 {
2484 do
2485 {
2486 x = TREE_OPERAND (x, 0);
2487 }
2488 while (x && TREE_CODE (x) == COMPONENT_REF);
2489
2490 return x && DECL_P (x) ? x : NULL_TREE;
2491 }
2492
2493 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2494 for the offset of the field reference. *KNOWN_P says whether the
2495 offset is known. */
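/* For example (illustrative): for a reference to s.inner.field, the loop
   below first adds the byte offset of 'field' within 'inner' and then the
   byte offset of 'inner' within 's'; if any field offset is not an
   INTEGER_CST, *KNOWN_P is cleared instead. */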
2496
2497 static void
2498 adjust_offset_for_component_ref (tree x, bool *known_p,
2499 HOST_WIDE_INT *offset)
2500 {
2501 if (!*known_p)
2502 return;
2503 do
2504 {
2505 tree xoffset = component_ref_field_offset (x);
2506 tree field = TREE_OPERAND (x, 1);
2507 if (TREE_CODE (xoffset) != INTEGER_CST)
2508 {
2509 *known_p = false;
2510 return;
2511 }
2512
2513 offset_int woffset
2514 = (wi::to_offset (xoffset)
2515 + wi::lrshift (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
2516 LOG2_BITS_PER_UNIT));
2517 if (!wi::fits_uhwi_p (woffset))
2518 {
2519 *known_p = false;
2520 return;
2521 }
2522 *offset += woffset.to_uhwi ();
2523
2524 x = TREE_OPERAND (x, 0);
2525 }
2526 while (x && TREE_CODE (x) == COMPONENT_REF);
2527 }
2528
2529 /* Return nonzero if we can determine the exprs corresponding to memrefs
2530 X and Y and they do not overlap.
2531 If LOOP_INVARIANT is set, skip offset-based disambiguation. */
2532
2533 int
2534 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2535 {
2536 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2537 rtx rtlx, rtly;
2538 rtx basex, basey;
2539 bool moffsetx_known_p, moffsety_known_p;
2540 HOST_WIDE_INT moffsetx = 0, moffsety = 0;
2541 HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey;
2542
2543 /* Unless both have exprs, we can't tell anything. */
2544 if (exprx == 0 || expry == 0)
2545 return 0;
2546
2547 /* For spill-slot accesses make sure we have valid offsets. */
2548 if ((exprx == get_spill_slot_decl (false)
2549 && ! MEM_OFFSET_KNOWN_P (x))
2550 || (expry == get_spill_slot_decl (false)
2551 && ! MEM_OFFSET_KNOWN_P (y)))
2552 return 0;
2553
2554 /* If the field reference test failed, look at the DECLs involved. */
2555 moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2556 if (moffsetx_known_p)
2557 moffsetx = MEM_OFFSET (x);
2558 if (TREE_CODE (exprx) == COMPONENT_REF)
2559 {
2560 tree t = decl_for_component_ref (exprx);
2561 if (! t)
2562 return 0;
2563 adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2564 exprx = t;
2565 }
2566
2567 moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2568 if (moffsety_known_p)
2569 moffsety = MEM_OFFSET (y);
2570 if (TREE_CODE (expry) == COMPONENT_REF)
2571 {
2572 tree t = decl_for_component_ref (expry);
2573 if (! t)
2574 return 0;
2575 adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2576 expry = t;
2577 }
2578
2579 if (! DECL_P (exprx) || ! DECL_P (expry))
2580 return 0;
2581
2582 /* If we refer to different gimple registers, or one gimple register
2583 and one non-gimple-register, we know they can't overlap. First,
2584 gimple registers don't have their addresses taken. Now, there
2585 could be more than one stack slot for (different versions of) the
2586 same gimple register, but we can presumably tell they don't
2587 overlap based on offsets from stack base addresses elsewhere.
2588 It's important that we don't proceed to DECL_RTL, because gimple
2589 registers may not pass DECL_RTL_SET_P, and make_decl_rtl won't be
2590 able to do anything about them since no SSA information will have
2591 remained to guide it. */
2592 if (is_gimple_reg (exprx) || is_gimple_reg (expry))
2593 return exprx != expry
2594 || (moffsetx_known_p && moffsety_known_p
2595 && MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y)
2596 && !offset_overlap_p (moffsety - moffsetx,
2597 MEM_SIZE (x), MEM_SIZE (y)));
2598
2599 /* With invalid code we can end up storing into the constant pool.
2600 Bail out to avoid ICEing when creating RTL for this.
2601 See gfortran.dg/lto/20091028-2_0.f90. */
2602 if (TREE_CODE (exprx) == CONST_DECL
2603 || TREE_CODE (expry) == CONST_DECL)
2604 return 1;
2605
2606 rtlx = DECL_RTL (exprx);
2607 rtly = DECL_RTL (expry);
2608
2609 /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2610 can't overlap unless they are the same because we never reuse that part
2611 of the stack frame used for locals for spilled pseudos. */
2612 if ((!MEM_P (rtlx) || !MEM_P (rtly))
2613 && ! rtx_equal_p (rtlx, rtly))
2614 return 1;
2615
2616 /* If we have MEMs referring to different address spaces (which can
2617 potentially overlap), we cannot easily tell from the addresses
2618 whether the references overlap. */
2619 if (MEM_P (rtlx) && MEM_P (rtly)
2620 && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2621 return 0;
2622
2623 /* Get the base and offsets of both decls. If either is a register, we
2624 know both are and are the same, so use that as the base. The only way
2625 we can avoid overlap is if we can deduce that they are nonoverlapping
2626 pieces of that decl, which is very rare. */
2627 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2628 if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2629 offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2630
2631 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2632 if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2633 offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2634
2635 /* If the bases are different, we know they do not overlap if both
2636 are constants or if one is a constant and the other a pointer into the
2637 stack frame. Otherwise a different base means we can't tell if they
2638 overlap or not. */
2639 if (! rtx_equal_p (basex, basey))
2640 return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2641 || (CONSTANT_P (basex) && REG_P (basey)
2642 && REGNO_PTR_FRAME_P (REGNO (basey)))
2643 || (CONSTANT_P (basey) && REG_P (basex)
2644 && REGNO_PTR_FRAME_P (REGNO (basex))));
2645
2646 /* Offset-based disambiguation is not appropriate for the loop-invariant case. */
2647 if (loop_invariant)
2648 return 0;
2649
2650 sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2651 : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2652 : -1);
2653 sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2654 : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2655 : -1);
2656
2657 /* If we have an offset for either memref, it can update the values computed
2658 above. */
2659 if (moffsetx_known_p)
2660 offsetx += moffsetx, sizex -= moffsetx;
2661 if (moffsety_known_p)
2662 offsety += moffsety, sizey -= moffsety;
2663
2664 /* If a memref has both a size and an offset, we can use the smaller size.
2665 We can't do this if the offset isn't known because we must view this
2666 memref as being anywhere inside the DECL's MEM. */
2667 if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2668 sizex = MEM_SIZE (x);
2669 if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2670 sizey = MEM_SIZE (y);
2671
2672 /* Put the values of the memref with the lower offset in X's values. */
2673 if (offsetx > offsety)
2674 {
2675 std::swap (offsetx, offsety);
2676 std::swap (sizex, sizey);
2677 }
2678
2679 /* If we don't know the size of the lower-offset value, we can't tell
2680 if they conflict. Otherwise, we do the test. */
2681 return sizex >= 0 && offsety >= offsetx + sizex;
2682 }
2683
2684 /* Helper for true_dependence and canon_true_dependence.
2685 Checks for true dependence: X is read after store in MEM takes place.
2686
2687 If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2688 NULL_RTX, and the canonical addresses of MEM and X are both computed
2689 here. If MEM_CANONICALIZED, then MEM must be already canonicalized.
2690
2691 If X_ADDR is non-NULL, it is used in preference to XEXP (x, 0).
2692
2693 Returns 1 if there is a true dependence, 0 otherwise. */
2694
2695 static int
2696 true_dependence_1 (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2697 const_rtx x, rtx x_addr, bool mem_canonicalized)
2698 {
2699 rtx true_mem_addr;
2700 rtx base;
2701 int ret;
2702
2703 gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
2704 : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
2705
2706 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2707 return 1;
2708
2709 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2710 This is used in epilogue deallocation functions, and in cselib. */
2711 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2712 return 1;
2713 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2714 return 1;
2715 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2716 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2717 return 1;
2718
2719 if (! x_addr)
2720 x_addr = XEXP (x, 0);
2721 x_addr = get_addr (x_addr);
2722
2723 if (! mem_addr)
2724 {
2725 mem_addr = XEXP (mem, 0);
2726 if (mem_mode == VOIDmode)
2727 mem_mode = GET_MODE (mem);
2728 }
2729 true_mem_addr = get_addr (mem_addr);
2730
2731 /* Read-only memory is by definition never modified, and therefore can't
2732 conflict with anything. However, don't assume anything when AND
2733 addresses are involved and leave it to the code below to determine
2734 dependence. We don't expect to find read-only set on MEM, but
2735 stupid user tricks can produce them, so don't die. */
2736 if (MEM_READONLY_P (x)
2737 && GET_CODE (x_addr) != AND
2738 && GET_CODE (true_mem_addr) != AND)
2739 return 0;
2740
2741 /* If we have MEMs referring to different address spaces (which can
2742 potentially overlap), we cannot easily tell from the addresses
2743 whether the references overlap. */
2744 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2745 return 1;
2746
2747 base = find_base_term (x_addr);
2748 if (base && (GET_CODE (base) == LABEL_REF
2749 || (GET_CODE (base) == SYMBOL_REF
2750 && CONSTANT_POOL_ADDRESS_P (base))))
2751 return 0;
2752
2753 rtx mem_base = find_base_term (true_mem_addr);
2754 if (! base_alias_check (x_addr, base, true_mem_addr, mem_base,
2755 GET_MODE (x), mem_mode))
2756 return 0;
2757
2758 x_addr = canon_rtx (x_addr);
2759 if (!mem_canonicalized)
2760 mem_addr = canon_rtx (true_mem_addr);
2761
2762 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2763 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2764 return ret;
2765
2766 if (mems_in_disjoint_alias_sets_p (x, mem))
2767 return 0;
2768
2769 if (nonoverlapping_memrefs_p (mem, x, false))
2770 return 0;
2771
2772 return rtx_refs_may_alias_p (x, mem, true);
2773 }
2774
2775 /* True dependence: X is read after store in MEM takes place. */
2776
2777 int
2778 true_dependence (const_rtx mem, machine_mode mem_mode, const_rtx x)
2779 {
2780 return true_dependence_1 (mem, mem_mode, NULL_RTX,
2781 x, NULL_RTX, /*mem_canonicalized=*/false);
2782 }
2783
2784 /* Canonical true dependence: X is read after store in MEM takes place.
2785 Variant of true_dependence which assumes MEM has already been
2786 canonicalized (hence we no longer do that here).
2787 The mem_addr argument has been added, since true_dependence_1 computed
2788 this value prior to canonicalizing. */
2789
2790 int
2791 canon_true_dependence (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2792 const_rtx x, rtx x_addr)
2793 {
2794 return true_dependence_1 (mem, mem_mode, mem_addr,
2795 x, x_addr, /*mem_canonicalized=*/true);
2796 }
2797
2798 /* Returns nonzero if a write to X might alias a previous read from
2799 (or, if WRITEP is true, a write to) MEM.
2800 If X_CANONICALIZED is true, then X_ADDR is the canonicalized address of X,
2801 and X_MODE the mode for that access.
2802 If MEM_CANONICALIZED is true, MEM is canonicalized. */
2803
2804 static int
2805 write_dependence_p (const_rtx mem,
2806 const_rtx x, machine_mode x_mode, rtx x_addr,
2807 bool mem_canonicalized, bool x_canonicalized, bool writep)
2808 {
2809 rtx mem_addr;
2810 rtx true_mem_addr, true_x_addr;
2811 rtx base;
2812 int ret;
2813
2814 gcc_checking_assert (x_canonicalized
2815 ? (x_addr != NULL_RTX && x_mode != VOIDmode)
2816 : (x_addr == NULL_RTX && x_mode == VOIDmode));
2817
2818 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2819 return 1;
2820
2821 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2822 This is used in epilogue deallocation functions. */
2823 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2824 return 1;
2825 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2826 return 1;
2827 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2828 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2829 return 1;
2830
2831 if (!x_addr)
2832 x_addr = XEXP (x, 0);
2833 true_x_addr = get_addr (x_addr);
2834
2835 mem_addr = XEXP (mem, 0);
2836 true_mem_addr = get_addr (mem_addr);
2837
2838 /* A read from read-only memory can't conflict with read-write memory.
2839 Don't assume anything when AND addresses are involved; leave it to
2840 the code below to determine dependence. */
2841 if (!writep
2842 && MEM_READONLY_P (mem)
2843 && GET_CODE (true_x_addr) != AND
2844 && GET_CODE (true_mem_addr) != AND)
2845 return 0;
2846
2847 /* If we have MEMs referring to different address spaces (which can
2848 potentially overlap), we cannot easily tell from the addresses
2849 whether the references overlap. */
2850 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2851 return 1;
2852
2853 base = find_base_term (true_mem_addr);
2854 if (! writep
2855 && base
2856 && (GET_CODE (base) == LABEL_REF
2857 || (GET_CODE (base) == SYMBOL_REF
2858 && CONSTANT_POOL_ADDRESS_P (base))))
2859 return 0;
2860
2861 rtx x_base = find_base_term (true_x_addr);
2862 if (! base_alias_check (true_x_addr, x_base, true_mem_addr, base,
2863 GET_MODE (x), GET_MODE (mem)))
2864 return 0;
2865
2866 if (!x_canonicalized)
2867 {
2868 x_addr = canon_rtx (true_x_addr);
2869 x_mode = GET_MODE (x);
2870 }
2871 if (!mem_canonicalized)
2872 mem_addr = canon_rtx (true_mem_addr);
2873
2874 if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2875 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1)
2876 return ret;
2877
2878 if (nonoverlapping_memrefs_p (x, mem, false))
2879 return 0;
2880
2881 return rtx_refs_may_alias_p (x, mem, false);
2882 }
2883
2884 /* Anti dependence: X is written after read in MEM takes place. */
2885
2886 int
2887 anti_dependence (const_rtx mem, const_rtx x)
2888 {
2889 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2890 /*mem_canonicalized=*/false,
2891 /*x_canonicalized*/false, /*writep=*/false);
2892 }
2893
2894 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
2895 Also, consider X in X_MODE (which might be from an enclosing
2896 STRICT_LOW_PART / ZERO_EXTRACT).
2897 If MEM_CANONICALIZED is true, MEM is canonicalized. */
2898
2899 int
2900 canon_anti_dependence (const_rtx mem, bool mem_canonicalized,
2901 const_rtx x, machine_mode x_mode, rtx x_addr)
2902 {
2903 return write_dependence_p (mem, x, x_mode, x_addr,
2904 mem_canonicalized, /*x_canonicalized=*/true,
2905 /*writep=*/false);
2906 }
2907
2908 /* Output dependence: X is written after store in MEM takes place. */
2909
2910 int
2911 output_dependence (const_rtx mem, const_rtx x)
2912 {
2913 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2914 /*mem_canonicalized=*/false,
2915 /*x_canonicalized*/false, /*writep=*/true);
2916 }
2917 \f
2918
2919
2920 /* Check whether X may be aliased with MEM. Don't do offset-based
2921 memory disambiguation & TBAA. */
2922 int
2923 may_alias_p (const_rtx mem, const_rtx x)
2924 {
2925 rtx x_addr, mem_addr;
2926
2927 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2928 return 1;
2929
2930 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2931 This is used in epilogue deallocation functions. */
2932 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2933 return 1;
2934 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2935 return 1;
2936 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2937 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2938 return 1;
2939
2940 x_addr = XEXP (x, 0);
2941 x_addr = get_addr (x_addr);
2942
2943 mem_addr = XEXP (mem, 0);
2944 mem_addr = get_addr (mem_addr);
2945
2946 /* Read-only memory is by definition never modified, and therefore can't
2947 conflict with anything. However, don't assume anything when AND
2948 addresses are involved and leave it to the code below to determine
2949 dependence. We don't expect to find read-only set on MEM, but
2950 stupid user tricks can produce them, so don't die. */
2951 if (MEM_READONLY_P (x)
2952 && GET_CODE (x_addr) != AND
2953 && GET_CODE (mem_addr) != AND)
2954 return 0;
2955
2956 /* If we have MEMs referring to different address spaces (which can
2957 potentially overlap), we cannot easily tell from the addresses
2958 whether the references overlap. */
2959 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2960 return 1;
2961
2962 rtx x_base = find_base_term (x_addr);
2963 rtx mem_base = find_base_term (mem_addr);
2964 if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
2965 GET_MODE (x), GET_MODE (mem_addr)))
2966 return 0;
2967
2968 if (nonoverlapping_memrefs_p (mem, x, true))
2969 return 0;
2970
2971 /* TBAA is not valid for the loop-invariant case. */
2972 return rtx_refs_may_alias_p (x, mem, false);
2973 }
2974
2975 void
2976 init_alias_target (void)
2977 {
2978 int i;
2979
2980 if (!arg_base_value)
2981 arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
2982
2983 memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2984
2985 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2986 /* Check whether this register can hold an incoming pointer
2987 argument. FUNCTION_ARG_REGNO_P tests outgoing register
2988 numbers, so translate if necessary due to register windows. */
2989 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2990 && HARD_REGNO_MODE_OK (i, Pmode))
2991 static_reg_base_value[i] = arg_base_value;
2992
2993 static_reg_base_value[STACK_POINTER_REGNUM]
2994 = unique_base_value (UNIQUE_BASE_VALUE_SP);
2995 static_reg_base_value[ARG_POINTER_REGNUM]
2996 = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
2997 static_reg_base_value[FRAME_POINTER_REGNUM]
2998 = unique_base_value (UNIQUE_BASE_VALUE_FP);
2999 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
3000 static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
3001 = unique_base_value (UNIQUE_BASE_VALUE_HFP);
3002 }
3003
3004 /* Set MEMORY_MODIFIED when X modifies DATA (that is assumed
3005 to be a memory reference). */
3006 static bool memory_modified;
3007 static void
3008 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3009 {
3010 if (MEM_P (x))
3011 {
3012 if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
3013 memory_modified = true;
3014 }
3015 }
3016
3017
3018 /* Return true when INSN possibly modifies the memory contents of MEM
3019 (i.e. the contents at MEM's address may be modified). */
3020 bool
3021 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
3022 {
3023 if (!INSN_P (insn))
3024 return false;
3025 memory_modified = false;
3026 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
3027 return memory_modified;
3028 }
3029
3030 /* Return TRUE if the destination of a set is an rtx identical to
3031 ITEM. */
3032 static inline bool
3033 set_dest_equal_p (const_rtx set, const_rtx item)
3034 {
3035 rtx dest = SET_DEST (set);
3036 return rtx_equal_p (dest, item);
3037 }
3038
3039 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
3040 array. */
3041
3042 void
3043 init_alias_analysis (void)
3044 {
3045 unsigned int maxreg = max_reg_num ();
3046 int changed, pass;
3047 int i;
3048 unsigned int ui;
3049 rtx_insn *insn;
3050 rtx val;
3051 int rpo_cnt;
3052 int *rpo;
3053
3054 timevar_push (TV_ALIAS_ANALYSIS);
3055
3056 vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
3057 reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
3058 bitmap_clear (reg_known_equiv_p);
3059
3060 /* If we have memory allocated from the previous run, use it. */
3061 if (old_reg_base_value)
3062 reg_base_value = old_reg_base_value;
3063
3064 if (reg_base_value)
3065 reg_base_value->truncate (0);
3066
3067 vec_safe_grow_cleared (reg_base_value, maxreg);
3068
3069 new_reg_base_value = XNEWVEC (rtx, maxreg);
3070 reg_seen = sbitmap_alloc (maxreg);
3071
3072 /* The basic idea is that each pass through this loop will use the
3073 "constant" information from the previous pass to propagate alias
3074 information through another level of assignments.
3075
3076 The propagation is done on the CFG in reverse post-order, to propagate
3077 things forward as far as possible in each iteration.
3078
3079 This could get expensive if the assignment chains are long. Maybe
3080 we should throttle the number of iterations, possibly based on
3081 the optimization level or flag_expensive_optimizations.
3082
3083 We could propagate more information in the first pass by making use
3084 of DF_REG_DEF_COUNT to determine immediately that the alias information
3085 for a pseudo is "constant".
3086
3087 A program with an uninitialized variable can cause an infinite loop
3088 here. Instead of doing a full dataflow analysis to detect such problems
3089 we just cap the number of iterations for the loop.
3090
3091 The state of the arrays for the set chain in question does not matter
3092 since the program has undefined behavior. */
3093
3094 rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
3095 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3096
3097 /* The prologue/epilogue insns are not threaded onto the
3098 insn chain until after reload has completed. Thus,
3099 there is no sense wasting time checking if INSN is in
3100 the prologue/epilogue until after reload has completed. */
3101 bool could_be_prologue_epilogue = ((targetm.have_prologue ()
3102 || targetm.have_epilogue ())
3103 && reload_completed);
3104
3105 pass = 0;
3106 do
3107 {
3108 /* Assume nothing will change this iteration of the loop. */
3109 changed = 0;
3110
3111 /* We want to assign the same IDs each iteration of this loop, so
3112 start counting from one each iteration of the loop. */
3113 unique_id = 1;
3114
3115 /* We're at the start of the function each iteration through the
3116 loop, so we're copying arguments. */
3117 copying_arguments = true;
3118
3119 /* Wipe the potential alias information clean for this pass. */
3120 memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
3121
3122 /* Wipe the reg_seen array clean. */
3123 bitmap_clear (reg_seen);
3124
3125 /* Initialize the alias information for this pass. */
3126 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3127 if (static_reg_base_value[i])
3128 {
3129 new_reg_base_value[i] = static_reg_base_value[i];
3130 bitmap_set_bit (reg_seen, i);
3131 }
3132
3133 /* Walk the insns adding values to the new_reg_base_value array. */
3134 for (i = 0; i < rpo_cnt; i++)
3135 {
3136 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3137 FOR_BB_INSNS (bb, insn)
3138 {
3139 if (NONDEBUG_INSN_P (insn))
3140 {
3141 rtx note, set;
3142
3143 if (could_be_prologue_epilogue
3144 && prologue_epilogue_contains (insn))
3145 continue;
3146
3147 /* If this insn has a noalias note, process it. Otherwise,
3148 scan for sets. A simple set will have no side effects
3149 which could change the base value of any other register. */
3150
3151 if (GET_CODE (PATTERN (insn)) == SET
3152 && REG_NOTES (insn) != 0
3153 && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
3154 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
3155 else
3156 note_stores (PATTERN (insn), record_set, NULL);
3157
3158 set = single_set (insn);
3159
3160 if (set != 0
3161 && REG_P (SET_DEST (set))
3162 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3163 {
3164 unsigned int regno = REGNO (SET_DEST (set));
3165 rtx src = SET_SRC (set);
3166 rtx t;
3167
3168 note = find_reg_equal_equiv_note (insn);
3169 if (note && REG_NOTE_KIND (note) == REG_EQUAL
3170 && DF_REG_DEF_COUNT (regno) != 1)
3171 note = NULL_RTX;
3172
3173 if (note != NULL_RTX
3174 && GET_CODE (XEXP (note, 0)) != EXPR_LIST
3175 && ! rtx_varies_p (XEXP (note, 0), 1)
3176 && ! reg_overlap_mentioned_p (SET_DEST (set),
3177 XEXP (note, 0)))
3178 {
3179 set_reg_known_value (regno, XEXP (note, 0));
3180 set_reg_known_equiv_p (regno,
3181 REG_NOTE_KIND (note) == REG_EQUIV);
3182 }
3183 else if (DF_REG_DEF_COUNT (regno) == 1
3184 && GET_CODE (src) == PLUS
3185 && REG_P (XEXP (src, 0))
3186 && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
3187 && CONST_INT_P (XEXP (src, 1)))
3188 {
3189 t = plus_constant (GET_MODE (src), t,
3190 INTVAL (XEXP (src, 1)));
3191 set_reg_known_value (regno, t);
3192 set_reg_known_equiv_p (regno, false);
3193 }
3194 else if (DF_REG_DEF_COUNT (regno) == 1
3195 && ! rtx_varies_p (src, 1))
3196 {
3197 set_reg_known_value (regno, src);
3198 set_reg_known_equiv_p (regno, false);
3199 }
3200 }
3201 }
3202 else if (NOTE_P (insn)
3203 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
3204 copying_arguments = false;
3205 }
3206 }
3207
3208 /* Now propagate values from new_reg_base_value to reg_base_value. */
3209 gcc_assert (maxreg == (unsigned int) max_reg_num ());
3210
3211 for (ui = 0; ui < maxreg; ui++)
3212 {
3213 if (new_reg_base_value[ui]
3214 && new_reg_base_value[ui] != (*reg_base_value)[ui]
3215 && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
3216 {
3217 (*reg_base_value)[ui] = new_reg_base_value[ui];
3218 changed = 1;
3219 }
3220 }
3221 }
3222 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
3223 XDELETEVEC (rpo);
3224
3225 /* Fill in the remaining entries. */
3226 FOR_EACH_VEC_ELT (*reg_known_value, i, val)
3227 {
3228 int regno = i + FIRST_PSEUDO_REGISTER;
3229 if (! val)
3230 set_reg_known_value (regno, regno_reg_rtx[regno]);
3231 }
3232
3233 /* Clean up. */
3234 free (new_reg_base_value);
3235 new_reg_base_value = 0;
3236 sbitmap_free (reg_seen);
3237 reg_seen = 0;
3238 timevar_pop (TV_ALIAS_ANALYSIS);
3239 }
3240
3241 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3242 Special API for var-tracking pass purposes. */
3243
3244 void
3245 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3246 {
3247 (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3248 }
3249
3250 void
3251 end_alias_analysis (void)
3252 {
3253 old_reg_base_value = reg_base_value;
3254 vec_free (reg_known_value);
3255 sbitmap_free (reg_known_equiv_p);
3256 }
3257
3258 void
3259 dump_alias_stats_in_alias_c (FILE *s)
3260 {
3261 fprintf (s, " TBAA oracle: %llu disambiguations %llu queries\n"
3262 " %llu are in alias set 0\n"
3263 " %llu queries asked about the same object\n"
3264 " %llu queries asked about the same alias set\n"
3265 " %llu access volatile\n"
3266 " %llu are dependent in the DAG\n"
3267 " %llu are artificially in conflict with void *\n",
3268 alias_stats.num_disambiguated,
3269 alias_stats.num_alias_zero + alias_stats.num_same_alias_set
3270 + alias_stats.num_same_objects + alias_stats.num_volatile
3271 + alias_stats.num_dag + alias_stats.num_disambiguated
3272 + alias_stats.num_universal,
3273 alias_stats.num_alias_zero, alias_stats.num_same_alias_set,
3274 alias_stats.num_same_objects, alias_stats.num_volatile,
3275 alias_stats.num_dag, alias_stats.num_universal);
3276 }
3277 #include "gt-alias.h"