1 /* Alias analysis for GNU C
2 Copyright (C) 1997-2013 Free Software Foundation, Inc.
3 Contributed by John Carr (jfc@mit.edu).
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "function.h"
29 #include "alias.h"
30 #include "emit-rtl.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "basic-block.h"
34 #include "flags.h"
35 #include "diagnostic-core.h"
36 #include "cselib.h"
37 #include "splay-tree.h"
38 #include "ggc.h"
39 #include "langhooks.h"
40 #include "timevar.h"
41 #include "dumpfile.h"
42 #include "target.h"
43 #include "df.h"
44 #include "tree-ssa-alias.h"
45 #include "pointer-set.h"
46 #include "gimple.h"
47 #include "gimple-ssa.h"
48
49 /* The aliasing API provided here solves related but different problems:
50
 51 	 Say there exists (in C)
52
53 struct X {
54 struct Y y1;
55 struct Z z2;
56 } x1, *px1, *px2;
57
58 struct Y y2, *py;
59 struct Z z2, *pz;
60
61
62 py = &x1.y1;
63 px2 = &x1;
64
65 Consider the four questions:
66
67 Can a store to x1 interfere with px2->y1?
68 Can a store to x1 interfere with px2->z2?
 69 	 Can a store to x1 change the value pointed to by py?
 70 	 Can a store to x1 change the value pointed to by pz?
71
 72 	 The answers to these questions are yes, yes, yes, and maybe.
73
74 The first two questions can be answered with a simple examination
75 of the type system. If structure X contains a field of type Y then
76 a store through a pointer to an X can overwrite any field that is
77 contained (recursively) in an X (unless we know that px1 != px2).
78
79 The last two questions can be solved in the same way as the first
 80 	 two questions, but this is too conservative. The observation is
 81 	 that in some cases we can know which (if any) fields are addressed
 82 	 and whether those addresses are used in bad ways. This analysis may be
83 language specific. In C, arbitrary operations may be applied to
84 pointers. However, there is some indication that this may be too
85 conservative for some C++ types.
86
87 The pass ipa-type-escape does this analysis for the types whose
88 instances do not escape across the compilation boundary.
89
 90 	 Historically in GCC, these two problems were combined and a single
 91 	 data structure was used to represent the solution to these
 92 	 problems. We now have two similar but different data structures.
 93 	 The data structure used to solve the last two questions is similar
 94 	 to the first, but does not contain the fields whose addresses are
 95 	 never taken. For types that do escape the compilation unit, the
 96 	 data structures will have identical information.
97 */
98
99 /* The alias sets assigned to MEMs assist the back-end in determining
100 which MEMs can alias which other MEMs. In general, two MEMs in
101 different alias sets cannot alias each other, with one important
102 exception. Consider something like:
103
104 struct S { int i; double d; };
105
106 a store to an `S' can alias something of either type `int' or type
107 `double'. (However, a store to an `int' cannot alias a `double'
108 and vice versa.) We indicate this via a tree structure that looks
109 like:
110 struct S
111 / \
112 / \
113 |/_ _\|
114 int double
115
116 (The arrows are directed and point downwards.)
117 In this situation we say the alias set for `struct S' is the
118 `superset' and that those for `int' and `double' are `subsets'.
119
120 To see whether two alias sets can point to the same memory, we must
121 see if either alias set is a subset of the other. We need not trace
122 past immediate descendants, however, since we propagate all
123 grandchildren up one level.
124
125 Alias set zero is implicitly a superset of all other alias sets.
 126 	 However, there is no actual entry for alias set zero. It is an
127 error to attempt to explicitly construct a subset of zero. */
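
/* For illustration only (user-level C, not part of GCC itself): with

     struct S { int i; double d; };

     int
     first_int (struct S *s, int *p)
     {
       s->i = 1;
       *p = 2;
       return s->i;
     }

   the store through `p' uses the alias set of `int', which is a
   subset of that of `struct S', so it may clobber `s->i' and the
   returned value must be reloaded. A store through a `double *'
   could not alias `s->i', since `int' and `double' lie in disjoint
   alias sets. */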
128
129 struct GTY(()) alias_set_entry_d {
130 /* The alias set number, as stored in MEM_ALIAS_SET. */
131 alias_set_type alias_set;
132
 133 	 /* Nonzero if the set would have a child of zero: this effectively
 134 	 makes this alias set the same as alias set zero. */
135 int has_zero_child;
136
137 /* The children of the alias set. These are not just the immediate
138 children, but, in fact, all descendants. So, if we have:
139
140 struct T { struct S s; float f; }
141
142 continuing our example above, the children here will be all of
143 `int', `double', `float', and `struct S'. */
144 splay_tree GTY((param1_is (int), param2_is (int))) children;
145 };
146 typedef struct alias_set_entry_d *alias_set_entry;
147
148 static int rtx_equal_for_memref_p (const_rtx, const_rtx);
149 static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
150 static void record_set (rtx, const_rtx, void *);
151 static int base_alias_check (rtx, rtx, rtx, rtx, enum machine_mode,
152 enum machine_mode);
153 static rtx find_base_value (rtx);
154 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
155 static int insert_subset_children (splay_tree_node, void*);
156 static alias_set_entry get_alias_set_entry (alias_set_type);
157 static bool nonoverlapping_component_refs_p (const_rtx, const_rtx);
158 static tree decl_for_component_ref (tree);
159 static int write_dependence_p (const_rtx,
160 const_rtx, enum machine_mode, rtx,
161 bool, bool, bool);
162
163 static void memory_modified_1 (rtx, const_rtx, void *);
164
165 /* Set up all info needed to perform alias analysis on memory references. */
166
167 /* Returns the size in bytes of the mode of X. */
168 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
169
170 /* Cap the number of passes we make over the insns propagating alias
171 information through set chains.
172 ??? 10 is a completely arbitrary choice. This should be based on the
173 maximum loop depth in the CFG, but we do not have this information
174 available (even if current_loops _is_ available). */
175 #define MAX_ALIAS_LOOP_PASSES 10
176
177 /* reg_base_value[N] gives an address to which register N is related.
 178 	 If all sets after the first add to or subtract from the current value
179 or otherwise modify it so it does not point to a different top level
180 object, reg_base_value[N] is equal to the address part of the source
181 of the first set.
182
183 A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF. ADDRESS
184 expressions represent three types of base:
185
186 1. incoming arguments. There is just one ADDRESS to represent all
187 arguments, since we do not know at this level whether accesses
188 based on different arguments can alias. The ADDRESS has id 0.
189
190 2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
191 (if distinct from frame_pointer_rtx) and arg_pointer_rtx.
192 Each of these rtxes has a separate ADDRESS associated with it,
193 each with a negative id.
194
195 GCC is (and is required to be) precise in which register it
196 chooses to access a particular region of stack. We can therefore
197 assume that accesses based on one of these rtxes do not alias
198 accesses based on another of these rtxes.
199
200 3. bases that are derived from malloc()ed memory (REG_NOALIAS).
201 Each such piece of memory has a separate ADDRESS associated
202 with it, each with an id greater than 0.
203
204 Accesses based on one ADDRESS do not alias accesses based on other
205 ADDRESSes. Accesses based on ADDRESSes in groups (2) and (3) do not
206 alias globals either; the ADDRESSes have Pmode to indicate this.
207 The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
208 indicate this. */
209
210 static GTY(()) vec<rtx, va_gc> *reg_base_value;
211 static rtx *new_reg_base_value;
212
213 /* The single VOIDmode ADDRESS that represents all argument bases.
214 It has id 0. */
215 static GTY(()) rtx arg_base_value;
216
217 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS. */
218 static int unique_id;
219
 220 	 /* We preserve a copy of the old array around to reduce the amount of
 221 	 garbage produced. About 8% of the garbage produced was attributed
 222 	 to this array. */
223 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;
224
225 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
226 registers. */
227 #define UNIQUE_BASE_VALUE_SP -1
228 #define UNIQUE_BASE_VALUE_ARGP -2
229 #define UNIQUE_BASE_VALUE_FP -3
230 #define UNIQUE_BASE_VALUE_HFP -4
231
232 #define static_reg_base_value \
233 (this_target_rtl->x_static_reg_base_value)
234
235 #define REG_BASE_VALUE(X) \
236 (REGNO (X) < vec_safe_length (reg_base_value) \
237 ? (*reg_base_value)[REGNO (X)] : 0)
238
239 /* Vector indexed by N giving the initial (unchanging) value known for
240 pseudo-register N. This vector is initialized in init_alias_analysis,
241 and does not change until end_alias_analysis is called. */
242 static GTY(()) vec<rtx, va_gc> *reg_known_value;
243
244 /* Vector recording for each reg_known_value whether it is due to a
245 REG_EQUIV note. Future passes (viz., reload) may replace the
246 pseudo with the equivalent expression and so we account for the
247 dependences that would be introduced if that happens.
248
249 The REG_EQUIV notes created in assign_parms may mention the arg
250 pointer, and there are explicit insns in the RTL that modify the
251 arg pointer. Thus we must ensure that such insns don't get
252 scheduled across each other because that would invalidate the
253 REG_EQUIV notes. One could argue that the REG_EQUIV notes are
254 wrong, but solving the problem in the scheduler will likely give
255 better code, so we do it here. */
256 static sbitmap reg_known_equiv_p;
257
258 /* True when scanning insns from the start of the rtl to the
259 NOTE_INSN_FUNCTION_BEG note. */
260 static bool copying_arguments;
261
262
263 /* The splay-tree used to store the various alias set entries. */
264 static GTY (()) vec<alias_set_entry, va_gc> *alias_sets;
265 \f
266 /* Build a decomposed reference object for querying the alias-oracle
267 from the MEM rtx and store it in *REF.
268 Returns false if MEM is not suitable for the alias-oracle. */
269
270 static bool
271 ao_ref_from_mem (ao_ref *ref, const_rtx mem)
272 {
273 tree expr = MEM_EXPR (mem);
274 tree base;
275
276 if (!expr)
277 return false;
278
279 ao_ref_init (ref, expr);
280
281 /* Get the base of the reference and see if we have to reject or
282 adjust it. */
283 base = ao_ref_base (ref);
284 if (base == NULL_TREE)
285 return false;
286
287 /* The tree oracle doesn't like bases that are neither decls
288 nor indirect references of SSA names. */
289 if (!(DECL_P (base)
290 || (TREE_CODE (base) == MEM_REF
291 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
292 || (TREE_CODE (base) == TARGET_MEM_REF
293 && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
294 return false;
295
 296 	 /* If this is a reference based on a partitioned decl, replace the
297 base with a MEM_REF of the pointer representative we
298 created during stack slot partitioning. */
299 if (TREE_CODE (base) == VAR_DECL
300 && ! is_global_var (base)
301 && cfun->gimple_df->decls_to_pointers != NULL)
302 {
303 void *namep;
304 namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
305 if (namep)
306 ref->base = build_simple_mem_ref (*(tree *)namep);
307 }
308
309 ref->ref_alias_set = MEM_ALIAS_SET (mem);
310
 311 	 /* If MEM_OFFSET or MEM_SIZE are unknown, what we got from MEM_EXPR
312 is conservative, so trust it. */
313 if (!MEM_OFFSET_KNOWN_P (mem)
314 || !MEM_SIZE_KNOWN_P (mem))
315 return true;
316
317 /* If the base decl is a parameter we can have negative MEM_OFFSET in
 318 	 case of promoted subregs on big-endian targets. Trust the MEM_EXPR
319 here. */
320 if (MEM_OFFSET (mem) < 0
321 && (MEM_SIZE (mem) + MEM_OFFSET (mem)) * BITS_PER_UNIT == ref->size)
322 return true;
323
324 /* Otherwise continue and refine size and offset we got from analyzing
325 MEM_EXPR by using MEM_SIZE and MEM_OFFSET. */
326
327 ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
328 ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;
329
330 /* The MEM may extend into adjacent fields, so adjust max_size if
331 necessary. */
332 if (ref->max_size != -1
333 && ref->size > ref->max_size)
334 ref->max_size = ref->size;
335
336 /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
 337 	 the MEM_EXPR, punt. This happens a lot for STRICT_ALIGNMENT targets. */
338 if (MEM_EXPR (mem) != get_spill_slot_decl (false)
339 && (ref->offset < 0
340 || (DECL_P (ref->base)
341 && (!host_integerp (DECL_SIZE (ref->base), 1)
342 || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base)))
343 < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
344 return false;
345
346 return true;
347 }
348
349 /* Query the alias-oracle on whether the two memory rtx X and MEM may
350 alias. If TBAA_P is set also apply TBAA. Returns true if the
351 two rtxen may alias, false otherwise. */
352
353 static bool
354 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
355 {
356 ao_ref ref1, ref2;
357
358 if (!ao_ref_from_mem (&ref1, x)
359 || !ao_ref_from_mem (&ref2, mem))
360 return true;
361
362 return refs_may_alias_p_1 (&ref1, &ref2,
363 tbaa_p
364 && MEM_ALIAS_SET (x) != 0
365 && MEM_ALIAS_SET (mem) != 0);
366 }
367
368 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
369 such an entry, or NULL otherwise. */
370
371 static inline alias_set_entry
372 get_alias_set_entry (alias_set_type alias_set)
373 {
374 return (*alias_sets)[alias_set];
375 }
376
377 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
378 the two MEMs cannot alias each other. */
379
380 static inline int
381 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
382 {
383 /* Perform a basic sanity check. Namely, that there are no alias sets
384 if we're not using strict aliasing. This helps to catch bugs
385 whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
386 where a MEM is allocated in some way other than by the use of
387 gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared. If we begin to
388 use alias sets to indicate that spilled registers cannot alias each
389 other, we might need to remove this check. */
390 gcc_assert (flag_strict_aliasing
391 || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));
392
393 return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
394 }
395
396 /* Insert the NODE into the splay tree given by DATA. Used by
397 record_alias_subset via splay_tree_foreach. */
398
399 static int
400 insert_subset_children (splay_tree_node node, void *data)
401 {
402 splay_tree_insert ((splay_tree) data, node->key, node->value);
403
404 return 0;
405 }
406
407 /* Return true if the first alias set is a subset of the second. */
408
409 bool
410 alias_set_subset_of (alias_set_type set1, alias_set_type set2)
411 {
412 alias_set_entry ase;
413
414 /* Everything is a subset of the "aliases everything" set. */
415 if (set2 == 0)
416 return true;
417
418 /* Otherwise, check if set1 is a subset of set2. */
419 ase = get_alias_set_entry (set2);
420 if (ase != 0
421 && (ase->has_zero_child
422 || splay_tree_lookup (ase->children,
423 (splay_tree_key) set1)))
424 return true;
425 return false;
426 }
427
428 /* Return 1 if the two specified alias sets may conflict. */
429
430 int
431 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
432 {
433 alias_set_entry ase;
434
435 /* The easy case. */
436 if (alias_sets_must_conflict_p (set1, set2))
437 return 1;
438
439 /* See if the first alias set is a subset of the second. */
440 ase = get_alias_set_entry (set1);
441 if (ase != 0
442 && (ase->has_zero_child
443 || splay_tree_lookup (ase->children,
444 (splay_tree_key) set2)))
445 return 1;
446
447 /* Now do the same, but with the alias sets reversed. */
448 ase = get_alias_set_entry (set2);
449 if (ase != 0
450 && (ase->has_zero_child
451 || splay_tree_lookup (ase->children,
452 (splay_tree_key) set1)))
453 return 1;
454
455 /* The two alias sets are distinct and neither one is the
456 child of the other. Therefore, they cannot conflict. */
457 return 0;
458 }
459
460 /* Return 1 if the two specified alias sets will always conflict. */
461
462 int
463 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
464 {
465 if (set1 == 0 || set2 == 0 || set1 == set2)
466 return 1;
467
468 return 0;
469 }
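
/* For illustration (hypothetical set numbers): if set 2 is `struct S'
   from the comment at the top of this file, with children
   {3 = `int', 4 = `double'}, then alias_sets_conflict_p (2, 3)
   returns 1 because of the subset relation, while
   alias_sets_must_conflict_p (2, 3) returns 0 since the sets are
   distinct and nonzero; alias_sets_must_conflict_p (0, N) returns 1
   for any N. */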
470
471 /* Return 1 if any MEM object of type T1 will always conflict (using the
472 dependency routines in this file) with any MEM object of type T2.
473 This is used when allocating temporary storage. If T1 and/or T2 are
474 NULL_TREE, it means we know nothing about the storage. */
475
476 int
477 objects_must_conflict_p (tree t1, tree t2)
478 {
479 alias_set_type set1, set2;
480
481 /* If neither has a type specified, we don't know if they'll conflict
482 because we may be using them to store objects of various types, for
483 example the argument and local variables areas of inlined functions. */
484 if (t1 == 0 && t2 == 0)
485 return 0;
486
487 /* If they are the same type, they must conflict. */
488 if (t1 == t2
489 /* Likewise if both are volatile. */
490 || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
491 return 1;
492
493 set1 = t1 ? get_alias_set (t1) : 0;
494 set2 = t2 ? get_alias_set (t2) : 0;
495
496 /* We can't use alias_sets_conflict_p because we must make sure
497 that every subtype of t1 will conflict with every subtype of
498 t2 for which a pair of subobjects of these respective subtypes
499 overlaps on the stack. */
500 return alias_sets_must_conflict_p (set1, set2);
501 }
502 \f
 503 	 /* Return the outermost parent of a component present in the chain of
504 component references handled by get_inner_reference in T with the
505 following property:
506 - the component is non-addressable, or
507 - the parent has alias set zero,
508 or NULL_TREE if no such parent exists. In the former cases, the alias
509 set of this parent is the alias set that must be used for T itself. */
510
511 tree
512 component_uses_parent_alias_set_from (const_tree t)
513 {
514 const_tree found = NULL_TREE;
515
516 while (handled_component_p (t))
517 {
518 switch (TREE_CODE (t))
519 {
520 case COMPONENT_REF:
521 if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
522 found = t;
523 break;
524
525 case ARRAY_REF:
526 case ARRAY_RANGE_REF:
527 if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
528 found = t;
529 break;
530
531 case REALPART_EXPR:
532 case IMAGPART_EXPR:
533 break;
534
535 case BIT_FIELD_REF:
536 case VIEW_CONVERT_EXPR:
537 /* Bitfields and casts are never addressable. */
538 found = t;
539 break;
540
541 default:
542 gcc_unreachable ();
543 }
544
545 if (get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) == 0)
546 found = t;
547
548 t = TREE_OPERAND (t, 0);
549 }
550
551 if (found)
552 return TREE_OPERAND (found, 0);
553
554 return NULL_TREE;
555 }
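
/* For illustration (hypothetical trees): if T is the COMPONENT_REF
   `b.x.y' and the FIELD_DECL for `y' is marked DECL_NONADDRESSABLE_P,
   the loop above records that COMPONENT_REF as FOUND and the function
   returns the parent reference `b.x', whose type's alias set must
   then be used for the whole access. If no component in the chain is
   non-addressable and no parent has alias set zero, NULL_TREE is
   returned and T's own type governs. */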
556
557
 558 	 /* Return whether the pointer-type T, as used for aliasing purposes, may
 559 	 access everything, in which case the reference has to be assigned
560 alias-set zero. */
561
562 static bool
563 ref_all_alias_ptr_type_p (const_tree t)
564 {
565 return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
566 || TYPE_REF_CAN_ALIAS_ALL (t));
567 }
568
569 /* Return the alias set for the memory pointed to by T, which may be
570 either a type or an expression. Return -1 if there is nothing
571 special about dereferencing T. */
572
573 static alias_set_type
574 get_deref_alias_set_1 (tree t)
575 {
576 /* All we care about is the type. */
577 if (! TYPE_P (t))
578 t = TREE_TYPE (t);
579
580 /* If we have an INDIRECT_REF via a void pointer, we don't
581 know anything about what that might alias. Likewise if the
582 pointer is marked that way. */
583 if (ref_all_alias_ptr_type_p (t))
584 return 0;
585
586 return -1;
587 }
588
589 /* Return the alias set for the memory pointed to by T, which may be
590 either a type or an expression. */
591
592 alias_set_type
593 get_deref_alias_set (tree t)
594 {
595 /* If we're not doing any alias analysis, just assume everything
596 aliases everything else. */
597 if (!flag_strict_aliasing)
598 return 0;
599
600 alias_set_type set = get_deref_alias_set_1 (t);
601
602 /* Fall back to the alias-set of the pointed-to type. */
603 if (set == -1)
604 {
605 if (! TYPE_P (t))
606 t = TREE_TYPE (t);
607 set = get_alias_set (TREE_TYPE (t));
608 }
609
610 return set;
611 }
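
/* For example (illustrative): given the C declarations `float *p' and
   `void *q', applying get_deref_alias_set to the type of `p' yields
   the alias set of `float', whereas applying it to the type of `q'
   yields alias set zero, since a dereference of a pointer to void may
   access anything. */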
612
613 /* Return the pointer-type relevant for TBAA purposes from the
 614 	 memory reference tree *T, or NULL_TREE, in which case *T is
615 adjusted to point to the outermost component reference that
616 can be used for assigning an alias set. */
617
618 static tree
619 reference_alias_ptr_type_1 (tree *t)
620 {
621 tree inner;
622
623 /* Get the base object of the reference. */
624 inner = *t;
625 while (handled_component_p (inner))
626 {
 627 	 /* If there is a VIEW_CONVERT_EXPR in the chain, we cannot use
628 the type of any component references that wrap it to
629 determine the alias-set. */
630 if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
631 *t = TREE_OPERAND (inner, 0);
632 inner = TREE_OPERAND (inner, 0);
633 }
634
 635 	 /* Handle pointer dereferences here; they can override the
 636 	 alias-set. */
637 if (INDIRECT_REF_P (inner)
638 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0))))
639 return TREE_TYPE (TREE_OPERAND (inner, 0));
640 else if (TREE_CODE (inner) == TARGET_MEM_REF)
641 return TREE_TYPE (TMR_OFFSET (inner));
642 else if (TREE_CODE (inner) == MEM_REF
643 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1))))
644 return TREE_TYPE (TREE_OPERAND (inner, 1));
645
646 /* If the innermost reference is a MEM_REF that has a
 647 	 conversion embedded, treat it like a VIEW_CONVERT_EXPR above,
648 using the memory access type for determining the alias-set. */
649 if (TREE_CODE (inner) == MEM_REF
650 && (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
651 != TYPE_MAIN_VARIANT
652 (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1))))))
653 return TREE_TYPE (TREE_OPERAND (inner, 1));
654
655 /* Otherwise, pick up the outermost object that we could have
656 a pointer to. */
657 tree tem = component_uses_parent_alias_set_from (*t);
658 if (tem)
659 *t = tem;
660
661 return NULL_TREE;
662 }
663
664 /* Return the pointer-type relevant for TBAA purposes from the
665 gimple memory reference tree T. This is the type to be used for
666 the offset operand of MEM_REF or TARGET_MEM_REF replacements of T
667 and guarantees that get_alias_set will return the same alias
668 set for T and the replacement. */
669
670 tree
671 reference_alias_ptr_type (tree t)
672 {
673 tree ptype = reference_alias_ptr_type_1 (&t);
674 /* If there is a given pointer type for aliasing purposes, return it. */
675 if (ptype != NULL_TREE)
676 return ptype;
677
678 /* Otherwise build one from the outermost component reference we
679 may use. */
680 if (TREE_CODE (t) == MEM_REF
681 || TREE_CODE (t) == TARGET_MEM_REF)
682 return TREE_TYPE (TREE_OPERAND (t, 1));
683 else
684 return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t)));
685 }
686
687 /* Return whether the pointer-types T1 and T2 used to determine
688 two alias sets of two references will yield the same answer
689 from get_deref_alias_set. */
690
691 bool
692 alias_ptr_types_compatible_p (tree t1, tree t2)
693 {
694 if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
695 return true;
696
697 if (ref_all_alias_ptr_type_p (t1)
698 || ref_all_alias_ptr_type_p (t2))
699 return false;
700
701 return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
702 == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
703 }
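
/* For illustration: with `typedef int I;', the pointer types `int *'
   and `I *' are distinct trees but compare compatible here, because
   the main variants of their pointed-to types agree; references using
   either as their TBAA pointer type therefore receive the same alias
   set. A ref-all pointer type only compares compatible with its own
   main variant. */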
704
705 /* Return the alias set for T, which may be either a type or an
706 expression. Call language-specific routine for help, if needed. */
707
708 alias_set_type
709 get_alias_set (tree t)
710 {
711 alias_set_type set;
712
713 /* If we're not doing any alias analysis, just assume everything
714 aliases everything else. Also return 0 if this or its type is
715 an error. */
716 if (! flag_strict_aliasing || t == error_mark_node
717 || (! TYPE_P (t)
718 && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
719 return 0;
720
721 /* We can be passed either an expression or a type. This and the
722 language-specific routine may make mutually-recursive calls to each other
723 to figure out what to do. At each juncture, we see if this is a tree
724 that the language may need to handle specially. First handle things that
725 aren't types. */
726 if (! TYPE_P (t))
727 {
728 /* Give the language a chance to do something with this tree
729 before we look at it. */
730 STRIP_NOPS (t);
731 set = lang_hooks.get_alias_set (t);
732 if (set != -1)
733 return set;
734
735 /* Get the alias pointer-type to use or the outermost object
736 that we could have a pointer to. */
737 tree ptype = reference_alias_ptr_type_1 (&t);
738 if (ptype != NULL)
739 return get_deref_alias_set (ptype);
740
741 /* If we've already determined the alias set for a decl, just return
742 it. This is necessary for C++ anonymous unions, whose component
743 variables don't look like union members (boo!). */
744 if (TREE_CODE (t) == VAR_DECL
745 && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
746 return MEM_ALIAS_SET (DECL_RTL (t));
747
748 /* Now all we care about is the type. */
749 t = TREE_TYPE (t);
750 }
751
752 /* Variant qualifiers don't affect the alias set, so get the main
753 variant. */
754 t = TYPE_MAIN_VARIANT (t);
755
756 /* Always use the canonical type as well. If this is a type that
 757 	 requires structural comparisons to identify compatible types,
758 use alias set zero. */
759 if (TYPE_STRUCTURAL_EQUALITY_P (t))
760 {
761 /* Allow the language to specify another alias set for this
762 type. */
763 set = lang_hooks.get_alias_set (t);
764 if (set != -1)
765 return set;
766 return 0;
767 }
768
769 t = TYPE_CANONICAL (t);
770
771 /* The canonical type should not require structural equality checks. */
772 gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));
773
774 /* If this is a type with a known alias set, return it. */
775 if (TYPE_ALIAS_SET_KNOWN_P (t))
776 return TYPE_ALIAS_SET (t);
777
778 /* We don't want to set TYPE_ALIAS_SET for incomplete types. */
779 if (!COMPLETE_TYPE_P (t))
780 {
781 /* For arrays with unknown size the conservative answer is the
782 alias set of the element type. */
783 if (TREE_CODE (t) == ARRAY_TYPE)
784 return get_alias_set (TREE_TYPE (t));
785
786 /* But return zero as a conservative answer for incomplete types. */
787 return 0;
788 }
789
790 /* See if the language has special handling for this type. */
791 set = lang_hooks.get_alias_set (t);
792 if (set != -1)
793 return set;
794
795 /* There are no objects of FUNCTION_TYPE, so there's no point in
796 using up an alias set for them. (There are, of course, pointers
797 and references to functions, but that's different.) */
798 else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
799 set = 0;
800
801 /* Unless the language specifies otherwise, let vector types alias
802 their components. This avoids some nasty type punning issues in
 803 	 normal usage. And indeed it lets vectors be treated more like an
804 array slice. */
805 else if (TREE_CODE (t) == VECTOR_TYPE)
806 set = get_alias_set (TREE_TYPE (t));
807
808 /* Unless the language specifies otherwise, treat array types the
809 same as their components. This avoids the asymmetry we get
810 through recording the components. Consider accessing a
811 character(kind=1) through a reference to a character(kind=1)[1:1].
812 Or consider if we want to assign integer(kind=4)[0:D.1387] and
813 integer(kind=4)[4] the same alias set or not.
814 Just be pragmatic here and make sure the array and its element
815 type get the same alias set assigned. */
816 else if (TREE_CODE (t) == ARRAY_TYPE && !TYPE_NONALIASED_COMPONENT (t))
817 set = get_alias_set (TREE_TYPE (t));
818
819 /* From the former common C and C++ langhook implementation:
820
821 Unfortunately, there is no canonical form of a pointer type.
822 In particular, if we have `typedef int I', then `int *', and
823 `I *' are different types. So, we have to pick a canonical
824 representative. We do this below.
825
 826 	 Technically, this approach is actually more conservative than
827 it needs to be. In particular, `const int *' and `int *'
828 should be in different alias sets, according to the C and C++
829 standard, since their types are not the same, and so,
830 technically, an `int **' and `const int **' cannot point at
831 the same thing.
832
833 But, the standard is wrong. In particular, this code is
834 legal C++:
835
836 int *ip;
837 int **ipp = &ip;
838 const int* const* cipp = ipp;
839 And, it doesn't make sense for that to be legal unless you
840 can dereference IPP and CIPP. So, we ignore cv-qualifiers on
841 the pointed-to types. This issue has been reported to the
842 C++ committee.
843
844 In addition to the above canonicalization issue, with LTO
 845 	 we should also canonicalize `T (*)[]' to `T *', avoiding
846 alias issues with pointer-to element types and pointer-to
847 array types.
848
849 Likewise we need to deal with the situation of incomplete
850 pointed-to types and make `*(struct X **)&a' and
851 `*(struct X {} **)&a' alias. Otherwise we will have to
852 guarantee that all pointer-to incomplete type variants
853 will be replaced by pointer-to complete type variants if
854 they are available.
855
856 With LTO the convenient situation of using `void *' to
857 access and store any pointer type will also become
858 more apparent (and `void *' is just another pointer-to
859 incomplete type). Assigning alias-set zero to `void *'
 860 	 and all pointer-to incomplete types is not an appealing
861 solution. Assigning an effective alias-set zero only
862 affecting pointers might be - by recording proper subset
863 relationships of all pointer alias-sets.
864
865 Pointer-to function types are another grey area which
866 needs caution. Globbing them all into one alias-set
867 or the above effective zero set would work.
868
869 For now just assign the same alias-set to all pointers.
870 That's simple and avoids all the above problems. */
871 else if (POINTER_TYPE_P (t)
872 && t != ptr_type_node)
873 set = get_alias_set (ptr_type_node);
874
875 /* Otherwise make a new alias set for this type. */
876 else
877 {
878 /* Each canonical type gets its own alias set, so canonical types
879 shouldn't form a tree. It doesn't really matter for types
880 we handle specially above, so only check it where it possibly
881 would result in a bogus alias set. */
882 gcc_checking_assert (TYPE_CANONICAL (t) == t);
883
884 set = new_alias_set ();
885 }
886
887 TYPE_ALIAS_SET (t) = set;
888
889 /* If this is an aggregate type or a complex type, we must record any
890 component aliasing information. */
891 if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
892 record_component_aliases (t);
893
894 return set;
895 }
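
/* For illustration (user-level C, not part of GCC itself): because all
   pointer types are currently globbed to the alias set of `void *'
   (see above), stores through `int **' and `double **' are assumed to
   conflict:

     void
     f (int **pi, double **pd, int *i)
     {
       *pi = i;
       *pd = 0;    (assumed to possibly clobber *pi)
     }

   By contrast, `*i = 1' and `*pd = 0' use the distinct alias sets of
   `int' and of pointers, so under -fstrict-aliasing they are known
   not to conflict. */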
896
897 /* Return a brand-new alias set. */
898
899 alias_set_type
900 new_alias_set (void)
901 {
902 if (flag_strict_aliasing)
903 {
904 if (alias_sets == 0)
905 vec_safe_push (alias_sets, (alias_set_entry) 0);
906 vec_safe_push (alias_sets, (alias_set_entry) 0);
907 return alias_sets->length () - 1;
908 }
909 else
910 return 0;
911 }
912
913 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
914 not everything that aliases SUPERSET also aliases SUBSET. For example,
915 in C, a store to an `int' can alias a load of a structure containing an
916 `int', and vice versa. But it can't alias a load of a 'double' member
917 of the same structure. Here, the structure would be the SUPERSET and
918 `int' the SUBSET. This relationship is also described in the comment at
919 the beginning of this file.
920
921 This function should be called only once per SUPERSET/SUBSET pair.
922
923 It is illegal for SUPERSET to be zero; everything is implicitly a
924 subset of alias set zero. */
925
926 void
927 record_alias_subset (alias_set_type superset, alias_set_type subset)
928 {
929 alias_set_entry superset_entry;
930 alias_set_entry subset_entry;
931
932 /* It is possible in complex type situations for both sets to be the same,
933 in which case we can ignore this operation. */
934 if (superset == subset)
935 return;
936
937 gcc_assert (superset);
938
939 superset_entry = get_alias_set_entry (superset);
940 if (superset_entry == 0)
941 {
942 /* Create an entry for the SUPERSET, so that we have a place to
943 attach the SUBSET. */
944 superset_entry = ggc_alloc_cleared_alias_set_entry_d ();
945 superset_entry->alias_set = superset;
946 superset_entry->children
947 = splay_tree_new_ggc (splay_tree_compare_ints,
948 ggc_alloc_splay_tree_scalar_scalar_splay_tree_s,
949 ggc_alloc_splay_tree_scalar_scalar_splay_tree_node_s);
950 superset_entry->has_zero_child = 0;
951 (*alias_sets)[superset] = superset_entry;
952 }
953
954 if (subset == 0)
955 superset_entry->has_zero_child = 1;
956 else
957 {
958 subset_entry = get_alias_set_entry (subset);
959 /* If there is an entry for the subset, enter all of its children
960 (if they are not already present) as children of the SUPERSET. */
961 if (subset_entry)
962 {
963 if (subset_entry->has_zero_child)
964 superset_entry->has_zero_child = 1;
965
966 splay_tree_foreach (subset_entry->children, insert_subset_children,
967 superset_entry->children);
968 }
969
970 /* Enter the SUBSET itself as a child of the SUPERSET. */
971 splay_tree_insert (superset_entry->children,
972 (splay_tree_key) subset, 0);
973 }
974 }
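
/* An illustrative use, mirroring record_component_aliases below
   (STRUCT_TYPE and FIELD_TYPE are hypothetical placeholders): after
   obtaining the alias set of a structure type, each field's alias set
   is entered as a subset of it:

     alias_set_type super = get_alias_set (struct_type);
     record_alias_subset (super, get_alias_set (field_type));

   Afterwards alias_sets_conflict_p reports a conflict between the
   superset and each recorded subset, while alias_sets_must_conflict_p
   still returns 0 for the two distinct nonzero sets. */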
975
976 /* Record that component types of TYPE, if any, are part of that type for
977 aliasing purposes. For record types, we only record component types
978 for fields that are not marked non-addressable. For array types, we
979 only record the component type if it is not marked non-aliased. */
980
981 void
982 record_component_aliases (tree type)
983 {
984 alias_set_type superset = get_alias_set (type);
985 tree field;
986
987 if (superset == 0)
988 return;
989
990 switch (TREE_CODE (type))
991 {
992 case RECORD_TYPE:
993 case UNION_TYPE:
994 case QUAL_UNION_TYPE:
995 /* Recursively record aliases for the base classes, if there are any. */
996 if (TYPE_BINFO (type))
997 {
998 int i;
999 tree binfo, base_binfo;
1000
1001 for (binfo = TYPE_BINFO (type), i = 0;
1002 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
1003 record_alias_subset (superset,
1004 get_alias_set (BINFO_TYPE (base_binfo)));
1005 }
1006 for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
1007 if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
1008 record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
1009 break;
1010
1011 case COMPLEX_TYPE:
1012 record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
1013 break;
1014
1015 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1016 element type. */
1017
1018 default:
1019 break;
1020 }
1021 }
1022
1023 /* Allocate an alias set for use in storing and reading from the varargs
1024 spill area. */
1025
1026 static GTY(()) alias_set_type varargs_set = -1;
1027
1028 alias_set_type
1029 get_varargs_alias_set (void)
1030 {
1031 #if 1
1032 /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
1033 varargs alias set to an INDIRECT_REF (FIXME!), so we can't
1034 consistently use the varargs alias set for loads from the varargs
1035 area. So don't use it anywhere. */
1036 return 0;
1037 #else
1038 if (varargs_set == -1)
1039 varargs_set = new_alias_set ();
1040
1041 return varargs_set;
1042 #endif
1043 }
1044
1045 /* Likewise, but used for the fixed portions of the frame, e.g., register
1046 save areas. */
1047
1048 static GTY(()) alias_set_type frame_set = -1;
1049
1050 alias_set_type
1051 get_frame_alias_set (void)
1052 {
1053 if (frame_set == -1)
1054 frame_set = new_alias_set ();
1055
1056 return frame_set;
1057 }
1058
1059 /* Create a new, unique base with id ID. */
1060
1061 static rtx
1062 unique_base_value (HOST_WIDE_INT id)
1063 {
1064 return gen_rtx_ADDRESS (Pmode, id);
1065 }
1066
1067 /* Return true if accesses based on any other base value cannot alias
1068 those based on X. */
1069
1070 static bool
1071 unique_base_value_p (rtx x)
1072 {
1073 return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
1074 }
1075
1076 /* Return true if X is known to be a base value. */
1077
1078 static bool
1079 known_base_value_p (rtx x)
1080 {
1081 switch (GET_CODE (x))
1082 {
1083 case LABEL_REF:
1084 case SYMBOL_REF:
1085 return true;
1086
1087 case ADDRESS:
1088 /* Arguments may or may not be bases; we don't know for sure. */
1089 return GET_MODE (x) != VOIDmode;
1090
1091 default:
1092 return false;
1093 }
1094 }
1095
1096 /* Inside SRC, the source of a SET, find a base address. */
1097
1098 static rtx
1099 find_base_value (rtx src)
1100 {
1101 unsigned int regno;
1102
1103 #if defined (FIND_BASE_TERM)
1104 /* Try machine-dependent ways to find the base term. */
1105 src = FIND_BASE_TERM (src);
1106 #endif
1107
1108 switch (GET_CODE (src))
1109 {
1110 case SYMBOL_REF:
1111 case LABEL_REF:
1112 return src;
1113
1114 case REG:
1115 regno = REGNO (src);
1116 /* At the start of a function, argument registers have known base
1117 values which may be lost later. Returning an ADDRESS
1118 expression here allows optimization based on argument values
1119 even when the argument registers are used for other purposes. */
1120 if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
1121 return new_reg_base_value[regno];
1122
1123 /* If a pseudo has a known base value, return it. Do not do this
1124 for non-fixed hard regs since it can result in a circular
1125 dependency chain for registers which have values at function entry.
1126
1127 The test above is not sufficient because the scheduler may move
1128 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */
1129 if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
1130 && regno < vec_safe_length (reg_base_value))
1131 {
1132 /* If we're inside init_alias_analysis, use new_reg_base_value
1133 to reduce the number of relaxation iterations. */
1134 if (new_reg_base_value && new_reg_base_value[regno]
1135 && DF_REG_DEF_COUNT (regno) == 1)
1136 return new_reg_base_value[regno];
1137
1138 if ((*reg_base_value)[regno])
1139 return (*reg_base_value)[regno];
1140 }
1141
1142 return 0;
1143
1144 case MEM:
1145 /* Check for an argument passed in memory. Only record in the
1146 copying-arguments block; it is too hard to track changes
1147 otherwise. */
1148 if (copying_arguments
1149 && (XEXP (src, 0) == arg_pointer_rtx
1150 || (GET_CODE (XEXP (src, 0)) == PLUS
1151 && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
1152 return arg_base_value;
1153 return 0;
1154
1155 case CONST:
1156 src = XEXP (src, 0);
1157 if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
1158 break;
1159
1160 /* ... fall through ... */
1161
1162 case PLUS:
1163 case MINUS:
1164 {
1165 rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
1166
1167 /* If either operand is a REG that is a known pointer, then it
1168 is the base. */
1169 if (REG_P (src_0) && REG_POINTER (src_0))
1170 return find_base_value (src_0);
1171 if (REG_P (src_1) && REG_POINTER (src_1))
1172 return find_base_value (src_1);
1173
1174 /* If either operand is a REG, then see if we already have
1175 a known value for it. */
1176 if (REG_P (src_0))
1177 {
1178 temp = find_base_value (src_0);
1179 if (temp != 0)
1180 src_0 = temp;
1181 }
1182
1183 if (REG_P (src_1))
1184 {
1185 temp = find_base_value (src_1);
 1186 	 if (temp != 0)
1187 src_1 = temp;
1188 }
1189
 1190 	 /* If either base is a named object or a special address
1191 (like an argument or stack reference), then use it for the
1192 base term. */
1193 if (src_0 != 0 && known_base_value_p (src_0))
1194 return src_0;
1195
1196 if (src_1 != 0 && known_base_value_p (src_1))
1197 return src_1;
1198
1199 /* Guess which operand is the base address:
1200 If either operand is a symbol, then it is the base. If
1201 either operand is a CONST_INT, then the other is the base. */
1202 if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
1203 return find_base_value (src_0);
1204 else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
1205 return find_base_value (src_1);
1206
1207 return 0;
1208 }
1209
1210 case LO_SUM:
1211 /* The standard form is (lo_sum reg sym) so look only at the
1212 second operand. */
1213 return find_base_value (XEXP (src, 1));
1214
1215 case AND:
 1216 	 /* If the second operand is constant, set the base
1217 address to the first operand. */
1218 if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
1219 return find_base_value (XEXP (src, 0));
1220 return 0;
1221
1222 case TRUNCATE:
1223 /* As we do not know which address space the pointer is referring to, we can
1224 handle this only if the target does not support different pointer or
1225 address modes depending on the address space. */
1226 if (!target_default_pointer_address_modes_p ())
1227 break;
1228 if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
1229 break;
1230 /* Fall through. */
1231 case HIGH:
1232 case PRE_INC:
1233 case PRE_DEC:
1234 case POST_INC:
1235 case POST_DEC:
1236 case PRE_MODIFY:
1237 case POST_MODIFY:
1238 return find_base_value (XEXP (src, 0));
1239
1240 case ZERO_EXTEND:
1241 case SIGN_EXTEND: /* used for NT/Alpha pointers */
1242 /* As we do not know which address space the pointer is referring to, we can
1243 handle this only if the target does not support different pointer or
1244 address modes depending on the address space. */
1245 if (!target_default_pointer_address_modes_p ())
1246 break;
1247
1248 {
1249 rtx temp = find_base_value (XEXP (src, 0));
1250
1251 if (temp != 0 && CONSTANT_P (temp))
1252 temp = convert_memory_address (Pmode, temp);
1253
1254 return temp;
1255 }
1256
1257 default:
1258 break;
1259 }
1260
1261 return 0;
1262 }
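
/* For example (illustrative RTL): given the SET source

     (plus:SI (reg:SI 54) (const_int 8))

   find_base_value examines both operands; if pseudo 54 has a known
   base value, say the ADDRESS representing the frame, that ADDRESS is
   returned, and the constant offset plays no part in the base. */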
1263
1264 /* Called from init_alias_analysis indirectly through note_stores,
1265 or directly if DEST is a register with a REG_NOALIAS note attached.
1266 SET is null in the latter case. */
1267
1268 /* While scanning insns to find base values, reg_seen[N] is nonzero if
1269 register N has been set in this function. */
1270 static sbitmap reg_seen;
1271
1272 static void
1273 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
1274 {
1275 unsigned regno;
1276 rtx src;
1277 int n;
1278
1279 if (!REG_P (dest))
1280 return;
1281
1282 regno = REGNO (dest);
1283
1284 gcc_checking_assert (regno < reg_base_value->length ());
1285
1286 /* If this spans multiple hard registers, then we must indicate that every
1287 register has an unusable value. */
1288 if (regno < FIRST_PSEUDO_REGISTER)
1289 n = hard_regno_nregs[regno][GET_MODE (dest)];
1290 else
1291 n = 1;
1292 if (n != 1)
1293 {
1294 while (--n >= 0)
1295 {
1296 bitmap_set_bit (reg_seen, regno + n);
1297 new_reg_base_value[regno + n] = 0;
1298 }
1299 return;
1300 }
1301
1302 if (set)
1303 {
1304 /* A CLOBBER wipes out any old value but does not prevent a previously
1305 unset register from acquiring a base address (i.e. reg_seen is not
1306 set). */
1307 if (GET_CODE (set) == CLOBBER)
1308 {
1309 new_reg_base_value[regno] = 0;
1310 return;
1311 }
1312 src = SET_SRC (set);
1313 }
1314 else
1315 {
1316 /* There's a REG_NOALIAS note against DEST. */
1317 if (bitmap_bit_p (reg_seen, regno))
1318 {
1319 new_reg_base_value[regno] = 0;
1320 return;
1321 }
1322 bitmap_set_bit (reg_seen, regno);
1323 new_reg_base_value[regno] = unique_base_value (unique_id++);
1324 return;
1325 }
1326
1327 /* If this is not the first set of REGNO, see whether the new value
1328 is related to the old one. There are two cases of interest:
1329
1330 (1) The register might be assigned an entirely new value
1331 that has the same base term as the original set.
1332
1333 (2) The set might be a simple self-modification that
1334 cannot change REGNO's base value.
1335
1336 If neither case holds, reject the original base value as invalid.
1337 Note that the following situation is not detected:
1338
1339 extern int x, y; int *p = &x; p += (&y-&x);
1340
1341 ANSI C does not allow computing the difference of addresses
1342 of distinct top level objects. */
1343 if (new_reg_base_value[regno] != 0
1344 && find_base_value (src) != new_reg_base_value[regno])
1345 switch (GET_CODE (src))
1346 {
1347 case LO_SUM:
1348 case MINUS:
1349 if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1350 new_reg_base_value[regno] = 0;
1351 break;
1352 case PLUS:
1353 /* If the value we add in the PLUS is also a valid base value,
1354 this might be the actual base value, and the original value
1355 an index. */
1356 {
1357 rtx other = NULL_RTX;
1358
1359 if (XEXP (src, 0) == dest)
1360 other = XEXP (src, 1);
1361 else if (XEXP (src, 1) == dest)
1362 other = XEXP (src, 0);
1363
1364 if (! other || find_base_value (other))
1365 new_reg_base_value[regno] = 0;
1366 break;
1367 }
1368 case AND:
1369 if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
1370 new_reg_base_value[regno] = 0;
1371 break;
1372 default:
1373 new_reg_base_value[regno] = 0;
1374 break;
1375 }
1376 /* If this is the first set of a register, record the value. */
1377 else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1378 && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
1379 new_reg_base_value[regno] = find_base_value (src);
1380
1381 bitmap_set_bit (reg_seen, regno);
1382 }
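
/* For example (illustrative): for the self-modification

     (set (reg:SI 70) (plus:SI (reg:SI 70) (const_int 4)))

   the PLUS case above finds that the addend has no base value of its
   own, so reg 70 keeps its original base; a later
   (set (reg:SI 70) (reg:SI 71)) whose source has a different base
   falls through to the default case and invalidates it. */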
1383
 1384 	 /* Return REG_BASE_VALUE for REGNO. The selective scheduler uses this to
 1385 	 avoid using hard registers with non-null REG_BASE_VALUE for renaming. */
1386 rtx
1387 get_reg_base_value (unsigned int regno)
1388 {
1389 return (*reg_base_value)[regno];
1390 }
1391
1392 /* If a value is known for REGNO, return it. */
1393
1394 rtx
1395 get_reg_known_value (unsigned int regno)
1396 {
1397 if (regno >= FIRST_PSEUDO_REGISTER)
1398 {
1399 regno -= FIRST_PSEUDO_REGISTER;
1400 if (regno < vec_safe_length (reg_known_value))
1401 return (*reg_known_value)[regno];
1402 }
1403 return NULL;
1404 }
1405
 1406 	 /* Set the known value for REGNO to VAL. */
1407
1408 static void
1409 set_reg_known_value (unsigned int regno, rtx val)
1410 {
1411 if (regno >= FIRST_PSEUDO_REGISTER)
1412 {
1413 regno -= FIRST_PSEUDO_REGISTER;
1414 if (regno < vec_safe_length (reg_known_value))
1415 (*reg_known_value)[regno] = val;
1416 }
1417 }
1418
1419 /* Similarly for reg_known_equiv_p. */
1420
1421 bool
1422 get_reg_known_equiv_p (unsigned int regno)
1423 {
1424 if (regno >= FIRST_PSEUDO_REGISTER)
1425 {
1426 regno -= FIRST_PSEUDO_REGISTER;
1427 if (regno < vec_safe_length (reg_known_value))
1428 return bitmap_bit_p (reg_known_equiv_p, regno);
1429 }
1430 return false;
1431 }
1432
1433 static void
1434 set_reg_known_equiv_p (unsigned int regno, bool val)
1435 {
1436 if (regno >= FIRST_PSEUDO_REGISTER)
1437 {
1438 regno -= FIRST_PSEUDO_REGISTER;
1439 if (regno < vec_safe_length (reg_known_value))
1440 {
1441 if (val)
1442 bitmap_set_bit (reg_known_equiv_p, regno);
1443 else
1444 bitmap_clear_bit (reg_known_equiv_p, regno);
1445 }
1446 }
1447 }
1448
1449
 1450 	 /* Returns a canonical version of X, from the point of view of alias
1451 analysis. (For example, if X is a MEM whose address is a register,
1452 and the register has a known value (say a SYMBOL_REF), then a MEM
1453 whose address is the SYMBOL_REF is returned.) */
1454
1455 rtx
1456 canon_rtx (rtx x)
1457 {
1458 /* Recursively look for equivalences. */
1459 if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1460 {
1461 rtx t = get_reg_known_value (REGNO (x));
1462 if (t == x)
1463 return x;
1464 if (t)
1465 return canon_rtx (t);
1466 }
1467
1468 if (GET_CODE (x) == PLUS)
1469 {
1470 rtx x0 = canon_rtx (XEXP (x, 0));
1471 rtx x1 = canon_rtx (XEXP (x, 1));
1472
1473 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1474 {
1475 if (CONST_INT_P (x0))
1476 return plus_constant (GET_MODE (x), x1, INTVAL (x0));
1477 else if (CONST_INT_P (x1))
1478 return plus_constant (GET_MODE (x), x0, INTVAL (x1));
1479 return gen_rtx_PLUS (GET_MODE (x), x0, x1);
1480 }
1481 }
1482
1483 /* This gives us much better alias analysis when called from
1484 the loop optimizer. Note we want to leave the original
1485 MEM alone, but need to return the canonicalized MEM with
 1486 	 all the flags retaining their original values. */
1487 else if (MEM_P (x))
1488 x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1489
1490 return x;
1491 }
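
/* For example (illustrative): if reg_known_value records that pseudo
   100 equals `(symbol_ref "x")', then

     canon_rtx ((mem:SI (plus:SI (reg:SI 100) (const_int 4))))

   returns a MEM whose address is the canonical
   `(const (plus (symbol_ref "x") (const_int 4)))', giving the
   dependence tests below a concrete base to work with. */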
1492
1493 /* Return 1 if X and Y are identical-looking rtx's.
 1494 	 Expect that X and Y have already been canonicalized.
1495
1496 We use the data in reg_known_value above to see if two registers with
1497 different numbers are, in fact, equivalent. */
1498
1499 static int
1500 rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1501 {
1502 int i;
1503 int j;
1504 enum rtx_code code;
1505 const char *fmt;
1506
1507 if (x == 0 && y == 0)
1508 return 1;
1509 if (x == 0 || y == 0)
1510 return 0;
1511
1512 if (x == y)
1513 return 1;
1514
1515 code = GET_CODE (x);
1516 /* Rtx's of different codes cannot be equal. */
1517 if (code != GET_CODE (y))
1518 return 0;
1519
1520 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1521 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1522
1523 if (GET_MODE (x) != GET_MODE (y))
1524 return 0;
1525
1526 /* Some RTL can be compared without a recursive examination. */
1527 switch (code)
1528 {
1529 case REG:
1530 return REGNO (x) == REGNO (y);
1531
1532 case LABEL_REF:
1533 return XEXP (x, 0) == XEXP (y, 0);
1534
1535 case SYMBOL_REF:
1536 return XSTR (x, 0) == XSTR (y, 0);
1537
1538 case ENTRY_VALUE:
1539 /* This is magic, don't go through canonicalization et al. */
1540 return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));
1541
1542 case VALUE:
1543 CASE_CONST_UNIQUE:
1544 /* There's no need to compare the contents of CONST_DOUBLEs or
1545 CONST_INTs because pointer equality is a good enough
1546 comparison for these nodes. */
1547 return 0;
1548
1549 default:
1550 break;
1551 }
1552
1553 /* canon_rtx knows how to handle plus. No need to canonicalize. */
1554 if (code == PLUS)
1555 return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1556 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1557 || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1558 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
 1559 	 /* For commutative operations, the RTXs match if the operands match in
 1560 	 either order. Also handle the simple binary and unary cases without a loop. */
1561 if (COMMUTATIVE_P (x))
1562 {
1563 rtx xop0 = canon_rtx (XEXP (x, 0));
1564 rtx yop0 = canon_rtx (XEXP (y, 0));
1565 rtx yop1 = canon_rtx (XEXP (y, 1));
1566
1567 return ((rtx_equal_for_memref_p (xop0, yop0)
1568 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1569 || (rtx_equal_for_memref_p (xop0, yop1)
1570 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1571 }
1572 else if (NON_COMMUTATIVE_P (x))
1573 {
1574 return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1575 canon_rtx (XEXP (y, 0)))
1576 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1577 canon_rtx (XEXP (y, 1))));
1578 }
1579 else if (UNARY_P (x))
1580 return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1581 canon_rtx (XEXP (y, 0)));
1582
1583 /* Compare the elements. If any pair of corresponding elements
 1584 	 fail to match, return 0 for the whole thing.
1585
1586 Limit cases to types which actually appear in addresses. */
1587
1588 fmt = GET_RTX_FORMAT (code);
1589 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1590 {
1591 switch (fmt[i])
1592 {
1593 case 'i':
1594 if (XINT (x, i) != XINT (y, i))
1595 return 0;
1596 break;
1597
1598 case 'E':
1599 /* Two vectors must have the same length. */
1600 if (XVECLEN (x, i) != XVECLEN (y, i))
1601 return 0;
1602
1603 /* And the corresponding elements must match. */
1604 for (j = 0; j < XVECLEN (x, i); j++)
1605 if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1606 canon_rtx (XVECEXP (y, i, j))) == 0)
1607 return 0;
1608 break;
1609
1610 case 'e':
1611 if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1612 canon_rtx (XEXP (y, i))) == 0)
1613 return 0;
1614 break;
1615
1616 /* This can happen for asm operands. */
1617 case 's':
1618 if (strcmp (XSTR (x, i), XSTR (y, i)))
1619 return 0;
1620 break;
1621
1622 /* This can happen for an asm which clobbers memory. */
1623 case '0':
1624 break;
1625
1626 /* It is believed that rtx's at this level will never
1627 contain anything but integers and other rtx's,
 1628 	 except within LABEL_REFs and SYMBOL_REFs. */
1629 default:
1630 gcc_unreachable ();
1631 }
1632 }
1633 return 1;
1634 }
1635
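/* Find the base term of address X. This is like find_base_value,
   but is used on the addresses of MEMs rather than on the sources of
   SETs, and it additionally knows how to look through cselib VALUEs. */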
1636 static rtx
1637 find_base_term (rtx x)
1638 {
1639 cselib_val *val;
1640 struct elt_loc_list *l, *f;
1641 rtx ret;
1642
1643 #if defined (FIND_BASE_TERM)
1644 /* Try machine-dependent ways to find the base term. */
1645 x = FIND_BASE_TERM (x);
1646 #endif
1647
1648 switch (GET_CODE (x))
1649 {
1650 case REG:
1651 return REG_BASE_VALUE (x);
1652
1653 case TRUNCATE:
1654 /* As we do not know which address space the pointer is referring to, we can
1655 handle this only if the target does not support different pointer or
1656 address modes depending on the address space. */
1657 if (!target_default_pointer_address_modes_p ())
1658 return 0;
1659 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
1660 return 0;
1661 /* Fall through. */
1662 case HIGH:
1663 case PRE_INC:
1664 case PRE_DEC:
1665 case POST_INC:
1666 case POST_DEC:
1667 case PRE_MODIFY:
1668 case POST_MODIFY:
1669 return find_base_term (XEXP (x, 0));
1670
1671 case ZERO_EXTEND:
1672 case SIGN_EXTEND: /* Used for Alpha/NT pointers */
1673 /* As we do not know which address space the pointer is referring to, we can
1674 handle this only if the target does not support different pointer or
1675 address modes depending on the address space. */
1676 if (!target_default_pointer_address_modes_p ())
1677 return 0;
1678
1679 {
1680 rtx temp = find_base_term (XEXP (x, 0));
1681
1682 if (temp != 0 && CONSTANT_P (temp))
1683 temp = convert_memory_address (Pmode, temp);
1684
1685 return temp;
1686 }
1687
1688 case VALUE:
1689 val = CSELIB_VAL_PTR (x);
1690 ret = NULL_RTX;
1691
1692 if (!val)
1693 return ret;
1694
1695 if (cselib_sp_based_value_p (val))
1696 return static_reg_base_value[STACK_POINTER_REGNUM];
1697
1698 f = val->locs;
1699 /* Temporarily reset val->locs to avoid infinite recursion. */
1700 val->locs = NULL;
1701
1702 for (l = f; l; l = l->next)
1703 if (GET_CODE (l->loc) == VALUE
1704 && CSELIB_VAL_PTR (l->loc)->locs
1705 && !CSELIB_VAL_PTR (l->loc)->locs->next
1706 && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
1707 continue;
1708 else if ((ret = find_base_term (l->loc)) != 0)
1709 break;
1710
1711 val->locs = f;
1712 return ret;
1713
1714 case LO_SUM:
1715 /* The standard form is (lo_sum reg sym) so look only at the
1716 second operand. */
1717 return find_base_term (XEXP (x, 1));
1718
1719 case CONST:
1720 x = XEXP (x, 0);
1721 if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
1722 return 0;
1723 /* Fall through. */
1724 case PLUS:
1725 case MINUS:
1726 {
1727 rtx tmp1 = XEXP (x, 0);
1728 rtx tmp2 = XEXP (x, 1);
1729
1730 /* This is a little bit tricky since we have to determine which of
1731 the two operands represents the real base address. Otherwise this
1732 routine may return the index register instead of the base register.
1733
1734 That may cause us to believe no aliasing was possible, when in
1735 fact aliasing is possible.
1736
1737 We use a few simple tests to guess the base register. Additional
1738 tests can certainly be added. For example, if one of the operands
1739 is a shift or multiply, then it must be the index register and the
1740 other operand is the base register. */
1741
1742 if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
1743 return find_base_term (tmp2);
1744
1745 /* If either operand is known to be a pointer, then prefer it
1746 to determine the base term. */
1747 if (REG_P (tmp1) && REG_POINTER (tmp1))
1748 ;
1749 else if (REG_P (tmp2) && REG_POINTER (tmp2))
1750 {
1751 rtx tem = tmp1;
1752 tmp1 = tmp2;
1753 tmp2 = tem;
1754 }
1755
1756 /* Go ahead and find the base term for both operands. If either base
1757 term is from a pointer or is a named object or a special address
1758 (like an argument or stack reference), then use it for the
1759 base term. */
1760 rtx base = find_base_term (tmp1);
1761 if (base != NULL_RTX
1762 && ((REG_P (tmp1) && REG_POINTER (tmp1))
1763 || known_base_value_p (base)))
1764 return base;
1765 base = find_base_term (tmp2);
1766 if (base != NULL_RTX
1767 && ((REG_P (tmp2) && REG_POINTER (tmp2))
1768 || known_base_value_p (base)))
1769 return base;
1770
1771 /* We could not determine which of the two operands was the
1772 base register and which was the index. So we can determine
1773 nothing from the base alias check. */
1774 return 0;
1775 }
1776
1777 case AND:
1778 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
1779 return find_base_term (XEXP (x, 0));
1780 return 0;
1781
1782 case SYMBOL_REF:
1783 case LABEL_REF:
1784 return x;
1785
1786 default:
1787 return 0;
1788 }
1789 }
1790
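/* Illustrative sketch only (the register numbers are hypothetical):
   for an address such as

       (plus (mult (reg 67) (const_int 4)) (reg 66))

   where reg 66 is marked REG_POINTER, the PLUS/MINUS case above swaps
   the operands so reg 66 is examined first, and the base term becomes
   whatever REG_BASE_VALUE recorded for reg 66, e.g. the
   (symbol_ref "buf") whose address it was assigned. The scaled MULT
   operand can only ever be the index, never the base. */
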
1791 /* Return true if accesses to address X may alias accesses based
1792 on the stack pointer. */
1793
1794 bool
1795 may_be_sp_based_p (rtx x)
1796 {
1797 rtx base = find_base_term (x);
1798 return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
1799 }
1800
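/* For instance (a sketch, not taken from any target): an address like
   (plus (reg sp) (const_int -16)) has the unique stack-pointer base
   value as its base term, so may_be_sp_based_p returns true, and an
   address whose base cannot be determined at all (find_base_term
   returning 0) is conservatively treated as possibly sp-based too. */
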
1801 /* Return 0 if the addresses X and Y are known to point to different
1802 objects, 1 if they might be pointers to the same object. */
1803
1804 static int
1805 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
1806 enum machine_mode x_mode, enum machine_mode y_mode)
1807 {
1808 /* If the address itself has no known base, see if a known equivalent
1809 value has one. If either address still has no known base, nothing
1810 is known about aliasing. */
1811 if (x_base == 0)
1812 {
1813 rtx x_c;
1814
1815 if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
1816 return 1;
1817
1818 x_base = find_base_term (x_c);
1819 if (x_base == 0)
1820 return 1;
1821 }
1822
1823 if (y_base == 0)
1824 {
1825 rtx y_c;
1826 if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
1827 return 1;
1828
1829 y_base = find_base_term (y_c);
1830 if (y_base == 0)
1831 return 1;
1832 }
1833
1834 /* If the base addresses are equal nothing is known about aliasing. */
1835 if (rtx_equal_p (x_base, y_base))
1836 return 1;
1837
1838 /* The base addresses are different expressions. If they are not accessed
1839 via AND, there is no conflict. We can bring knowledge of object
1840 alignment into play here. For example, on alpha, "char a, b;" can
1841 alias one another, though "char a; long b;" cannot. AND addresses may
1842 implicitly alias surrounding objects; i.e. an unaligned access in DImode
1843 via an AND address can alias all surrounding object types except those
1844 with alignment 8 or higher. */
1845 if (GET_CODE (x) == AND && GET_CODE (y) == AND)
1846 return 1;
1847 if (GET_CODE (x) == AND
1848 && (!CONST_INT_P (XEXP (x, 1))
1849 || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
1850 return 1;
1851 if (GET_CODE (y) == AND
1852 && (!CONST_INT_P (XEXP (y, 1))
1853 || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
1854 return 1;
1855
1856 /* Differing symbols not accessed via AND never alias. */
1857 if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
1858 return 0;
1859
1860 if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
1861 return 0;
1862
1863 return 1;
1864 }
1865
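/* A worked example of the AND tests above, assuming Alpha-style
   aligned accesses: for x = (and (reg 64) (const_int -8)) with
   y_mode = QImode, GET_MODE_UNIT_SIZE (y_mode) == 1 is smaller than
   -INTVAL == 8, so we return 1: the masked access may spill into a
   neighbouring char even though the bases differ. If y_mode were
   DImode (unit size 8, not smaller than 8), the AND test falls
   through and two distinct SYMBOL_REF bases yield 0. */
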
1866 /* Callback for for_each_rtx that returns 1 upon encountering a VALUE
1867 whose UID is greater than the int uid that D points to. */
1868
1869 static int
1870 refs_newer_value_cb (rtx *x, void *d)
1871 {
1872 if (GET_CODE (*x) == VALUE && CSELIB_VAL_PTR (*x)->uid > *(int *)d)
1873 return 1;
1874
1875 return 0;
1876 }
1877
1878 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
1879 that of V. */
1880
1881 static bool
1882 refs_newer_value_p (rtx expr, rtx v)
1883 {
1884 int minuid = CSELIB_VAL_PTR (v)->uid;
1885
1886 return for_each_rtx (&expr, refs_newer_value_cb, &minuid);
1887 }
1888
1889 /* Convert the address X into something we can use. This is done by returning
1890 it unchanged unless it is a value; in the latter case we call cselib to get
1891 a more useful rtx. */
1892
1893 rtx
1894 get_addr (rtx x)
1895 {
1896 cselib_val *v;
1897 struct elt_loc_list *l;
1898
1899 if (GET_CODE (x) != VALUE)
1900 return x;
1901 v = CSELIB_VAL_PTR (x);
1902 if (v)
1903 {
1904 bool have_equivs = cselib_have_permanent_equivalences ();
1905 if (have_equivs)
1906 v = canonical_cselib_val (v);
1907 for (l = v->locs; l; l = l->next)
1908 if (CONSTANT_P (l->loc))
1909 return l->loc;
1910 for (l = v->locs; l; l = l->next)
1911 if (!REG_P (l->loc) && !MEM_P (l->loc)
1912 /* Avoid infinite recursion when potentially dealing with
1913 var-tracking artificial equivalences, by skipping the
1914 equivalences themselves, and not choosing expressions
1915 that refer to newer VALUEs. */
1916 && (!have_equivs
1917 || (GET_CODE (l->loc) != VALUE
1918 && !refs_newer_value_p (l->loc, x))))
1919 return l->loc;
1920 if (have_equivs)
1921 {
1922 for (l = v->locs; l; l = l->next)
1923 if (REG_P (l->loc)
1924 || (GET_CODE (l->loc) != VALUE
1925 && !refs_newer_value_p (l->loc, x)))
1926 return l->loc;
1927 /* Return the canonical value. */
1928 return v->val_rtx;
1929 }
1930 if (v->locs)
1931 return v->locs->loc;
1932 }
1933 return x;
1934 }
1935
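/* Sketch of the preference order above, for a hypothetical VALUE
   whose location list is {(reg 70), (symbol_ref "x")}: the first
   loop returns (symbol_ref "x"), because CONSTANT_P locations are
   tried first; only when no constant or other suitable expression
   exists do we fall back to a REG or MEM location or, with
   permanent equivalences, the canonical VALUE rtx itself. */
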
1936 /* Return the address of the (N_REFS + 1)th memory reference to ADDR
1937 where SIZE is the size in bytes of the memory reference. If ADDR
1938 is not modified by the memory reference then ADDR is returned. */
1939
1940 static rtx
1941 addr_side_effect_eval (rtx addr, int size, int n_refs)
1942 {
1943 int offset = 0;
1944
1945 switch (GET_CODE (addr))
1946 {
1947 case PRE_INC:
1948 offset = (n_refs + 1) * size;
1949 break;
1950 case PRE_DEC:
1951 offset = -(n_refs + 1) * size;
1952 break;
1953 case POST_INC:
1954 offset = n_refs * size;
1955 break;
1956 case POST_DEC:
1957 offset = -n_refs * size;
1958 break;
1959
1960 default:
1961 return addr;
1962 }
1963
1964 if (offset)
1965 addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
1966 gen_int_mode (offset, GET_MODE (addr)));
1967 else
1968 addr = XEXP (addr, 0);
1969 addr = canon_rtx (addr);
1970
1971 return addr;
1972 }
1973
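/* Worked examples with a hypothetical register: for
   (post_inc (reg 64)) with SIZE 4, N_REFS 0 gives offset 0 * 4 = 0,
   so the bare (reg 64) is returned; N_REFS 1 gives
   (plus (reg 64) (const_int 4)). For (pre_dec (reg 64)) with SIZE 4
   and N_REFS 0 the offset is -(0 + 1) * 4 = -4, giving
   (plus (reg 64) (const_int -4)). */
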
1974 /* Return TRUE if an object X sized at XSIZE bytes and another object
1975 Y sized at YSIZE bytes, starting C bytes after X, may overlap. If
1976 any of the sizes is zero, assume an overlap, otherwise use the
1977 absolute value of the sizes as the actual sizes. */
1978
1979 static inline bool
1980 offset_overlap_p (HOST_WIDE_INT c, int xsize, int ysize)
1981 {
1982 return (xsize == 0 || ysize == 0
1983 || (c >= 0
1984 ? (abs (xsize) > c)
1985 : (abs (ysize) > -c)));
1986 }
1987
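/* For example: XSIZE 4, YSIZE 4 and C 2 (Y starts two bytes into X)
   gives abs (4) > 2, an overlap; with C 4 the test 4 > 4 fails and
   the two 4-byte objects merely abut. With C -2 the other branch
   checks abs (YSIZE) > 2 and again reports an overlap. A zero size
   always reports a possible overlap. */
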
1988 /* Return one if X and Y (memory addresses) reference the
1989 same location in memory or if the references overlap.
1990 Return zero if they are known not to overlap; otherwise return
1991 minus one, in which case they still might reference the same location.
1992
1993 C is an offset accumulator. When
1994 C is nonzero, we are testing aliases between X and Y + C.
1995 XSIZE is the size in bytes of the X reference,
1996 similarly YSIZE is the size in bytes for Y.
1997 Expect that canon_rtx has been already called for X and Y.
1998
1999 If XSIZE or YSIZE is zero, we do not know the amount of memory being
2000 referenced (the reference was BLKmode), so make the most pessimistic
2001 assumptions.
2002
2003 If XSIZE or YSIZE is negative, we may access memory outside the object
2004 being referenced as a side effect. This can happen when using AND to
2005 align memory references, as is done on the Alpha.
2006
2007 It would be nice to notice that varying addresses cannot conflict with
2008 fp if no local variables had their addresses taken, but that's too hard now.
2009
2010 ??? Contrary to the tree alias oracle this does not return
2011 one for X + non-constant and Y + non-constant when X and Y are equal.
2012 If that is fixed the TBAA hack for union type-punning can be removed. */
2013
2014 static int
2015 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
2016 {
2017 if (GET_CODE (x) == VALUE)
2018 {
2019 if (REG_P (y))
2020 {
2021 struct elt_loc_list *l = NULL;
2022 if (CSELIB_VAL_PTR (x))
2023 for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
2024 l; l = l->next)
2025 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
2026 break;
2027 if (l)
2028 x = y;
2029 else
2030 x = get_addr (x);
2031 }
2032 /* Don't call get_addr if y is the same VALUE. */
2033 else if (x != y)
2034 x = get_addr (x);
2035 }
2036 if (GET_CODE (y) == VALUE)
2037 {
2038 if (REG_P (x))
2039 {
2040 struct elt_loc_list *l = NULL;
2041 if (CSELIB_VAL_PTR (y))
2042 for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
2043 l; l = l->next)
2044 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
2045 break;
2046 if (l)
2047 y = x;
2048 else
2049 y = get_addr (y);
2050 }
2051 /* Don't call get_addr if x is the same VALUE. */
2052 else if (y != x)
2053 y = get_addr (y);
2054 }
2055 if (GET_CODE (x) == HIGH)
2056 x = XEXP (x, 0);
2057 else if (GET_CODE (x) == LO_SUM)
2058 x = XEXP (x, 1);
2059 else
2060 x = addr_side_effect_eval (x, abs (xsize), 0);
2061 if (GET_CODE (y) == HIGH)
2062 y = XEXP (y, 0);
2063 else if (GET_CODE (y) == LO_SUM)
2064 y = XEXP (y, 1);
2065 else
2066 y = addr_side_effect_eval (y, abs (ysize), 0);
2067
2068 if (rtx_equal_for_memref_p (x, y))
2069 {
2070 return offset_overlap_p (c, xsize, ysize);
2071 }
2072
2073 /* This code used to check for conflicts involving stack references and
2074 globals but the base address alias code now handles these cases. */
2075
2076 if (GET_CODE (x) == PLUS)
2077 {
2078 /* The fact that X is canonicalized means that this
2079 PLUS rtx is canonicalized. */
2080 rtx x0 = XEXP (x, 0);
2081 rtx x1 = XEXP (x, 1);
2082
2083 if (GET_CODE (y) == PLUS)
2084 {
2085 /* The fact that Y is canonicalized means that this
2086 PLUS rtx is canonicalized. */
2087 rtx y0 = XEXP (y, 0);
2088 rtx y1 = XEXP (y, 1);
2089
2090 if (rtx_equal_for_memref_p (x1, y1))
2091 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2092 if (rtx_equal_for_memref_p (x0, y0))
2093 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2094 if (CONST_INT_P (x1))
2095 {
2096 if (CONST_INT_P (y1))
2097 return memrefs_conflict_p (xsize, x0, ysize, y0,
2098 c - INTVAL (x1) + INTVAL (y1));
2099 else
2100 return memrefs_conflict_p (xsize, x0, ysize, y,
2101 c - INTVAL (x1));
2102 }
2103 else if (CONST_INT_P (y1))
2104 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2105
2106 return -1;
2107 }
2108 else if (CONST_INT_P (x1))
2109 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
2110 }
2111 else if (GET_CODE (y) == PLUS)
2112 {
2113 /* The fact that Y is canonicalized means that this
2114 PLUS rtx is canonicalized. */
2115 rtx y0 = XEXP (y, 0);
2116 rtx y1 = XEXP (y, 1);
2117
2118 if (CONST_INT_P (y1))
2119 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2120 else
2121 return -1;
2122 }
2123
2124 if (GET_CODE (x) == GET_CODE (y))
2125 switch (GET_CODE (x))
2126 {
2127 case MULT:
2128 {
2129 /* Handle cases where we expect the second operands to be the
2130 same, and check only whether the first operand would conflict
2131 or not. */
2132 rtx x0, y0;
2133 rtx x1 = canon_rtx (XEXP (x, 1));
2134 rtx y1 = canon_rtx (XEXP (y, 1));
2135 if (! rtx_equal_for_memref_p (x1, y1))
2136 return -1;
2137 x0 = canon_rtx (XEXP (x, 0));
2138 y0 = canon_rtx (XEXP (y, 0));
2139 if (rtx_equal_for_memref_p (x0, y0))
2140 return offset_overlap_p (c, xsize, ysize);
2141
2142 /* Can't properly adjust our sizes. */
2143 if (!CONST_INT_P (x1))
2144 return -1;
2145 xsize /= INTVAL (x1);
2146 ysize /= INTVAL (x1);
2147 c /= INTVAL (x1);
2148 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2149 }
2150
2151 default:
2152 break;
2153 }
2154
2155 /* Deal with alignment ANDs by adjusting offset and size so as to
2156 cover the maximum range, without taking any previously known
2157 alignment into account. Make a size negative after such an
2158 adjustment, so that, if we end up with e.g. two SYMBOL_REFs, we
2159 assume a potential overlap, because they may end up in contiguous
2160 memory locations and the stricter-alignment access may span over
2161 part of both. */
2162 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2163 {
2164 HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2165 unsigned HOST_WIDE_INT uc = sc;
2166 if (sc < 0 && -uc == (uc & -uc))
2167 {
2168 if (xsize > 0)
2169 xsize = -xsize;
2170 if (xsize)
2171 xsize += sc + 1;
2172 c -= sc + 1;
2173 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2174 ysize, y, c);
2175 }
2176 }
2177 if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2178 {
2179 HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2180 unsigned HOST_WIDE_INT uc = sc;
2181 if (sc < 0 && -uc == (uc & -uc))
2182 {
2183 if (ysize > 0)
2184 ysize = -ysize;
2185 if (ysize)
2186 ysize += sc + 1;
2187 c += sc + 1;
2188 return memrefs_conflict_p (xsize, x,
2189 ysize, canon_rtx (XEXP (y, 0)), c);
2190 }
2191 }
2192
2193 if (CONSTANT_P (x))
2194 {
2195 if (CONST_INT_P (x) && CONST_INT_P (y))
2196 {
2197 c += (INTVAL (y) - INTVAL (x));
2198 return offset_overlap_p (c, xsize, ysize);
2199 }
2200
2201 if (GET_CODE (x) == CONST)
2202 {
2203 if (GET_CODE (y) == CONST)
2204 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2205 ysize, canon_rtx (XEXP (y, 0)), c);
2206 else
2207 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2208 ysize, y, c);
2209 }
2210 if (GET_CODE (y) == CONST)
2211 return memrefs_conflict_p (xsize, x, ysize,
2212 canon_rtx (XEXP (y, 0)), c);
2213
2214 /* Assume a potential overlap for symbolic addresses that went
2215 through alignment adjustments (i.e., that have negative
2216 sizes), because we can't know how far they are from each
2217 other. */
2218 if (CONSTANT_P (y))
2219 return (xsize < 0 || ysize < 0 || offset_overlap_p (c, xsize, ysize));
2220
2221 return -1;
2222 }
2223
2224 return -1;
2225 }
2226
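/* A worked example of the PLUS recursion (register number
   hypothetical): comparing 4-byte references at
   (plus (reg 65) (const_int 8)) and (plus (reg 65) (const_int 12)),
   the equal first operands reduce the problem to the two CONST_INTs,
   where C becomes 12 - 8 = 4 and offset_overlap_p (4, 4, 4) is
   false: the references abut but do not conflict. Had the second
   offset been 10, C would be 2 < 4 and we would report an overlap. */
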
2227 /* Functions to compute memory dependencies.
2228
2229 Since we process the insns in execution order, we can build tables
2230 to keep track of what registers are fixed (and not aliased), what registers
2231 are varying in known ways, and what registers are varying in unknown
2232 ways.
2233
2234 If both memory references are volatile, then there must always be a
2235 dependence between the two references, since their order cannot be
2236 changed. A volatile and non-volatile reference can be interchanged
2237 though.
2238
2239 We also must allow AND addresses, because they may generate accesses
2240 outside the object being referenced. This is used to generate aligned
2241 addresses from unaligned addresses, for instance, the alpha
2242 storeqi_unaligned pattern. */
2243
2244 /* Read dependence: X is read after read in MEM takes place. There can
2245 only be a dependence here if both reads are volatile, or if either is
2246 an explicit barrier. */
2247
2248 int
2249 read_dependence (const_rtx mem, const_rtx x)
2250 {
2251 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2252 return true;
2253 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2254 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2255 return true;
2256 return false;
2257 }
2258
2259 /* Return true if we can determine that the fields referenced cannot
2260 overlap for any pair of objects. */
2261
2262 static bool
2263 nonoverlapping_component_refs_p (const_rtx rtlx, const_rtx rtly)
2264 {
2265 const_tree x = MEM_EXPR (rtlx), y = MEM_EXPR (rtly);
2266 const_tree fieldx, fieldy, typex, typey, orig_y;
2267
2268 if (!flag_strict_aliasing
2269 || !x || !y
2270 || TREE_CODE (x) != COMPONENT_REF
2271 || TREE_CODE (y) != COMPONENT_REF)
2272 return false;
2273
2274 do
2275 {
2276 /* The comparison has to be done at a common type, since we don't
2277 know how the inheritance hierarchy works. */
2278 orig_y = y;
2279 do
2280 {
2281 fieldx = TREE_OPERAND (x, 1);
2282 typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));
2283
2284 y = orig_y;
2285 do
2286 {
2287 fieldy = TREE_OPERAND (y, 1);
2288 typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));
2289
2290 if (typex == typey)
2291 goto found;
2292
2293 y = TREE_OPERAND (y, 0);
2294 }
2295 while (y && TREE_CODE (y) == COMPONENT_REF);
2296
2297 x = TREE_OPERAND (x, 0);
2298 }
2299 while (x && TREE_CODE (x) == COMPONENT_REF);
2300 /* Never found a common type. */
2301 return false;
2302
2303 found:
2304 /* If we're left with accessing different fields of a structure, then
2305 there is no possible overlap, unless they are both bit-fields. */
2306 if (TREE_CODE (typex) == RECORD_TYPE && fieldx != fieldy)
2307 return !(DECL_BIT_FIELD (fieldx) && DECL_BIT_FIELD (fieldy));
2308
2309 /* The comparison on the current field failed. If we're accessing
2310 a deeply nested structure, look at the next outer level. */
2311 x = TREE_OPERAND (x, 0);
2312 y = TREE_OPERAND (y, 0);
2313 }
2314 while (x && y
2315 && TREE_CODE (x) == COMPONENT_REF
2316 && TREE_CODE (y) == COMPONENT_REF);
2317
2318 return false;
2319 }
2320
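/* Illustrative case, assuming -fstrict-aliasing: given

       struct S { int f; int g; } *p, *q;

   the references p->f and q->g meet at the common type S with
   fieldx != fieldy and neither field a bit-field, so we report that
   the two accesses cannot overlap no matter where p and q point. */
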
2321 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */
2322
2323 static tree
2324 decl_for_component_ref (tree x)
2325 {
2326 do
2327 {
2328 x = TREE_OPERAND (x, 0);
2329 }
2330 while (x && TREE_CODE (x) == COMPONENT_REF);
2331
2332 return x && DECL_P (x) ? x : NULL_TREE;
2333 }
2334
2335 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2336 for the offset of the field reference. *KNOWN_P says whether the
2337 offset is known. */
2338
2339 static void
2340 adjust_offset_for_component_ref (tree x, bool *known_p,
2341 HOST_WIDE_INT *offset)
2342 {
2343 if (!*known_p)
2344 return;
2345 do
2346 {
2347 tree xoffset = component_ref_field_offset (x);
2348 tree field = TREE_OPERAND (x, 1);
2349
2350 if (! host_integerp (xoffset, 1))
2351 {
2352 *known_p = false;
2353 return;
2354 }
2355 *offset += (tree_low_cst (xoffset, 1)
2356 + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
2357 / BITS_PER_UNIT));
2358
2359 x = TREE_OPERAND (x, 0);
2360 }
2361 while (x && TREE_CODE (x) == COMPONENT_REF);
2362 }
2363
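/* Arithmetic sketch with a hypothetical layout: for X representing
   s.inner.f, where field f sits 4 bytes into inner and inner sits
   8 bytes into s, the loop adds 4 on the first iteration and 8 on
   the second, growing *OFFSET by 12 in total. A variable field
   offset makes host_integerp fail, which clears *KNOWN_P instead. */
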
2364 /* Return nonzero if we can determine the exprs corresponding to memrefs
2365 X and Y and they do not overlap.
2366 If LOOP_INVARIANT is set, skip offset-based disambiguation. */
2367
2368 int
2369 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2370 {
2371 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2372 rtx rtlx, rtly;
2373 rtx basex, basey;
2374 bool moffsetx_known_p, moffsety_known_p;
2375 HOST_WIDE_INT moffsetx = 0, moffsety = 0;
2376 HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2377
2378 /* Unless both have exprs, we can't tell anything. */
2379 if (exprx == 0 || expry == 0)
2380 return 0;
2381
2382 /* For spill-slot accesses make sure we have valid offsets. */
2383 if ((exprx == get_spill_slot_decl (false)
2384 && ! MEM_OFFSET_KNOWN_P (x))
2385 || (expry == get_spill_slot_decl (false)
2386 && ! MEM_OFFSET_KNOWN_P (y)))
2387 return 0;
2388
2389 /* If the field reference test failed, look at the DECLs involved. */
2390 moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2391 if (moffsetx_known_p)
2392 moffsetx = MEM_OFFSET (x);
2393 if (TREE_CODE (exprx) == COMPONENT_REF)
2394 {
2395 tree t = decl_for_component_ref (exprx);
2396 if (! t)
2397 return 0;
2398 adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2399 exprx = t;
2400 }
2401
2402 moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2403 if (moffsety_known_p)
2404 moffsety = MEM_OFFSET (y);
2405 if (TREE_CODE (expry) == COMPONENT_REF)
2406 {
2407 tree t = decl_for_component_ref (expry);
2408 if (! t)
2409 return 0;
2410 adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2411 expry = t;
2412 }
2413
2414 if (! DECL_P (exprx) || ! DECL_P (expry))
2415 return 0;
2416
2417 /* With invalid code we can end up storing into the constant pool.
2418 Bail out to avoid ICEing when creating RTL for this.
2419 See gfortran.dg/lto/20091028-2_0.f90. */
2420 if (TREE_CODE (exprx) == CONST_DECL
2421 || TREE_CODE (expry) == CONST_DECL)
2422 return 1;
2423
2424 rtlx = DECL_RTL (exprx);
2425 rtly = DECL_RTL (expry);
2426
2427 /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2428 can't overlap unless they are the same, because we never reuse the part
2429 of the stack frame used for locals for spilled pseudos. */
2430 if ((!MEM_P (rtlx) || !MEM_P (rtly))
2431 && ! rtx_equal_p (rtlx, rtly))
2432 return 1;
2433
2434 /* If we have MEMs referring to different address spaces (which can
2435 potentially overlap), we cannot easily tell from the addresses
2436 whether the references overlap. */
2437 if (MEM_P (rtlx) && MEM_P (rtly)
2438 && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2439 return 0;
2440
2441 /* Get the base and offsets of both decls. If either is a register, we
2442 know both are and are the same, so use that as the base. The only way
2443 we can avoid overlap is if we can deduce that they are nonoverlapping
2444 pieces of that decl, which is very rare. */
2445 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2446 if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2447 offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2448
2449 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2450 if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2451 offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2452
2453 /* If the bases are different, we know they do not overlap if both
2454 are constants or if one is a constant and the other a pointer into the
2455 stack frame. Otherwise a different base means we can't tell if they
2456 overlap or not. */
2457 if (! rtx_equal_p (basex, basey))
2458 return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2459 || (CONSTANT_P (basex) && REG_P (basey)
2460 && REGNO_PTR_FRAME_P (REGNO (basey)))
2461 || (CONSTANT_P (basey) && REG_P (basex)
2462 && REGNO_PTR_FRAME_P (REGNO (basex))));
2463
2464 /* Offset-based disambiguation is not appropriate for loop-invariant references. */
2465 if (loop_invariant)
2466 return 0;
2467
2468 sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2469 : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2470 : -1);
2471 sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2472 : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2473 : -1);
2474
2475 /* If we have an offset for either memref, it can update the values computed
2476 above. */
2477 if (moffsetx_known_p)
2478 offsetx += moffsetx, sizex -= moffsetx;
2479 if (moffsety_known_p)
2480 offsety += moffsety, sizey -= moffsety;
2481
2482 /* If a memref has both a size and an offset, we can use the smaller size.
2483 We can't do this if the offset isn't known because we must view this
2484 memref as being anywhere inside the DECL's MEM. */
2485 if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2486 sizex = MEM_SIZE (x);
2487 if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2488 sizey = MEM_SIZE (y);
2489
2490 /* Put the values of the memref with the lower offset in X's values. */
2491 if (offsetx > offsety)
2492 {
2493 tem = offsetx, offsetx = offsety, offsety = tem;
2494 tem = sizex, sizex = sizey, sizey = tem;
2495 }
2496
2497 /* If we don't know the size of the lower-offset value, we can't tell
2498 if they conflict. Otherwise, we do the test. */
2499 return sizex >= 0 && offsety >= offsetx + sizex;
2500 }
2501
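/* Numeric sketch: two MEMs based on the same decl, one 4 bytes at
   offset 0 and one 4 bytes at offset 8, reach the final test with
   offsetx 0, sizex 4, offsety 8, and 8 >= 0 + 4 proves them
   disjoint, so we return 1. At offsets 0 and 2 the test fails and
   we return 0, leaving the decision to the other disambiguators. */
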
2502 /* Helper for true_dependence and canon_true_dependence.
2503 Checks for true dependence: X is read after store in MEM takes place.
2504
2505 If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2506 NULL_RTX, and the canonical addresses of MEM and X are both computed
2507 here. If MEM_CANONICALIZED, then MEM must be already canonicalized.
2508
2509 If X_ADDR is non-NULL, it is used in preference to XEXP (x, 0).
2510
2511 Returns 1 if there is a true dependence, 0 otherwise. */
2512
2513 static int
2514 true_dependence_1 (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2515 const_rtx x, rtx x_addr, bool mem_canonicalized)
2516 {
2517 rtx base;
2518 int ret;
2519
2520 gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
2521 : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
2522
2523 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2524 return 1;
2525
2526 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2527 This is used in epilogue deallocation functions, and in cselib. */
2528 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2529 return 1;
2530 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2531 return 1;
2532 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2533 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2534 return 1;
2535
2536 /* Read-only memory is by definition never modified, and therefore can't
2537 conflict with anything. We don't expect to find read-only set on MEM,
2538 but stupid user tricks can produce it, so don't die. */
2539 if (MEM_READONLY_P (x))
2540 return 0;
2541
2542 /* If we have MEMs referring to different address spaces (which can
2543 potentially overlap), we cannot easily tell from the addresses
2544 whether the references overlap. */
2545 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2546 return 1;
2547
2548 if (! mem_addr)
2549 {
2550 mem_addr = XEXP (mem, 0);
2551 if (mem_mode == VOIDmode)
2552 mem_mode = GET_MODE (mem);
2553 }
2554
2555 if (! x_addr)
2556 {
2557 x_addr = XEXP (x, 0);
2558 if (!((GET_CODE (x_addr) == VALUE
2559 && GET_CODE (mem_addr) != VALUE
2560 && reg_mentioned_p (x_addr, mem_addr))
2561 || (GET_CODE (x_addr) != VALUE
2562 && GET_CODE (mem_addr) == VALUE
2563 && reg_mentioned_p (mem_addr, x_addr))))
2564 {
2565 x_addr = get_addr (x_addr);
2566 if (! mem_canonicalized)
2567 mem_addr = get_addr (mem_addr);
2568 }
2569 }
2570
2571 base = find_base_term (x_addr);
2572 if (base && (GET_CODE (base) == LABEL_REF
2573 || (GET_CODE (base) == SYMBOL_REF
2574 && CONSTANT_POOL_ADDRESS_P (base))))
2575 return 0;
2576
2577 rtx mem_base = find_base_term (mem_addr);
2578 if (! base_alias_check (x_addr, base, mem_addr, mem_base,
2579 GET_MODE (x), mem_mode))
2580 return 0;
2581
2582 x_addr = canon_rtx (x_addr);
2583 if (!mem_canonicalized)
2584 mem_addr = canon_rtx (mem_addr);
2585
2586 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2587 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2588 return ret;
2589
2590 if (mems_in_disjoint_alias_sets_p (x, mem))
2591 return 0;
2592
2593 if (nonoverlapping_memrefs_p (mem, x, false))
2594 return 0;
2595
2596 if (nonoverlapping_component_refs_p (mem, x))
2597 return 0;
2598
2599 return rtx_refs_may_alias_p (x, mem, true);
2600 }
2601
2602 /* True dependence: X is read after store in MEM takes place. */
2603
2604 int
2605 true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x)
2606 {
2607 return true_dependence_1 (mem, mem_mode, NULL_RTX,
2608 x, NULL_RTX, /*mem_canonicalized=*/false);
2609 }
2610
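/* Usage sketch with hypothetical frame offsets: for a store to
   (mem:SI (plus (reg fp) (const_int -4))) followed by a load from
   (mem:SI (plus (reg fp) (const_int -8))), the shared base register
   and the disjoint 4-byte ranges let memrefs_conflict_p answer 0,
   so true_dependence reports no dependence and the load can be
   moved across the store. */
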
2611 /* Canonical true dependence: X is read after store in MEM takes place.
2612 Variant of true_dependence which assumes MEM has already been
2613 canonicalized (hence we no longer do that here).
2614 The mem_addr argument has been added, since true_dependence_1 computed
2615 this value prior to canonicalizing. */
2616
2617 int
2618 canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2619 const_rtx x, rtx x_addr)
2620 {
2621 return true_dependence_1 (mem, mem_mode, mem_addr,
2622 x, x_addr, /*mem_canonicalized=*/true);
2623 }
2624
2625 /* Returns nonzero if a write to X might alias a previous read from
2626 (or, if WRITEP is true, a write to) MEM.
2627 If X_CANONICALIZED is true, then X_ADDR is the canonicalized address of X,
2628 and X_MODE the mode for that access.
2629 If MEM_CANONICALIZED is true, MEM is canonicalized. */
2630
2631 static int
2632 write_dependence_p (const_rtx mem,
2633 const_rtx x, enum machine_mode x_mode, rtx x_addr,
2634 bool mem_canonicalized, bool x_canonicalized, bool writep)
2635 {
2636 rtx mem_addr;
2637 rtx base;
2638 int ret;
2639
2640 gcc_checking_assert (x_canonicalized
2641 ? (x_addr != NULL_RTX && x_mode != VOIDmode)
2642 : (x_addr == NULL_RTX && x_mode == VOIDmode));
2643
2644 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2645 return 1;
2646
2647 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2648 This is used in epilogue deallocation functions. */
2649 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2650 return 1;
2651 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2652 return 1;
2653 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2654 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2655 return 1;
2656
2657 /* A read from read-only memory can't conflict with read-write memory. */
2658 if (!writep && MEM_READONLY_P (mem))
2659 return 0;
2660
2661 /* If we have MEMs referring to different address spaces (which can
2662 potentially overlap), we cannot easily tell from the addresses
2663 whether the references overlap. */
2664 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2665 return 1;
2666
2667 mem_addr = XEXP (mem, 0);
2668 if (!x_addr)
2669 {
2670 x_addr = XEXP (x, 0);
2671 if (!((GET_CODE (x_addr) == VALUE
2672 && GET_CODE (mem_addr) != VALUE
2673 && reg_mentioned_p (x_addr, mem_addr))
2674 || (GET_CODE (x_addr) != VALUE
2675 && GET_CODE (mem_addr) == VALUE
2676 && reg_mentioned_p (mem_addr, x_addr))))
2677 {
2678 x_addr = get_addr (x_addr);
2679 if (!mem_canonicalized)
2680 mem_addr = get_addr (mem_addr);
2681 }
2682 }
2683
2684 base = find_base_term (mem_addr);
2685 if (! writep
2686 && base
2687 && (GET_CODE (base) == LABEL_REF
2688 || (GET_CODE (base) == SYMBOL_REF
2689 && CONSTANT_POOL_ADDRESS_P (base))))
2690 return 0;
2691
2692 rtx x_base = find_base_term (x_addr);
2693 if (! base_alias_check (x_addr, x_base, mem_addr, base, GET_MODE (x),
2694 GET_MODE (mem)))
2695 return 0;
2696
2697 if (!x_canonicalized)
2698 {
2699 x_addr = canon_rtx (x_addr);
2700 x_mode = GET_MODE (x);
2701 }
2702 if (!mem_canonicalized)
2703 mem_addr = canon_rtx (mem_addr);
2704
2705 if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2706 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1)
2707 return ret;
2708
2709 if (nonoverlapping_memrefs_p (x, mem, false))
2710 return 0;
2711
2712 return rtx_refs_may_alias_p (x, mem, false);
2713 }
2714
2715 /* Anti dependence: X is written after read in MEM takes place. */
2716
2717 int
2718 anti_dependence (const_rtx mem, const_rtx x)
2719 {
2720 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2721 /*mem_canonicalized=*/false,
2722 /*x_canonicalized*/false, /*writep=*/false);
2723 }
2724
2725 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
2726 Also, consider X in X_MODE (which might be from an enclosing
2727 STRICT_LOW_PART / ZERO_EXTRACT).
2728 If MEM_CANONICALIZED is true, MEM is canonicalized. */
2729
2730 int
2731 canon_anti_dependence (const_rtx mem, bool mem_canonicalized,
2732 const_rtx x, enum machine_mode x_mode, rtx x_addr)
2733 {
2734 return write_dependence_p (mem, x, x_mode, x_addr,
2735 mem_canonicalized, /*x_canonicalized=*/true,
2736 /*writep=*/false);
2737 }
2738
2739 /* Output dependence: X is written after store in MEM takes place. */
2740
2741 int
2742 output_dependence (const_rtx mem, const_rtx x)
2743 {
2744 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2745 /*mem_canonicalized=*/false,
2746 /*x_canonicalized*/false, /*writep=*/true);
2747 }
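
/* Summary of the wrappers above: a store after a load from the same
   location is an anti dependence, a store after a store an output
   dependence. Both funnel into write_dependence_p; WRITEP merely
   disables the shortcuts that rule out a conflict when MEM is
   read-only or based on a constant-pool address, since those can
   hold for a prior read but never for a prior store. */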
2748 \f
2749
2750
2751 /* Check whether X may be aliased with MEM. Don't do offset-based
2752 memory disambiguation or TBAA. */
2753 int
2754 may_alias_p (const_rtx mem, const_rtx x)
2755 {
2756 rtx x_addr, mem_addr;
2757
2758 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2759 return 1;
2760
2761 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2762 This is used in epilogue deallocation functions. */
2763 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2764 return 1;
2765 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2766 return 1;
2767 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2768 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2769 return 1;
2770
2771 /* Read-only memory is by definition never modified, and therefore can't
2772 conflict with anything. We don't expect to find read-only set on MEM,
2773 but stupid user tricks can produce it, so don't die. */
2774 if (MEM_READONLY_P (x))
2775 return 0;
2776
2777 /* If we have MEMs referring to different address spaces (which can
2778 potentially overlap), we cannot easily tell from the addresses
2779 whether the references overlap. */
2780 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2781 return 1;
2782
2783 x_addr = XEXP (x, 0);
2784 mem_addr = XEXP (mem, 0);
2785 if (!((GET_CODE (x_addr) == VALUE
2786 && GET_CODE (mem_addr) != VALUE
2787 && reg_mentioned_p (x_addr, mem_addr))
2788 || (GET_CODE (x_addr) != VALUE
2789 && GET_CODE (mem_addr) == VALUE
2790 && reg_mentioned_p (mem_addr, x_addr))))
2791 {
2792 x_addr = get_addr (x_addr);
2793 mem_addr = get_addr (mem_addr);
2794 }
2795
2796 rtx x_base = find_base_term (x_addr);
2797 rtx mem_base = find_base_term (mem_addr);
2798 if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
2799 GET_MODE (x), GET_MODE (mem_addr)))
2800 return 0;
2801
2802 x_addr = canon_rtx (x_addr);
2803 mem_addr = canon_rtx (mem_addr);
2804
2805 if (nonoverlapping_memrefs_p (mem, x, true))
2806 return 0;
2807
2808 /* TBAA is not valid for loop-invariant disambiguation. */
2809 return rtx_refs_may_alias_p (x, mem, false);
2810 }
2811
2812 void
2813 init_alias_target (void)
2814 {
2815 int i;
2816
2817 if (!arg_base_value)
2818 arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
2819
2820 memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2821
2822 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2823 /* Check whether this register can hold an incoming pointer
2824 argument. FUNCTION_ARG_REGNO_P tests outgoing register
2825 numbers, so translate if necessary due to register windows. */
2826 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2827 && HARD_REGNO_MODE_OK (i, Pmode))
2828 static_reg_base_value[i] = arg_base_value;
2829
2830 static_reg_base_value[STACK_POINTER_REGNUM]
2831 = unique_base_value (UNIQUE_BASE_VALUE_SP);
2832 static_reg_base_value[ARG_POINTER_REGNUM]
2833 = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
2834 static_reg_base_value[FRAME_POINTER_REGNUM]
2835 = unique_base_value (UNIQUE_BASE_VALUE_FP);
2836 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2837 static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2838 = unique_base_value (UNIQUE_BASE_VALUE_HFP);
2839 #endif
2840 }
2841
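/* After this runs, the stack pointer's base value is the unique SP
   base that may_be_sp_based_p compares against, and every hard
   register that can carry an incoming pointer argument shares the
   single arg_base_value, so two different incoming arguments are
   never presumed to be distinct objects. */
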
2842 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
2843 to be a memory reference). */
2844 static bool memory_modified;
2845 static void
2846 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
2847 {
2848 if (MEM_P (x))
2849 {
2850 if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
2851 memory_modified = true;
2852 }
2853 }
2854
2855
2856 /* Return true when INSN possibly modifies the memory contents of MEM
2857 (i.e. the memory at that address may be written). */
2858 bool
2859 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
2860 {
2861 if (!INSN_P (insn))
2862 return false;
2863 memory_modified = false;
2864 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
2865 return memory_modified;
2866 }
2867
2868 /* Return TRUE if the destination of a set is an rtx identical to
2869 ITEM. */
2870 static inline bool
2871 set_dest_equal_p (const_rtx set, const_rtx item)
2872 {
2873 rtx dest = SET_DEST (set);
2874 return rtx_equal_p (dest, item);
2875 }
2876
2877 /* Like memory_modified_in_insn_p, but return TRUE if INSN will
2878 *DEFINITELY* modify the memory contents of MEM. */
2879 bool
2880 memory_must_be_modified_in_insn_p (const_rtx mem, const_rtx insn)
2881 {
2882 if (!INSN_P (insn))
2883 return false;
2884 insn = PATTERN (insn);
2885 if (GET_CODE (insn) == SET)
2886 return set_dest_equal_p (insn, mem);
2887 else if (GET_CODE (insn) == PARALLEL)
2888 {
2889 int i;
2890 for (i = 0; i < XVECLEN (insn, 0); i++)
2891 {
2892 rtx sub = XVECEXP (insn, 0, i);
2893 if (GET_CODE (sub) == SET
2894 && set_dest_equal_p (sub, mem))
2895 return true;
2896 }
2897 }
2898 return false;
2899 }
2900
2901 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
2902 array. */
2903
2904 void
2905 init_alias_analysis (void)
2906 {
2907 unsigned int maxreg = max_reg_num ();
2908 int changed, pass;
2909 int i;
2910 unsigned int ui;
2911 rtx insn, val;
2912 int rpo_cnt;
2913 int *rpo;
2914
2915 timevar_push (TV_ALIAS_ANALYSIS);
2916
2917 vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
2918 reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
2919 bitmap_clear (reg_known_equiv_p);
2920
2921 /* If we have memory allocated from the previous run, use it. */
2922 if (old_reg_base_value)
2923 reg_base_value = old_reg_base_value;
2924
2925 if (reg_base_value)
2926 reg_base_value->truncate (0);
2927
2928 vec_safe_grow_cleared (reg_base_value, maxreg);
2929
2930 new_reg_base_value = XNEWVEC (rtx, maxreg);
2931 reg_seen = sbitmap_alloc (maxreg);
2932
2933 /* The basic idea is that each pass through this loop will use the
2934 "constant" information from the previous pass to propagate alias
2935 information through another level of assignments.
2936
2937 The propagation is done on the CFG in reverse post-order, to propagate
2938 things forward as far as possible in each iteration.
2939
2940 This could get expensive if the assignment chains are long. Maybe
2941 we should throttle the number of iterations, possibly based on
2942 the optimization level or flag_expensive_optimizations.
2943
2944 We could propagate more information in the first pass by making use
2945 of DF_REG_DEF_COUNT to determine immediately that the alias information
2946 for a pseudo is "constant".
2947
2948 A program with an uninitialized variable can cause an infinite loop
2949 here. Instead of doing a full dataflow analysis to detect such problems
2950 we just cap the number of iterations for the loop.
2951
2952 The state of the arrays for the set chain in question does not matter
2953 since the program has undefined behavior. */
2954
2955 rpo = XNEWVEC (int, n_basic_blocks);
2956 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
2957
2958 pass = 0;
2959 do
2960 {
2961 /* Assume nothing will change this iteration of the loop. */
2962 changed = 0;
2963
2964 /* We want to assign the same IDs each iteration of this loop, so
2965 start counting from one each iteration of the loop. */
2966 unique_id = 1;
2967
2968 /* We're at the start of the function each iteration through the
2969 loop, so we're copying arguments. */
2970 copying_arguments = true;
2971
2972 /* Wipe the potential alias information clean for this pass. */
2973 memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2974
2975 /* Wipe the reg_seen array clean. */
2976 bitmap_clear (reg_seen);
2977
2978 /* Initialize the alias information for this pass. */
2979 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2980 if (static_reg_base_value[i])
2981 {
2982 new_reg_base_value[i] = static_reg_base_value[i];
2983 bitmap_set_bit (reg_seen, i);
2984 }
2985
2986 /* Walk the insns adding values to the new_reg_base_value array. */
2987 for (i = 0; i < rpo_cnt; i++)
2988 {
2989 basic_block bb = BASIC_BLOCK (rpo[i]);
2990 FOR_BB_INSNS (bb, insn)
2991 {
2992 if (NONDEBUG_INSN_P (insn))
2993 {
2994 rtx note, set;
2995
2996 #if defined (HAVE_prologue) || defined (HAVE_epilogue)
2997 /* The prologue/epilogue insns are not threaded onto the
2998 insn chain until after reload has completed. Thus,
2999 there is no sense wasting time checking if INSN is in
3000 the prologue/epilogue until after reload has completed. */
3001 if (reload_completed
3002 && prologue_epilogue_contains (insn))
3003 continue;
3004 #endif
3005
3006 /* If this insn has a noalias note, process it. Otherwise,
3007 scan for sets. A simple set will have no side effects
3008 which could change the base value of any other register. */
3009
3010 if (GET_CODE (PATTERN (insn)) == SET
3011 && REG_NOTES (insn) != 0
3012 && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
3013 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
3014 else
3015 note_stores (PATTERN (insn), record_set, NULL);
3016
3017 set = single_set (insn);
3018
3019 if (set != 0
3020 && REG_P (SET_DEST (set))
3021 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3022 {
3023 unsigned int regno = REGNO (SET_DEST (set));
3024 rtx src = SET_SRC (set);
3025 rtx t;
3026
3027 note = find_reg_equal_equiv_note (insn);
3028 if (note && REG_NOTE_KIND (note) == REG_EQUAL
3029 && DF_REG_DEF_COUNT (regno) != 1)
3030 note = NULL_RTX;
3031
3032 if (note != NULL_RTX
3033 && GET_CODE (XEXP (note, 0)) != EXPR_LIST
3034 && ! rtx_varies_p (XEXP (note, 0), 1)
3035 && ! reg_overlap_mentioned_p (SET_DEST (set),
3036 XEXP (note, 0)))
3037 {
3038 set_reg_known_value (regno, XEXP (note, 0));
3039 set_reg_known_equiv_p (regno,
3040 REG_NOTE_KIND (note) == REG_EQUIV);
3041 }
3042 else if (DF_REG_DEF_COUNT (regno) == 1
3043 && GET_CODE (src) == PLUS
3044 && REG_P (XEXP (src, 0))
3045 && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
3046 && CONST_INT_P (XEXP (src, 1)))
3047 {
3048 t = plus_constant (GET_MODE (src), t,
3049 INTVAL (XEXP (src, 1)));
3050 set_reg_known_value (regno, t);
3051 set_reg_known_equiv_p (regno, false);
3052 }
3053 else if (DF_REG_DEF_COUNT (regno) == 1
3054 && ! rtx_varies_p (src, 1))
3055 {
3056 set_reg_known_value (regno, src);
3057 set_reg_known_equiv_p (regno, false);
3058 }
3059 }
3060 }
3061 else if (NOTE_P (insn)
3062 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
3063 copying_arguments = false;
3064 }
3065 }
3066
3067 /* Now propagate values from new_reg_base_value to reg_base_value. */
3068 gcc_assert (maxreg == (unsigned int) max_reg_num ());
3069
3070 for (ui = 0; ui < maxreg; ui++)
3071 {
3072 if (new_reg_base_value[ui]
3073 && new_reg_base_value[ui] != (*reg_base_value)[ui]
3074 && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
3075 {
3076 (*reg_base_value)[ui] = new_reg_base_value[ui];
3077 changed = 1;
3078 }
3079 }
3080 }
3081 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
3082 XDELETEVEC (rpo);
3083
3084 /* Fill in the remaining entries. */
3085 FOR_EACH_VEC_ELT (*reg_known_value, i, val)
3086 {
3087 int regno = i + FIRST_PSEUDO_REGISTER;
3088 if (! val)
3089 set_reg_known_value (regno, regno_reg_rtx[regno]);
3090 }
3091
3092 /* Clean up. */
3093 free (new_reg_base_value);
3094 new_reg_base_value = 0;
3095 sbitmap_free (reg_seen);
3096 reg_seen = 0;
3097 timevar_pop (TV_ALIAS_ANALYSIS);
3098 }
3099
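/* Propagation sketch (register numbers hypothetical): if pseudo 70
   has the single definition (set (reg 70) (plus (reg 69) (const_int 4)))
   and reg 69's known value is already (symbol_ref "buf"), the
   plus_constant arm above records roughly
   (const (plus (symbol_ref "buf") (const_int 4))) as reg 70's known
   value, with the equivalence bit left clear. */
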
3100 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3101 Special API for var-tracking pass purposes. */
3102
3103 void
3104 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3105 {
3106 (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3107 }
3108
3109 void
3110 end_alias_analysis (void)
3111 {
3112 old_reg_base_value = reg_base_value;
3113 vec_free (reg_known_value);
3114 sbitmap_free (reg_known_equiv_p);
3115 }
3116
3117 #include "gt-alias.h"