/* Alias analysis for GNU C
   Copyright (C) 1997-2014 Free Software Foundation, Inc.
   Contributed by John Carr (jfc@mit.edu).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "varasm.h"
#include "expr.h"
#include "tm_p.h"
#include "function.h"
#include "alias.h"
#include "emit-rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "diagnostic-core.h"
#include "cselib.h"
#include "splay-tree.h"
#include "langhooks.h"
#include "timevar.h"
#include "dumpfile.h"
#include "target.h"
#include "df.h"
#include "tree-ssa-alias.h"
#include "pointer-set.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-ssa.h"
/* The aliasing API provided here solves related but different problems:

   Say there exists (in C)

   struct X {
     struct Y y1;
     struct Z z2;
   } x1, *px1, *px2;

   struct Y y2, *py;
   struct Z z3, *pz;


   py = &x1.y1;
   px2 = &x1;

   Consider the four questions:

   Can a store to x1 interfere with px2->y1?
   Can a store to x1 interfere with px2->z2?
   Can a store to x1 change the value pointed to by py?
   Can a store to x1 change the value pointed to by pz?

   The answer to these questions can be yes, yes, yes, and maybe.

   The first two questions can be answered with a simple examination
   of the type system.  If structure X contains a field of type Y then
   a store through a pointer to an X can overwrite any field that is
   contained (recursively) in an X (unless we know that px1 != px2).

   The last two questions can be solved in the same way as the first
   two questions but this is too conservative.  The observation is
   that in some cases we can know which (if any) fields are addressed
   and if those addresses are used in bad ways.  This analysis may be
   language specific.  In C, arbitrary operations may be applied to
   pointers.  However, there is some indication that this may be too
   conservative for some C++ types.

   The pass ipa-type-escape does this analysis for the types whose
   instances do not escape across the compilation boundary.

   Historically in GCC, these two problems were combined and a single
   data structure was used to represent the solution to these
   problems.  We now have two similar but different data structures.
   The data structure to solve the last two questions is similar to
   the first, but does not contain the fields whose addresses are
   never taken.  For types that do escape the compilation unit, the
   data structures will have identical information.
*/

/* The alias sets assigned to MEMs assist the back-end in determining
   which MEMs can alias which other MEMs.  In general, two MEMs in
   different alias sets cannot alias each other, with one important
   exception.  Consider something like:

   struct S { int i; double d; };

   a store to an `S' can alias something of either type `int' or type
   `double'.  (However, a store to an `int' cannot alias a `double'
   and vice versa.)  We indicate this via a tree structure that looks
   like:
	struct S
	 /   \
	/     \
      |/_     _\|
      int    double

   (The arrows are directed and point downwards.)
   In this situation we say the alias set for `struct S' is the
   `superset' and that those for `int' and `double' are `subsets'.

   To see whether two alias sets can point to the same memory, we must
   see if either alias set is a subset of the other.  We need not trace
   past immediate descendants, however, since we propagate all
   grandchildren up one level.

   Alias set zero is implicitly a superset of all other alias sets.
   However, there is no actual entry for alias set zero.  It is an
   error to attempt to explicitly construct a subset of zero.  */
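
/* As an illustrative sketch (not code in this file; struct_S_type is a
   hypothetical name for the tree representing `struct S'), the DAG
   above is built by record_component_aliases via calls equivalent to:

     alias_set_type s_set = get_alias_set (struct_S_type);
     record_alias_subset (s_set, get_alias_set (integer_type_node));
     record_alias_subset (s_set, get_alias_set (double_type_node));

   after which alias_sets_conflict_p reports a conflict between `struct S'
   and `int' and between `struct S' and `double', but still none between
   `int' and `double' themselves.  */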

struct GTY(()) alias_set_entry_d {
  /* The alias set number, as stored in MEM_ALIAS_SET.  */
  alias_set_type alias_set;

  /* Nonzero if this entry would have a child of zero: this effectively
     makes this alias set the same as alias set zero.  */
  int has_zero_child;

  /* The children of the alias set.  These are not just the immediate
     children, but, in fact, all descendants.  So, if we have:

       struct T { struct S s; float f; }

     continuing our example above, the children here will be all of
     `int', `double', `float', and `struct S'.  */
  splay_tree GTY((param1_is (int), param2_is (int))) children;
};
typedef struct alias_set_entry_d *alias_set_entry;

static int rtx_equal_for_memref_p (const_rtx, const_rtx);
static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
static void record_set (rtx, const_rtx, void *);
static int base_alias_check (rtx, rtx, rtx, rtx, enum machine_mode,
			     enum machine_mode);
static rtx find_base_value (rtx);
static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
static int insert_subset_children (splay_tree_node, void*);
static alias_set_entry get_alias_set_entry (alias_set_type);
static bool nonoverlapping_component_refs_p (const_rtx, const_rtx);
static tree decl_for_component_ref (tree);
static int write_dependence_p (const_rtx,
			       const_rtx, enum machine_mode, rtx,
			       bool, bool, bool);

static void memory_modified_1 (rtx, const_rtx, void *);

/* Set up all info needed to perform alias analysis on memory references.  */

/* Returns the size in bytes of the mode of X.  */
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Cap the number of passes we make over the insns propagating alias
   information through set chains.
   ??? 10 is a completely arbitrary choice.  This should be based on the
   maximum loop depth in the CFG, but we do not have this information
   available (even if current_loops _is_ available).  */
#define MAX_ALIAS_LOOP_PASSES 10

/* reg_base_value[N] gives an address to which register N is related.
   If all sets after the first add or subtract to the current value
   or otherwise modify it so it does not point to a different top level
   object, reg_base_value[N] is equal to the address part of the source
   of the first set.

   A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
   expressions represent three types of base:

     1. incoming arguments.  There is just one ADDRESS to represent all
	arguments, since we do not know at this level whether accesses
	based on different arguments can alias.  The ADDRESS has id 0.

     2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
	(if distinct from frame_pointer_rtx) and arg_pointer_rtx.
	Each of these rtxes has a separate ADDRESS associated with it,
	each with a negative id.

	GCC is (and is required to be) precise in which register it
	chooses to access a particular region of stack.  We can therefore
	assume that accesses based on one of these rtxes do not alias
	accesses based on another of these rtxes.

     3. bases that are derived from malloc()ed memory (REG_NOALIAS).
	Each such piece of memory has a separate ADDRESS associated
	with it, each with an id greater than 0.

   Accesses based on one ADDRESS do not alias accesses based on other
   ADDRESSes.  Accesses based on ADDRESSes in groups (2) and (3) do not
   alias globals either; the ADDRESSes have Pmode to indicate this.
   The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
   indicate this.  */
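
/* For instance (an illustrative consequence, not code in this file),
   in a fragment such as

     char *p = malloc (32);
     char *q = malloc (32);
     p[0] = 1;
     q[0] = 2;

   each malloc result carries a REG_NOALIAS note, so the two pseudos get
   distinct group-(3) ADDRESS bases and the two stores are known not to
   alias each other or any global.  */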

static GTY(()) vec<rtx, va_gc> *reg_base_value;
static rtx *new_reg_base_value;

/* The single VOIDmode ADDRESS that represents all argument bases.
   It has id 0.  */
static GTY(()) rtx arg_base_value;

/* Used to allocate unique ids to each REG_NOALIAS ADDRESS.  */
static int unique_id;

/* We preserve a copy of the old array around to reduce the amount of
   garbage produced; about 8% of the garbage produced was attributed
   to this array.  */
static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;

/* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
   registers.  */
#define UNIQUE_BASE_VALUE_SP	-1
#define UNIQUE_BASE_VALUE_ARGP	-2
#define UNIQUE_BASE_VALUE_FP	-3
#define UNIQUE_BASE_VALUE_HFP	-4

#define static_reg_base_value \
  (this_target_rtl->x_static_reg_base_value)

#define REG_BASE_VALUE(X)				\
  (REGNO (X) < vec_safe_length (reg_base_value)		\
   ? (*reg_base_value)[REGNO (X)] : 0)

/* Vector indexed by N giving the initial (unchanging) value known for
   pseudo-register N.  This vector is initialized in init_alias_analysis,
   and does not change until end_alias_analysis is called.  */
static GTY(()) vec<rtx, va_gc> *reg_known_value;

/* Vector recording for each reg_known_value whether it is due to a
   REG_EQUIV note.  Future passes (viz., reload) may replace the
   pseudo with the equivalent expression and so we account for the
   dependences that would be introduced if that happens.

   The REG_EQUIV notes created in assign_parms may mention the arg
   pointer, and there are explicit insns in the RTL that modify the
   arg pointer.  Thus we must ensure that such insns don't get
   scheduled across each other because that would invalidate the
   REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
   wrong, but solving the problem in the scheduler will likely give
   better code, so we do it here.  */
static sbitmap reg_known_equiv_p;

/* True when scanning insns from the start of the rtl to the
   NOTE_INSN_FUNCTION_BEG note.  */
static bool copying_arguments;


/* The splay-tree used to store the various alias set entries.  */
static GTY (()) vec<alias_set_entry, va_gc> *alias_sets;

/* Build a decomposed reference object for querying the alias-oracle
   from the MEM rtx and store it in *REF.
   Returns false if MEM is not suitable for the alias-oracle.  */

static bool
ao_ref_from_mem (ao_ref *ref, const_rtx mem)
{
  tree expr = MEM_EXPR (mem);
  tree base;

  if (!expr)
    return false;

  ao_ref_init (ref, expr);

  /* Get the base of the reference and see if we have to reject or
     adjust it.  */
  base = ao_ref_base (ref);
  if (base == NULL_TREE)
    return false;

  /* The tree oracle doesn't like bases that are neither decls
     nor indirect references of SSA names.  */
  if (!(DECL_P (base)
	|| (TREE_CODE (base) == MEM_REF
	    && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
	|| (TREE_CODE (base) == TARGET_MEM_REF
	    && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
    return false;

  /* If this is a reference based on a partitioned decl replace the
     base with a MEM_REF of the pointer representative we
     created during stack slot partitioning.  */
  if (TREE_CODE (base) == VAR_DECL
      && ! is_global_var (base)
      && cfun->gimple_df->decls_to_pointers != NULL)
    {
      void *namep;
      namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
      if (namep)
	ref->base = build_simple_mem_ref (*(tree *)namep);
    }

  ref->ref_alias_set = MEM_ALIAS_SET (mem);

  /* If MEM_OFFSET or MEM_SIZE are unknown, what we got from MEM_EXPR
     is conservative, so trust it.  */
  if (!MEM_OFFSET_KNOWN_P (mem)
      || !MEM_SIZE_KNOWN_P (mem))
    return true;

  /* If the base decl is a parameter we can have negative MEM_OFFSET in
     case of promoted subregs on big-endian targets.  Trust the MEM_EXPR
     here.  */
  if (MEM_OFFSET (mem) < 0
      && (MEM_SIZE (mem) + MEM_OFFSET (mem)) * BITS_PER_UNIT == ref->size)
    return true;

  /* Otherwise continue and refine size and offset we got from analyzing
     MEM_EXPR by using MEM_SIZE and MEM_OFFSET.  */

  ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
  ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;

  /* The MEM may extend into adjacent fields, so adjust max_size if
     necessary.  */
  if (ref->max_size != -1
      && ref->size > ref->max_size)
    ref->max_size = ref->size;

  /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
     the MEM_EXPR punt.  This happens for STRICT_ALIGNMENT targets a lot.  */
  if (MEM_EXPR (mem) != get_spill_slot_decl (false)
      && (ref->offset < 0
	  || (DECL_P (ref->base)
	      && (!tree_fits_uhwi_p (DECL_SIZE (ref->base))
		  || (tree_to_uhwi (DECL_SIZE (ref->base))
		      < (unsigned HOST_WIDE_INT) (ref->offset + ref->size))))))
    return false;

  return true;
}

/* Query the alias-oracle on whether the two memory rtx X and MEM may
   alias.  If TBAA_P is set also apply TBAA.  Returns true if the
   two rtxen may alias, false otherwise.  */

static bool
rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
{
  ao_ref ref1, ref2;

  if (!ao_ref_from_mem (&ref1, x)
      || !ao_ref_from_mem (&ref2, mem))
    return true;

  return refs_may_alias_p_1 (&ref1, &ref2,
			     tbaa_p
			     && MEM_ALIAS_SET (x) != 0
			     && MEM_ALIAS_SET (mem) != 0);
}

/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
   such an entry, or NULL otherwise.  */

static inline alias_set_entry
get_alias_set_entry (alias_set_type alias_set)
{
  return (*alias_sets)[alias_set];
}

/* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
   the two MEMs cannot alias each other.  */

static inline int
mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
{
  /* Perform a basic sanity check.  Namely, that there are no alias sets
     if we're not using strict aliasing.  This helps to catch bugs
     whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
     where a MEM is allocated in some way other than by the use of
     gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared.  If we begin to
     use alias sets to indicate that spilled registers cannot alias each
     other, we might need to remove this check.  */
  gcc_assert (flag_strict_aliasing
	      || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));

  return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
}

/* Insert the NODE into the splay tree given by DATA.  Used by
   record_alias_subset via splay_tree_foreach.  */

static int
insert_subset_children (splay_tree_node node, void *data)
{
  splay_tree_insert ((splay_tree) data, node->key, node->value);

  return 0;
}

/* Return true if the first alias set is a subset of the second.  */

bool
alias_set_subset_of (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* Everything is a subset of the "aliases everything" set.  */
  if (set2 == 0)
    return true;

  /* Otherwise, check if set1 is a subset of set2.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set1)))
    return true;
  return false;
}

/* Return 1 if the two specified alias sets may conflict.  */

int
alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* The easy case.  */
  if (alias_sets_must_conflict_p (set1, set2))
    return 1;

  /* See if the first alias set is a subset of the second.  */
  ase = get_alias_set_entry (set1);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set2)))
    return 1;

  /* Now do the same, but with the alias sets reversed.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set1)))
    return 1;

  /* The two alias sets are distinct and neither one is the
     child of the other.  Therefore, they cannot conflict.  */
  return 0;
}

/* Return 1 if the two specified alias sets will always conflict.  */

int
alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
{
  if (set1 == 0 || set2 == 0 || set1 == set2)
    return 1;

  return 0;
}
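
/* To make the two predicates concrete (an illustrative sketch; s_set,
   int_set and double_set are hypothetical names for the alias sets of
   the `struct S', `int' and `double' example from the head of the file):

     alias_sets_must_conflict_p (0, s_set)        -> 1  (zero hits all)
     alias_sets_must_conflict_p (s_set, s_set)    -> 1  (same set)
     alias_sets_conflict_p (s_set, int_set)       -> 1  (subset lookup)
     alias_sets_conflict_p (int_set, double_set)  -> 0  (disjoint)  */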

/* Return 1 if any MEM object of type T1 will always conflict (using the
   dependency routines in this file) with any MEM object of type T2.
   This is used when allocating temporary storage.  If T1 and/or T2 are
   NULL_TREE, it means we know nothing about the storage.  */

int
objects_must_conflict_p (tree t1, tree t2)
{
  alias_set_type set1, set2;

  /* If neither has a type specified, we don't know if they'll conflict
     because we may be using them to store objects of various types, for
     example the argument and local variables areas of inlined functions.  */
  if (t1 == 0 && t2 == 0)
    return 0;

  /* If they are the same type, they must conflict.  */
  if (t1 == t2
      /* Likewise if both are volatile.  */
      || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
    return 1;

  set1 = t1 ? get_alias_set (t1) : 0;
  set2 = t2 ? get_alias_set (t2) : 0;

  /* We can't use alias_sets_conflict_p because we must make sure
     that every subtype of t1 will conflict with every subtype of
     t2 for which a pair of subobjects of these respective subtypes
     overlaps on the stack.  */
  return alias_sets_must_conflict_p (set1, set2);
}

/* Return the outermost parent of the component present in the chain of
   component references handled by get_inner_reference in T with the
   following property:
     - the component is non-addressable, or
     - the parent has alias set zero,
   or NULL_TREE if no such parent exists.  In the former cases, the alias
   set of this parent is the alias set that must be used for T itself.  */

tree
component_uses_parent_alias_set_from (const_tree t)
{
  const_tree found = NULL_TREE;

  while (handled_component_p (t))
    {
      switch (TREE_CODE (t))
	{
	case COMPONENT_REF:
	  if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
	    found = t;
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	  if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
	    found = t;
	  break;

	case REALPART_EXPR:
	case IMAGPART_EXPR:
	  break;

	case BIT_FIELD_REF:
	case VIEW_CONVERT_EXPR:
	  /* Bitfields and casts are never addressable.  */
	  found = t;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) == 0)
	found = t;

      t = TREE_OPERAND (t, 0);
    }

  if (found)
    return TREE_OPERAND (found, 0);

  return NULL_TREE;
}
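
/* A minimal usage sketch (hypothetical caller, consistent with how
   reference_alias_ptr_type_1 below uses this function):

     tree parent = component_uses_parent_alias_set_from (ref);
     alias_set_type set = get_alias_set (parent ? parent : ref);

   i.e. when a non-addressable component or a zero-alias-set parent is
   found, the access must use the parent's alias set rather than that of
   the component's own type.  */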


/* Return whether the pointer-type T effective for aliasing may
   access everything and thus the reference has to be assigned
   alias-set zero.  */

static bool
ref_all_alias_ptr_type_p (const_tree t)
{
  return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
	  || TYPE_REF_CAN_ALIAS_ALL (t));
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  Return -1 if there is nothing
   special about dereferencing T.  */

static alias_set_type
get_deref_alias_set_1 (tree t)
{
  /* All we care about is the type.  */
  if (! TYPE_P (t))
    t = TREE_TYPE (t);

  /* If we have an INDIRECT_REF via a void pointer, we don't
     know anything about what that might alias.  Likewise if the
     pointer is marked that way.  */
  if (ref_all_alias_ptr_type_p (t))
    return 0;

  return -1;
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  */

alias_set_type
get_deref_alias_set (tree t)
{
  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  */
  if (!flag_strict_aliasing)
    return 0;

  alias_set_type set = get_deref_alias_set_1 (t);

  /* Fall back to the alias-set of the pointed-to type.  */
  if (set == -1)
    {
      if (! TYPE_P (t))
	t = TREE_TYPE (t);
      set = get_alias_set (TREE_TYPE (t));
    }

  return set;
}
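
/* Illustrative behaviour (a sketch, assuming strict aliasing is
   enabled): get_deref_alias_set (ptr_type_node) is 0, since a `void *'
   dereference may alias anything, while
   get_deref_alias_set (build_pointer_type (integer_type_node)) falls
   back to get_alias_set (integer_type_node).  */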

/* Return the pointer-type relevant for TBAA purposes from the
   memory reference tree *T or NULL_TREE in which case *T is
   adjusted to point to the outermost component reference that
   can be used for assigning an alias set.  */

static tree
reference_alias_ptr_type_1 (tree *t)
{
  tree inner;

  /* Get the base object of the reference.  */
  inner = *t;
  while (handled_component_p (inner))
    {
      /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
	 the type of any component references that wrap it to
	 determine the alias-set.  */
      if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
	*t = TREE_OPERAND (inner, 0);
      inner = TREE_OPERAND (inner, 0);
    }

  /* Handle pointer dereferences here, they can override the
     alias-set.  */
  if (INDIRECT_REF_P (inner)
      && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0))))
    return TREE_TYPE (TREE_OPERAND (inner, 0));
  else if (TREE_CODE (inner) == TARGET_MEM_REF)
    return TREE_TYPE (TMR_OFFSET (inner));
  else if (TREE_CODE (inner) == MEM_REF
	   && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1))))
    return TREE_TYPE (TREE_OPERAND (inner, 1));

  /* If the innermost reference is a MEM_REF that has a
     conversion embedded treat it like a VIEW_CONVERT_EXPR above,
     using the memory access type for determining the alias-set.  */
  if (TREE_CODE (inner) == MEM_REF
      && (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
	  != TYPE_MAIN_VARIANT
	       (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1))))))
    return TREE_TYPE (TREE_OPERAND (inner, 1));

  /* Otherwise, pick up the outermost object that we could have
     a pointer to.  */
  tree tem = component_uses_parent_alias_set_from (*t);
  if (tem)
    *t = tem;

  return NULL_TREE;
}

/* Return the pointer-type relevant for TBAA purposes from the
   gimple memory reference tree T.  This is the type to be used for
   the offset operand of MEM_REF or TARGET_MEM_REF replacements of T
   and guarantees that get_alias_set will return the same alias
   set for T and the replacement.  */

tree
reference_alias_ptr_type (tree t)
{
  tree ptype = reference_alias_ptr_type_1 (&t);
  /* If there is a given pointer type for aliasing purposes, return it.  */
  if (ptype != NULL_TREE)
    return ptype;

  /* Otherwise build one from the outermost component reference we
     may use.  */
  if (TREE_CODE (t) == MEM_REF
      || TREE_CODE (t) == TARGET_MEM_REF)
    return TREE_TYPE (TREE_OPERAND (t, 1));
  else
    return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t)));
}

/* Return whether the pointer-types T1 and T2 used to determine
   two alias sets of two references will yield the same answer
   from get_deref_alias_set.  */

bool
alias_ptr_types_compatible_p (tree t1, tree t2)
{
  if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
    return true;

  if (ref_all_alias_ptr_type_p (t1)
      || ref_all_alias_ptr_type_p (t2))
    return false;

  return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
	  == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
}

/* Return the alias set for T, which may be either a type or an
   expression.  Call language-specific routine for help, if needed.  */

alias_set_type
get_alias_set (tree t)
{
  alias_set_type set;

  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  Also return 0 if this or its type is
     an error.  */
  if (! flag_strict_aliasing || t == error_mark_node
      || (! TYPE_P (t)
	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
    return 0;

  /* We can be passed either an expression or a type.  This and the
     language-specific routine may make mutually-recursive calls to each other
     to figure out what to do.  At each juncture, we see if this is a tree
     that the language may need to handle specially.  First handle things that
     aren't types.  */
  if (! TYPE_P (t))
    {
      /* Give the language a chance to do something with this tree
	 before we look at it.  */
      STRIP_NOPS (t);
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;

      /* Get the alias pointer-type to use or the outermost object
	 that we could have a pointer to.  */
      tree ptype = reference_alias_ptr_type_1 (&t);
      if (ptype != NULL)
	return get_deref_alias_set (ptype);

      /* If we've already determined the alias set for a decl, just return
	 it.  This is necessary for C++ anonymous unions, whose component
	 variables don't look like union members (boo!).  */
      if (TREE_CODE (t) == VAR_DECL
	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
	return MEM_ALIAS_SET (DECL_RTL (t));

      /* Now all we care about is the type.  */
      t = TREE_TYPE (t);
    }

  /* Variant qualifiers don't affect the alias set, so get the main
     variant.  */
  t = TYPE_MAIN_VARIANT (t);

  /* Always use the canonical type as well.  If this is a type that
     requires structural comparisons to identify compatible types
     use alias set zero.  */
  if (TYPE_STRUCTURAL_EQUALITY_P (t))
    {
      /* Allow the language to specify another alias set for this
	 type.  */
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;
      return 0;
    }

  t = TYPE_CANONICAL (t);

  /* The canonical type should not require structural equality checks.  */
  gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));

  /* If this is a type with a known alias set, return it.  */
  if (TYPE_ALIAS_SET_KNOWN_P (t))
    return TYPE_ALIAS_SET (t);

  /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
  if (!COMPLETE_TYPE_P (t))
    {
      /* For arrays with unknown size the conservative answer is the
	 alias set of the element type.  */
      if (TREE_CODE (t) == ARRAY_TYPE)
	return get_alias_set (TREE_TYPE (t));

      /* But return zero as a conservative answer for incomplete types.  */
      return 0;
    }

  /* See if the language has special handling for this type.  */
  set = lang_hooks.get_alias_set (t);
  if (set != -1)
    return set;

  /* There are no objects of FUNCTION_TYPE, so there's no point in
     using up an alias set for them.  (There are, of course, pointers
     and references to functions, but that's different.)  */
  else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
    set = 0;

  /* Unless the language specifies otherwise, let vector types alias
     their components.  This avoids some nasty type punning issues in
     normal usage.  And indeed lets vectors be treated more like an
     array slice.  */
  else if (TREE_CODE (t) == VECTOR_TYPE)
    set = get_alias_set (TREE_TYPE (t));

  /* Unless the language specifies otherwise, treat array types the
     same as their components.  This avoids the asymmetry we get
     through recording the components.  Consider accessing a
     character(kind=1) through a reference to a character(kind=1)[1:1].
     Or consider if we want to assign integer(kind=4)[0:D.1387] and
     integer(kind=4)[4] the same alias set or not.
     Just be pragmatic here and make sure the array and its element
     type get the same alias set assigned.  */
  else if (TREE_CODE (t) == ARRAY_TYPE && !TYPE_NONALIASED_COMPONENT (t))
    set = get_alias_set (TREE_TYPE (t));

  /* From the former common C and C++ langhook implementation:

     Unfortunately, there is no canonical form of a pointer type.
     In particular, if we have `typedef int I', then `int *', and
     `I *' are different types.  So, we have to pick a canonical
     representative.  We do this below.

     Technically, this approach is actually more conservative than
     it needs to be.  In particular, `const int *' and `int *'
     should be in different alias sets, according to the C and C++
     standard, since their types are not the same, and so,
     technically, an `int **' and `const int **' cannot point at
     the same thing.

     But, the standard is wrong.  In particular, this code is
     legal C++:

       int *ip;
       int **ipp = &ip;
       const int* const* cipp = ipp;

     And, it doesn't make sense for that to be legal unless you
     can dereference IPP and CIPP.  So, we ignore cv-qualifiers on
     the pointed-to types.  This issue has been reported to the
     C++ committee.

     In addition to the above canonicalization issue, with LTO
     we should also canonicalize `T (*)[]' to `T *' avoiding
     alias issues with pointer-to element types and pointer-to
     array types.

     Likewise we need to deal with the situation of incomplete
     pointed-to types and make `*(struct X **)&a' and
     `*(struct X {} **)&a' alias.  Otherwise we will have to
     guarantee that all pointer-to incomplete type variants
     will be replaced by pointer-to complete type variants if
     they are available.

     With LTO the convenient situation of using `void *' to
     access and store any pointer type will also become
     more apparent (and `void *' is just another pointer-to
     incomplete type).  Assigning alias-set zero to `void *'
     and all pointer-to incomplete types is not an appealing
     solution.  Assigning an effective alias-set zero only
     affecting pointers might be - by recording proper subset
     relationships of all pointer alias-sets.

     Pointer-to function types are another grey area which
     needs caution.  Globbing them all into one alias-set
     or the above effective zero set would work.

     For now just assign the same alias-set to all pointers.
     That's simple and avoids all the above problems.  */
  else if (POINTER_TYPE_P (t)
	   && t != ptr_type_node)
    set = get_alias_set (ptr_type_node);

  /* Otherwise make a new alias set for this type.  */
  else
    {
      /* Each canonical type gets its own alias set, so canonical types
	 shouldn't form a tree.  It doesn't really matter for types
	 we handle specially above, so only check it where it possibly
	 would result in a bogus alias set.  */
      gcc_checking_assert (TYPE_CANONICAL (t) == t);

      set = new_alias_set ();
    }

  TYPE_ALIAS_SET (t) = set;

  /* If this is an aggregate type or a complex type, we must record any
     component aliasing information.  */
  if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
    record_component_aliases (t);

  return set;
}
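
/* An illustrative consequence of the pointer globbing above (a sketch
   of observable behaviour, not code in this file): given

     typedef int I;
     int **ipp;
     I **qpp;

   both `int *' and `I *' devolve to get_alias_set (ptr_type_node), so
   stores through *ipp and *qpp are conservatively assumed to conflict
   even though the pointed-to typedefs differ.  */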

/* Return a brand-new alias set.  */

alias_set_type
new_alias_set (void)
{
  if (flag_strict_aliasing)
    {
      if (alias_sets == 0)
	vec_safe_push (alias_sets, (alias_set_entry) 0);
      vec_safe_push (alias_sets, (alias_set_entry) 0);
      return alias_sets->length () - 1;
    }
  else
    return 0;
}

/* Indicate that things in SUBSET can alias things in SUPERSET, but that
   not everything that aliases SUPERSET also aliases SUBSET.  For example,
   in C, a store to an `int' can alias a load of a structure containing an
   `int', and vice versa.  But it can't alias a load of a `double' member
   of the same structure.  Here, the structure would be the SUPERSET and
   `int' the SUBSET.  This relationship is also described in the comment at
   the beginning of this file.

   This function should be called only once per SUPERSET/SUBSET pair.

   It is illegal for SUPERSET to be zero; everything is implicitly a
   subset of alias set zero.  */

void
record_alias_subset (alias_set_type superset, alias_set_type subset)
{
  alias_set_entry superset_entry;
  alias_set_entry subset_entry;

  /* It is possible in complex type situations for both sets to be the same,
     in which case we can ignore this operation.  */
  if (superset == subset)
    return;

  gcc_assert (superset);

  superset_entry = get_alias_set_entry (superset);
  if (superset_entry == 0)
    {
      /* Create an entry for the SUPERSET, so that we have a place to
	 attach the SUBSET.  */
      superset_entry = ggc_alloc_cleared_alias_set_entry_d ();
      superset_entry->alias_set = superset;
      superset_entry->children
	= splay_tree_new_ggc (splay_tree_compare_ints,
			      ggc_alloc_splay_tree_scalar_scalar_splay_tree_s,
			      ggc_alloc_splay_tree_scalar_scalar_splay_tree_node_s);
      superset_entry->has_zero_child = 0;
      (*alias_sets)[superset] = superset_entry;
    }

  if (subset == 0)
    superset_entry->has_zero_child = 1;
  else
    {
      subset_entry = get_alias_set_entry (subset);
      /* If there is an entry for the subset, enter all of its children
	 (if they are not already present) as children of the SUPERSET.  */
      if (subset_entry)
	{
	  if (subset_entry->has_zero_child)
	    superset_entry->has_zero_child = 1;

	  splay_tree_foreach (subset_entry->children, insert_subset_children,
			      superset_entry->children);
	}

      /* Enter the SUBSET itself as a child of the SUPERSET.  */
      splay_tree_insert (superset_entry->children,
			 (splay_tree_key) subset, 0);
    }
}
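
/* Illustrative sketch of the grandchild propagation above (t_set,
   s_set and float_set are hypothetical names for the alias sets
   involved): given

     struct T { struct S s; float f; };

   record_component_aliases (T) effectively performs

     record_alias_subset (t_set, s_set);
     record_alias_subset (t_set, float_set);

   and insert_subset_children copies S's own children (`int' and
   `double') into T's splay tree, which is why conflict queries never
   need to trace more than one level.  */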

/* Record that component types of TYPE, if any, are part of that type for
   aliasing purposes.  For record types, we only record component types
   for fields that are not marked non-addressable.  For array types, we
   only record the component type if it is not marked non-aliased.  */

void
record_component_aliases (tree type)
{
  alias_set_type superset = get_alias_set (type);
  tree field;

  if (superset == 0)
    return;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Recursively record aliases for the base classes, if there are any.  */
      if (TYPE_BINFO (type))
	{
	  int i;
	  tree binfo, base_binfo;

	  for (binfo = TYPE_BINFO (type), i = 0;
	       BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
	    record_alias_subset (superset,
				 get_alias_set (BINFO_TYPE (base_binfo)));
	}
      for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
	  record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
      break;

    case COMPLEX_TYPE:
      record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
      break;

      /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
	 element type.  */

    default:
      break;
    }
}

/* Allocate an alias set for use in storing and reading from the varargs
   spill area.  */

static GTY(()) alias_set_type varargs_set = -1;

alias_set_type
get_varargs_alias_set (void)
{
#if 1
  /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
     varargs alias set to an INDIRECT_REF (FIXME!), so we can't
     consistently use the varargs alias set for loads from the varargs
     area.  So don't use it anywhere.  */
  return 0;
#else
  if (varargs_set == -1)
    varargs_set = new_alias_set ();

  return varargs_set;
#endif
}

/* Likewise, but used for the fixed portions of the frame, e.g., register
   save areas.  */

static GTY(()) alias_set_type frame_set = -1;

alias_set_type
get_frame_alias_set (void)
{
  if (frame_set == -1)
    frame_set = new_alias_set ();

  return frame_set;
}

/* Create a new, unique base with id ID.  */

static rtx
unique_base_value (HOST_WIDE_INT id)
{
  return gen_rtx_ADDRESS (Pmode, id);
}

/* Return true if accesses based on any other base value cannot alias
   those based on X.  */

static bool
unique_base_value_p (rtx x)
{
  return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
}

/* Return true if X is known to be a base value.  */

static bool
known_base_value_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case SYMBOL_REF:
      return true;

    case ADDRESS:
      /* Arguments may or may not be bases; we don't know for sure.  */
      return GET_MODE (x) != VOIDmode;

    default:
      return false;
    }
}

/* Inside SRC, the source of a SET, find a base address.  */

static rtx
find_base_value (rtx src)
{
  unsigned int regno;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  src = FIND_BASE_TERM (src);
#endif

  switch (GET_CODE (src))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return src;

    case REG:
      regno = REGNO (src);
      /* At the start of a function, argument registers have known base
	 values which may be lost later.  Returning an ADDRESS
	 expression here allows optimization based on argument values
	 even when the argument registers are used for other purposes.  */
      if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
	return new_reg_base_value[regno];

      /* If a pseudo has a known base value, return it.  Do not do this
	 for non-fixed hard regs since it can result in a circular
	 dependency chain for registers which have values at function entry.

	 The test above is not sufficient because the scheduler may move
	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
      if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
	  && regno < vec_safe_length (reg_base_value))
	{
	  /* If we're inside init_alias_analysis, use new_reg_base_value
	     to reduce the number of relaxation iterations.  */
	  if (new_reg_base_value && new_reg_base_value[regno]
	      && DF_REG_DEF_COUNT (regno) == 1)
	    return new_reg_base_value[regno];

	  if ((*reg_base_value)[regno])
	    return (*reg_base_value)[regno];
	}

      return 0;

    case MEM:
      /* Check for an argument passed in memory.  Only record in the
	 copying-arguments block; it is too hard to track changes
	 otherwise.  */
      if (copying_arguments
	  && (XEXP (src, 0) == arg_pointer_rtx
	      || (GET_CODE (XEXP (src, 0)) == PLUS
		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
	return arg_base_value;
      return 0;

    case CONST:
      src = XEXP (src, 0);
      if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
	break;

      /* ... fall through ...  */

    case PLUS:
    case MINUS:
      {
	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);

	/* If either operand is a REG that is a known pointer, then it
	   is the base.  */
	if (REG_P (src_0) && REG_POINTER (src_0))
	  return find_base_value (src_0);
	if (REG_P (src_1) && REG_POINTER (src_1))
	  return find_base_value (src_1);

	/* If either operand is a REG, then see if we already have
	   a known value for it.  */
	if (REG_P (src_0))
	  {
	    temp = find_base_value (src_0);
	    if (temp != 0)
	      src_0 = temp;
	  }

	if (REG_P (src_1))
	  {
	    temp = find_base_value (src_1);
	    if (temp != 0)
	      src_1 = temp;
	  }

	/* If either base is a named object or a special address
	   (like an argument or stack reference), then use it for the
	   base term.  */
	if (src_0 != 0 && known_base_value_p (src_0))
	  return src_0;

	if (src_1 != 0 && known_base_value_p (src_1))
	  return src_1;

	/* Guess which operand is the base address:
	   If either operand is a symbol, then it is the base.  If
	   either operand is a CONST_INT, then the other is the base.  */
	if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
	  return find_base_value (src_0);
	else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
	  return find_base_value (src_1);

	return 0;
      }

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
	 second operand.  */
      return find_base_value (XEXP (src, 1));

    case AND:
      /* If the second operand is constant set the base
	 address to the first operand.  */
      if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
	return find_base_value (XEXP (src, 0));
      return 0;

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring to, we
	 can handle this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	break;
      if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
	break;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_value (XEXP (src, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* used for NT/Alpha pointers */
      /* As we do not know which address space the pointer is referring to, we
	 can handle this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	break;

      {
	rtx temp = find_base_value (XEXP (src, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    default:
      break;
    }

  return 0;
}
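
/* Sketch of a typical resolution (illustrative RTL, hypothetical
   register number): for a SET source

     (plus:SI (reg:SI 101) (const_int 4))

   the CONST_INT contributes no base, so the REG operand is tried; if
   reg_base_value[101] is (symbol_ref "buf"), that SYMBOL_REF satisfies
   known_base_value_p and becomes the base of the whole PLUS.  */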

/* Called from init_alias_analysis indirectly through note_stores,
   or directly if DEST is a register with a REG_NOALIAS note attached.
   SET is null in the latter case.  */

/* While scanning insns to find base values, reg_seen[N] is nonzero if
   register N has been set in this function.  */
static sbitmap reg_seen;

static void
record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
{
  unsigned regno;
  rtx src;
  int n;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);

  gcc_checking_assert (regno < reg_base_value->length ());

  /* If this spans multiple hard registers, then we must indicate that every
     register has an unusable value.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    n = hard_regno_nregs[regno][GET_MODE (dest)];
  else
    n = 1;
  if (n != 1)
    {
      while (--n >= 0)
	{
	  bitmap_set_bit (reg_seen, regno + n);
	  new_reg_base_value[regno + n] = 0;
	}
      return;
    }

  if (set)
    {
      /* A CLOBBER wipes out any old value but does not prevent a previously
	 unset register from acquiring a base address (i.e. reg_seen is not
	 set).  */
      if (GET_CODE (set) == CLOBBER)
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      src = SET_SRC (set);
    }
  else
    {
      /* There's a REG_NOALIAS note against DEST.  */
      if (bitmap_bit_p (reg_seen, regno))
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      bitmap_set_bit (reg_seen, regno);
      new_reg_base_value[regno] = unique_base_value (unique_id++);
      return;
    }

  /* If this is not the first set of REGNO, see whether the new value
     is related to the old one.  There are two cases of interest:

     (1) The register might be assigned an entirely new value
	 that has the same base term as the original set.

     (2) The set might be a simple self-modification that
	 cannot change REGNO's base value.

     If neither case holds, reject the original base value as invalid.
     Note that the following situation is not detected:

	 extern int x, y;  int *p = &x;  p += (&y-&x);

     ANSI C does not allow computing the difference of addresses
     of distinct top level objects.  */
  if (new_reg_base_value[regno] != 0
      && find_base_value (src) != new_reg_base_value[regno])
    switch (GET_CODE (src))
      {
      case LO_SUM:
      case MINUS:
	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
	  new_reg_base_value[regno] = 0;
	break;
      case PLUS:
	/* If the value we add in the PLUS is also a valid base value,
	   this might be the actual base value, and the original value
	   an index.  */
	{
	  rtx other = NULL_RTX;

	  if (XEXP (src, 0) == dest)
	    other = XEXP (src, 1);
	  else if (XEXP (src, 1) == dest)
	    other = XEXP (src, 0);

	  if (! other || find_base_value (other))
	    new_reg_base_value[regno] = 0;
	  break;
	}
      case AND:
	if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
	  new_reg_base_value[regno] = 0;
	break;
      default:
	new_reg_base_value[regno] = 0;
	break;
      }
  /* If this is the first set of a register, record the value.  */
  else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
	   && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
    new_reg_base_value[regno] = find_base_value (src);

  bitmap_set_bit (reg_seen, regno);
}
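
/* Illustrative example of the self-modification case above (hypothetical
   pseudo number): a second set such as

     (set (reg:SI 101) (plus:SI (reg:SI 101) (const_int 4)))

   adds a pure index (the CONST_INT has no base value of its own), so
   the PLUS arm leaves new_reg_base_value[101] intact; had the addend
   itself had a base value, the recorded base would have been
   invalidated instead.  */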

/* Return REG_BASE_VALUE for REGNO.  Selective scheduler uses this to avoid
   using hard registers with non-null REG_BASE_VALUE for renaming.  */
rtx
get_reg_base_value (unsigned int regno)
{
  return (*reg_base_value)[regno];
}

/* If a value is known for REGNO, return it.  */

rtx
get_reg_known_value (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < vec_safe_length (reg_known_value))
	return (*reg_known_value)[regno];
    }
  return NULL;
}

/* Set it.  */

static void
set_reg_known_value (unsigned int regno, rtx val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < vec_safe_length (reg_known_value))
	(*reg_known_value)[regno] = val;
    }
}

/* Similarly for reg_known_equiv_p.  */

bool
get_reg_known_equiv_p (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < vec_safe_length (reg_known_value))
	return bitmap_bit_p (reg_known_equiv_p, regno);
    }
  return false;
}

static void
set_reg_known_equiv_p (unsigned int regno, bool val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < vec_safe_length (reg_known_value))
	{
	  if (val)
	    bitmap_set_bit (reg_known_equiv_p, regno);
	  else
	    bitmap_clear_bit (reg_known_equiv_p, regno);
	}
    }
}


/* Returns a canonical version of X, from the point of view of alias
   analysis.  (For example, if X is a MEM whose address is a register,
   and the register has a known value (say a SYMBOL_REF), then a MEM
   whose address is the SYMBOL_REF is returned.)  */

rtx
canon_rtx (rtx x)
{
  /* Recursively look for equivalences.  */
  if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (x));
      if (t == x)
	return x;
      if (t)
	return canon_rtx (t);
    }

  if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
	{
	  if (CONST_INT_P (x0))
	    return plus_constant (GET_MODE (x), x1, INTVAL (x0));
	  else if (CONST_INT_P (x1))
	    return plus_constant (GET_MODE (x), x0, INTVAL (x1));
	  return gen_rtx_PLUS (GET_MODE (x), x0, x1);
	}
    }

  /* This gives us much better alias analysis when called from
     the loop optimizer.  Note we want to leave the original
     MEM alone, but need to return the canonicalized MEM with
     all the flags with their original values.  */
  else if (MEM_P (x))
    x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));

  return x;
}
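
/* Sketch (hypothetical pseudo number): if reg_known_value records that
   pseudo 140 equals (symbol_ref "x"), then canonicalizing

     (plus:SI (reg:SI 140) (const_int 8))

   substitutes the SYMBOL_REF and folds the constant via plus_constant,
   so two references spelled through different but equivalent registers
   compare equal in rtx_equal_for_memref_p below.  */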

/* Return 1 if X and Y are identical-looking rtx's.
   Expect that X and Y have already been canonicalized.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (const_rtx x, const_rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Some RTL can be compared without a recursive examination.  */
  switch (code)
    {
    case REG:
      return REGNO (x) == REGNO (y);

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case ENTRY_VALUE:
      /* This is magic, don't go through canonicalization et al.  */
      return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));

    case VALUE:
    CASE_CONST_UNIQUE:
      /* There's no need to compare the contents of CONST_DOUBLEs or
	 CONST_INTs because pointer equality is a good enough
	 comparison for these nodes.  */
      return 0;

    default:
      break;
    }

  /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
  if (code == PLUS)
    return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
  /* For commutative operations, the RTXs match if the operands match in
     either order.  Also handle the simple binary and unary cases without
     a loop.  */
  if (COMMUTATIVE_P (x))
    {
      rtx xop0 = canon_rtx (XEXP (x, 0));
      rtx yop0 = canon_rtx (XEXP (y, 0));
      rtx yop1 = canon_rtx (XEXP (y, 1));

      return ((rtx_equal_for_memref_p (xop0, yop0)
	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
	      || (rtx_equal_for_memref_p (xop0, yop1)
		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
    }
  else if (NON_COMMUTATIVE_P (x))
    {
      return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				      canon_rtx (XEXP (y, 0)))
	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
					 canon_rtx (XEXP (y, 1))));
    }
  else if (UNARY_P (x))
    return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				   canon_rtx (XEXP (y, 0)));

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.

     Limit cases to types which actually appear in addresses.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
					canon_rtx (XVECEXP (y, i, j))) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
				      canon_rtx (XEXP (y, i))) == 0)
	    return 0;
	  break;

	/* This can happen for asm operands.  */
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	/* This can happen for an asm which clobbers memory.  */
	case '0':
	  break;

	/* It is believed that rtx's at this level will never
	   contain anything but integers and other rtx's,
	   except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return 1;
}

static rtx
find_base_term (rtx x)
{
  cselib_val *val;
  struct elt_loc_list *l, *f;
  rtx ret;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  x = FIND_BASE_TERM (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
      return REG_BASE_VALUE (x);

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring to, we
	 can handle this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	return 0;
      if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
	return 0;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_term (XEXP (x, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* Used for Alpha/NT pointers */
      /* As we do not know which address space the pointer is referring to, we
	 can handle this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	return 0;

      {
	rtx temp = find_base_term (XEXP (x, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    case VALUE:
      val = CSELIB_VAL_PTR (x);
      ret = NULL_RTX;

      if (!val)
	return ret;

      if (cselib_sp_based_value_p (val))
	return static_reg_base_value[STACK_POINTER_REGNUM];

      f = val->locs;
      /* Temporarily reset val->locs to avoid infinite recursion.  */
      val->locs = NULL;

      for (l = f; l; l = l->next)
	if (GET_CODE (l->loc) == VALUE
	    && CSELIB_VAL_PTR (l->loc)->locs
	    && !CSELIB_VAL_PTR (l->loc)->locs->next
	    && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
	  continue;
	else if ((ret = find_base_term (l->loc)) != 0)
	  break;

      val->locs = f;
      return ret;

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
	 second operand.  */
      return find_base_term (XEXP (x, 1));

    case CONST:
      x = XEXP (x, 0);
      if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
	return 0;
      /* Fall through.  */
    case PLUS:
    case MINUS:
      {
	rtx tmp1 = XEXP (x, 0);
	rtx tmp2 = XEXP (x, 1);

	/* This is a little bit tricky since we have to determine which of
	   the two operands represents the real base address.  Otherwise this
	   routine may return the index register instead of the base register.

	   That may cause us to believe no aliasing was possible, when in
	   fact aliasing is possible.

	   We use a few simple tests to guess the base register.  Additional
	   tests can certainly be added.  For example, if one of the operands
	   is a shift or multiply, then it must be the index register and the
	   other operand is the base register.  */

	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
	  return find_base_term (tmp2);

	/* If either operand is known to be a pointer, then prefer it
	   to determine the base term.  */
	if (REG_P (tmp1) && REG_POINTER (tmp1))
	  ;
	else if (REG_P (tmp2) && REG_POINTER (tmp2))
	  {
	    rtx tem = tmp1;
	    tmp1 = tmp2;
	    tmp2 = tem;
	  }

	/* Go ahead and find the base term for both operands.  If either base
	   term is from a pointer or is a named object or a special address
	   (like an argument or stack reference), then use it for the
	   base term.  */
	rtx base = find_base_term (tmp1);
	if (base != NULL_RTX
	    && ((REG_P (tmp1) && REG_POINTER (tmp1))
		|| known_base_value_p (base)))
	  return base;
	base = find_base_term (tmp2);
	if (base != NULL_RTX
	    && ((REG_P (tmp2) && REG_POINTER (tmp2))
		|| known_base_value_p (base)))
	  return base;

	/* We could not determine which of the two operands was the
	   base register and which was the index.  So we can determine
	   nothing from the base alias check.  */
	return 0;
      }

    case AND:
      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
	return find_base_term (XEXP (x, 0));
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return x;

    default:
      return 0;
    }
}
1793
1794 /* Return true if accesses to address X may alias accesses based
1795 on the stack pointer. */
1796
1797 bool
1798 may_be_sp_based_p (rtx x)
1799 {
1800 rtx base = find_base_term (x);
1801 return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
1802 }
1803
1804 /* Return 0 if the addresses X and Y are known to point to different
1805 objects, 1 if they might be pointers to the same object. */
1806
1807 static int
1808 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
1809 enum machine_mode x_mode, enum machine_mode y_mode)
1810 {
1811 /* If the address itself has no known base, see if a known equivalent
1812 value has one. If either address still has no known base, nothing
1813 is known about aliasing. */
1814 if (x_base == 0)
1815 {
1816 rtx x_c;
1817
1818 if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
1819 return 1;
1820
1821 x_base = find_base_term (x_c);
1822 if (x_base == 0)
1823 return 1;
1824 }
1825
1826 if (y_base == 0)
1827 {
1828 rtx y_c;
1829 if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
1830 return 1;
1831
1832 y_base = find_base_term (y_c);
1833 if (y_base == 0)
1834 return 1;
1835 }
1836
1837 /* If the base addresses are equal, nothing is known about aliasing. */
1838 if (rtx_equal_p (x_base, y_base))
1839 return 1;
1840
1841 /* The base addresses are different expressions. If they are not accessed
1842 via AND, there is no conflict. We can bring knowledge of object
1843 alignment into play here. For example, on alpha, "char a, b;" can
1844 alias one another, though "char a; long b;" cannot. AND addresses may
1845 implicitly alias surrounding objects; i.e. unaligned access in DImode
1846 via AND address can alias all surrounding object types except those
1847 with alignment 8 or higher. */
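/* E.g., with (and (reg P) (const_int -8)) used as a DImode address, an
   unaligned access may spill into a neighbouring object, so we answer
   "may alias" unless the other reference's mode is at least as wide as
   the 8-byte alignment the mask enforces. */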
1848 if (GET_CODE (x) == AND && GET_CODE (y) == AND)
1849 return 1;
1850 if (GET_CODE (x) == AND
1851 && (!CONST_INT_P (XEXP (x, 1))
1852 || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
1853 return 1;
1854 if (GET_CODE (y) == AND
1855 && (!CONST_INT_P (XEXP (y, 1))
1856 || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
1857 return 1;
1858
1859 /* Differing symbols not accessed via AND never alias. */
1860 if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
1861 return 0;
1862
1863 if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
1864 return 0;
1865
1866 return 1;
1867 }
1868
1869 /* Callback for for_each_rtx, which returns 1 upon encountering a VALUE
1870 whose UID is greater than the int uid that D points to. */
1871
1872 static int
1873 refs_newer_value_cb (rtx *x, void *d)
1874 {
1875 if (GET_CODE (*x) == VALUE && CSELIB_VAL_PTR (*x)->uid > *(int *)d)
1876 return 1;
1877
1878 return 0;
1879 }
1880
1881 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
1882 that of V. */
1883
1884 static bool
1885 refs_newer_value_p (rtx expr, rtx v)
1886 {
1887 int minuid = CSELIB_VAL_PTR (v)->uid;
1888
1889 return for_each_rtx (&expr, refs_newer_value_cb, &minuid);
1890 }
1891
1892 /* Convert the address X into something we can use. This is done by returning
1893 it unchanged unless it is a value; in the latter case we call cselib to get
1894 a more useful rtx. */
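/* For example, if X is a VALUE one of whose locations is a constant
   such as a hypothetical (symbol_ref "a"), that constant is returned in
   preference to any REG or MEM location. */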
1895
1896 rtx
1897 get_addr (rtx x)
1898 {
1899 cselib_val *v;
1900 struct elt_loc_list *l;
1901
1902 if (GET_CODE (x) != VALUE)
1903 return x;
1904 v = CSELIB_VAL_PTR (x);
1905 if (v)
1906 {
1907 bool have_equivs = cselib_have_permanent_equivalences ();
1908 if (have_equivs)
1909 v = canonical_cselib_val (v);
1910 for (l = v->locs; l; l = l->next)
1911 if (CONSTANT_P (l->loc))
1912 return l->loc;
1913 for (l = v->locs; l; l = l->next)
1914 if (!REG_P (l->loc) && !MEM_P (l->loc)
1915 /* Avoid infinite recursion when potentially dealing with
1916 var-tracking artificial equivalences, by skipping the
1917 equivalences themselves, and not choosing expressions
1918 that refer to newer VALUEs. */
1919 && (!have_equivs
1920 || (GET_CODE (l->loc) != VALUE
1921 && !refs_newer_value_p (l->loc, x))))
1922 return l->loc;
1923 if (have_equivs)
1924 {
1925 for (l = v->locs; l; l = l->next)
1926 if (REG_P (l->loc)
1927 || (GET_CODE (l->loc) != VALUE
1928 && !refs_newer_value_p (l->loc, x)))
1929 return l->loc;
1930 /* Return the canonical value. */
1931 return v->val_rtx;
1932 }
1933 if (v->locs)
1934 return v->locs->loc;
1935 }
1936 return x;
1937 }
1938
1939 /* Return the address of the (N_REFS + 1)th memory reference to ADDR
1940 where SIZE is the size in bytes of the memory reference. If ADDR
1941 is not modified by the memory reference then ADDR is returned. */
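/* For example (ignoring the final canonicalization): with SIZE 4 and
   N_REFS 0, (post_inc (reg R)) evaluates to (reg R), while
   (pre_inc (reg R)) evaluates to (plus (reg R) (const_int 4)); with
   N_REFS 1 the post-increment form gains the extra 4 as well. */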
1942
1943 static rtx
1944 addr_side_effect_eval (rtx addr, int size, int n_refs)
1945 {
1946 int offset = 0;
1947
1948 switch (GET_CODE (addr))
1949 {
1950 case PRE_INC:
1951 offset = (n_refs + 1) * size;
1952 break;
1953 case PRE_DEC:
1954 offset = -(n_refs + 1) * size;
1955 break;
1956 case POST_INC:
1957 offset = n_refs * size;
1958 break;
1959 case POST_DEC:
1960 offset = -n_refs * size;
1961 break;
1962
1963 default:
1964 return addr;
1965 }
1966
1967 if (offset)
1968 addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
1969 gen_int_mode (offset, GET_MODE (addr)));
1970 else
1971 addr = XEXP (addr, 0);
1972 addr = canon_rtx (addr);
1973
1974 return addr;
1975 }
1976
1977 /* Return TRUE if an object X sized at XSIZE bytes and another object
1978 Y sized at YSIZE bytes, starting C bytes after X, may overlap. If
1979 either size is zero, assume an overlap; otherwise use the
1980 absolute value of the sizes as the actual sizes. */
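/* For instance, with XSIZE = YSIZE = 4, an offset C of 2 yields an
   overlap (4 > 2), while an offset of 4 means the objects are merely
   adjacent and do not overlap. */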
1981
1982 static inline bool
1983 offset_overlap_p (HOST_WIDE_INT c, int xsize, int ysize)
1984 {
1985 return (xsize == 0 || ysize == 0
1986 || (c >= 0
1987 ? (abs (xsize) > c)
1988 : (abs (ysize) > -c)));
1989 }
1990
1991 /* Return one if X and Y (memory addresses) reference the
1992 same location in memory or if the references overlap.
1993 Return zero if they do not overlap, otherwise return
1994 minus one, in which case they might still reference the same location.
1995
1996 C is an offset accumulator. When
1997 C is nonzero, we are testing aliases between X and Y + C.
1998 XSIZE is the size in bytes of the X reference,
1999 similarly YSIZE is the size in bytes for Y.
2000 Expect that canon_rtx has been already called for X and Y.
2001
2002 If XSIZE or YSIZE is zero, we do not know the amount of memory being
2003 referenced (the reference was BLKmode), so make the most pessimistic
2004 assumptions.
2005
2006 If XSIZE or YSIZE is negative, we may access memory outside the object
2007 being referenced as a side effect. This can happen when using AND to
2008 align memory references, as is done on the Alpha.
2009
2010 It would be nice to notice that varying addresses cannot conflict with
2011 fp if no local variables had their addresses taken, but that's too hard now.
2012
2013 ??? Contrary to the tree alias oracle this does not return
2014 one for X + non-constant and Y + non-constant when X and Y are equal.
2015 If that is fixed the TBAA hack for union type-punning can be removed. */
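/* A small worked example, using a hypothetical base register B:
   comparing (plus (reg B) (const_int 8)) of size 4 against plain
   (reg B) of size 4 folds the constant into C and reduces to
   offset_overlap_p, which reports no conflict because the byte ranges
   [8,12) and [0,4) are disjoint. */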
2016
2017 static int
2018 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
2019 {
2020 if (GET_CODE (x) == VALUE)
2021 {
2022 if (REG_P (y))
2023 {
2024 struct elt_loc_list *l = NULL;
2025 if (CSELIB_VAL_PTR (x))
2026 for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
2027 l; l = l->next)
2028 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
2029 break;
2030 if (l)
2031 x = y;
2032 else
2033 x = get_addr (x);
2034 }
2035 /* Don't call get_addr if y is the same VALUE. */
2036 else if (x != y)
2037 x = get_addr (x);
2038 }
2039 if (GET_CODE (y) == VALUE)
2040 {
2041 if (REG_P (x))
2042 {
2043 struct elt_loc_list *l = NULL;
2044 if (CSELIB_VAL_PTR (y))
2045 for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
2046 l; l = l->next)
2047 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
2048 break;
2049 if (l)
2050 y = x;
2051 else
2052 y = get_addr (y);
2053 }
2054 /* Don't call get_addr if x is the same VALUE. */
2055 else if (y != x)
2056 y = get_addr (y);
2057 }
2058 if (GET_CODE (x) == HIGH)
2059 x = XEXP (x, 0);
2060 else if (GET_CODE (x) == LO_SUM)
2061 x = XEXP (x, 1);
2062 else
2063 x = addr_side_effect_eval (x, abs (xsize), 0);
2064 if (GET_CODE (y) == HIGH)
2065 y = XEXP (y, 0);
2066 else if (GET_CODE (y) == LO_SUM)
2067 y = XEXP (y, 1);
2068 else
2069 y = addr_side_effect_eval (y, abs (ysize), 0);
2070
2071 if (rtx_equal_for_memref_p (x, y))
2072 {
2073 return offset_overlap_p (c, xsize, ysize);
2074 }
2075
2076 /* This code used to check for conflicts involving stack references and
2077 globals but the base address alias code now handles these cases. */
2078
2079 if (GET_CODE (x) == PLUS)
2080 {
2081 /* The fact that X is canonicalized means that this
2082 PLUS rtx is canonicalized. */
2083 rtx x0 = XEXP (x, 0);
2084 rtx x1 = XEXP (x, 1);
2085
2086 if (GET_CODE (y) == PLUS)
2087 {
2088 /* The fact that Y is canonicalized means that this
2089 PLUS rtx is canonicalized. */
2090 rtx y0 = XEXP (y, 0);
2091 rtx y1 = XEXP (y, 1);
2092
2093 if (rtx_equal_for_memref_p (x1, y1))
2094 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2095 if (rtx_equal_for_memref_p (x0, y0))
2096 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2097 if (CONST_INT_P (x1))
2098 {
2099 if (CONST_INT_P (y1))
2100 return memrefs_conflict_p (xsize, x0, ysize, y0,
2101 c - INTVAL (x1) + INTVAL (y1));
2102 else
2103 return memrefs_conflict_p (xsize, x0, ysize, y,
2104 c - INTVAL (x1));
2105 }
2106 else if (CONST_INT_P (y1))
2107 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2108
2109 return -1;
2110 }
2111 else if (CONST_INT_P (x1))
2112 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
2113 }
2114 else if (GET_CODE (y) == PLUS)
2115 {
2116 /* The fact that Y is canonicalized means that this
2117 PLUS rtx is canonicalized. */
2118 rtx y0 = XEXP (y, 0);
2119 rtx y1 = XEXP (y, 1);
2120
2121 if (CONST_INT_P (y1))
2122 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2123 else
2124 return -1;
2125 }
2126
2127 if (GET_CODE (x) == GET_CODE (y))
2128 switch (GET_CODE (x))
2129 {
2130 case MULT:
2131 {
2132 /* Handle cases where we expect the second operands to be the
2133 same, and check only whether the first operands would conflict
2134 or not. */
2135 rtx x0, y0;
2136 rtx x1 = canon_rtx (XEXP (x, 1));
2137 rtx y1 = canon_rtx (XEXP (y, 1));
2138 if (! rtx_equal_for_memref_p (x1, y1))
2139 return -1;
2140 x0 = canon_rtx (XEXP (x, 0));
2141 y0 = canon_rtx (XEXP (y, 0));
2142 if (rtx_equal_for_memref_p (x0, y0))
2143 return offset_overlap_p (c, xsize, ysize);
2144
2145 /* Can't properly adjust our sizes. */
2146 if (!CONST_INT_P (x1))
2147 return -1;
2148 xsize /= INTVAL (x1);
2149 ysize /= INTVAL (x1);
2150 c /= INTVAL (x1);
2151 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2152 }
2153
2154 default:
2155 break;
2156 }
2157
2158 /* Deal with alignment ANDs by adjusting offset and size so as to
2159 cover the maximum range, without taking any previously known
2160 alignment into account. Make a size negative after such an
2161 adjustment, so that, if we end up with e.g. two SYMBOL_REFs, we
2162 assume a potential overlap, because they may end up in contiguous
2163 memory locations and the stricter-alignment access may span over
2164 part of both. */
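/* E.g., a 4-byte access through (and A (const_int -8)) may start up to
   7 bytes below A, so it is widened to an 11-byte window whose negative
   size records that the adjustment took place. */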
2165 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2166 {
2167 HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2168 unsigned HOST_WIDE_INT uc = sc;
2169 if (sc < 0 && -uc == (uc & -uc))
2170 {
2171 if (xsize > 0)
2172 xsize = -xsize;
2173 if (xsize)
2174 xsize += sc + 1;
2175 c -= sc + 1;
2176 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2177 ysize, y, c);
2178 }
2179 }
2180 if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2181 {
2182 HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2183 unsigned HOST_WIDE_INT uc = sc;
2184 if (sc < 0 && -uc == (uc & -uc))
2185 {
2186 if (ysize > 0)
2187 ysize = -ysize;
2188 if (ysize)
2189 ysize += sc + 1;
2190 c += sc + 1;
2191 return memrefs_conflict_p (xsize, x,
2192 ysize, canon_rtx (XEXP (y, 0)), c);
2193 }
2194 }
2195
2196 if (CONSTANT_P (x))
2197 {
2198 if (CONST_INT_P (x) && CONST_INT_P (y))
2199 {
2200 c += (INTVAL (y) - INTVAL (x));
2201 return offset_overlap_p (c, xsize, ysize);
2202 }
2203
2204 if (GET_CODE (x) == CONST)
2205 {
2206 if (GET_CODE (y) == CONST)
2207 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2208 ysize, canon_rtx (XEXP (y, 0)), c);
2209 else
2210 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2211 ysize, y, c);
2212 }
2213 if (GET_CODE (y) == CONST)
2214 return memrefs_conflict_p (xsize, x, ysize,
2215 canon_rtx (XEXP (y, 0)), c);
2216
2217 /* Assume a potential overlap for symbolic addresses that went
2218 through alignment adjustments (i.e., that have negative
2219 sizes), because we can't know how far they are from each
2220 other. */
2221 if (CONSTANT_P (y))
2222 return (xsize < 0 || ysize < 0 || offset_overlap_p (c, xsize, ysize));
2223
2224 return -1;
2225 }
2226
2227 return -1;
2228 }
2229
2230 /* Functions to compute memory dependencies.
2231
2232 Since we process the insns in execution order, we can build tables
2233 to keep track of what registers are fixed (and not aliased), what registers
2234 are varying in known ways, and what registers are varying in unknown
2235 ways.
2236
2237 If both memory references are volatile, then there must always be a
2238 dependence between the two references, since their order can not be
2239 changed. A volatile and non-volatile reference can be interchanged
2240 though.
2241
2242 We also must allow AND addresses, because they may generate accesses
2243 outside the object being referenced. This is used to generate aligned
2244 addresses from unaligned addresses, for instance, the alpha
2245 storeqi_unaligned pattern. */
2246
2247 /* Read dependence: X is read after read in MEM takes place. There can
2248 only be a dependence here if both reads are volatile, or if either is
2249 an explicit barrier. */
2250
2251 int
2252 read_dependence (const_rtx mem, const_rtx x)
2253 {
2254 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2255 return true;
2256 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2257 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2258 return true;
2259 return false;
2260 }
2261
2262 /* Return true if we can determine that the fields referenced cannot
2263 overlap for any pair of objects. */
2264
2265 static bool
2266 nonoverlapping_component_refs_p (const_rtx rtlx, const_rtx rtly)
2267 {
2268 const_tree x = MEM_EXPR (rtlx), y = MEM_EXPR (rtly);
2269 const_tree fieldx, fieldy, typex, typey, orig_y;
2270
2271 if (!flag_strict_aliasing
2272 || !x || !y
2273 || TREE_CODE (x) != COMPONENT_REF
2274 || TREE_CODE (y) != COMPONENT_REF)
2275 return false;
2276
2277 do
2278 {
2279 /* The comparison has to be done at a common type, since we don't
2280 know how the inheritance hierarchy works. */
2281 orig_y = y;
2282 do
2283 {
2284 fieldx = TREE_OPERAND (x, 1);
2285 typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));
2286
2287 y = orig_y;
2288 do
2289 {
2290 fieldy = TREE_OPERAND (y, 1);
2291 typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));
2292
2293 if (typex == typey)
2294 goto found;
2295
2296 y = TREE_OPERAND (y, 0);
2297 }
2298 while (y && TREE_CODE (y) == COMPONENT_REF);
2299
2300 x = TREE_OPERAND (x, 0);
2301 }
2302 while (x && TREE_CODE (x) == COMPONENT_REF);
2303 /* Never found a common type. */
2304 return false;
2305
2306 found:
2307 /* If we're left with accessing different fields of a structure, then
2308 there is no possible overlap, unless they are both bitfields. */
2309 if (TREE_CODE (typex) == RECORD_TYPE && fieldx != fieldy)
2310 return !(DECL_BIT_FIELD (fieldx) && DECL_BIT_FIELD (fieldy));
2311
2312 /* The comparison on the current field failed. If we're accessing
2313 a deeply nested structure, look at the next outer level. */
2314 x = TREE_OPERAND (x, 0);
2315 y = TREE_OPERAND (y, 0);
2316 }
2317 while (x && y
2318 && TREE_CODE (x) == COMPONENT_REF
2319 && TREE_CODE (y) == COMPONENT_REF);
2320
2321 return false;
2322 }
2323
2324 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */
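/* E.g., for the reference a.b.c this walks down the COMPONENT_REF chain
   and returns the VAR_DECL for "a", or NULL_TREE if the innermost base
   is not a declaration (such as a pointer dereference). */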
2325
2326 static tree
2327 decl_for_component_ref (tree x)
2328 {
2329 do
2330 {
2331 x = TREE_OPERAND (x, 0);
2332 }
2333 while (x && TREE_CODE (x) == COMPONENT_REF);
2334
2335 return x && DECL_P (x) ? x : NULL_TREE;
2336 }
2337
2338 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2339 for the offset of the field reference. *KNOWN_P says whether the
2340 offset is known. */
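/* E.g., for x.f where field "f" starts 8 bytes into its record, *OFFSET
   is increased by 8; if any field offset along the chain is not a
   compile-time constant, *KNOWN_P is cleared instead. */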
2341
2342 static void
2343 adjust_offset_for_component_ref (tree x, bool *known_p,
2344 HOST_WIDE_INT *offset)
2345 {
2346 if (!*known_p)
2347 return;
2348 do
2349 {
2350 tree xoffset = component_ref_field_offset (x);
2351 tree field = TREE_OPERAND (x, 1);
2352
2353 if (! tree_fits_uhwi_p (xoffset))
2354 {
2355 *known_p = false;
2356 return;
2357 }
2358 *offset += (tree_to_uhwi (xoffset)
2359 + (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
2360 / BITS_PER_UNIT));
2361
2362 x = TREE_OPERAND (x, 0);
2363 }
2364 while (x && TREE_CODE (x) == COMPONENT_REF);
2365 }
2366
2367 /* Return nonzero if we can determine the exprs corresponding to memrefs
2368 X and Y and they do not overlap.
2369 If LOOP_INVARIANT is set, skip offset-based disambiguation. */
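/* For instance, two locals in the same stack frame share the frame
   pointer as their base, so the decision falls to their offsets and
   sizes; a constant-pool reference and a frame reference, by contrast,
   have different bases and are known not to overlap. */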
2370
2371 int
2372 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2373 {
2374 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2375 rtx rtlx, rtly;
2376 rtx basex, basey;
2377 bool moffsetx_known_p, moffsety_known_p;
2378 HOST_WIDE_INT moffsetx = 0, moffsety = 0;
2379 HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2380
2381 /* Unless both have exprs, we can't tell anything. */
2382 if (exprx == 0 || expry == 0)
2383 return 0;
2384
2385 /* For spill-slot accesses make sure we have valid offsets. */
2386 if ((exprx == get_spill_slot_decl (false)
2387 && ! MEM_OFFSET_KNOWN_P (x))
2388 || (expry == get_spill_slot_decl (false)
2389 && ! MEM_OFFSET_KNOWN_P (y)))
2390 return 0;
2391
2392 /* If the field reference test failed, look at the DECLs involved. */
2393 moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2394 if (moffsetx_known_p)
2395 moffsetx = MEM_OFFSET (x);
2396 if (TREE_CODE (exprx) == COMPONENT_REF)
2397 {
2398 tree t = decl_for_component_ref (exprx);
2399 if (! t)
2400 return 0;
2401 adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2402 exprx = t;
2403 }
2404
2405 moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2406 if (moffsety_known_p)
2407 moffsety = MEM_OFFSET (y);
2408 if (TREE_CODE (expry) == COMPONENT_REF)
2409 {
2410 tree t = decl_for_component_ref (expry);
2411 if (! t)
2412 return 0;
2413 adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2414 expry = t;
2415 }
2416
2417 if (! DECL_P (exprx) || ! DECL_P (expry))
2418 return 0;
2419
2420 /* With invalid code we can end up storing into the constant pool.
2421 Bail out to avoid ICEing when creating RTL for this.
2422 See gfortran.dg/lto/20091028-2_0.f90. */
2423 if (TREE_CODE (exprx) == CONST_DECL
2424 || TREE_CODE (expry) == CONST_DECL)
2425 return 1;
2426
2427 rtlx = DECL_RTL (exprx);
2428 rtly = DECL_RTL (expry);
2429
2430 /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2431 can't overlap unless they are the same, because we never reuse the part
2432 of the stack frame used for locals for spilled pseudos. */
2433 if ((!MEM_P (rtlx) || !MEM_P (rtly))
2434 && ! rtx_equal_p (rtlx, rtly))
2435 return 1;
2436
2437 /* If we have MEMs referring to different address spaces (which can
2438 potentially overlap), we cannot easily tell from the addresses
2439 whether the references overlap. */
2440 if (MEM_P (rtlx) && MEM_P (rtly)
2441 && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2442 return 0;
2443
2444 /* Get the base and offsets of both decls. If either is a register, we
2445 know both are and are the same, so use that as the base. The only way
2446 we can avoid overlap is if we can deduce that they are nonoverlapping
2447 pieces of that decl, which is very rare. */
2448 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2449 if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2450 offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2451
2452 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2453 if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2454 offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2455
2456 /* If the bases are different, we know they do not overlap if both
2457 are constants or if one is a constant and the other a pointer into the
2458 stack frame. Otherwise a different base means we can't tell if they
2459 overlap or not. */
2460 if (! rtx_equal_p (basex, basey))
2461 return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2462 || (CONSTANT_P (basex) && REG_P (basey)
2463 && REGNO_PTR_FRAME_P (REGNO (basey)))
2464 || (CONSTANT_P (basey) && REG_P (basex)
2465 && REGNO_PTR_FRAME_P (REGNO (basex))));
2466
2467 /* Offset-based disambiguation is not appropriate for loop-invariant checks. */
2468 if (loop_invariant)
2469 return 0;
2470
2471 sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2472 : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2473 : -1);
2474 sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2475 : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2476 : -1);
2477
2478 /* If we have an offset for either memref, it can update the values computed
2479 above. */
2480 if (moffsetx_known_p)
2481 offsetx += moffsetx, sizex -= moffsetx;
2482 if (moffsety_known_p)
2483 offsety += moffsety, sizey -= moffsety;
2484
2485 /* If a memref has both a size and an offset, we can use the smaller size.
2486 We can't do this if the offset isn't known because we must view this
2487 memref as being anywhere inside the DECL's MEM. */
2488 if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2489 sizex = MEM_SIZE (x);
2490 if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2491 sizey = MEM_SIZE (y);
2492
2493 /* Put the values of the memref with the lower offset in X's values. */
2494 if (offsetx > offsety)
2495 {
2496 tem = offsetx, offsetx = offsety, offsety = tem;
2497 tem = sizex, sizex = sizey, sizey = tem;
2498 }
2499
2500 /* If we don't know the size of the lower-offset value, we can't tell
2501 if they conflict. Otherwise, we do the test. */
2502 return sizex >= 0 && offsety >= offsetx + sizex;
2503 }
2504
2505 /* Helper for true_dependence and canon_true_dependence.
2506 Checks for true dependence: X is read after store in MEM takes place.
2507
2508 If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2509 NULL_RTX, and the canonical addresses of MEM and X are both computed
2510 here. If MEM_CANONICALIZED, then MEM must be already canonicalized.
2511
2512 If X_ADDR is non-NULL, it is used in preference to XEXP (x, 0).
2513
2514 Returns 1 if there is a true dependence, 0 otherwise. */
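/* The checks below run roughly from cheapest to most expensive:
   volatility and barriers, address spaces, base terms, offset overlap,
   alias sets, and finally MEM_EXPR-based disambiguation and the tree
   oracle. */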
2515
2516 static int
2517 true_dependence_1 (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2518 const_rtx x, rtx x_addr, bool mem_canonicalized)
2519 {
2520 rtx base;
2521 int ret;
2522
2523 gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
2524 : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
2525
2526 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2527 return 1;
2528
2529 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2530 This is used in epilogue deallocation functions, and in cselib. */
2531 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2532 return 1;
2533 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2534 return 1;
2535 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2536 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2537 return 1;
2538
2539 /* Read-only memory is by definition never modified, and therefore can't
2540 conflict with anything. We don't expect to find read-only set on MEM,
2541 but stupid user tricks can produce them, so don't die. */
2542 if (MEM_READONLY_P (x))
2543 return 0;
2544
2545 /* If we have MEMs referring to different address spaces (which can
2546 potentially overlap), we cannot easily tell from the addresses
2547 whether the references overlap. */
2548 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2549 return 1;
2550
2551 if (! mem_addr)
2552 {
2553 mem_addr = XEXP (mem, 0);
2554 if (mem_mode == VOIDmode)
2555 mem_mode = GET_MODE (mem);
2556 }
2557
2558 if (! x_addr)
2559 {
2560 x_addr = XEXP (x, 0);
2561 if (!((GET_CODE (x_addr) == VALUE
2562 && GET_CODE (mem_addr) != VALUE
2563 && reg_mentioned_p (x_addr, mem_addr))
2564 || (GET_CODE (x_addr) != VALUE
2565 && GET_CODE (mem_addr) == VALUE
2566 && reg_mentioned_p (mem_addr, x_addr))))
2567 {
2568 x_addr = get_addr (x_addr);
2569 if (! mem_canonicalized)
2570 mem_addr = get_addr (mem_addr);
2571 }
2572 }
2573
2574 base = find_base_term (x_addr);
2575 if (base && (GET_CODE (base) == LABEL_REF
2576 || (GET_CODE (base) == SYMBOL_REF
2577 && CONSTANT_POOL_ADDRESS_P (base))))
2578 return 0;
2579
2580 rtx mem_base = find_base_term (mem_addr);
2581 if (! base_alias_check (x_addr, base, mem_addr, mem_base,
2582 GET_MODE (x), mem_mode))
2583 return 0;
2584
2585 x_addr = canon_rtx (x_addr);
2586 if (!mem_canonicalized)
2587 mem_addr = canon_rtx (mem_addr);
2588
2589 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2590 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2591 return ret;
2592
2593 if (mems_in_disjoint_alias_sets_p (x, mem))
2594 return 0;
2595
2596 if (nonoverlapping_memrefs_p (mem, x, false))
2597 return 0;
2598
2599 if (nonoverlapping_component_refs_p (mem, x))
2600 return 0;
2601
2602 return rtx_refs_may_alias_p (x, mem, true);
2603 }
2604
2605 /* True dependence: X is read after store in MEM takes place. */
2606
2607 int
2608 true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x)
2609 {
2610 return true_dependence_1 (mem, mem_mode, NULL_RTX,
2611 x, NULL_RTX, /*mem_canonicalized=*/false);
2612 }
2613
2614 /* Canonical true dependence: X is read after store in MEM takes place.
2615 Variant of true_dependence which assumes MEM has already been
2616 canonicalized (hence we no longer do that here).
2617 The mem_addr argument has been added, since true_dependence_1 computed
2618 this value prior to canonicalizing. */
2619
2620 int
2621 canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2622 const_rtx x, rtx x_addr)
2623 {
2624 return true_dependence_1 (mem, mem_mode, mem_addr,
2625 x, x_addr, /*mem_canonicalized=*/true);
2626 }
2627
2628 /* Returns nonzero if a write to X might alias a previous read from
2629 (or, if WRITEP is true, a write to) MEM.
2630 If X_CANONICALIZED is true, then X_ADDR is the canonicalized address of X,
2631 and X_MODE the mode for that access.
2632 If MEM_CANONICALIZED is true, MEM is canonicalized. */
2633
2634 static int
2635 write_dependence_p (const_rtx mem,
2636 const_rtx x, enum machine_mode x_mode, rtx x_addr,
2637 bool mem_canonicalized, bool x_canonicalized, bool writep)
2638 {
2639 rtx mem_addr;
2640 rtx base;
2641 int ret;
2642
2643 gcc_checking_assert (x_canonicalized
2644 ? (x_addr != NULL_RTX && x_mode != VOIDmode)
2645 : (x_addr == NULL_RTX && x_mode == VOIDmode));
2646
2647 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2648 return 1;
2649
2650 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2651 This is used in epilogue deallocation functions. */
2652 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2653 return 1;
2654 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2655 return 1;
2656 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2657 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2658 return 1;
2659
2660 /* A read from read-only memory can't conflict with read-write memory. */
2661 if (!writep && MEM_READONLY_P (mem))
2662 return 0;
2663
2664 /* If we have MEMs referring to different address spaces (which can
2665 potentially overlap), we cannot easily tell from the addresses
2666 whether the references overlap. */
2667 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2668 return 1;
2669
2670 mem_addr = XEXP (mem, 0);
2671 if (!x_addr)
2672 {
2673 x_addr = XEXP (x, 0);
2674 if (!((GET_CODE (x_addr) == VALUE
2675 && GET_CODE (mem_addr) != VALUE
2676 && reg_mentioned_p (x_addr, mem_addr))
2677 || (GET_CODE (x_addr) != VALUE
2678 && GET_CODE (mem_addr) == VALUE
2679 && reg_mentioned_p (mem_addr, x_addr))))
2680 {
2681 x_addr = get_addr (x_addr);
2682 if (!mem_canonicalized)
2683 mem_addr = get_addr (mem_addr);
2684 }
2685 }
2686
2687 base = find_base_term (mem_addr);
2688 if (! writep
2689 && base
2690 && (GET_CODE (base) == LABEL_REF
2691 || (GET_CODE (base) == SYMBOL_REF
2692 && CONSTANT_POOL_ADDRESS_P (base))))
2693 return 0;
2694
2695 rtx x_base = find_base_term (x_addr);
2696 if (! base_alias_check (x_addr, x_base, mem_addr, base, GET_MODE (x),
2697 GET_MODE (mem)))
2698 return 0;
2699
2700 if (!x_canonicalized)
2701 {
2702 x_addr = canon_rtx (x_addr);
2703 x_mode = GET_MODE (x);
2704 }
2705 if (!mem_canonicalized)
2706 mem_addr = canon_rtx (mem_addr);
2707
2708 if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2709 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1)
2710 return ret;
2711
2712 if (nonoverlapping_memrefs_p (x, mem, false))
2713 return 0;
2714
2715 return rtx_refs_may_alias_p (x, mem, false);
2716 }
2717
2718 /* Anti dependence: X is written after read in MEM takes place. */
2719
2720 int
2721 anti_dependence (const_rtx mem, const_rtx x)
2722 {
2723 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2724 /*mem_canonicalized=*/false,
2725 /*x_canonicalized*/false, /*writep=*/false);
2726 }
2727
2728 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
2729 Also, consider X in X_MODE (which might be from an enclosing
2730 STRICT_LOW_PART / ZERO_EXTRACT).
2731 If MEM_CANONICALIZED is true, MEM is canonicalized. */
2732
2733 int
2734 canon_anti_dependence (const_rtx mem, bool mem_canonicalized,
2735 const_rtx x, enum machine_mode x_mode, rtx x_addr)
2736 {
2737 return write_dependence_p (mem, x, x_mode, x_addr,
2738 mem_canonicalized, /*x_canonicalized=*/true,
2739 /*writep=*/false);
2740 }
2741
2742 /* Output dependence: X is written after store in MEM takes place. */
2743
2744 int
2745 output_dependence (const_rtx mem, const_rtx x)
2746 {
2747 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2748 /*mem_canonicalized=*/false,
2749 /*x_canonicalized*/false, /*writep=*/true);
2750 }
2751 \f
2752
2753
2754 /* Check whether X may be aliased with MEM. Don't do offset-based
2755 memory disambiguation or TBAA. */
2756 int
2757 may_alias_p (const_rtx mem, const_rtx x)
2758 {
2759 rtx x_addr, mem_addr;
2760
2761 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2762 return 1;
2763
2764 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2765 This is used in epilogue deallocation functions. */
2766 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2767 return 1;
2768 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2769 return 1;
2770 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2771 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2772 return 1;
2773
2774 /* Read-only memory is by definition never modified, and therefore can't
2775 conflict with anything. We don't expect to find read-only set on MEM,
2776 but stupid user tricks can produce them, so don't die. */
2777 if (MEM_READONLY_P (x))
2778 return 0;
2779
2780 /* If we have MEMs referring to different address spaces (which can
2781 potentially overlap), we cannot easily tell from the addresses
2782 whether the references overlap. */
2783 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2784 return 1;
2785
2786 x_addr = XEXP (x, 0);
2787 mem_addr = XEXP (mem, 0);
2788 if (!((GET_CODE (x_addr) == VALUE
2789 && GET_CODE (mem_addr) != VALUE
2790 && reg_mentioned_p (x_addr, mem_addr))
2791 || (GET_CODE (x_addr) != VALUE
2792 && GET_CODE (mem_addr) == VALUE
2793 && reg_mentioned_p (mem_addr, x_addr))))
2794 {
2795 x_addr = get_addr (x_addr);
2796 mem_addr = get_addr (mem_addr);
2797 }
2798
2799 rtx x_base = find_base_term (x_addr);
2800 rtx mem_base = find_base_term (mem_addr);
2801 if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
2802 GET_MODE (x), GET_MODE (mem_addr)))
2803 return 0;
2804
2805 x_addr = canon_rtx (x_addr);
2806 mem_addr = canon_rtx (mem_addr);
2807
2808 if (nonoverlapping_memrefs_p (mem, x, true))
2809 return 0;
2810
2811 /* TBAA is not valid for loop-invariant disambiguation. */
2812 return rtx_refs_may_alias_p (x, mem, false);
2813 }
2814
2815 void
2816 init_alias_target (void)
2817 {
2818 int i;
2819
2820 if (!arg_base_value)
2821 arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
2822
2823 memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2824
2825 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2826 /* Check whether this register can hold an incoming pointer
2827 argument. FUNCTION_ARG_REGNO_P tests outgoing register
2828 numbers, so translate if necessary due to register windows. */
2829 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2830 && HARD_REGNO_MODE_OK (i, Pmode))
2831 static_reg_base_value[i] = arg_base_value;
2832
2833 static_reg_base_value[STACK_POINTER_REGNUM]
2834 = unique_base_value (UNIQUE_BASE_VALUE_SP);
2835 static_reg_base_value[ARG_POINTER_REGNUM]
2836 = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
2837 static_reg_base_value[FRAME_POINTER_REGNUM]
2838 = unique_base_value (UNIQUE_BASE_VALUE_FP);
2839 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2840 static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2841 = unique_base_value (UNIQUE_BASE_VALUE_HFP);
2842 #endif
2843 }
2844
2845 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
2846 to be a memory reference). */
2847 static bool memory_modified;
2848 static void
2849 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
2850 {
2851 if (MEM_P (x))
2852 {
2853 if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
2854 memory_modified = true;
2855 }
2856 }
2857
2858
2859 /* Return true when INSN possibly modifies the memory contents of MEM
2860 (i.e. the contents at that address may be written). */
2861 bool
2862 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
2863 {
2864 if (!INSN_P (insn))
2865 return false;
2866 memory_modified = false;
2867 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
2868 return memory_modified;
2869 }
2870
2871 /* Return TRUE if the destination of a set is an rtx identical to
2872 ITEM. */
2873 static inline bool
2874 set_dest_equal_p (const_rtx set, const_rtx item)
2875 {
2876 rtx dest = SET_DEST (set);
2877 return rtx_equal_p (dest, item);
2878 }
2879
2880 /* Like memory_modified_in_insn_p, but return TRUE if INSN will
2881 *DEFINITELY* modify the memory contents of MEM. */
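/* E.g., an insn whose pattern is (set (mem M) (reg R)) definitely
   stores to M, whereas a conditional store or a store through a
   possibly-equal but not identical address only *may* do so and is
   rejected here. */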
2882 bool
2883 memory_must_be_modified_in_insn_p (const_rtx mem, const_rtx insn)
2884 {
2885 if (!INSN_P (insn))
2886 return false;
2887 insn = PATTERN (insn);
2888 if (GET_CODE (insn) == SET)
2889 return set_dest_equal_p (insn, mem);
2890 else if (GET_CODE (insn) == PARALLEL)
2891 {
2892 int i;
2893 for (i = 0; i < XVECLEN (insn, 0); i++)
2894 {
2895 rtx sub = XVECEXP (insn, 0, i);
2896 if (GET_CODE (sub) == SET
2897 && set_dest_equal_p (sub, mem))
2898 return true;
2899 }
2900 }
2901 return false;
2902 }
2903
2904 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
2905 array. */
2906
2907 void
2908 init_alias_analysis (void)
2909 {
2910 unsigned int maxreg = max_reg_num ();
2911 int changed, pass;
2912 int i;
2913 unsigned int ui;
2914 rtx insn, val;
2915 int rpo_cnt;
2916 int *rpo;
2917
2918 timevar_push (TV_ALIAS_ANALYSIS);
2919
2920 vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
2921 reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
2922 bitmap_clear (reg_known_equiv_p);
2923
2924 /* If we have memory allocated from the previous run, use it. */
2925 if (old_reg_base_value)
2926 reg_base_value = old_reg_base_value;
2927
2928 if (reg_base_value)
2929 reg_base_value->truncate (0);
2930
2931 vec_safe_grow_cleared (reg_base_value, maxreg);
2932
2933 new_reg_base_value = XNEWVEC (rtx, maxreg);
2934 reg_seen = sbitmap_alloc (maxreg);
2935
2936 /* The basic idea is that each pass through this loop will use the
2937 "constant" information from the previous pass to propagate alias
2938 information through another level of assignments.
2939
2940 The propagation is done on the CFG in reverse post-order, to propagate
2941 things forward as far as possible in each iteration.
2942
2943 This could get expensive if the assignment chains are long. Maybe
2944 we should throttle the number of iterations, possibly based on
2945 the optimization level or flag_expensive_optimizations.
2946
2947 We could propagate more information in the first pass by making use
2948 of DF_REG_DEF_COUNT to determine immediately that the alias information
2949 for a pseudo is "constant".
2950
2951 A program with an uninitialized variable can cause an infinite loop
2952 here. Instead of doing a full dataflow analysis to detect such problems
2953 we just cap the number of iterations for the loop.
2954
2955 The state of the arrays for the set chain in question does not matter
2956 since the program has undefined behavior. */
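/* As a sketch of the propagation, using hypothetical pseudos r100 and
   r101: after "r100 = &a; r101 = r100 + 4;" the walk records a's
   address as the base value of r100, and the reverse post-order
   traversal lets r101 pick up that same base when its set is
   processed. */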
2957
2958 rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2959 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
2960
2961 pass = 0;
2962 do
2963 {
2964 /* Assume nothing will change this iteration of the loop. */
2965 changed = 0;
2966
2967 /* We want to assign the same IDs each iteration of this loop, so
2968 start counting from one each iteration of the loop. */
2969 unique_id = 1;
2970
2971 /* We're at the start of the function each iteration through the
2972 loop, so we're copying arguments. */
2973 copying_arguments = true;
2974
2975 /* Wipe the potential alias information clean for this pass. */
2976 memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2977
2978 /* Wipe the reg_seen array clean. */
2979 bitmap_clear (reg_seen);
2980
2981 /* Initialize the alias information for this pass. */
2982 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2983 if (static_reg_base_value[i])
2984 {
2985 new_reg_base_value[i] = static_reg_base_value[i];
2986 bitmap_set_bit (reg_seen, i);
2987 }
2988
2989 /* Walk the insns adding values to the new_reg_base_value array. */
2990 for (i = 0; i < rpo_cnt; i++)
2991 {
2992 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
2993 FOR_BB_INSNS (bb, insn)
2994 {
2995 if (NONDEBUG_INSN_P (insn))
2996 {
2997 rtx note, set;
2998
2999 #if defined (HAVE_prologue) || defined (HAVE_epilogue)
3000 /* The prologue/epilogue insns are not threaded onto the
3001 insn chain until after reload has completed. Thus,
3002 there is no sense wasting time checking if INSN is in
3003 the prologue/epilogue until after reload has completed. */
3004 if (reload_completed
3005 && prologue_epilogue_contains (insn))
3006 continue;
3007 #endif
3008
3009 /* If this insn has a noalias note, process it. Otherwise,
3010 scan for sets. A simple set will have no side effects
3011 which could change the base value of any other register. */
3012
3013 if (GET_CODE (PATTERN (insn)) == SET
3014 && REG_NOTES (insn) != 0
3015 && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
3016 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
3017 else
3018 note_stores (PATTERN (insn), record_set, NULL);
3019
3020 set = single_set (insn);
3021
3022 if (set != 0
3023 && REG_P (SET_DEST (set))
3024 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3025 {
3026 unsigned int regno = REGNO (SET_DEST (set));
3027 rtx src = SET_SRC (set);
3028 rtx t;
3029
3030 note = find_reg_equal_equiv_note (insn);
3031 if (note && REG_NOTE_KIND (note) == REG_EQUAL
3032 && DF_REG_DEF_COUNT (regno) != 1)
3033 note = NULL_RTX;
3034
3035 if (note != NULL_RTX
3036 && GET_CODE (XEXP (note, 0)) != EXPR_LIST
3037 && ! rtx_varies_p (XEXP (note, 0), 1)
3038 && ! reg_overlap_mentioned_p (SET_DEST (set),
3039 XEXP (note, 0)))
3040 {
3041 set_reg_known_value (regno, XEXP (note, 0));
3042 set_reg_known_equiv_p (regno,
3043 REG_NOTE_KIND (note) == REG_EQUIV);
3044 }
3045 else if (DF_REG_DEF_COUNT (regno) == 1
3046 && GET_CODE (src) == PLUS
3047 && REG_P (XEXP (src, 0))
3048 && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
3049 && CONST_INT_P (XEXP (src, 1)))
3050 {
3051 t = plus_constant (GET_MODE (src), t,
3052 INTVAL (XEXP (src, 1)));
3053 set_reg_known_value (regno, t);
3054 set_reg_known_equiv_p (regno, false);
3055 }
3056 else if (DF_REG_DEF_COUNT (regno) == 1
3057 && ! rtx_varies_p (src, 1))
3058 {
3059 set_reg_known_value (regno, src);
3060 set_reg_known_equiv_p (regno, false);
3061 }
3062 }
3063 }
3064 else if (NOTE_P (insn)
3065 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
3066 copying_arguments = false;
3067 }
3068 }
3069
3070 /* Now propagate values from new_reg_base_value to reg_base_value. */
3071 gcc_assert (maxreg == (unsigned int) max_reg_num ());
3072
3073 for (ui = 0; ui < maxreg; ui++)
3074 {
3075 if (new_reg_base_value[ui]
3076 && new_reg_base_value[ui] != (*reg_base_value)[ui]
3077 && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
3078 {
3079 (*reg_base_value)[ui] = new_reg_base_value[ui];
3080 changed = 1;
3081 }
3082 }
3083 }
3084 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
3085 XDELETEVEC (rpo);
3086
3087 /* Fill in the remaining entries. */
3088 FOR_EACH_VEC_ELT (*reg_known_value, i, val)
3089 {
3090 int regno = i + FIRST_PSEUDO_REGISTER;
3091 if (! val)
3092 set_reg_known_value (regno, regno_reg_rtx[regno]);
3093 }
3094
3095 /* Clean up. */
3096 free (new_reg_base_value);
3097 new_reg_base_value = 0;
3098 sbitmap_free (reg_seen);
3099 reg_seen = 0;
3100 timevar_pop (TV_ALIAS_ANALYSIS);
3101 }
3102
3103 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3104 Special API for var-tracking pass purposes. */
3105
3106 void
3107 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3108 {
3109 (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3110 }
3111
3112 void
3113 end_alias_analysis (void)
3114 {
3115 old_reg_base_value = reg_base_value;
3116 vec_free (reg_known_value);
3117 sbitmap_free (reg_known_equiv_p);
3118 }
3119
3120 #include "gt-alias.h"