/* Scalar Replacement of Aggregates (SRA) converts some structure
   references into scalar references, exposing them to the scalar
   optimizers.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"

/* These RTL headers are needed for basic-block.h.  */
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "tree-flow.h"
#include "gimple.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "timevar.h"
#include "flags.h"
#include "bitmap.h"
#include "obstack.h"
#include "target.h"
/* expr.h is needed for MOVE_RATIO.  */
#include "expr.h"
#include "params.h"

/* The object of this pass is to replace a non-addressable aggregate with a
   set of independent variables.  Most of the time, all of these variables
   will be scalars.  But a secondary objective is to break up larger
   aggregates into smaller aggregates.  In the process we may find that some
   bits of the larger aggregate can be deleted as unreferenced.

   This substitution is done globally.  More localized substitutions would
   be the purview of a load-store motion pass.

   The optimization proceeds in phases:

     (1) Identify variables that have types that are candidates for
	 decomposition.

     (2) Scan the function looking for the ways these variables are used.
	 In particular we're interested in the number of times a variable
	 (or member) is needed as a complete unit, and the number of times
	 a variable (or member) is copied.

     (3) Based on the usage profile, instantiate substitution variables.

     (4) Scan the function making replacements.
*/
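
/* As an illustrative sketch (not code from this pass), given

     struct point { int x; int y; } p;
     p.x = 1;
     p.y = 2;
     return p.x + p.y;

   SRA may replace the aggregate P with two scalar temporaries, named
   after the elements they replace:

     p$x = 1;
     p$y = 2;
     return p$x + p$y;  */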


/* True if this is the "early" pass, before inlining.  */
static bool early_sra;

/* The set of todo flags to return from tree_sra.  */
static unsigned int todoflags;

/* The set of aggregate variables that are candidates for scalarization.  */
static bitmap sra_candidates;

/* Set of scalarizable PARM_DECLs that need copy-in operations at the
   beginning of the function.  */
static bitmap needs_copy_in;

/* Sets of bit pairs that cache type decomposition and instantiation.  */
static bitmap sra_type_decomp_cache;
static bitmap sra_type_inst_cache;

/* One of these structures is created for each candidate aggregate and
   each (accessed) member or group of members of such an aggregate.  */
struct sra_elt
{
  /* A tree of the elements.  Used when we want to traverse everything.  */
  struct sra_elt *parent;
  struct sra_elt *groups;
  struct sra_elt *children;
  struct sra_elt *sibling;

  /* If this element is a root, then this is the VAR_DECL.  If this is
     a sub-element, this is some token used to identify the reference.
     In the case of COMPONENT_REF, this is the FIELD_DECL.  In the case
     of an ARRAY_REF, this is the (constant) index.  In the case of an
     ARRAY_RANGE_REF, this is the (constant) RANGE_EXPR.  In the case
     of a complex number, this is a zero or one.  */
  tree element;

  /* The type of the element.  */
  tree type;

  /* A VAR_DECL, for any sub-element we've decided to replace.  */
  tree replacement;

  /* The number of times the element is referenced as a whole.  I.e.
     given "a.b.c", this would be incremented for C, but not for A or B.  */
  unsigned int n_uses;

  /* The number of times the element is copied to or from another
     scalarizable element.  */
  unsigned int n_copies;

  /* True if TYPE is scalar.  */
  bool is_scalar;

  /* True if this element is a group of members of its parent.  */
  bool is_group;

  /* True if we saw something about this element that prevents scalarization,
     such as non-constant indexing.  */
  bool cannot_scalarize;

  /* True if we've decided that structure-to-structure assignment
     should happen via memcpy and not per-element.  */
  bool use_block_copy;

  /* True if everything under this element has been marked TREE_NO_WARNING.  */
  bool all_no_warning;

  /* A flag for use with/after random access traversals.  */
  bool visited;

  /* True if this element, a vector, appears in a BIT_FIELD_REF on the
     left-hand side.  */
  bool is_vector_lhs;

  /* 1 if the element is a field that is part of a block, 2 if the field
     is the block itself, 0 if it's neither.  */
  char in_bitfld_block;
};

#define IS_ELEMENT_FOR_GROUP(ELEMENT) (TREE_CODE (ELEMENT) == RANGE_EXPR)

#define FOR_EACH_ACTUAL_CHILD(CHILD, ELT)			\
  for ((CHILD) = (ELT)->is_group				\
		 ? next_child_for_group (NULL, (ELT))		\
		 : (ELT)->children;				\
       (CHILD);							\
       (CHILD) = (ELT)->is_group				\
		 ? next_child_for_group ((CHILD), (ELT))	\
		 : (CHILD)->sibling)

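/* Usage sketch (illustrative only): iterate over the children that
   actually belong to ELT, resolving group elements to the matching
   children of the group's parent:

     struct sra_elt *c;
     FOR_EACH_ACTUAL_CHILD (c, elt)
       dump_sra_elt_name (dump_file, c);  */
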
/* Helper function for above macro.  Return next child in group.  */
static struct sra_elt *
next_child_for_group (struct sra_elt *child, struct sra_elt *group)
{
  gcc_assert (group->is_group);

  /* Find the next child in the parent.  */
  if (child)
    child = child->sibling;
  else
    child = group->parent->children;

  /* Skip siblings that do not belong to the group.  */
  while (child)
    {
      tree g_elt = group->element;
      if (TREE_CODE (g_elt) == RANGE_EXPR)
	{
	  if (!tree_int_cst_lt (child->element, TREE_OPERAND (g_elt, 0))
	      && !tree_int_cst_lt (TREE_OPERAND (g_elt, 1), child->element))
	    break;
	}
      else
	gcc_unreachable ();

      child = child->sibling;
    }

  return child;
}

/* Random access to the child of a parent is performed by hashing.
   This prevents quadratic behavior, and allows SRA to function
   reasonably on larger records.  */
static htab_t sra_map;

/* All structures are allocated out of the following obstack.  */
static struct obstack sra_obstack;

/* Debugging functions.  */
static void dump_sra_elt_name (FILE *, struct sra_elt *);
extern void debug_sra_elt_name (struct sra_elt *);

/* Forward declarations.  */
static tree generate_element_ref (struct sra_elt *);
static gimple_seq sra_build_assignment (tree dst, tree src);
static void mark_all_v_defs_seq (gimple_seq);
static void mark_all_v_defs_stmt (gimple);

\f
/* Return true if DECL is an SRA candidate.  */

static bool
is_sra_candidate_decl (tree decl)
{
  return DECL_P (decl) && bitmap_bit_p (sra_candidates, DECL_UID (decl));
}

/* Return true if TYPE is a scalar type.  */

static bool
is_sra_scalar_type (tree type)
{
  enum tree_code code = TREE_CODE (type);
  return (code == INTEGER_TYPE || code == REAL_TYPE || code == VECTOR_TYPE
	  || code == FIXED_POINT_TYPE
	  || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE
	  || code == POINTER_TYPE || code == OFFSET_TYPE
	  || code == REFERENCE_TYPE);
}

/* Return true if TYPE can be decomposed into a set of independent variables.

   Note that this doesn't imply that all elements of TYPE can be
   instantiated, just that if we decide to break up the type into
   separate pieces that it can be done.  */

bool
sra_type_can_be_decomposed_p (tree type)
{
  unsigned int cache = TYPE_UID (TYPE_MAIN_VARIANT (type)) * 2;
  tree t;

  /* Avoid searching the same type twice.  */
  if (bitmap_bit_p (sra_type_decomp_cache, cache+0))
    return true;
  if (bitmap_bit_p (sra_type_decomp_cache, cache+1))
    return false;

  /* The type must have a definite nonzero size.  */
  if (TYPE_SIZE (type) == NULL || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
      || integer_zerop (TYPE_SIZE (type)))
    goto fail;

  /* The type must be a non-union aggregate.  */
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
      {
	bool saw_one_field = false;

	for (t = TYPE_FIELDS (type); t ; t = TREE_CHAIN (t))
	  if (TREE_CODE (t) == FIELD_DECL)
	    {
	      /* Reject incorrectly represented bit fields.  */
	      if (DECL_BIT_FIELD (t)
		  && INTEGRAL_TYPE_P (TREE_TYPE (t))
		  && (tree_low_cst (DECL_SIZE (t), 1)
		      != TYPE_PRECISION (TREE_TYPE (t))))
		goto fail;

	      saw_one_field = true;
	    }

	/* Record types must have at least one field.  */
	if (!saw_one_field)
	  goto fail;
      }
      break;

    case ARRAY_TYPE:
      /* Array types must have a fixed lower and upper bound.  */
      t = TYPE_DOMAIN (type);
      if (t == NULL)
	goto fail;
      if (TYPE_MIN_VALUE (t) == NULL || !TREE_CONSTANT (TYPE_MIN_VALUE (t)))
	goto fail;
      if (TYPE_MAX_VALUE (t) == NULL || !TREE_CONSTANT (TYPE_MAX_VALUE (t)))
	goto fail;
      break;

    case COMPLEX_TYPE:
      break;

    default:
      goto fail;
    }

  bitmap_set_bit (sra_type_decomp_cache, cache+0);
  return true;

 fail:
  bitmap_set_bit (sra_type_decomp_cache, cache+1);
  return false;
}

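/* Illustrative examples of the rules above (a sketch, not exhaustive):

     struct ok    { int a; float b; };   decomposable RECORD_TYPE
     union  no    { int a; float b; };   rejected: not a non-union aggregate
     struct empty { };                   rejected: zero size
     int vla[n];                         rejected: no constant bounds  */
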
/* Returns true if the TYPE is one of the available va_list types.
   Otherwise it returns false.
   Note that for multiple calling conventions there can be more
   than just one va_list type present.  */

static bool
is_va_list_type (tree type)
{
  tree h;

  if (type == NULL_TREE)
    return false;
  h = targetm.canonical_va_list_type (type);
  if (h == NULL_TREE)
    return false;
  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (h))
    return true;
  return false;
}

/* Return true if DECL can be decomposed into a set of independent
   (though not necessarily scalar) variables.  */

static bool
decl_can_be_decomposed_p (tree var)
{
  /* Early out for scalars.  */
  if (is_sra_scalar_type (TREE_TYPE (var)))
    return false;

  /* The variable must not be aliased.  */
  if (!is_gimple_non_addressable (var))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Cannot scalarize variable ");
	  print_generic_expr (dump_file, var, dump_flags);
	  fprintf (dump_file, " because it must live in memory\n");
	}
      return false;
    }

  /* The variable must not be volatile.  */
  if (TREE_THIS_VOLATILE (var))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Cannot scalarize variable ");
	  print_generic_expr (dump_file, var, dump_flags);
	  fprintf (dump_file, " because it is declared volatile\n");
	}
      return false;
    }

  /* We must be able to decompose the variable's type.  */
  if (!sra_type_can_be_decomposed_p (TREE_TYPE (var)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Cannot scalarize variable ");
	  print_generic_expr (dump_file, var, dump_flags);
	  fprintf (dump_file, " because its type cannot be decomposed\n");
	}
      return false;
    }

  /* HACK: if we decompose a va_list_type_node before inlining, then we'll
     confuse tree-stdarg.c, and we won't be able to figure out which and
     how many arguments are accessed.  This really should be improved in
     tree-stdarg.c, as the decomposition is truly a win.  This could also
     be fixed if the stdarg pass ran early, but this can't be done until
     we have aliasing information early too.  See PR 30791.  */
  if (early_sra && is_va_list_type (TREE_TYPE (var)))
    return false;

  return true;
}

/* Return true if TYPE can be *completely* decomposed into scalars.  */

static bool
type_can_instantiate_all_elements (tree type)
{
  if (is_sra_scalar_type (type))
    return true;
  if (!sra_type_can_be_decomposed_p (type))
    return false;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
      {
	unsigned int cache = TYPE_UID (TYPE_MAIN_VARIANT (type)) * 2;
	tree f;

	if (bitmap_bit_p (sra_type_inst_cache, cache+0))
	  return true;
	if (bitmap_bit_p (sra_type_inst_cache, cache+1))
	  return false;

	for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
	  if (TREE_CODE (f) == FIELD_DECL)
	    {
	      if (!type_can_instantiate_all_elements (TREE_TYPE (f)))
		{
		  bitmap_set_bit (sra_type_inst_cache, cache+1);
		  return false;
		}
	    }

	bitmap_set_bit (sra_type_inst_cache, cache+0);
	return true;
      }

    case ARRAY_TYPE:
      return type_can_instantiate_all_elements (TREE_TYPE (type));

    case COMPLEX_TYPE:
      return true;

    default:
      gcc_unreachable ();
    }
}

/* Test whether ELT or some sub-element cannot be scalarized.  */

static bool
can_completely_scalarize_p (struct sra_elt *elt)
{
  struct sra_elt *c;

  if (elt->cannot_scalarize)
    return false;

  for (c = elt->children; c; c = c->sibling)
    if (!can_completely_scalarize_p (c))
      return false;

  for (c = elt->groups; c; c = c->sibling)
    if (!can_completely_scalarize_p (c))
      return false;

  return true;
}

\f
/* A simplified tree hashing algorithm that only handles the types of
   trees we expect to find in sra_elt->element.  */

static hashval_t
sra_hash_tree (tree t)
{
  hashval_t h;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      h = DECL_UID (t);
      break;

    case INTEGER_CST:
      h = TREE_INT_CST_LOW (t) ^ TREE_INT_CST_HIGH (t);
      break;

    case RANGE_EXPR:
      h = iterative_hash_expr (TREE_OPERAND (t, 0), 0);
      h = iterative_hash_expr (TREE_OPERAND (t, 1), h);
      break;

    case FIELD_DECL:
      /* We can have types that are compatible, but have different member
	 lists, so we can't hash fields by ID.  Use offsets instead.  */
      h = iterative_hash_expr (DECL_FIELD_OFFSET (t), 0);
      h = iterative_hash_expr (DECL_FIELD_BIT_OFFSET (t), h);
      break;

    case BIT_FIELD_REF:
      /* Don't take operand 0 into account, that's our parent.  */
      h = iterative_hash_expr (TREE_OPERAND (t, 1), 0);
      h = iterative_hash_expr (TREE_OPERAND (t, 2), h);
      break;

    default:
      gcc_unreachable ();
    }

  return h;
}

/* Hash function for type SRA_PAIR.  */

static hashval_t
sra_elt_hash (const void *x)
{
  const struct sra_elt *const e = (const struct sra_elt *) x;
  const struct sra_elt *p;
  hashval_t h;

  h = sra_hash_tree (e->element);

  /* Take into account everything except bitfield blocks back up the
     chain.  Given that chain lengths are rarely very long, this
     should be acceptable.  If we truly identify this as a performance
     problem, it should work to hash the pointer value
     "e->parent".  */
  for (p = e->parent; p ; p = p->parent)
    if (!p->in_bitfld_block)
      h = (h * 65521) ^ sra_hash_tree (p->element);

  return h;
}

/* Equality function for type SRA_PAIR.  */

static int
sra_elt_eq (const void *x, const void *y)
{
  const struct sra_elt *const a = (const struct sra_elt *) x;
  const struct sra_elt *const b = (const struct sra_elt *) y;
  tree ae, be;
  const struct sra_elt *ap = a->parent;
  const struct sra_elt *bp = b->parent;

  if (ap)
    while (ap->in_bitfld_block)
      ap = ap->parent;
  if (bp)
    while (bp->in_bitfld_block)
      bp = bp->parent;

  if (ap != bp)
    return false;

  ae = a->element;
  be = b->element;

  if (ae == be)
    return true;
  if (TREE_CODE (ae) != TREE_CODE (be))
    return false;

  switch (TREE_CODE (ae))
    {
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      /* These are all pointer unique.  */
      return false;

    case INTEGER_CST:
      /* Integers are not pointer unique, so compare their values.  */
      return tree_int_cst_equal (ae, be);

    case RANGE_EXPR:
      return
	tree_int_cst_equal (TREE_OPERAND (ae, 0), TREE_OPERAND (be, 0))
	&& tree_int_cst_equal (TREE_OPERAND (ae, 1), TREE_OPERAND (be, 1));

    case FIELD_DECL:
      /* Fields are unique within a record, but not between
	 compatible records.  */
      if (DECL_FIELD_CONTEXT (ae) == DECL_FIELD_CONTEXT (be))
	return false;
      return fields_compatible_p (ae, be);

    case BIT_FIELD_REF:
      return
	tree_int_cst_equal (TREE_OPERAND (ae, 1), TREE_OPERAND (be, 1))
	&& tree_int_cst_equal (TREE_OPERAND (ae, 2), TREE_OPERAND (be, 2));

    default:
      gcc_unreachable ();
    }
}

/* Create or return the SRA_ELT structure for CHILD in PARENT.  PARENT
   may be null, in which case CHILD must be a DECL.  */

static struct sra_elt *
lookup_element (struct sra_elt *parent, tree child, tree type,
		enum insert_option insert)
{
  struct sra_elt dummy;
  struct sra_elt **slot;
  struct sra_elt *elt;

  if (parent)
    dummy.parent = parent->is_group ? parent->parent : parent;
  else
    dummy.parent = NULL;
  dummy.element = child;

  slot = (struct sra_elt **) htab_find_slot (sra_map, &dummy, insert);
  if (!slot && insert == NO_INSERT)
    return NULL;

  elt = *slot;
  if (!elt && insert == INSERT)
    {
      *slot = elt = XOBNEW (&sra_obstack, struct sra_elt);
      memset (elt, 0, sizeof (*elt));

      elt->parent = parent;
      elt->element = child;
      elt->type = type;
      elt->is_scalar = is_sra_scalar_type (type);

      if (parent)
	{
	  if (IS_ELEMENT_FOR_GROUP (elt->element))
	    {
	      elt->is_group = true;
	      elt->sibling = parent->groups;
	      parent->groups = elt;
	    }
	  else
	    {
	      elt->sibling = parent->children;
	      parent->children = elt;
	    }
	}

      /* If this is a parameter, then if we want to scalarize, we have
	 one copy from the true function parameter.  Count it now.  */
      if (TREE_CODE (child) == PARM_DECL)
	{
	  elt->n_copies = 1;
	  bitmap_set_bit (needs_copy_in, DECL_UID (child));
	}
    }

  return elt;
}

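/* Usage sketch (patterns taken from elsewhere in this file): during
   scanning and instantiation, children are created on first sight:

     sub = lookup_element (elt, f, canon_type_for_field (f, elt->element),
			   INSERT);

   while query-only callers pass NO_INSERT:

     sub = lookup_element (elt, f, NULL, NO_INSERT);  */
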
/* Create or return the SRA_ELT structure for EXPR if the expression
   refers to a scalarizable variable.  */

static struct sra_elt *
maybe_lookup_element_for_expr (tree expr)
{
  struct sra_elt *elt;
  tree child;

  switch (TREE_CODE (expr))
    {
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      if (is_sra_candidate_decl (expr))
	return lookup_element (NULL, expr, TREE_TYPE (expr), INSERT);
      return NULL;

    case ARRAY_REF:
      /* We can't scalarize variable array indices.  */
      if (in_array_bounds_p (expr))
	child = TREE_OPERAND (expr, 1);
      else
	return NULL;
      break;

    case ARRAY_RANGE_REF:
      /* We can't scalarize variable array indices.  */
      if (range_in_array_bounds_p (expr))
	{
	  tree domain = TYPE_DOMAIN (TREE_TYPE (expr));
	  child = build2 (RANGE_EXPR, integer_type_node,
			  TYPE_MIN_VALUE (domain), TYPE_MAX_VALUE (domain));
	}
      else
	return NULL;
      break;

    case COMPONENT_REF:
      {
	tree type = TREE_TYPE (TREE_OPERAND (expr, 0));
	/* Don't look through unions.  */
	if (TREE_CODE (type) != RECORD_TYPE)
	  return NULL;
	/* Nor through variable-sized records.  */
	if (TYPE_SIZE (type) == NULL_TREE
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return NULL;
	child = TREE_OPERAND (expr, 1);
      }
      break;

    case REALPART_EXPR:
      child = integer_zero_node;
      break;
    case IMAGPART_EXPR:
      child = integer_one_node;
      break;

    default:
      return NULL;
    }

  elt = maybe_lookup_element_for_expr (TREE_OPERAND (expr, 0));
  if (elt)
    return lookup_element (elt, child, TREE_TYPE (expr), INSERT);
  return NULL;
}

\f
/* Functions to walk just enough of the tree to see all scalarizable
   references, and categorize them.  */

/* A set of callbacks for phases 2 and 4.  They'll be invoked for the
   various kinds of references seen.  In all cases, *GSI is an iterator
   pointing to the statement being processed.  */
struct sra_walk_fns
{
  /* Invoked when ELT is required as a unit.  Note that ELT might refer to
     a leaf node, in which case this is a simple scalar reference.  *EXPR_P
     points to the location of the expression.  IS_OUTPUT is true if this
     is a left-hand-side reference.  USE_ALL is true if we saw something we
     couldn't quite identify and had to force the use of the entire object.  */
  void (*use) (struct sra_elt *elt, tree *expr_p,
	       gimple_stmt_iterator *gsi, bool is_output, bool use_all);

  /* Invoked when we have a copy between two scalarizable references.  */
  void (*copy) (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt,
		gimple_stmt_iterator *gsi);

  /* Invoked when ELT is initialized from a constant.  VALUE may be NULL,
     in which case it should be treated as an empty CONSTRUCTOR.  */
  void (*init) (struct sra_elt *elt, tree value, gimple_stmt_iterator *gsi);

  /* Invoked when we have a copy between one scalarizable reference ELT
     and one non-scalarizable reference OTHER without side-effects.
     IS_OUTPUT is true if ELT is on the left-hand side.  */
  void (*ldst) (struct sra_elt *elt, tree other,
		gimple_stmt_iterator *gsi, bool is_output);

  /* True during phase 2, false during phase 4.  */
  /* ??? This is a hack.  */
  bool initial_scan;
};

#ifdef ENABLE_CHECKING
/* Invoked via walk_tree, if *TP contains a candidate decl, return it.  */

static tree
sra_find_candidate_decl (tree *tp, int *walk_subtrees,
			 void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  if (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL)
    {
      *walk_subtrees = 0;
      if (is_sra_candidate_decl (t))
	return t;
    }
  else if (TYPE_P (t))
    *walk_subtrees = 0;

  return NULL;
}
#endif

/* Walk most expressions looking for a scalarizable aggregate.
   If we find one, invoke FNS->USE.  */

static void
sra_walk_expr (tree *expr_p, gimple_stmt_iterator *gsi, bool is_output,
	       const struct sra_walk_fns *fns)
{
  tree expr = *expr_p;
  tree inner = expr;
  bool disable_scalarization = false;
  bool use_all_p = false;

  /* We're looking to collect a reference expression between EXPR and INNER,
     such that INNER is a scalarizable decl and all other nodes through EXPR
     are references that we can scalarize.  If we come across something that
     we can't scalarize, we reset EXPR.  This has the effect of making it
     appear that we're referring to the larger expression as a whole.  */

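  /* For instance (illustrative): given EXPR = p.f[2].g, INNER descends
     through the COMPONENT_REFs and ARRAY_REF down to P; if P is a
     candidate decl, FNS->USE is invoked on the whole reference.  Given
     p.f[i].g with a non-constant index I, scalarization of P is
     disabled entirely (see the ARRAY_REF case below).  */
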
  while (1)
    switch (TREE_CODE (inner))
      {
      case VAR_DECL:
      case PARM_DECL:
      case RESULT_DECL:
	/* If there is a scalarizable decl at the bottom, then process it.  */
	if (is_sra_candidate_decl (inner))
	  {
	    struct sra_elt *elt = maybe_lookup_element_for_expr (expr);
	    if (disable_scalarization)
	      elt->cannot_scalarize = true;
	    else
	      fns->use (elt, expr_p, gsi, is_output, use_all_p);
	  }
	return;

      case ARRAY_REF:
	/* Non-constant index means any member may be accessed.  Prevent the
	   expression from being scalarized.  If we were to treat this as a
	   reference to the whole array, we can wind up with a single dynamic
	   index reference inside a loop being overridden by several constant
	   index references during loop setup.  It's possible that this could
	   be avoided by using dynamic usage counts based on BB trip counts
	   (based on loop analysis or profiling), but that hardly seems worth
	   the effort.  */
	/* ??? Hack.  Figure out how to push this into the scan routines
	   without duplicating too much code.  */
	if (!in_array_bounds_p (inner))
	  {
	    disable_scalarization = true;
	    goto use_all;
	  }
	/* ??? Are we assured that non-constant bounds and stride will have
	   the same value everywhere?  I don't think Fortran will...  */
	if (TREE_OPERAND (inner, 2) || TREE_OPERAND (inner, 3))
	  goto use_all;
	inner = TREE_OPERAND (inner, 0);
	break;

      case ARRAY_RANGE_REF:
	if (!range_in_array_bounds_p (inner))
	  {
	    disable_scalarization = true;
	    goto use_all;
	  }
	/* ??? See above regarding non-constant bounds and stride.  */
	if (TREE_OPERAND (inner, 2) || TREE_OPERAND (inner, 3))
	  goto use_all;
	inner = TREE_OPERAND (inner, 0);
	break;

      case COMPONENT_REF:
	{
	  tree type = TREE_TYPE (TREE_OPERAND (inner, 0));
	  /* Don't look through unions.  */
	  if (TREE_CODE (type) != RECORD_TYPE)
	    goto use_all;
	  /* Nor through variable-sized records.  */
	  if (TYPE_SIZE (type) == NULL_TREE
	      || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	    goto use_all;
	  inner = TREE_OPERAND (inner, 0);
	}
	break;

      case REALPART_EXPR:
      case IMAGPART_EXPR:
	inner = TREE_OPERAND (inner, 0);
	break;

      case BIT_FIELD_REF:
	/* A bit field reference to a specific vector is scalarized, but
	   one that appears as an output must be noted on the left-hand
	   side, so that when we scalarize the vector we can mark its
	   replacement variable as not renamable.  */
	if (is_output
	    && TREE_CODE (TREE_TYPE (TREE_OPERAND (inner, 0))) == VECTOR_TYPE)
	  {
	    struct sra_elt *elt
	      = maybe_lookup_element_for_expr (TREE_OPERAND (inner, 0));
	    if (elt)
	      elt->is_vector_lhs = true;
	  }

	/* A bit field reference (access to *multiple* fields simultaneously)
	   is not currently scalarized.  Consider this an access to the full
	   outer element, to which walk_tree will bring us next.  */
	goto use_all;

      case NOP_EXPR:
      case CONVERT_EXPR:
	/* Similarly, a nop explicitly wants to look at an object in a
	   type other than the one we've scalarized.  */
	goto use_all;

      case VIEW_CONVERT_EXPR:
	/* Likewise for a view conversion, but with an additional twist:
	   it can be on the LHS and, in this case, an access to the full
	   outer element would mean a killing def.  So we need to punt
	   if we haven't already a full access to the current element,
	   because we cannot pretend to have a killing def if we only
	   have a partial access at some level.  */
	if (is_output && !use_all_p && inner != expr)
	  disable_scalarization = true;
	goto use_all;

      case WITH_SIZE_EXPR:
	/* This is a transparent wrapper.  The entire inner expression really
	   is being used.  */
	goto use_all;

      use_all:
	expr_p = &TREE_OPERAND (inner, 0);
	inner = expr = *expr_p;
	use_all_p = true;
	break;

      default:
#ifdef ENABLE_CHECKING
	/* Validate that we're not missing any references.  */
	gcc_assert (!walk_tree (&inner, sra_find_candidate_decl, NULL, NULL));
#endif
	return;
      }
}

/* Walk the arguments of a GIMPLE_CALL looking for scalarizable aggregates.
   If we find one, invoke FNS->USE.  */

static void
sra_walk_gimple_call (gimple stmt, gimple_stmt_iterator *gsi,
		      const struct sra_walk_fns *fns)
{
  int i;
  int nargs = gimple_call_num_args (stmt);

  for (i = 0; i < nargs; i++)
    sra_walk_expr (gimple_call_arg_ptr (stmt, i), gsi, false, fns);

  if (gimple_call_lhs (stmt))
    sra_walk_expr (gimple_call_lhs_ptr (stmt), gsi, true, fns);
}

/* Walk the inputs and outputs of a GIMPLE_ASM looking for scalarizable
   aggregates.  If we find one, invoke FNS->USE.  */

static void
sra_walk_gimple_asm (gimple stmt, gimple_stmt_iterator *gsi,
		     const struct sra_walk_fns *fns)
{
  size_t i;
  for (i = 0; i < gimple_asm_ninputs (stmt); i++)
    sra_walk_expr (&TREE_VALUE (gimple_asm_input_op (stmt, i)), gsi, false, fns);
  for (i = 0; i < gimple_asm_noutputs (stmt); i++)
    sra_walk_expr (&TREE_VALUE (gimple_asm_output_op (stmt, i)), gsi, true, fns);
}

/* Walk a GIMPLE_ASSIGN and categorize the assignment appropriately.  */

static void
sra_walk_gimple_assign (gimple stmt, gimple_stmt_iterator *gsi,
			const struct sra_walk_fns *fns)
{
  struct sra_elt *lhs_elt = NULL, *rhs_elt = NULL;
  tree lhs, rhs;

  /* If there is more than one operand on the RHS, only walk the LHS.  */
  if (!gimple_assign_single_p (stmt))
    {
      sra_walk_expr (gimple_assign_lhs_ptr (stmt), gsi, true, fns);
      return;
    }

  lhs = gimple_assign_lhs (stmt);
  rhs = gimple_assign_rhs1 (stmt);
  lhs_elt = maybe_lookup_element_for_expr (lhs);
  rhs_elt = maybe_lookup_element_for_expr (rhs);

  /* If both sides are scalarizable, this is a COPY operation.  */
  if (lhs_elt && rhs_elt)
    {
      fns->copy (lhs_elt, rhs_elt, gsi);
      return;
    }

  /* If the RHS is scalarizable, handle it.  There are only two cases.  */
  if (rhs_elt)
    {
      if (!rhs_elt->is_scalar && !TREE_SIDE_EFFECTS (lhs))
	fns->ldst (rhs_elt, lhs, gsi, false);
      else
	fns->use (rhs_elt, gimple_assign_rhs1_ptr (stmt), gsi, false, false);
    }

  /* If it isn't scalarizable, there may be scalarizable variables within, so
     check for a call or else walk the RHS to see if we need to do any
     copy-in operations.  We need to do it before the LHS is scalarized so
     that the statements get inserted in the proper place, before any
     copy-out operations.  */
  else
    sra_walk_expr (gimple_assign_rhs1_ptr (stmt), gsi, false, fns);

  /* Likewise, handle the LHS being scalarizable.  We have cases similar
     to those above, but also want to handle RHS being constant.  */
  if (lhs_elt)
    {
      /* If this is an assignment from a constant, or constructor, then
	 we have access to all of the elements individually.  Invoke INIT.  */
      if (TREE_CODE (rhs) == COMPLEX_EXPR
	  || TREE_CODE (rhs) == COMPLEX_CST
	  || TREE_CODE (rhs) == CONSTRUCTOR)
	fns->init (lhs_elt, rhs, gsi);

      /* If this is an assignment from read-only memory, treat this as if
	 we'd been passed the constructor directly.  Invoke INIT.  */
      else if (TREE_CODE (rhs) == VAR_DECL
	       && TREE_STATIC (rhs)
	       && TREE_READONLY (rhs)
	       && targetm.binds_local_p (rhs))
	fns->init (lhs_elt, DECL_INITIAL (rhs), gsi);

      /* If this is a copy from a non-scalarizable lvalue, invoke LDST.
	 The lvalue requirement prevents us from trying to directly scalarize
	 the result of a function call.  Which would result in trying to call
	 the function multiple times, and other evil things.  */
      else if (!lhs_elt->is_scalar
	       && !TREE_SIDE_EFFECTS (rhs) && is_gimple_addressable (rhs))
	fns->ldst (lhs_elt, rhs, gsi, true);

      /* Otherwise we're being used in some context that requires the
	 aggregate to be seen as a whole.  Invoke USE.  */
      else
	fns->use (lhs_elt, gimple_assign_lhs_ptr (stmt), gsi, true, false);
    }

  /* Similarly to above, LHS_ELT being null only means that the LHS as a
     whole is not a scalarizable reference.  There may be occurrences of
     scalarizable variables within, which implies a USE.  */
  else
    sra_walk_expr (gimple_assign_lhs_ptr (stmt), gsi, true, fns);
}
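
/* A summary sketch of the dispatch above (illustrative GIMPLE forms):

     s1 = s2;            both sides scalarizable            -> fns->copy
     s = {CONSTRUCTOR};  constant or constructor RHS        -> fns->init
     s = *p;             non-scalarizable addressable RHS   -> fns->ldst
     x = s.f;            scalar leaf reference              -> fns->use  */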

/* Entry point to the walk functions.  Search the entire function,
   invoking the callbacks in FNS on each of the references to
   scalarizable variables.  */

static void
sra_walk_function (const struct sra_walk_fns *fns)
{
  basic_block bb;
  gimple_stmt_iterator si, ni;

  /* ??? Phase 4 could derive some benefit from walking the function in
     dominator tree order.  */

  FOR_EACH_BB (bb)
    for (si = gsi_start_bb (bb); !gsi_end_p (si); si = ni)
      {
	gimple stmt;

	stmt = gsi_stmt (si);

	ni = si;
	gsi_next (&ni);

	/* If the statement has no virtual operands, then it doesn't
	   make any structure references that we care about.  */
	if (gimple_aliases_computed_p (cfun)
	    && ZERO_SSA_OPERANDS (stmt, (SSA_OP_VIRTUAL_DEFS | SSA_OP_VUSE)))
	  continue;

	switch (gimple_code (stmt))
	  {
	  case GIMPLE_RETURN:
	    /* If we have "return <retval>" then the return value is
	       already exposed for our pleasure.  Walk it as a USE to
	       force all the components back in place for the return.  */
	    if (gimple_return_retval (stmt) == NULL_TREE)
	      ;
	    else
	      sra_walk_expr (gimple_return_retval_ptr (stmt), &si, false,
			     fns);
	    break;

	  case GIMPLE_ASSIGN:
	    sra_walk_gimple_assign (stmt, &si, fns);
	    break;
	  case GIMPLE_CALL:
	    sra_walk_gimple_call (stmt, &si, fns);
	    break;
	  case GIMPLE_ASM:
	    sra_walk_gimple_asm (stmt, &si, fns);
	    break;

	  default:
	    break;
	  }
      }
}
\f
/* Phase One: Scan all referenced variables in the program looking for
   structures that could be decomposed.  */

static bool
find_candidates_for_sra (void)
{
  bool any_set = false;
  tree var;
  referenced_var_iterator rvi;

  FOR_EACH_REFERENCED_VAR (var, rvi)
    {
      if (decl_can_be_decomposed_p (var))
	{
	  bitmap_set_bit (sra_candidates, DECL_UID (var));
	  any_set = true;
	}
    }

  return any_set;
}

\f
/* Phase Two: Scan all references to scalarizable variables.  Count the
   number of times they are used or copied respectively.  */

/* Callbacks to fill in SRA_WALK_FNS.  Everything but USE is
   considered a copy, because we can decompose the reference such that
   the sub-elements needn't be contiguous.  */

static void
scan_use (struct sra_elt *elt, tree *expr_p ATTRIBUTE_UNUSED,
	  gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
	  bool is_output ATTRIBUTE_UNUSED, bool use_all ATTRIBUTE_UNUSED)
{
  elt->n_uses += 1;
}

static void
scan_copy (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt,
	   gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED)
{
  lhs_elt->n_copies += 1;
  rhs_elt->n_copies += 1;
}

static void
scan_init (struct sra_elt *lhs_elt, tree rhs ATTRIBUTE_UNUSED,
	   gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED)
{
  lhs_elt->n_copies += 1;
}

static void
scan_ldst (struct sra_elt *elt, tree other ATTRIBUTE_UNUSED,
	   gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
	   bool is_output ATTRIBUTE_UNUSED)
{
  elt->n_copies += 1;
}

/* Dump the values we collected during the scanning phase.  */

static void
scan_dump (struct sra_elt *elt)
{
  struct sra_elt *c;

  dump_sra_elt_name (dump_file, elt);
  fprintf (dump_file, ": n_uses=%u n_copies=%u\n", elt->n_uses, elt->n_copies);

  for (c = elt->children; c ; c = c->sibling)
    scan_dump (c);

  for (c = elt->groups; c ; c = c->sibling)
    scan_dump (c);
}

/* Entry point to phase 2.  Scan the entire function, building up
   scalarization data structures, recording copies and uses.  */

static void
scan_function (void)
{
  static const struct sra_walk_fns fns = {
    scan_use, scan_copy, scan_init, scan_ldst, true
  };
  bitmap_iterator bi;

  sra_walk_function (&fns);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      unsigned i;

      fputs ("\nScan results:\n", dump_file);
      EXECUTE_IF_SET_IN_BITMAP (sra_candidates, 0, i, bi)
	{
	  tree var = referenced_var (i);
	  struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT);
	  if (elt)
	    scan_dump (elt);
	}
      fputc ('\n', dump_file);
    }
}
\f
/* Phase Three: Make decisions about which variables to scalarize, if any.
   All elements to be scalarized have replacement variables made for them.  */

/* A subroutine of build_element_name.  Recursively build the element
   name on the obstack.  */

static void
build_element_name_1 (struct sra_elt *elt)
{
  tree t;
  char buffer[32];

  if (elt->parent)
    {
      build_element_name_1 (elt->parent);
      obstack_1grow (&sra_obstack, '$');

      if (TREE_CODE (elt->parent->type) == COMPLEX_TYPE)
	{
	  if (elt->element == integer_zero_node)
	    obstack_grow (&sra_obstack, "real", 4);
	  else
	    obstack_grow (&sra_obstack, "imag", 4);
	  return;
	}
    }

  t = elt->element;
  if (TREE_CODE (t) == INTEGER_CST)
    {
      /* ??? Eh.  Don't bother doing double-wide printing.  */
      sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, TREE_INT_CST_LOW (t));
      obstack_grow (&sra_obstack, buffer, strlen (buffer));
    }
  else if (TREE_CODE (t) == BIT_FIELD_REF)
    {
      sprintf (buffer, "B" HOST_WIDE_INT_PRINT_DEC,
	       tree_low_cst (TREE_OPERAND (t, 2), 1));
      obstack_grow (&sra_obstack, buffer, strlen (buffer));
      sprintf (buffer, "F" HOST_WIDE_INT_PRINT_DEC,
	       tree_low_cst (TREE_OPERAND (t, 1), 1));
      obstack_grow (&sra_obstack, buffer, strlen (buffer));
    }
  else
    {
      tree name = DECL_NAME (t);
      if (name)
	obstack_grow (&sra_obstack, IDENTIFIER_POINTER (name),
		      IDENTIFIER_LENGTH (name));
      else
	{
	  sprintf (buffer, "D%u", DECL_UID (t));
	  obstack_grow (&sra_obstack, buffer, strlen (buffer));
	}
    }
}

/* Construct a pretty variable name for an element's replacement variable.
   The name is built on the obstack.  */

static char *
build_element_name (struct sra_elt *elt)
{
  build_element_name_1 (elt);
  obstack_1grow (&sra_obstack, '\0');
  return XOBFINISH (&sra_obstack, char *);
}

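/* Examples of generated names (illustrative): for "struct point p",
   the X field becomes "p$x"; the real part of "_Complex float c"
   becomes "c$real"; and a bit-field block of size 32 at bit position 0
   within "s" becomes "s$B0F32".  */
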
/* Instantiate an element as an independent variable.  */

static void
instantiate_element (struct sra_elt *elt)
{
  struct sra_elt *base_elt;
  tree var, base;
  bool nowarn = TREE_NO_WARNING (elt->element);

  for (base_elt = elt; base_elt->parent; base_elt = base_elt->parent)
    if (!nowarn)
      nowarn = TREE_NO_WARNING (base_elt->parent->element);
  base = base_elt->element;

  elt->replacement = var = make_rename_temp (elt->type, "SR");

  if (DECL_P (elt->element)
      && !tree_int_cst_equal (DECL_SIZE (var), DECL_SIZE (elt->element)))
    {
      DECL_SIZE (var) = DECL_SIZE (elt->element);
      DECL_SIZE_UNIT (var) = DECL_SIZE_UNIT (elt->element);

      elt->in_bitfld_block = 1;
      elt->replacement = fold_build3 (BIT_FIELD_REF, elt->type, var,
				      DECL_SIZE (var),
				      BYTES_BIG_ENDIAN
				      ? size_binop (MINUS_EXPR,
						    TYPE_SIZE (elt->type),
						    DECL_SIZE (var))
				      : bitsize_int (0));
    }

  /* A vector that is used on the left-hand side with a BIT_FIELD_REF
     cannot be a gimple register.  */
  if (TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE && elt->is_vector_lhs)
    DECL_GIMPLE_REG_P (var) = 0;

  DECL_SOURCE_LOCATION (var) = DECL_SOURCE_LOCATION (base);
  DECL_ARTIFICIAL (var) = 1;

  if (TREE_THIS_VOLATILE (elt->type))
    {
      TREE_THIS_VOLATILE (var) = 1;
      TREE_SIDE_EFFECTS (var) = 1;
    }

  if (DECL_NAME (base) && !DECL_IGNORED_P (base))
    {
      char *pretty_name = build_element_name (elt);
      DECL_NAME (var) = get_identifier (pretty_name);
      obstack_free (&sra_obstack, pretty_name);

      SET_DECL_DEBUG_EXPR (var, generate_element_ref (elt));
      DECL_DEBUG_EXPR_IS_FROM (var) = 1;

      DECL_IGNORED_P (var) = 0;
      TREE_NO_WARNING (var) = nowarn;
    }
  else
    {
      DECL_IGNORED_P (var) = 1;
      /* ??? We can't generate any warning that would be meaningful.  */
      TREE_NO_WARNING (var) = 1;
    }

  /* Zero-initialize bit-field scalarization variables, to avoid
     triggering undefined behavior.  */
  if (TREE_CODE (elt->element) == BIT_FIELD_REF
      || (var != elt->replacement
	  && TREE_CODE (elt->replacement) == BIT_FIELD_REF))
    {
      gimple_seq init = sra_build_assignment (var,
					      fold_convert (TREE_TYPE (var),
							    integer_zero_node));
      insert_edge_copies_seq (init, ENTRY_BLOCK_PTR);
      mark_all_v_defs_seq (init);
    }

  if (dump_file)
    {
      fputs ("  ", dump_file);
      dump_sra_elt_name (dump_file, elt);
      fputs (" -> ", dump_file);
      print_generic_expr (dump_file, var, dump_flags);
      fputc ('\n', dump_file);
    }
}

/* Make one pass across an element tree deciding whether or not it's
   profitable to instantiate individual leaf scalars.

   PARENT_USES and PARENT_COPIES are the sum of the N_USES and N_COPIES
   fields all the way up the tree.  */

static void
decide_instantiation_1 (struct sra_elt *elt, unsigned int parent_uses,
			unsigned int parent_copies)
{
  if (dump_file && !elt->parent)
    {
      fputs ("Initial instantiation for ", dump_file);
      dump_sra_elt_name (dump_file, elt);
      fputc ('\n', dump_file);
    }

  if (elt->cannot_scalarize)
    return;

  if (elt->is_scalar)
    {
      /* The decision is simple: instantiate if we're used more frequently
	 than the parent needs to be seen as a complete unit.  */
      if (elt->n_uses + elt->n_copies + parent_copies > parent_uses)
	instantiate_element (elt);
    }
  else
    {
      struct sra_elt *c, *group;
      unsigned int this_uses = elt->n_uses + parent_uses;
      unsigned int this_copies = elt->n_copies + parent_copies;

      /* Consider groups of sub-elements as weighing in favour of
	 instantiation whatever their size.  */
      for (group = elt->groups; group ; group = group->sibling)
	FOR_EACH_ACTUAL_CHILD (c, group)
	  {
	    c->n_uses += group->n_uses;
	    c->n_copies += group->n_copies;
	  }

      for (c = elt->children; c ; c = c->sibling)
	decide_instantiation_1 (c, this_uses, this_copies);
    }
}

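/* A worked example of the heuristic above (hypothetical counts): if
   field A.B is used 3 times on its own (n_uses) and copied once
   (n_copies), while A is only needed as a complete unit twice
   (parent_uses), then 3 + 1 + 0 > 2 and B is instantiated.  */
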
/* Compute the size and number of all instantiated elements below ELT.
   We will only care about this if the size of the complete structure
   fits in a HOST_WIDE_INT, so we don't have to worry about overflow.  */

static unsigned int
sum_instantiated_sizes (struct sra_elt *elt, unsigned HOST_WIDE_INT *sizep)
{
  if (elt->replacement)
    {
      *sizep += TREE_INT_CST_LOW (TYPE_SIZE_UNIT (elt->type));
      return 1;
    }
  else
    {
      struct sra_elt *c;
      unsigned int count = 0;

      for (c = elt->children; c ; c = c->sibling)
	count += sum_instantiated_sizes (c, sizep);

      return count;
    }
}

/* Instantiate fields in ELT->TYPE that are not currently present as
   children of ELT.  */

static void instantiate_missing_elements (struct sra_elt *elt);

static struct sra_elt *
instantiate_missing_elements_1 (struct sra_elt *elt, tree child, tree type)
{
  struct sra_elt *sub = lookup_element (elt, child, type, INSERT);
  if (sub->is_scalar)
    {
      if (sub->replacement == NULL)
	instantiate_element (sub);
    }
  else
    instantiate_missing_elements (sub);
  return sub;
}

/* Obtain the canonical type for field F of ELEMENT.  */

static tree
canon_type_for_field (tree f, tree element)
{
  tree field_type = TREE_TYPE (f);

  /* canonicalize_component_ref() unwidens some bit-field types (not
     marked as DECL_BIT_FIELD in C++), so we must do the same, lest we
     introduce type mismatches.  */
  if (INTEGRAL_TYPE_P (field_type)
      && DECL_MODE (f) != TYPE_MODE (field_type))
    field_type = TREE_TYPE (get_unwidened (build3 (COMPONENT_REF,
						   field_type,
						   element,
						   f, NULL_TREE),
					   NULL_TREE));

  return field_type;
}

/* Look for adjacent fields of ELT starting at F that we'd like to
   scalarize as a single variable.  Return the last field of the
   group.  */

static tree
try_instantiate_multiple_fields (struct sra_elt *elt, tree f)
{
  int count;
  unsigned HOST_WIDE_INT align, bit, size, alchk;
  enum machine_mode mode;
  tree first = f, prev;
  tree type, var;
  struct sra_elt *block;

  /* Pointer fields are typically best handled as standalone entities.  */
  if (POINTER_TYPE_P (TREE_TYPE (f)))
    return f;

  if (!is_sra_scalar_type (TREE_TYPE (f))
      || !host_integerp (DECL_FIELD_OFFSET (f), 1)
      || !host_integerp (DECL_FIELD_BIT_OFFSET (f), 1)
      || !host_integerp (DECL_SIZE (f), 1)
      || lookup_element (elt, f, NULL, NO_INSERT))
    return f;

  block = elt;

  /* For complex and array objects, there are going to be integer
     literals as child elements.  In this case, we can't just take the
     alignment and mode of the decl, so we instead rely on the element
     type.

     ??? We could try to infer additional alignment from the full
     object declaration and the location of the sub-elements we're
     accessing.  */
  for (count = 0; !DECL_P (block->element); count++)
    block = block->parent;

  align = DECL_ALIGN (block->element);
  alchk = GET_MODE_BITSIZE (DECL_MODE (block->element));

  if (count)
    {
      type = TREE_TYPE (block->element);
      while (count--)
	type = TREE_TYPE (type);

      align = TYPE_ALIGN (type);
      alchk = GET_MODE_BITSIZE (TYPE_MODE (type));
    }

  if (align < alchk)
    align = alchk;

  /* Coalescing wider fields is probably pointless and
     inefficient.  */
  if (align > BITS_PER_WORD)
    align = BITS_PER_WORD;

  bit = tree_low_cst (DECL_FIELD_OFFSET (f), 1) * BITS_PER_UNIT
	+ tree_low_cst (DECL_FIELD_BIT_OFFSET (f), 1);
  size = tree_low_cst (DECL_SIZE (f), 1);

  alchk = align - 1;
  alchk = ~alchk;

  if ((bit & alchk) != ((bit + size - 1) & alchk))
    return f;

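  /* Worked example (hypothetical numbers): with ALIGN == 32, ALCHK
     becomes ~31, i.e. a mask selecting 32-bit word numbers.  A field
     at BIT == 40 with SIZE == 16 gives (40 & ~31) == 32 and
     (55 & ~31) == 32, so the field lies within a single alignment
     word and we continue.  */
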
  /* Find adjacent fields in the same alignment word.  */

  for (prev = f, f = TREE_CHAIN (f);
       f && TREE_CODE (f) == FIELD_DECL
       && is_sra_scalar_type (TREE_TYPE (f))
       && host_integerp (DECL_FIELD_OFFSET (f), 1)
       && host_integerp (DECL_FIELD_BIT_OFFSET (f), 1)
       && host_integerp (DECL_SIZE (f), 1)
       && !lookup_element (elt, f, NULL, NO_INSERT);
       prev = f, f = TREE_CHAIN (f))
    {
      unsigned HOST_WIDE_INT nbit, nsize;

      nbit = tree_low_cst (DECL_FIELD_OFFSET (f), 1) * BITS_PER_UNIT
	     + tree_low_cst (DECL_FIELD_BIT_OFFSET (f), 1);
      nsize = tree_low_cst (DECL_SIZE (f), 1);

      if (bit + size == nbit)
	{
	  if ((bit & alchk) != ((nbit + nsize - 1) & alchk))
	    {
	      /* If we're at an alignment boundary, don't bother
		 growing alignment such that we can include this next
		 field.  */
	      if ((nbit & alchk)
		  || GET_MODE_BITSIZE (DECL_MODE (f)) <= align)
		break;

	      align = GET_MODE_BITSIZE (DECL_MODE (f));
	      alchk = align - 1;
	      alchk = ~alchk;

	      if ((bit & alchk) != ((nbit + nsize - 1) & alchk))
		break;
	    }
	  size += nsize;
	}
      else if (nbit + nsize == bit)
	{
	  if ((nbit & alchk) != ((bit + size - 1) & alchk))
	    {
	      if ((bit & alchk)
		  || GET_MODE_BITSIZE (DECL_MODE (f)) <= align)
		break;

	      align = GET_MODE_BITSIZE (DECL_MODE (f));
	      alchk = align - 1;
	      alchk = ~alchk;

	      if ((nbit & alchk) != ((bit + size - 1) & alchk))
		break;
	    }
	  bit = nbit;
	  size += nsize;
	}
      else
	break;
    }

  f = prev;

  if (f == first)
    return f;

  gcc_assert ((bit & alchk) == ((bit + size - 1) & alchk));

  /* Try to widen the bit range so as to cover padding bits as well.  */

  if ((bit & ~alchk) || size != align)
    {
      unsigned HOST_WIDE_INT mbit = bit & alchk;
      unsigned HOST_WIDE_INT msize = align;

      for (f = TYPE_FIELDS (elt->type);
	   f; f = TREE_CHAIN (f))
	{
	  unsigned HOST_WIDE_INT fbit, fsize;

	  /* Skip the fields from first to prev.  */
	  if (f == first)
	    {
	      f = prev;
	      continue;
	    }

	  if (!(TREE_CODE (f) == FIELD_DECL
		&& host_integerp (DECL_FIELD_OFFSET (f), 1)
		&& host_integerp (DECL_FIELD_BIT_OFFSET (f), 1)))
	    continue;

	  fbit = tree_low_cst (DECL_FIELD_OFFSET (f), 1) * BITS_PER_UNIT
		 + tree_low_cst (DECL_FIELD_BIT_OFFSET (f), 1);

	  /* If we're past the selected word, we're fine.  */
	  if ((bit & alchk) < (fbit & alchk))
	    continue;

	  if (host_integerp (DECL_SIZE (f), 1))
	    fsize = tree_low_cst (DECL_SIZE (f), 1);
	  else
	    /* Assume a variable-sized field takes up all space till
	       the end of the word.  ??? Endianness issues?  */
	    fsize = align - (fbit & alchk);

	  if ((fbit & alchk) < (bit & alchk))
	    {
	      /* A large field might start at a previous word and
		 extend into the selected word.  Exclude those
		 bits.  ??? Endianness issues?  */
	      HOST_WIDE_INT diff = fbit + fsize - mbit;

	      if (diff <= 0)
		continue;

	      mbit += diff;
	      msize -= diff;
	    }
	  else
	    {
	      /* Non-overlapping, great.  */
	      if (fbit + fsize <= mbit
		  || mbit + msize <= fbit)
		continue;

	      if (fbit <= mbit)
		{
		  unsigned HOST_WIDE_INT diff = fbit + fsize - mbit;
		  mbit += diff;
		  msize -= diff;
		}
	      else if (fbit > mbit)
		msize -= (mbit + msize - fbit);
	      else
		gcc_unreachable ();
	    }
	}

      bit = mbit;
      size = msize;
    }

  /* Now we know the bit range we're interested in.  Find the smallest
     machine mode we can use to access it.  */

  for (mode = smallest_mode_for_size (size, MODE_INT);
       ;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      gcc_assert (mode != VOIDmode);

      alchk = GET_MODE_PRECISION (mode) - 1;
      alchk = ~alchk;

      if ((bit & alchk) == ((bit + size - 1) & alchk))
	break;
    }

  gcc_assert (~alchk < align);

  /* Create the field group as a single variable.  */

  /* We used to create a type for the mode above, but SIZE turns out
     not to be of mode-size.  As we need a matching type to build a
     BIT_FIELD_REF, use a nonstandard integer type as fallback.  */
  type = lang_hooks.types.type_for_size (size, 1);
  if (!type || TYPE_PRECISION (type) != size)
    type = build_nonstandard_integer_type (size, 1);
  gcc_assert (type);
  var = build3 (BIT_FIELD_REF, type, NULL_TREE,
		bitsize_int (size), bitsize_int (bit));

  block = instantiate_missing_elements_1 (elt, var, type);
  gcc_assert (block && block->is_scalar);

  var = block->replacement;

  if ((bit & ~alchk)
      || (HOST_WIDE_INT)size != tree_low_cst (DECL_SIZE (var), 1))
    {
      block->replacement = fold_build3 (BIT_FIELD_REF,
					TREE_TYPE (block->element), var,
					bitsize_int (size),
					bitsize_int (bit & ~alchk));
    }

  block->in_bitfld_block = 2;

  /* Add the member fields to the group, such that they access
     portions of the group variable.  */

  for (f = first; f != TREE_CHAIN (prev); f = TREE_CHAIN (f))
    {
      tree field_type = canon_type_for_field (f, elt->element);
      struct sra_elt *fld = lookup_element (block, f, field_type, INSERT);

      gcc_assert (fld && fld->is_scalar && !fld->replacement);

      fld->replacement = fold_build3 (BIT_FIELD_REF, field_type, var,
				      DECL_SIZE (f),
				      bitsize_int
				      ((TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f))
					* BITS_PER_UNIT
					+ (TREE_INT_CST_LOW
					   (DECL_FIELD_BIT_OFFSET (f))))
				       & ~alchk));
      fld->in_bitfld_block = 1;
    }

  return prev;
}

static void
instantiate_missing_elements (struct sra_elt *elt)
{
  tree type = elt->type;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
      {
	tree f;
	for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
	  if (TREE_CODE (f) == FIELD_DECL)
	    {
	      tree last = try_instantiate_multiple_fields (elt, f);

	      if (last != f)
		{
		  f = last;
		  continue;
		}

	      instantiate_missing_elements_1 (elt, f,
					      canon_type_for_field
					      (f, elt->element));
	    }
	break;
      }

    case ARRAY_TYPE:
      {
	tree i, max, subtype;

	i = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
	max = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
	subtype = TREE_TYPE (type);

	while (1)
	  {
	    instantiate_missing_elements_1 (elt, i, subtype);
	    if (tree_int_cst_equal (i, max))
	      break;
	    i = int_const_binop (PLUS_EXPR, i, integer_one_node, true);
	  }

	break;
      }

    case COMPLEX_TYPE:
      type = TREE_TYPE (type);
      instantiate_missing_elements_1 (elt, integer_zero_node, type);
      instantiate_missing_elements_1 (elt, integer_one_node, type);
      break;

    default:
      gcc_unreachable ();
    }
}

/* Return true if there is only one non-aggregate field in the record, TYPE.
   Return false otherwise.  */

static bool
single_scalar_field_in_record_p (tree type)
{
  int num_fields = 0;
  tree field;
  if (TREE_CODE (type) != RECORD_TYPE)
    return false;

  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    if (TREE_CODE (field) == FIELD_DECL)
      {
	num_fields++;

	if (num_fields == 2)
	  return false;

	if (AGGREGATE_TYPE_P (TREE_TYPE (field)))
	  return false;
      }

  return true;
}

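/* Examples (illustrative): the predicate holds for

     struct a { int x; };

   but not for

     struct b { int x; int y; };      two fields
     struct c { struct a inner; };    aggregate field  */
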
/* Make one pass across an element tree deciding whether to perform block
   or element copies.  If we decide on element copies, instantiate all
   elements.  Return true if there are any instantiated sub-elements.  */

static bool
decide_block_copy (struct sra_elt *elt)
{
  struct sra_elt *c;
  bool any_inst;

  /* We shouldn't be invoked on groups of sub-elements as they must
     behave like their parent as far as block copy is concerned.  */
  gcc_assert (!elt->is_group);

  /* If scalarization is disabled, respect it.  */
  if (elt->cannot_scalarize)
    {
      elt->use_block_copy = 1;

      if (dump_file)
	{
	  fputs ("Scalarization disabled for ", dump_file);
	  dump_sra_elt_name (dump_file, elt);
	  fputc ('\n', dump_file);
	}

      /* Disable scalarization of sub-elements.  */
      for (c = elt->children; c; c = c->sibling)
	{
	  c->cannot_scalarize = 1;
	  decide_block_copy (c);
	}

      /* Groups behave like their parent.  */
      for (c = elt->groups; c; c = c->sibling)
	{
	  c->cannot_scalarize = 1;
	  c->use_block_copy = 1;
	}

      return false;
    }

  /* Don't decide if we've no uses and no groups.  */
  if (elt->n_uses == 0 && elt->n_copies == 0 && elt->groups == NULL)
    ;

  else if (!elt->is_scalar)
    {
      tree size_tree = TYPE_SIZE_UNIT (elt->type);
      bool use_block_copy = true;

      /* Tradeoffs for COMPLEX types pretty much always make it better
	 to go ahead and split the components.  */
      if (TREE_CODE (elt->type) == COMPLEX_TYPE)
	use_block_copy = false;

      /* Don't bother trying to figure out the rest if the structure is
	 so large we can't do easy arithmetic.  This also forces block
	 copies for variable sized structures.  */
      else if (host_integerp (size_tree, 1))
	{
	  unsigned HOST_WIDE_INT full_size, inst_size = 0;
	  unsigned int max_size, max_count, inst_count, full_count;

	  /* If the sra-max-structure-size parameter is 0, then the
	     user has not overridden the parameter and we can choose a
	     sensible default.  */
	  max_size = SRA_MAX_STRUCTURE_SIZE
		     ? SRA_MAX_STRUCTURE_SIZE
		     : MOVE_RATIO * UNITS_PER_WORD;
	  max_count = SRA_MAX_STRUCTURE_COUNT
		      ? SRA_MAX_STRUCTURE_COUNT
		      : MOVE_RATIO;
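	  /* For example (hypothetical, target-dependent numbers): with
	     MOVE_RATIO of 4 and 8-byte words, the defaults above are
	     max_size = 32 bytes and max_count = 4 elements.  */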
1910
1911 full_size = tree_low_cst (size_tree, 1);
1912 full_count = count_type_elements (elt->type, false);
1913 inst_count = sum_instantiated_sizes (elt, &inst_size);
1914
1915 /* If there is only one scalar field in the record, don't block copy. */
1916 if (single_scalar_field_in_record_p (elt->type))
1917 use_block_copy = false;
1918
1919 /* ??? What to do here. If there are two fields, and we've only
1920 instantiated one, then instantiating the other is clearly a win.
1921 If there are a large number of fields then the size of the copy
1922 is much more of a factor. */
1923
1924 /* If the structure is small, and we've made copies, go ahead
1925 and instantiate, hoping that the copies will go away. */
1926 if (full_size <= max_size
1927 && (full_count - inst_count) <= max_count
1928 && elt->n_copies > elt->n_uses)
1929 use_block_copy = false;
1930 else if (inst_count * 100 >= full_count * SRA_FIELD_STRUCTURE_RATIO
1931 && inst_size * 100 >= full_size * SRA_FIELD_STRUCTURE_RATIO)
1932 use_block_copy = false;
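	      /* (A note on the test above, assuming the documented default of
		 75 for sra-field-structure-ratio: element copies are chosen
		 once at least 75% of the fields, covering at least 75% of
		 the bytes, have already been instantiated.  The parameter is
		 user-adjustable via --param.)  */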
1933
1934 /* In order to avoid block copy, we have to be able to instantiate
1935 all elements of the type. See if this is possible. */
1936 if (!use_block_copy
1937 && (!can_completely_scalarize_p (elt)
1938 || !type_can_instantiate_all_elements (elt->type)))
1939 use_block_copy = true;
1940 }
1941
1942 elt->use_block_copy = use_block_copy;
1943
1944 /* Groups behave like their parent. */
1945 for (c = elt->groups; c; c = c->sibling)
1946 c->use_block_copy = use_block_copy;
1947
1948 if (dump_file)
1949 {
1950 fprintf (dump_file, "Using %s for ",
1951 use_block_copy ? "block-copy" : "element-copy");
1952 dump_sra_elt_name (dump_file, elt);
1953 fputc ('\n', dump_file);
1954 }
1955
1956 if (!use_block_copy)
1957 {
1958 instantiate_missing_elements (elt);
1959 return true;
1960 }
1961 }
1962
1963 any_inst = elt->replacement != NULL;
1964
1965 for (c = elt->children; c ; c = c->sibling)
1966 any_inst |= decide_block_copy (c);
1967
1968 return any_inst;
1969 }
1970
1971 /* Entry point to phase 3. Instantiate scalar replacement variables. */
1972
1973 static void
1974 decide_instantiations (void)
1975 {
1976 unsigned int i;
1977 bool cleared_any;
1978 bitmap_head done_head;
1979 bitmap_iterator bi;
1980
1981 /* We cannot clear bits from a bitmap we're iterating over,
1982 so save up all the bits to clear until the end. */
1983 bitmap_initialize (&done_head, &bitmap_default_obstack);
1984 cleared_any = false;
1985
1986 EXECUTE_IF_SET_IN_BITMAP (sra_candidates, 0, i, bi)
1987 {
1988 tree var = referenced_var (i);
1989 struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT);
1990 if (elt)
1991 {
1992 decide_instantiation_1 (elt, 0, 0);
1993 if (!decide_block_copy (elt))
1994 elt = NULL;
1995 }
1996 if (!elt)
1997 {
1998 bitmap_set_bit (&done_head, i);
1999 cleared_any = true;
2000 }
2001 }
2002
2003 if (cleared_any)
2004 {
2005 bitmap_and_compl_into (sra_candidates, &done_head);
2006 bitmap_and_compl_into (needs_copy_in, &done_head);
2007 }
2008 bitmap_clear (&done_head);
2009
2010 mark_set_for_renaming (sra_candidates);
2011
2012 if (dump_file)
2013 fputc ('\n', dump_file);
2014 }
2015
2016 \f
2017 /* Phase Four: Update the function to match the replacements created. */
2018
2019 /* Mark all the variables in VDEF/VUSE operators for STMT for
2020 renaming. This becomes necessary when we modify all of a
2021 non-scalar. */
2022
2023 static void
2024 mark_all_v_defs_stmt (gimple stmt)
2025 {
2026 tree sym;
2027 ssa_op_iter iter;
2028
2029 update_stmt_if_modified (stmt);
2030
2031 FOR_EACH_SSA_TREE_OPERAND (sym, stmt, iter, SSA_OP_ALL_VIRTUALS)
2032 {
2033 if (TREE_CODE (sym) == SSA_NAME)
2034 sym = SSA_NAME_VAR (sym);
2035 mark_sym_for_renaming (sym);
2036 }
2037 }
2038
2039
2040 /* Mark all the variables in virtual operands in all the statements in
2041 SEQ for renaming. */
2042
2043 static void
2044 mark_all_v_defs_seq (gimple_seq seq)
2045 {
2046 gimple_stmt_iterator gsi;
2047
2048 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
2049 mark_all_v_defs_stmt (gsi_stmt (gsi));
2050 }
2051
2052 /* Mark every replacement under ELT with TREE_NO_WARNING. */
2053
2054 static void
2055 mark_no_warning (struct sra_elt *elt)
2056 {
2057 if (!elt->all_no_warning)
2058 {
2059 if (elt->replacement)
2060 TREE_NO_WARNING (elt->replacement) = 1;
2061 else
2062 {
2063 struct sra_elt *c;
2064 FOR_EACH_ACTUAL_CHILD (c, elt)
2065 mark_no_warning (c);
2066 }
2067 elt->all_no_warning = true;
2068 }
2069 }
2070
2071 /* Build a single-level component reference to ELT rooted at BASE. */
2072
2073 static tree
2074 generate_one_element_ref (struct sra_elt *elt, tree base)
2075 {
2076 switch (TREE_CODE (TREE_TYPE (base)))
2077 {
2078 case RECORD_TYPE:
2079 {
2080 tree field = elt->element;
2081
2082 /* We can't test elt->in_bitfld_block here because, when this is
2083 called from instantiate_element, we haven't set this field
2084 yet. */
2085 if (TREE_CODE (field) == BIT_FIELD_REF)
2086 {
2087 tree ret = unshare_expr (field);
2088 TREE_OPERAND (ret, 0) = base;
2089 return ret;
2090 }
2091
2092 /* Watch out for compatible records with differing field lists. */
2093 if (DECL_FIELD_CONTEXT (field) != TYPE_MAIN_VARIANT (TREE_TYPE (base)))
2094 field = find_compatible_field (TREE_TYPE (base), field);
2095
2096 return build3 (COMPONENT_REF, elt->type, base, field, NULL);
2097 }
2098
2099 case ARRAY_TYPE:
2100 if (TREE_CODE (elt->element) == RANGE_EXPR)
2101 return build4 (ARRAY_RANGE_REF, elt->type, base,
2102 TREE_OPERAND (elt->element, 0), NULL, NULL);
2103 else
2104 return build4 (ARRAY_REF, elt->type, base, elt->element, NULL, NULL);
2105
2106 case COMPLEX_TYPE:
2107 if (elt->element == integer_zero_node)
2108 return build1 (REALPART_EXPR, elt->type, base);
2109 else
2110 return build1 (IMAGPART_EXPR, elt->type, base);
2111
2112 default:
2113 gcc_unreachable ();
2114 }
2115 }
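/* Illustration (hypothetical variables): for a RECORD_TYPE base P with
   field X, this builds P.X (a COMPONENT_REF); for an ARRAY_TYPE base A
   and index I, A[I] (an ARRAY_REF); for a COMPLEX_TYPE base C,
   __real__ C or __imag__ C.  generate_element_ref below chains these
   recursively, yielding e.g. P.X.Y for nested structures.  */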
2116
2117 /* Build a full component reference to ELT rooted at its native variable. */
2118
2119 static tree
2120 generate_element_ref (struct sra_elt *elt)
2121 {
2122 if (elt->parent)
2123 return generate_one_element_ref (elt, generate_element_ref (elt->parent));
2124 else
2125 return elt->element;
2126 }
2127
2128 /* Return true if BF is a bit-field that we can handle like a scalar. */
2129
2130 static bool
2131 scalar_bitfield_p (tree bf)
2132 {
2133 return (TREE_CODE (bf) == BIT_FIELD_REF
2134 && (is_gimple_reg (TREE_OPERAND (bf, 0))
2135 || (TYPE_MODE (TREE_TYPE (TREE_OPERAND (bf, 0))) != BLKmode
2136 && (!TREE_SIDE_EFFECTS (TREE_OPERAND (bf, 0))
2137 || (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE
2138 (TREE_OPERAND (bf, 0))))
2139 <= BITS_PER_WORD)))));
2140 }
2141
2142 /* Create a sequence of statements that assign SRC to DST. */
2143
2144 static gimple_seq
2145 sra_build_assignment (tree dst, tree src)
2146 {
2147 gimple stmt;
2148 gimple_seq seq = NULL;
2149 /* Turning BIT_FIELD_REFs into bit operations enables other passes
2150 to do a much better job at optimizing the code.
2151 From dst = BIT_FIELD_REF <var, sz, off> we produce
2152
2153 SR.1 = (scalar type) var;
2154 SR.2 = SR.1 >> off;
2155 SR.3 = SR.2 & ((1 << sz) - 1);
2156 ... possible sign extension of SR.3 ...
2157 dst = (destination type) SR.3;
2158 */
2159 if (scalar_bitfield_p (src))
2160 {
2161 tree var, shift, width;
2162 tree utype, stype, stmp, utmp, dtmp;
2163 bool unsignedp = (INTEGRAL_TYPE_P (TREE_TYPE (src))
2164 ? TYPE_UNSIGNED (TREE_TYPE (src)) : true);
2165
2166 var = TREE_OPERAND (src, 0);
2167 width = TREE_OPERAND (src, 1);
2168 /* The offset needs to be adjusted to a right shift quantity
2169 depending on the endianness. */
2170 if (BYTES_BIG_ENDIAN)
2171 {
2172 tree tmp = size_binop (PLUS_EXPR, width, TREE_OPERAND (src, 2));
2173 shift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), tmp);
2174 }
2175 else
2176 shift = TREE_OPERAND (src, 2);
2177
2178 /* In weird cases we have non-integral types for the source or
2179 destination object.
2180 ??? For unknown reasons we also want an unsigned scalar type. */
2181 stype = TREE_TYPE (var);
2182 if (!INTEGRAL_TYPE_P (stype))
2183 stype = lang_hooks.types.type_for_size (TREE_INT_CST_LOW
2184 (TYPE_SIZE (stype)), 1);
2185 else if (!TYPE_UNSIGNED (stype))
2186 stype = unsigned_type_for (stype);
2187
2188 utype = TREE_TYPE (dst);
2189 if (!INTEGRAL_TYPE_P (utype))
2190 utype = lang_hooks.types.type_for_size (TREE_INT_CST_LOW
2191 (TYPE_SIZE (utype)), 1);
2192 else if (!TYPE_UNSIGNED (utype))
2193 utype = unsigned_type_for (utype);
2194
2195 stmp = make_rename_temp (stype, "SR");
2196
2197 /* Convert the base var of the BIT_FIELD_REF to the scalar type
2198 we use for computation if we cannot use it directly. */
2199 if (!useless_type_conversion_p (stype, TREE_TYPE (var)))
2200 {
2201 if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
2202 stmt = gimple_build_assign (stmp, fold_convert (stype, var));
2203 else
2204 stmt = gimple_build_assign (stmp, fold_build1 (VIEW_CONVERT_EXPR,
2205 stype, var));
2206 gimple_seq_add_stmt (&seq, stmt);
2207 var = stmp;
2208 }
2209
2210 if (!integer_zerop (shift))
2211 {
2212 stmt = gimple_build_assign (stmp, fold_build2 (RSHIFT_EXPR, stype,
2213 var, shift));
2214 gimple_seq_add_stmt (&seq, stmt);
2215 var = stmp;
2216 }
2217
2218 /* If we need a masking operation, produce one. */
2219 if (TREE_INT_CST_LOW (width) == TYPE_PRECISION (stype))
2220 unsignedp = true;
2221 else
2222 {
2223 tree one = build_int_cst_wide (stype, 1, 0);
2224 tree mask = int_const_binop (LSHIFT_EXPR, one, width, 0);
2225 mask = int_const_binop (MINUS_EXPR, mask, one, 0);
2226
2227 stmt = gimple_build_assign (stmp, fold_build2 (BIT_AND_EXPR, stype,
2228 var, mask));
2229 gimple_seq_add_stmt (&seq, stmt);
2230 var = stmp;
2231 }
2232
2233 /* After shifting and masking, convert to the target type. */
2234 utmp = stmp;
2235 if (!useless_type_conversion_p (utype, stype))
2236 {
2237 utmp = make_rename_temp (utype, "SR");
2238
2239 stmt = gimple_build_assign (utmp, fold_convert (utype, var));
2240 gimple_seq_add_stmt (&seq, stmt);
2241
2242 var = utmp;
2243 }
2244
2245 /* Perform sign extension, if required.
2246 ??? This should never be necessary. */
2247 if (!unsignedp)
2248 {
2249 tree signbit = int_const_binop (LSHIFT_EXPR,
2250 build_int_cst_wide (utype, 1, 0),
2251 size_binop (MINUS_EXPR, width,
2252 bitsize_int (1)), 0);
2253
2254 stmt = gimple_build_assign (utmp, fold_build2 (BIT_XOR_EXPR, utype,
2255 var, signbit));
2256 gimple_seq_add_stmt (&seq, stmt);
2257
2258 stmt = gimple_build_assign (utmp, fold_build2 (MINUS_EXPR, utype,
2259 utmp, signbit));
2260 gimple_seq_add_stmt (&seq, stmt);
2261
2262 var = utmp;
2263 }
2264
2265 /* fold_build3 (BIT_FIELD_REF, ...) sometimes returns a cast. */
2266 STRIP_NOPS (dst);
2267
2268 /* Finally, move and convert to the destination. */
2269 if (!useless_type_conversion_p (TREE_TYPE (dst), TREE_TYPE (var)))
2270 {
2271 if (INTEGRAL_TYPE_P (TREE_TYPE (dst)))
2272 var = fold_convert (TREE_TYPE (dst), var);
2273 else
2274 var = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (dst), var);
2275
2276 /* If the destination is not a register the conversion needs
2277 to be a separate statement. */
2278 if (!is_gimple_reg (dst))
2279 {
2280 dtmp = make_rename_temp (TREE_TYPE (dst), "SR");
2281 stmt = gimple_build_assign (dtmp, var);
2282 gimple_seq_add_stmt (&seq, stmt);
2283 var = dtmp;
2284 }
2285 }
2286 stmt = gimple_build_assign (dst, var);
2287 gimple_seq_add_stmt (&seq, stmt);
2288
2289 return seq;
2290 }
2291
2292 /* fold_build3 (BIT_FIELD_REF, ...) sometimes returns a cast. */
2293 if (CONVERT_EXPR_P (dst))
2294 {
2295 STRIP_NOPS (dst);
2296 src = fold_convert (TREE_TYPE (dst), src);
2297 }
2298 /* It was hoped that we could perform some type sanity checking
2299 here, but since front-ends can emit accesses of fields in types
2300 different from their nominal types and copy structures containing
2301 them as a whole, we'd have to handle such differences here.
2302 Since such accesses under different types require compatibility
2303 anyway, there's little point in making tests and/or adding
2304 conversions to ensure the types of src and dst are the same.
2305 So we just assume type differences at this point are ok.
2306 The only exception we make here is pointer types, which can differ
2307 in, e.g., structurally equal but non-identical RECORD_TYPEs. */
2308 else if (POINTER_TYPE_P (TREE_TYPE (dst))
2309 && !useless_type_conversion_p (TREE_TYPE (dst), TREE_TYPE (src)))
2310 src = fold_convert (TREE_TYPE (dst), src);
2311
2312 stmt = gimple_build_assign (dst, src);
2313 gimple_seq_add_stmt (&seq, stmt);
2314 return seq;
2315 }
2316
2317 /* BIT_FIELD_REFs must not be shared. sra_build_elt_assignment()
2318 takes care of assignments, but we must create copies for uses. */
2319 #define REPLDUP(t) (TREE_CODE (t) != BIT_FIELD_REF ? (t) : unshare_expr (t))
2320
2321 /* Emit an assignment from SRC to DST, but if DST is a scalarizable
2322 BIT_FIELD_REF, turn it into bit operations. */
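/* Schematically, assuming a little-endian target, for

     BIT_FIELD_REF <var, 8, 16> = src

   the code below builds roughly (a sketch, not a verbatim dump):

     SR.1 = var & 0xff00ffff;        clear bits [16,24) of the word
     SR.2 = (utype) src << 16;       position the unsigned source
     var  = SR.1 | SR.2;             merge the two

   with extra temporaries and conversions as the types require.  */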
2323
2324 static gimple_seq
2325 sra_build_bf_assignment (tree dst, tree src)
2326 {
2327 tree var, type, utype, tmp, tmp2, tmp3;
2328 gimple_seq seq;
2329 gimple stmt;
2330 tree cst, cst2, mask;
2331 tree minshift, maxshift;
2332
2333 if (TREE_CODE (dst) != BIT_FIELD_REF)
2334 return sra_build_assignment (dst, src);
2335
2336 var = TREE_OPERAND (dst, 0);
2337
2338 if (!scalar_bitfield_p (dst))
2339 return sra_build_assignment (REPLDUP (dst), src);
2340
2341 seq = NULL;
2342
2343 cst = fold_convert (bitsizetype, TREE_OPERAND (dst, 2));
2344 cst2 = size_binop (PLUS_EXPR,
2345 fold_convert (bitsizetype, TREE_OPERAND (dst, 1)),
2346 cst);
2347
2348 if (BYTES_BIG_ENDIAN)
2349 {
2350 maxshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst);
2351 minshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst2);
2352 }
2353 else
2354 {
2355 maxshift = cst2;
2356 minshift = cst;
2357 }
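  /* Worked example (illustrative): for BIT_FIELD_REF <var, 8, 16> with a
     32-bit VAR, cst == 16 and cst2 == 24, so little-endian targets get
     minshift == 16, maxshift == 24, while big-endian targets get
     minshift == 8, maxshift == 16.  */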
2358
2359 type = TREE_TYPE (var);
2360 if (!INTEGRAL_TYPE_P (type))
2361 type = lang_hooks.types.type_for_size
2362 (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (var))), 1);
2363 if (TYPE_UNSIGNED (type))
2364 utype = type;
2365 else
2366 utype = unsigned_type_for (type);
2367
2368 mask = build_int_cst_wide (utype, 1, 0);
2369 if (TREE_INT_CST_LOW (maxshift) == TYPE_PRECISION (utype))
2370 cst = build_int_cst_wide (utype, 0, 0);
2371 else
2372 cst = int_const_binop (LSHIFT_EXPR, mask, maxshift, true);
2373 if (integer_zerop (minshift))
2374 cst2 = mask;
2375 else
2376 cst2 = int_const_binop (LSHIFT_EXPR, mask, minshift, true);
2377 mask = int_const_binop (MINUS_EXPR, cst, cst2, true);
2378 mask = fold_build1 (BIT_NOT_EXPR, utype, mask);
2379
2380 if (TYPE_MAIN_VARIANT (utype) != TYPE_MAIN_VARIANT (TREE_TYPE (var))
2381 && !integer_zerop (mask))
2382 {
2383 tmp = var;
2384 if (!is_gimple_variable (tmp))
2385 tmp = unshare_expr (var);
2386
2387 tmp2 = make_rename_temp (utype, "SR");
2388
2389 if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
2390 stmt = gimple_build_assign (tmp2, fold_convert (utype, tmp));
2391 else
2392 stmt = gimple_build_assign (tmp2, fold_build1 (VIEW_CONVERT_EXPR,
2393 utype, tmp));
2394 gimple_seq_add_stmt (&seq, stmt);
2395 }
2396 else
2397 tmp2 = var;
2398
2399 if (!integer_zerop (mask))
2400 {
2401 tmp = make_rename_temp (utype, "SR");
2402 stmt = gimple_build_assign (tmp, fold_build2 (BIT_AND_EXPR, utype,
2403 tmp2, mask));
2404 gimple_seq_add_stmt (&seq, stmt);
2405 }
2406 else
2407 tmp = mask;
2408
2409 if (is_gimple_reg (src) && INTEGRAL_TYPE_P (TREE_TYPE (src)))
2410 tmp2 = src;
2411 else if (INTEGRAL_TYPE_P (TREE_TYPE (src)))
2412 {
2413 gimple_seq tmp_seq;
2414 tmp2 = make_rename_temp (TREE_TYPE (src), "SR");
2415 tmp_seq = sra_build_assignment (tmp2, src);
2416 gimple_seq_add_seq (&seq, tmp_seq);
2417 }
2418 else
2419 {
2420 gimple_seq tmp_seq;
2421 tmp2 = make_rename_temp
2422 (lang_hooks.types.type_for_size
2423 (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (src))),
2424 1), "SR");
2425 tmp_seq = sra_build_assignment (tmp2, fold_build1 (VIEW_CONVERT_EXPR,
2426 TREE_TYPE (tmp2), src));
2427 gimple_seq_add_seq (&seq, tmp_seq);
2428 }
2429
2430 if (!TYPE_UNSIGNED (TREE_TYPE (tmp2)))
2431 {
2432 gimple_seq tmp_seq;
2433 tree ut = unsigned_type_for (TREE_TYPE (tmp2));
2434 tmp3 = make_rename_temp (ut, "SR");
2435 tmp2 = fold_convert (ut, tmp2);
2436 tmp_seq = sra_build_assignment (tmp3, tmp2);
2437 gimple_seq_add_seq (&seq, tmp_seq);
2438
2439 tmp2 = fold_build1 (BIT_NOT_EXPR, utype, mask);
2440 tmp2 = int_const_binop (RSHIFT_EXPR, tmp2, minshift, true);
2441 tmp2 = fold_convert (ut, tmp2);
2442 tmp2 = fold_build2 (BIT_AND_EXPR, ut, tmp3, tmp2);
2443
2444 if (tmp3 != tmp2)
2445 {
2446 tmp3 = make_rename_temp (ut, "SR");
2447 tmp_seq = sra_build_assignment (tmp3, tmp2);
2448 gimple_seq_add_seq (&seq, tmp_seq);
2449 }
2450
2451 tmp2 = tmp3;
2452 }
2453
2454 if (TYPE_MAIN_VARIANT (TREE_TYPE (tmp2)) != TYPE_MAIN_VARIANT (utype))
2455 {
2456 gimple_seq tmp_seq;
2457 tmp3 = make_rename_temp (utype, "SR");
2458 tmp2 = fold_convert (utype, tmp2);
2459 tmp_seq = sra_build_assignment (tmp3, tmp2);
2460 gimple_seq_add_seq (&seq, tmp_seq);
2461 tmp2 = tmp3;
2462 }
2463
2464 if (!integer_zerop (minshift))
2465 {
2466 tmp3 = make_rename_temp (utype, "SR");
2467 stmt = gimple_build_assign (tmp3, fold_build2 (LSHIFT_EXPR, utype,
2468 tmp2, minshift));
2469 gimple_seq_add_stmt (&seq, stmt);
2470 tmp2 = tmp3;
2471 }
2472
2473 if (utype != TREE_TYPE (var))
2474 tmp3 = make_rename_temp (utype, "SR");
2475 else
2476 tmp3 = var;
2477 stmt = gimple_build_assign (tmp3, fold_build2 (BIT_IOR_EXPR, utype,
2478 tmp, tmp2));
2479 gimple_seq_add_stmt (&seq, stmt);
2480
2481 if (tmp3 != var)
2482 {
2483 if (TREE_TYPE (var) == type)
2484 stmt = gimple_build_assign (var, fold_convert (type, tmp3));
2485 else
2486 stmt = gimple_build_assign (var, fold_build1 (VIEW_CONVERT_EXPR,
2487 TREE_TYPE (var), tmp3));
2488 gimple_seq_add_stmt (&seq, stmt);
2489 }
2490
2491 return seq;
2492 }
2493
2494 /* Expand an assignment of SRC to the scalarized representation of
2495 ELT. If it is a field group, try to widen the assignment to cover
2496 the full variable. */
2497
2498 static gimple_seq
2499 sra_build_elt_assignment (struct sra_elt *elt, tree src)
2500 {
2501 tree dst = elt->replacement;
2502 tree var, tmp, cst, cst2;
2503 gimple stmt;
2504 gimple_seq seq;
2505
2506 if (TREE_CODE (dst) != BIT_FIELD_REF
2507 || !elt->in_bitfld_block)
2508 return sra_build_assignment (REPLDUP (dst), src);
2509
2510 var = TREE_OPERAND (dst, 0);
2511
2512 /* Try to widen the assignment to the entire variable.
2513 We need the source to be a BIT_FIELD_REF as well, such that, for
2514 BIT_FIELD_REF<d,sz,dp> = BIT_FIELD_REF<s,sz,sp>,
2515 by design, conditions are met such that we can turn it into
2516 d = BIT_FIELD_REF<s,dw,sp-dp>. */
2517 if (elt->in_bitfld_block == 2
2518 && TREE_CODE (src) == BIT_FIELD_REF)
2519 {
2520 tmp = src;
2521 cst = TYPE_SIZE (TREE_TYPE (var));
2522 cst2 = size_binop (MINUS_EXPR, TREE_OPERAND (src, 2),
2523 TREE_OPERAND (dst, 2));
2524
2525 src = TREE_OPERAND (src, 0);
2526
2527 /* Avoid full-width bit-fields. */
2528 if (integer_zerop (cst2)
2529 && tree_int_cst_equal (cst, TYPE_SIZE (TREE_TYPE (src))))
2530 {
2531 if (INTEGRAL_TYPE_P (TREE_TYPE (src))
2532 && !TYPE_UNSIGNED (TREE_TYPE (src)))
2533 src = fold_convert (unsigned_type_for (TREE_TYPE (src)), src);
2534
2535 /* If a single conversion won't do, we'll need a statement
2536 sequence. */
2537 if (TYPE_MAIN_VARIANT (TREE_TYPE (var))
2538 != TYPE_MAIN_VARIANT (TREE_TYPE (src)))
2539 {
2540 gimple_seq tmp_seq;
2541 seq = NULL;
2542
2543 if (!INTEGRAL_TYPE_P (TREE_TYPE (src)))
2544 src = fold_build1 (VIEW_CONVERT_EXPR,
2545 lang_hooks.types.type_for_size
2546 (TREE_INT_CST_LOW
2547 (TYPE_SIZE (TREE_TYPE (src))),
2548 1), src);
2549 gcc_assert (TYPE_UNSIGNED (TREE_TYPE (src)));
2550
2551 tmp = make_rename_temp (TREE_TYPE (src), "SR");
2552 stmt = gimple_build_assign (tmp, src);
2553 gimple_seq_add_stmt (&seq, stmt);
2554
2555 tmp_seq = sra_build_assignment (var,
2556 fold_convert (TREE_TYPE (var),
2557 tmp));
2558 gimple_seq_add_seq (&seq, tmp_seq);
2559
2560 return seq;
2561 }
2562
2563 src = fold_convert (TREE_TYPE (var), src);
2564 }
2565 else
2566 {
2567 src = fold_convert (TREE_TYPE (var), tmp);
2568 }
2569
2570 return sra_build_assignment (var, src);
2571 }
2572
2573 return sra_build_bf_assignment (dst, src);
2574 }
2575
2576 /* Generate a set of assignment statements in *SEQ_P to copy all
2577 instantiated elements under ELT to or from the equivalent structure
2578 rooted at EXPR. COPY_OUT controls the direction of the copy, with
2579 true meaning to copy out of EXPR into ELT. */
2580
2581 static void
2582 generate_copy_inout (struct sra_elt *elt, bool copy_out, tree expr,
2583 gimple_seq *seq_p)
2584 {
2585 struct sra_elt *c;
2586 gimple_seq tmp_seq;
2587 tree t;
2588
2589 if (!copy_out && TREE_CODE (expr) == SSA_NAME
2590 && TREE_CODE (TREE_TYPE (expr)) == COMPLEX_TYPE)
2591 {
2592 tree r, i;
2593
2594 c = lookup_element (elt, integer_zero_node, NULL, NO_INSERT);
2595 r = c->replacement;
2596 c = lookup_element (elt, integer_one_node, NULL, NO_INSERT);
2597 i = c->replacement;
2598
2599 t = build2 (COMPLEX_EXPR, elt->type, r, i);
2600 tmp_seq = sra_build_bf_assignment (expr, t);
2601 gcc_assert (gimple_seq_singleton_p (tmp_seq));
2602 SSA_NAME_DEF_STMT (expr) = gimple_seq_first_stmt (tmp_seq);
2603 gimple_seq_add_seq (seq_p, tmp_seq);
2604 }
2605 else if (elt->replacement)
2606 {
2607 if (copy_out)
2608 tmp_seq = sra_build_elt_assignment (elt, expr);
2609 else
2610 tmp_seq = sra_build_bf_assignment (expr, REPLDUP (elt->replacement));
2611 gimple_seq_add_seq (seq_p, tmp_seq);
2612 }
2613 else
2614 {
2615 FOR_EACH_ACTUAL_CHILD (c, elt)
2616 {
2617 t = generate_one_element_ref (c, unshare_expr (expr));
2618 generate_copy_inout (c, copy_out, t, seq_p);
2619 }
2620 }
2621 }
2622
2623 /* Generate a set of assignment statements in *SEQ_P to copy all instantiated
2624 elements under SRC to their counterparts under DST. There must be a 1-1
2625 correspondence of instantiated elements. */
2626
2627 static void
2628 generate_element_copy (struct sra_elt *dst, struct sra_elt *src, gimple_seq *seq_p)
2629 {
2630 struct sra_elt *dc, *sc;
2631
2632 FOR_EACH_ACTUAL_CHILD (dc, dst)
2633 {
2634 sc = lookup_element (src, dc->element, NULL, NO_INSERT);
2635 if (!sc && dc->in_bitfld_block == 2)
2636 {
2637 struct sra_elt *dcs;
2638
2639 FOR_EACH_ACTUAL_CHILD (dcs, dc)
2640 {
2641 sc = lookup_element (src, dcs->element, NULL, NO_INSERT);
2642 gcc_assert (sc);
2643 generate_element_copy (dcs, sc, seq_p);
2644 }
2645
2646 continue;
2647 }
2648
2649 /* If DST and SRC are structs with the same elements, but do not have
2650 the same TYPE_MAIN_VARIANT, then lookup of DST FIELD_DECL in SRC
2651 will fail. Try harder by finding the corresponding FIELD_DECL
2652 in SRC. */
2653 if (!sc)
2654 {
2655 tree f;
2656
2657 gcc_assert (useless_type_conversion_p (dst->type, src->type));
2658 gcc_assert (TREE_CODE (dc->element) == FIELD_DECL);
2659 for (f = TYPE_FIELDS (src->type); f ; f = TREE_CHAIN (f))
2660 if (simple_cst_equal (DECL_FIELD_OFFSET (f),
2661 DECL_FIELD_OFFSET (dc->element)) > 0
2662 && simple_cst_equal (DECL_FIELD_BIT_OFFSET (f),
2663 DECL_FIELD_BIT_OFFSET (dc->element)) > 0
2664 && simple_cst_equal (DECL_SIZE (f),
2665 DECL_SIZE (dc->element)) > 0
2666 && (useless_type_conversion_p (TREE_TYPE (dc->element),
2667 TREE_TYPE (f))
2668 || (POINTER_TYPE_P (TREE_TYPE (dc->element))
2669 && POINTER_TYPE_P (TREE_TYPE (f)))))
2670 break;
2671 gcc_assert (f != NULL_TREE);
2672 sc = lookup_element (src, f, NULL, NO_INSERT);
2673 }
2674
2675 generate_element_copy (dc, sc, seq_p);
2676 }
2677
2678 if (dst->replacement)
2679 {
2680 gimple_seq tmp_seq;
2681
2682 gcc_assert (src->replacement);
2683
2684 tmp_seq = sra_build_elt_assignment (dst, REPLDUP (src->replacement));
2685 gimple_seq_add_seq (seq_p, tmp_seq);
2686 }
2687 }
2688
2689 /* Generate a set of assignment statements in *SEQ_P to zero all instantiated
2690 elements under ELT. In addition, do not assign to elements that have been
2691 marked VISITED but do reset the visited flag; this allows easy coordination
2692 with generate_element_init. */
2693
2694 static void
2695 generate_element_zero (struct sra_elt *elt, gimple_seq *seq_p)
2696 {
2697 struct sra_elt *c;
2698
2699 if (elt->visited)
2700 {
2701 elt->visited = false;
2702 return;
2703 }
2704
2705 if (!elt->in_bitfld_block)
2706 FOR_EACH_ACTUAL_CHILD (c, elt)
2707 generate_element_zero (c, seq_p);
2708
2709 if (elt->replacement)
2710 {
2711 tree t;
2712 gimple_seq tmp_seq;
2713
2714 gcc_assert (elt->is_scalar);
2715 t = fold_convert (elt->type, integer_zero_node);
2716
2717 tmp_seq = sra_build_elt_assignment (elt, t);
2718 gimple_seq_add_seq (seq_p, tmp_seq);
2719 }
2720 }
2721
2722 /* Generate an assignment to ELT's replacement from INIT, which may need
2723 gimplification. Add the result to *SEQ_P. */
2724
2725 static void
2726 generate_one_element_init (struct sra_elt *elt, tree init, gimple_seq *seq_p)
2727 {
2728 gimple_seq tmp_seq = sra_build_elt_assignment (elt, init);
2729 gimple_seq_add_seq (seq_p, tmp_seq);
2730 }
2731
2732 /* Generate a set of assignment statements in *SEQ_P to set all instantiated
2733 elements under ELT with the contents of the initializer INIT. In addition,
2734 mark all assigned elements VISITED; this allows easy coordination with
2735 generate_element_zero. Return false if we found a case we couldn't
2736 handle. */
2737
2738 static bool
2739 generate_element_init_1 (struct sra_elt *elt, tree init, gimple_seq *seq_p)
2740 {
2741 bool result = true;
2742 enum tree_code init_code;
2743 struct sra_elt *sub;
2744 tree t;
2745 unsigned HOST_WIDE_INT idx;
2746 tree value, purpose;
2747
2748 /* We can be passed DECL_INITIAL of a static variable. It might have a
2749 conversion, which we strip off here. */
2750 STRIP_USELESS_TYPE_CONVERSION (init);
2751 init_code = TREE_CODE (init);
2752
2753 if (elt->is_scalar)
2754 {
2755 if (elt->replacement)
2756 {
2757 generate_one_element_init (elt, init, seq_p);
2758 elt->visited = true;
2759 }
2760 return result;
2761 }
2762
2763 switch (init_code)
2764 {
2765 case COMPLEX_CST:
2766 case COMPLEX_EXPR:
2767 FOR_EACH_ACTUAL_CHILD (sub, elt)
2768 {
2769 if (sub->element == integer_zero_node)
2770 t = (init_code == COMPLEX_EXPR
2771 ? TREE_OPERAND (init, 0) : TREE_REALPART (init));
2772 else
2773 t = (init_code == COMPLEX_EXPR
2774 ? TREE_OPERAND (init, 1) : TREE_IMAGPART (init));
2775 result &= generate_element_init_1 (sub, t, seq_p);
2776 }
2777 break;
2778
2779 case CONSTRUCTOR:
2780 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), idx, purpose, value)
2781 {
2782 /* Array constructors are routinely created with NULL indices. */
2783 if (purpose == NULL_TREE)
2784 {
2785 result = false;
2786 break;
2787 }
2788 if (TREE_CODE (purpose) == RANGE_EXPR)
2789 {
2790 tree lower = TREE_OPERAND (purpose, 0);
2791 tree upper = TREE_OPERAND (purpose, 1);
2792
2793 while (1)
2794 {
2795 sub = lookup_element (elt, lower, NULL, NO_INSERT);
2796 if (sub != NULL)
2797 result &= generate_element_init_1 (sub, value, seq_p);
2798 if (tree_int_cst_equal (lower, upper))
2799 break;
2800 lower = int_const_binop (PLUS_EXPR, lower,
2801 integer_one_node, true);
2802 }
2803 }
2804 else
2805 {
2806 sub = lookup_element (elt, purpose, NULL, NO_INSERT);
2807 if (sub != NULL)
2808 result &= generate_element_init_1 (sub, value, seq_p);
2809 }
2810 }
2811 break;
2812
2813 default:
2814 elt->visited = true;
2815 result = false;
2816 }
2817
2818 return result;
2819 }
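/* For illustration (hypothetical declaration): given

     struct S { int a; int b; } s = { 1, 2 };

   the CONSTRUCTOR case above looks up the instantiated element for
   each FIELD_DECL and emits, roughly,

     SR.s_a = 1;
     SR.s_b = 2;

   while a CONSTRUCTOR with a NULL index makes it return false and
   fall back to a block initialization.  */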
2820
2821 /* A wrapper function for generate_element_init_1 that handles cleanup after
2822 gimplification. */
2823
2824 static bool
2825 generate_element_init (struct sra_elt *elt, tree init, gimple_seq *seq_p)
2826 {
2827 bool ret;
2828 struct gimplify_ctx gctx;
2829
2830 push_gimplify_context (&gctx);
2831 ret = generate_element_init_1 (elt, init, seq_p);
2832 pop_gimplify_context (NULL);
2833
2834 /* The replacement can expose previously unreferenced variables. */
2835 if (ret && *seq_p)
2836 {
2837 gimple_stmt_iterator i;
2838
2839 for (i = gsi_start (*seq_p); !gsi_end_p (i); gsi_next (&i))
2840 find_new_referenced_vars (gsi_stmt (i));
2841 }
2842
2843 return ret;
2844 }
2845
2846 /* Insert a gimple_seq SEQ on all the outgoing edges of BB. Note that
2847 if BB has more than one edge, SEQ will be replicated for each edge.
2848 Also, abnormal edges will be ignored. */
2849
2850 void
2851 insert_edge_copies_seq (gimple_seq seq, basic_block bb)
2852 {
2853 edge e;
2854 edge_iterator ei;
2855 unsigned n_copies = -1;
2856
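  /* N_COPIES starts at -1 so that after the first loop it holds the
     number of non-abnormal successor edges minus one; the second loop
     then attaches copies of SEQ to all but the last such edge, which
     receives SEQ itself.  */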
2857 FOR_EACH_EDGE (e, ei, bb->succs)
2858 if (!(e->flags & EDGE_ABNORMAL))
2859 n_copies++;
2860
2861 FOR_EACH_EDGE (e, ei, bb->succs)
2862 if (!(e->flags & EDGE_ABNORMAL))
2863 gsi_insert_seq_on_edge (e, n_copies-- > 0 ? gimple_seq_copy (seq) : seq);
2864 }
2865
2866 /* Helper function to insert SEQ before GSI, and set up line number info. */
2867
2868 void
2869 sra_insert_before (gimple_stmt_iterator *gsi, gimple_seq seq)
2870 {
2871 gimple stmt = gsi_stmt (*gsi);
2872
2873 if (gimple_has_location (stmt))
2874 annotate_all_with_location (seq, gimple_location (stmt));
2875 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
2876 }
2877
2878 /* Similarly, but insert after GSI. Handles insertion onto edges as well. */
2879
2880 void
2881 sra_insert_after (gimple_stmt_iterator *gsi, gimple_seq seq)
2882 {
2883 gimple stmt = gsi_stmt (*gsi);
2884
2885 if (gimple_has_location (stmt))
2886 annotate_all_with_location (seq, gimple_location (stmt));
2887
2888 if (stmt_ends_bb_p (stmt))
2889 insert_edge_copies_seq (seq, gsi_bb (*gsi));
2890 else
2891 gsi_insert_seq_after (gsi, seq, GSI_SAME_STMT);
2892 }
2893
2894 /* Similarly, but replace the statement at GSI. */
2895
2896 static void
2897 sra_replace (gimple_stmt_iterator *gsi, gimple_seq seq)
2898 {
2899 sra_insert_before (gsi, seq);
2900 gsi_remove (gsi, false);
2901 if (gsi_end_p (*gsi))
2902 *gsi = gsi_last (gsi_seq (*gsi));
2903 else
2904 gsi_prev (gsi);
2905 }
2906
2907 /* Data structure that bitfield_overlaps_p fills in with information
2908 about the element passed in and how much of it overlaps with the
2909 bit-range passed to it. */
2910
2911 struct bitfield_overlap_info
2912 {
2913 /* The bit-length of an element. */
2914 tree field_len;
2915
2916 /* The bit-position of the element in its parent. */
2917 tree field_pos;
2918
2919 /* The number of bits of the element that overlap with the incoming
2920 bit range. */
2921 tree overlap_len;
2922
2923 /* The first bit of the element that overlaps with the incoming bit
2924 range. */
2925 tree overlap_pos;
2926 };
2927
2928 /* Return true if a BIT_FIELD_REF<(FLD->parent), BLEN, BPOS>
2929 expression (referenced as BF below) accesses any of the bits in FLD,
2930 false if it doesn't. If DATA is non-null, its field_len and
2931 field_pos are filled in such that BIT_FIELD_REF<(FLD->parent),
2932 field_len, field_pos> (referenced as BFLD below) represents the
2933 entire field FLD->element, and BIT_FIELD_REF<BFLD, overlap_len,
2934 overlap_pos> represents the portion of the entire field that
2935 overlaps with BF. */
2936
2937 static bool
2938 bitfield_overlaps_p (tree blen, tree bpos, struct sra_elt *fld,
2939 struct bitfield_overlap_info *data)
2940 {
2941 tree flen, fpos;
2942 bool ret;
2943
2944 if (TREE_CODE (fld->element) == FIELD_DECL)
2945 {
2946 flen = fold_convert (bitsizetype, DECL_SIZE (fld->element));
2947 fpos = fold_convert (bitsizetype, DECL_FIELD_OFFSET (fld->element));
2948 fpos = size_binop (MULT_EXPR, fpos, bitsize_int (BITS_PER_UNIT));
2949 fpos = size_binop (PLUS_EXPR, fpos, DECL_FIELD_BIT_OFFSET (fld->element));
2950 }
2951 else if (TREE_CODE (fld->element) == BIT_FIELD_REF)
2952 {
2953 flen = fold_convert (bitsizetype, TREE_OPERAND (fld->element, 1));
2954 fpos = fold_convert (bitsizetype, TREE_OPERAND (fld->element, 2));
2955 }
2956 else if (TREE_CODE (fld->element) == INTEGER_CST)
2957 {
2958 flen = fold_convert (bitsizetype, TYPE_SIZE (fld->type));
2959 fpos = fold_convert (bitsizetype, fld->element);
2960 fpos = size_binop (MULT_EXPR, flen, fpos);
2961 }
2962 else
2963 gcc_unreachable ();
2964
2965 gcc_assert (host_integerp (blen, 1)
2966 && host_integerp (bpos, 1)
2967 && host_integerp (flen, 1)
2968 && host_integerp (fpos, 1));
2969
2970 ret = ((!tree_int_cst_lt (fpos, bpos)
2971 && tree_int_cst_lt (size_binop (MINUS_EXPR, fpos, bpos),
2972 blen))
2973 || (!tree_int_cst_lt (bpos, fpos)
2974 && tree_int_cst_lt (size_binop (MINUS_EXPR, bpos, fpos),
2975 flen)));
2976
2977 if (!ret)
2978 return ret;
2979
2980 if (data)
2981 {
2982 tree bend, fend;
2983
2984 data->field_len = flen;
2985 data->field_pos = fpos;
2986
2987 fend = size_binop (PLUS_EXPR, fpos, flen);
2988 bend = size_binop (PLUS_EXPR, bpos, blen);
2989
2990 if (tree_int_cst_lt (bend, fend))
2991 data->overlap_len = size_binop (MINUS_EXPR, bend, fpos);
2992 else
2993 data->overlap_len = NULL;
2994
2995 if (tree_int_cst_lt (fpos, bpos))
2996 {
2997 data->overlap_pos = size_binop (MINUS_EXPR, bpos, fpos);
2998 data->overlap_len = size_binop (MINUS_EXPR,
2999 data->overlap_len
3000 ? data->overlap_len
3001 : data->field_len,
3002 data->overlap_pos);
3003 }
3004 else
3005 data->overlap_pos = NULL;
3006 }
3007
3008 return ret;
3009 }
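/* Worked example (illustrative numbers): for a field with fpos == 8,
   flen == 8 and an incoming bit-range with bpos == 4, blen == 8, the
   ranges overlap; bend == 12 < fend == 16, so overlap_len == bend -
   fpos == 4, and since fpos >= bpos, overlap_pos == NULL: only the
   first four bits of the field are covered.  */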
3010
3011 /* Add to *SEQ_P a sequence of statements that copies BLEN bits between
3012 VAR and the scalarized elements of ELT, starting at bit VPOS of VAR
3013 and at bit BPOS of ELT. The direction of the copy is given by
3014 TO_VAR. */
3015
3016 static void
3017 sra_explode_bitfield_assignment (tree var, tree vpos, bool to_var,
3018 gimple_seq *seq_p, tree blen, tree bpos,
3019 struct sra_elt *elt)
3020 {
3021 struct sra_elt *fld;
3022 struct bitfield_overlap_info flp;
3023
3024 FOR_EACH_ACTUAL_CHILD (fld, elt)
3025 {
3026 tree flen, fpos;
3027
3028 if (!bitfield_overlaps_p (blen, bpos, fld, &flp))
3029 continue;
3030
3031 flen = flp.overlap_len ? flp.overlap_len : flp.field_len;
3032 fpos = flp.overlap_pos ? flp.overlap_pos : bitsize_int (0);
3033
3034 if (fld->replacement)
3035 {
3036 tree infld, invar, type;
3037 gimple_seq st;
3038
3039 infld = fld->replacement;
3040
3041 type = TREE_TYPE (infld);
3042 if (TYPE_PRECISION (type) != TREE_INT_CST_LOW (flen))
3043 type = lang_hooks.types.type_for_size (TREE_INT_CST_LOW (flen), 1);
3044 else
3045 type = unsigned_type_for (type);
3046
3047 if (TREE_CODE (infld) == BIT_FIELD_REF)
3048 {
3049 fpos = size_binop (PLUS_EXPR, fpos, TREE_OPERAND (infld, 2));
3050 infld = TREE_OPERAND (infld, 0);
3051 }
3052 else if (BYTES_BIG_ENDIAN && DECL_P (fld->element)
3053 && !tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (infld)),
3054 DECL_SIZE (fld->element)))
3055 {
3056 fpos = size_binop (PLUS_EXPR, fpos,
3057 TYPE_SIZE (TREE_TYPE (infld)));
3058 fpos = size_binop (MINUS_EXPR, fpos,
3059 DECL_SIZE (fld->element));
3060 }
3061
3062 infld = fold_build3 (BIT_FIELD_REF, type, infld, flen, fpos);
3063
3064 invar = size_binop (MINUS_EXPR, flp.field_pos, bpos);
3065 if (flp.overlap_pos)
3066 invar = size_binop (PLUS_EXPR, invar, flp.overlap_pos);
3067 invar = size_binop (PLUS_EXPR, invar, vpos);
3068
3069 invar = fold_build3 (BIT_FIELD_REF, type, var, flen, invar);
3070
3071 if (to_var)
3072 st = sra_build_bf_assignment (invar, infld);
3073 else
3074 st = sra_build_bf_assignment (infld, invar);
3075
3076 gimple_seq_add_seq (seq_p, st);
3077 }
3078 else
3079 {
3080 tree sub = size_binop (MINUS_EXPR, flp.field_pos, bpos);
3081 sub = size_binop (PLUS_EXPR, vpos, sub);
3082 if (flp.overlap_pos)
3083 sub = size_binop (PLUS_EXPR, sub, flp.overlap_pos);
3084
3085 sra_explode_bitfield_assignment (var, sub, to_var, seq_p,
3086 flen, fpos, fld);
3087 }
3088 }
3089 }
3090
3091 /* Add to *SEQ_BEFORE_P statements that copy scalarized members of ELT
3092 that overlap with BIT_FIELD_REF<(ELT->element), BLEN, BPOS> back
3093 into the full variable, and to *SEQ_AFTER_P, if non-NULL, statements
3094 that copy the (presumably modified) overlapping portions of the
3095 full variable back to the scalarized variables. */
3096
3097 static void
3098 sra_sync_for_bitfield_assignment (gimple_seq *seq_before_p,
3099 gimple_seq *seq_after_p,
3100 tree blen, tree bpos,
3101 struct sra_elt *elt)
3102 {
3103 struct sra_elt *fld;
3104 struct bitfield_overlap_info flp;
3105
3106 FOR_EACH_ACTUAL_CHILD (fld, elt)
3107 if (bitfield_overlaps_p (blen, bpos, fld, &flp))
3108 {
3109 if (fld->replacement || (!flp.overlap_len && !flp.overlap_pos))
3110 {
3111 generate_copy_inout (fld, false, generate_element_ref (fld),
3112 seq_before_p);
3113 mark_no_warning (fld);
3114 if (seq_after_p)
3115 generate_copy_inout (fld, true, generate_element_ref (fld),
3116 seq_after_p);
3117 }
3118 else
3119 {
3120 tree flen = flp.overlap_len ? flp.overlap_len : flp.field_len;
3121 tree fpos = flp.overlap_pos ? flp.overlap_pos : bitsize_int (0);
3122
3123 sra_sync_for_bitfield_assignment (seq_before_p, seq_after_p,
3124 flen, fpos, fld);
3125 }
3126 }
3127 }
3128
3129 /* Scalarize a USE. To recap, this is either a simple reference to ELT,
3130 if ELT is scalar, or some occurrence of ELT that requires a complete
3131 aggregate. IS_OUTPUT is true if ELT is being modified. */
3132
3133 static void
3134 scalarize_use (struct sra_elt *elt, tree *expr_p, gimple_stmt_iterator *gsi,
3135 bool is_output, bool use_all)
3136 {
3137 gimple stmt = gsi_stmt (*gsi);
3138 tree bfexpr;
3139
3140 if (elt->replacement)
3141 {
3142 tree replacement = elt->replacement;
3143
3144 /* If we have a replacement, then updating the reference is as
3145 simple as modifying the existing statement in place. */
3146 if (is_output
3147 && TREE_CODE (elt->replacement) == BIT_FIELD_REF
3148 && is_gimple_reg (TREE_OPERAND (elt->replacement, 0))
3149 && is_gimple_assign (stmt)
3150 && gimple_assign_lhs_ptr (stmt) == expr_p)
3151 {
3152 gimple_seq newseq;
3153 /* RHS must be a single operand. */
3154 gcc_assert (gimple_assign_single_p (stmt));
3155 newseq = sra_build_elt_assignment (elt, gimple_assign_rhs1 (stmt));
3156 sra_replace (gsi, newseq);
3157 return;
3158 }
3159 else if (!is_output
3160 && TREE_CODE (elt->replacement) == BIT_FIELD_REF
3161 && is_gimple_assign (stmt)
3162 && gimple_assign_rhs1_ptr (stmt) == expr_p)
3163 {
3164 tree tmp = make_rename_temp
3165 (TREE_TYPE (gimple_assign_lhs (stmt)), "SR");
3166 gimple_seq newseq = sra_build_assignment (tmp, REPLDUP (elt->replacement));
3167
3168 sra_insert_before (gsi, newseq);
3169 replacement = tmp;
3170 }
3171 if (is_output)
3172 mark_all_v_defs_stmt (stmt);
3173 *expr_p = REPLDUP (replacement);
3174 update_stmt (stmt);
3175 }
3176 else if (use_all && is_output
3177 && is_gimple_assign (stmt)
3178 && TREE_CODE (bfexpr
3179 = gimple_assign_lhs (stmt)) == BIT_FIELD_REF
3180 && &TREE_OPERAND (bfexpr, 0) == expr_p
3181 && INTEGRAL_TYPE_P (TREE_TYPE (bfexpr))
3182 && TREE_CODE (TREE_TYPE (*expr_p)) == RECORD_TYPE)
3183 {
3184 gimple_seq seq_before = NULL;
3185 gimple_seq seq_after = NULL;
3186 tree blen = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 1));
3187 tree bpos = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 2));
3188 bool update = false;
3189
3190 if (!elt->use_block_copy)
3191 {
3192 tree type = TREE_TYPE (bfexpr);
3193 tree var = make_rename_temp (type, "SR"), tmp, vpos;
3194 gimple st;
3195
3196 gimple_assign_set_lhs (stmt, var);
3197 update = true;
3198
3199 if (!TYPE_UNSIGNED (type))
3200 {
3201 type = unsigned_type_for (type);
3202 tmp = make_rename_temp (type, "SR");
3203 st = gimple_build_assign (tmp, fold_convert (type, var));
3204 gimple_seq_add_stmt (&seq_after, st);
3205 var = tmp;
3206 }
3207
3208 /* If VAR is wider than BLEN bits, it is padded at the
3209 most-significant end. We want to set VPOS such that
3210 <BIT_FIELD_REF VAR BLEN VPOS> would refer to the
3211 least-significant BLEN bits of VAR. */
3212 if (BYTES_BIG_ENDIAN)
3213 vpos = size_binop (MINUS_EXPR, TYPE_SIZE (type), blen);
3214 else
3215 vpos = bitsize_int (0);
3216 sra_explode_bitfield_assignment
3217 (var, vpos, false, &seq_after, blen, bpos, elt);
3218 }
3219 else
3220 sra_sync_for_bitfield_assignment
3221 (&seq_before, &seq_after, blen, bpos, elt);
3222
3223 if (seq_before)
3224 {
3225 mark_all_v_defs_seq (seq_before);
3226 sra_insert_before (gsi, seq_before);
3227 }
3228 if (seq_after)
3229 {
3230 mark_all_v_defs_seq (seq_after);
3231 sra_insert_after (gsi, seq_after);
3232 }
3233
3234 if (update)
3235 update_stmt (stmt);
3236 }
3237 else if (use_all && !is_output
3238 && is_gimple_assign (stmt)
3239 && TREE_CODE (bfexpr
3240 = gimple_assign_rhs1 (stmt)) == BIT_FIELD_REF
3241 && &TREE_OPERAND (gimple_assign_rhs1 (stmt), 0) == expr_p
3242 && INTEGRAL_TYPE_P (TREE_TYPE (bfexpr))
3243 && TREE_CODE (TREE_TYPE (*expr_p)) == RECORD_TYPE)
3244 {
3245 gimple_seq seq = NULL;
3246 tree blen = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 1));
3247 tree bpos = fold_convert (bitsizetype, TREE_OPERAND (bfexpr, 2));
3248 bool update = false;
3249
3250 if (!elt->use_block_copy)
3251 {
3252 tree type = TREE_TYPE (bfexpr);
3253 tree var, vpos;
3254
3255 if (!TYPE_UNSIGNED (type))
3256 type = unsigned_type_for (type);
3257
3258 var = make_rename_temp (type, "SR");
3259
3260 gimple_seq_add_stmt (&seq,
3261 gimple_build_assign
3262 (var, build_int_cst_wide (type, 0, 0)));
3263
3264 /* If VAR is wider than BLEN bits, it is padded at the
3265 most-significant end. We want to set VPOS such that
3266 <BIT_FIELD_REF VAR BLEN VPOS> would refer to the
3267 least-significant BLEN bits of VAR. */
3268 if (BYTES_BIG_ENDIAN)
3269 vpos = size_binop (MINUS_EXPR, TYPE_SIZE (type), blen);
3270 else
3271 vpos = bitsize_int (0);
3272 sra_explode_bitfield_assignment
3273 (var, vpos, true, &seq, blen, bpos, elt);
3274
3275 gimple_assign_set_rhs1 (stmt, var);
3276 update = true;
3277 }
3278 else
3279 sra_sync_for_bitfield_assignment
3280 (&seq, NULL, blen, bpos, elt);
3281
3282 if (seq)
3283 {
3284 mark_all_v_defs_seq (seq);
3285 sra_insert_before (gsi, seq);
3286 }
3287
3288 if (update)
3289 update_stmt (stmt);
3290 }
3291 else
3292 {
3293 gimple_seq seq = NULL;
3294
3295 /* Otherwise we need some copies. If ELT is being read, then we
3296 want to store all (modified) sub-elements back into the
3297 structure before the reference takes place. If ELT is being
3298 written, then we want to load the changed values back into
3299 our shadow variables. */
3300 /* ??? We don't check modified for reads, we just always write all of
3301 the values. We should be able to record the SSA number of the VOP
3302 for which the values were last read. If that number matches the
3303 SSA number of the VOP in the current statement, then we needn't
3304 emit an assignment. This would also eliminate double writes when
3305 a structure is passed as more than one argument to a function call.
3306 This optimization would be most effective if sra_walk_function
3307 processed the blocks in dominator order. */
3308
3309 generate_copy_inout (elt, is_output, generate_element_ref (elt), &seq);
3310 if (seq == NULL)
3311 return;
3312 mark_all_v_defs_seq (seq);
3313 if (is_output)
3314 sra_insert_after (gsi, seq);
3315 else
3316 {
3317 sra_insert_before (gsi, seq);
3318 if (use_all)
3319 mark_no_warning (elt);
3320 }
3321 }
3322 }
3323
3324 /* Scalarize a COPY. To recap, this is an assignment statement between
3325 two scalarizable references, LHS_ELT and RHS_ELT. */
3326
3327 static void
3328 scalarize_copy (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt,
3329 gimple_stmt_iterator *gsi)
3330 {
3331 gimple_seq seq;
3332 gimple stmt;
3333
3334 if (lhs_elt->replacement && rhs_elt->replacement)
3335 {
3336 /* If we have two scalar operands, modify the existing statement. */
3337 stmt = gsi_stmt (*gsi);
3338
3339 /* See the commentary in sra_walk_function concerning
3340 RETURN_EXPR, and why we should never see one here. */
3341 gcc_assert (is_gimple_assign (stmt));
3342 gcc_assert (gimple_assign_copy_p (stmt));
3343
3344
3345 gimple_assign_set_lhs (stmt, lhs_elt->replacement);
3346 gimple_assign_set_rhs1 (stmt, REPLDUP (rhs_elt->replacement));
3347 update_stmt (stmt);
3348 }
3349 else if (lhs_elt->use_block_copy || rhs_elt->use_block_copy)
3350 {
3351 /* If either side requires a block copy, then sync the RHS back
3352 to the original structure, leave the original assignment
3353 statement (which will perform the block copy), then load the
3354 LHS values out of its now-updated original structure. */
3355 /* ??? Could perform a modified pair-wise element copy. That
3356 would at least allow those elements that are instantiated in
3357 both structures to be optimized well. */
3358
3359 seq = NULL;
3360 generate_copy_inout (rhs_elt, false,
3361 generate_element_ref (rhs_elt), &seq);
3362 if (seq)
3363 {
3364 mark_all_v_defs_seq (seq);
3365 sra_insert_before (gsi, seq);
3366 }
3367
3368 seq = NULL;
3369 generate_copy_inout (lhs_elt, true,
3370 generate_element_ref (lhs_elt), &seq);
3371 if (seq)
3372 {
3373 mark_all_v_defs_seq (seq);
3374 sra_insert_after (gsi, seq);
3375 }
3376 }
3377 else
3378 {
3379 /* Otherwise both sides must be fully instantiated, in which
3380 case we perform pair-wise element assignments and replace the
3381 original block copy statement. */
3382
3383 stmt = gsi_stmt (*gsi);
3384 mark_all_v_defs_stmt (stmt);
3385
3386 seq = NULL;
3387 generate_element_copy (lhs_elt, rhs_elt, &seq);
3388 gcc_assert (seq);
3389 mark_all_v_defs_seq (seq);
3390 sra_replace (gsi, seq);
3391 }
3392 }
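/* For illustration (hypothetical types): when both sides of

     a = b;   where struct { int x; int y; } a, b;

   are fully instantiated, the final branch above replaces the block
   copy with the pair-wise element assignments

     SR.a_x = SR.b_x;
     SR.a_y = SR.b_y;

   (a sketch of the intent; the actual replacement names differ).  */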
3393
3394 /* Scalarize an INIT. To recap, this is an assignment to a scalarizable
3395 reference from some form of constructor: CONSTRUCTOR, COMPLEX_CST or
3396 COMPLEX_EXPR. If RHS is NULL, it should be treated as an empty
3397 CONSTRUCTOR. */
3398
3399 static void
3400 scalarize_init (struct sra_elt *lhs_elt, tree rhs, gimple_stmt_iterator *gsi)
3401 {
3402 bool result = true;
3403 gimple_seq seq = NULL, init_seq = NULL;
3404
3405 /* Generate initialization statements for all members extant in the RHS. */
3406 if (rhs)
3407 {
3408 /* Unshare the expression just in case this is from a decl's DECL_INITIAL. */
3409 rhs = unshare_expr (rhs);
3410 result = generate_element_init (lhs_elt, rhs, &init_seq);
3411 }
3412
3413 if (!result)
3414 {
3415 /* If we failed to convert the entire initializer, then we must
3416 leave the structure assignment in place and must load values
3417 from the structure into the slots for which we did not find
3418 constants. The easiest way to do this is to generate a complete
3419 copy-out, and then follow that with the constant assignments
3420 that we were able to build. DCE will clean things up. */
3421 gimple_seq seq0 = NULL;
3422 generate_copy_inout (lhs_elt, true, generate_element_ref (lhs_elt),
3423 &seq0);
3424 gimple_seq_add_seq (&seq0, seq);
3425 seq = seq0;
3426 }
3427 else
3428 {
3429 /* CONSTRUCTOR is defined such that any member not mentioned is assigned
3430 a zero value. Initialize the rest of the instantiated elements. */
3431 generate_element_zero (lhs_elt, &seq);
3432 gimple_seq_add_seq (&seq, init_seq);
3433 }
3434
3435 if (lhs_elt->use_block_copy || !result)
3436 {
3437 /* Since LHS is not fully instantiated, we must leave the structure
3438 assignment in place. Treating this case differently from a USE
3439 exposes constants to later optimizations. */
3440 if (seq)
3441 {
3442 mark_all_v_defs_seq (seq);
3443 sra_insert_after (gsi, seq);
3444 }
3445 }
3446 else
3447 {
3448 /* The LHS is fully instantiated. The list of initializations
3449 replaces the original structure assignment. */
3450 gcc_assert (seq);
3451 mark_all_v_defs_stmt (gsi_stmt (*gsi));
3452 mark_all_v_defs_seq (seq);
3453 sra_replace (gsi, seq);
3454 }
3455 }
3456
3457 /* A subroutine of scalarize_ldst called via walk_tree. Set TREE_THIS_NOTRAP
3458 on all INDIRECT_REFs. */
3459
3460 static tree
3461 mark_notrap (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
3462 {
3463 tree t = *tp;
3464
3465 if (TREE_CODE (t) == INDIRECT_REF)
3466 {
3467 TREE_THIS_NOTRAP (t) = 1;
3468 *walk_subtrees = 0;
3469 }
3470 else if (IS_TYPE_OR_DECL_P (t))
3471 *walk_subtrees = 0;
3472
3473 return NULL;
3474 }
3475
3476 /* Scalarize a LDST. To recap, this is an assignment between one scalarizable
3477 reference ELT and one non-scalarizable reference OTHER. IS_OUTPUT is true
3478 if ELT is on the left-hand side. */
3479
3480 static void
3481 scalarize_ldst (struct sra_elt *elt, tree other,
3482 gimple_stmt_iterator *gsi, bool is_output)
3483 {
3484 /* Shouldn't have gotten called for a scalar. */
3485 gcc_assert (!elt->replacement);
3486
3487 if (elt->use_block_copy)
3488 {
3489 /* Since ELT is not fully instantiated, we have to leave the
3490 block copy in place. Treat this as a USE. */
3491 scalarize_use (elt, NULL, gsi, is_output, false);
3492 }
3493 else
3494 {
3495 /* The interesting case is when ELT is fully instantiated. In this
3496 case we can have each element stored/loaded directly to/from the
3497 corresponding slot in OTHER. This avoids a block copy. */
3498
3499 gimple_seq seq = NULL;
3500 gimple stmt = gsi_stmt (*gsi);
3501
3502 mark_all_v_defs_stmt (stmt);
3503 generate_copy_inout (elt, is_output, other, &seq);
3504 gcc_assert (seq);
3505 mark_all_v_defs_seq (seq);
3506
3507 /* Preserve EH semantics. */
3508 if (stmt_ends_bb_p (stmt))
3509 {
3510 gimple_stmt_iterator si;
3511 gimple first;
3512 gimple_seq blist = NULL;
3513 bool thr = stmt_could_throw_p (stmt);
3514
3515 /* If the last statement of this BB created an EH edge
3516 before scalarization, we have to locate the first
3517 statement that can throw in the new statement list and
3518 use that as the last statement of this BB, such that EH
3519 semantics is preserved. All statements up to this one
3520 are added to the same BB. All other statements in the
3521 list will be added to normal outgoing edges of the same
3522 BB. If they access any memory, it's the same memory, so
3523 we can assume they won't throw. */
3524 si = gsi_start (seq);
3525 for (first = gsi_stmt (si);
3526 thr && !gsi_end_p (si) && !stmt_could_throw_p (first);
3527 first = gsi_stmt (si))
3528 {
3529 gsi_remove (&si, false);
3530 gimple_seq_add_stmt (&blist, first);
3531 }
3532
3533 /* Extract the first remaining statement from SEQ; this is
3534 the EH statement if there is one. */
3535 gsi_remove (&si, false);
3536
3537 if (blist)
3538 sra_insert_before (gsi, blist);
3539
3540 /* Replace the old statement with this new representative. */
3541 gsi_replace (gsi, first, true);
3542
3543 if (!gsi_end_p (si))
3544 {
3545 /* If any reference would trap, then they all would. And more
3546 to the point, the first would. Therefore none of the rest
3547 will trap since the first didn't. Indicate this by
3548 iterating over the remaining statements and setting
3549 TREE_THIS_NOTRAP on all INDIRECT_REFs. */
3550 do
3551 {
3552 walk_gimple_stmt (&si, NULL, mark_notrap, NULL);
3553 gsi_next (&si);
3554 }
3555 while (!gsi_end_p (si));
3556
3557 insert_edge_copies_seq (seq, gsi_bb (*gsi));
3558 }
3559 }
3560 else
3561 sra_replace (gsi, seq);
3562 }
3563 }
3564
3565 /* Generate initializations for all scalarizable parameters. */
3566
3567 static void
3568 scalarize_parms (void)
3569 {
3570 gimple_seq seq = NULL;
3571 unsigned i;
3572 bitmap_iterator bi;
3573
3574 EXECUTE_IF_SET_IN_BITMAP (needs_copy_in, 0, i, bi)
3575 {
3576 tree var = referenced_var (i);
3577 struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT);
3578 generate_copy_inout (elt, true, var, &seq);
3579 }
3580
3581 if (seq)
3582 {
3583 insert_edge_copies_seq (seq, ENTRY_BLOCK_PTR);
3584 mark_all_v_defs_seq (seq);
3585 }
3586 }
3587
3588 /* Entry point to phase 4. Update the function to match replacements. */
3589
3590 static void
3591 scalarize_function (void)
3592 {
3593 static const struct sra_walk_fns fns = {
3594 scalarize_use, scalarize_copy, scalarize_init, scalarize_ldst, false
3595 };
3596
3597 sra_walk_function (&fns);
3598 scalarize_parms ();
3599 gsi_commit_edge_inserts ();
3600 }
3601
3602 \f
3603 /* Debug helper function. Print ELT in a nice human-readable format. */
3604
3605 static void
3606 dump_sra_elt_name (FILE *f, struct sra_elt *elt)
3607 {
3608 if (elt->parent && TREE_CODE (elt->parent->type) == COMPLEX_TYPE)
3609 {
3610 fputs (elt->element == integer_zero_node ? "__real__ " : "__imag__ ", f);
3611 dump_sra_elt_name (f, elt->parent);
3612 }
3613 else
3614 {
3615 if (elt->parent)
3616 dump_sra_elt_name (f, elt->parent);
3617 if (DECL_P (elt->element))
3618 {
3619 if (TREE_CODE (elt->element) == FIELD_DECL)
3620 fputc ('.', f);
3621 print_generic_expr (f, elt->element, dump_flags);
3622 }
3623 else if (TREE_CODE (elt->element) == BIT_FIELD_REF)
3624 fprintf (f, "$B" HOST_WIDE_INT_PRINT_DEC "F" HOST_WIDE_INT_PRINT_DEC,
3625 tree_low_cst (TREE_OPERAND (elt->element, 2), 1),
3626 tree_low_cst (TREE_OPERAND (elt->element, 1), 1));
3627 else if (TREE_CODE (elt->element) == RANGE_EXPR)
3628 fprintf (f, "[" HOST_WIDE_INT_PRINT_DEC ".." HOST_WIDE_INT_PRINT_DEC "]",
3629 TREE_INT_CST_LOW (TREE_OPERAND (elt->element, 0)),
3630 TREE_INT_CST_LOW (TREE_OPERAND (elt->element, 1)));
3631 else
3632 fprintf (f, "[" HOST_WIDE_INT_PRINT_DEC "]",
3633 TREE_INT_CST_LOW (elt->element));
3634 }
3635 }
3636
3637 /* Likewise, but callable from the debugger. */
3638
3639 void
3640 debug_sra_elt_name (struct sra_elt *elt)
3641 {
3642 dump_sra_elt_name (stderr, elt);
3643 fputc ('\n', stderr);
3644 }
3645
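/* Allocate the type decomposition and instantiation caches, if they
   have not been created yet.  */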
3646 void
3647 sra_init_cache (void)
3648 {
3649 if (sra_type_decomp_cache)
3650 return;
3651
3652 sra_type_decomp_cache = BITMAP_ALLOC (NULL);
3653 sra_type_inst_cache = BITMAP_ALLOC (NULL);
3654 }
3655
3656
3657 /* Main entry point. */
3658
3659 static unsigned int
3660 tree_sra (void)
3661 {
3662 /* Initialize local variables. */
3663 todoflags = 0;
3664 gcc_obstack_init (&sra_obstack);
3665 sra_candidates = BITMAP_ALLOC (NULL);
3666 needs_copy_in = BITMAP_ALLOC (NULL);
3667 sra_init_cache ();
3668 sra_map = htab_create (101, sra_elt_hash, sra_elt_eq, NULL);
3669
3670 /* Scan. If we find anything, instantiate and scalarize. */
3671 if (find_candidates_for_sra ())
3672 {
3673 scan_function ();
3674 decide_instantiations ();
3675 scalarize_function ();
3676 if (!bitmap_empty_p (sra_candidates))
3677 todoflags |= TODO_rebuild_alias;
3678 }
3679
3680 /* Free allocated memory. */
3681 htab_delete (sra_map);
3682 sra_map = NULL;
3683 BITMAP_FREE (sra_candidates);
3684 BITMAP_FREE (needs_copy_in);
3685 BITMAP_FREE (sra_type_decomp_cache);
3686 BITMAP_FREE (sra_type_inst_cache);
3687 obstack_free (&sra_obstack, NULL);
3688 return todoflags;
3689 }
3690
3691 static unsigned int
3692 tree_sra_early (void)
3693 {
3694 unsigned int ret;
3695
3696 early_sra = true;
3697 ret = tree_sra ();
3698 early_sra = false;
3699
3700 return ret & ~TODO_rebuild_alias;
3701 }
3702
3703 static bool
3704 gate_sra (void)
3705 {
3706 return flag_tree_sra != 0;
3707 }
3708
3709 struct gimple_opt_pass pass_sra_early =
3710 {
3711 {
3712 GIMPLE_PASS,
3713 "esra", /* name */
3714 gate_sra, /* gate */
3715 tree_sra_early, /* execute */
3716 NULL, /* sub */
3717 NULL, /* next */
3718 0, /* static_pass_number */
3719 TV_TREE_SRA, /* tv_id */
3720 PROP_cfg | PROP_ssa, /* properties_required */
3721 0, /* properties_provided */
3722 0, /* properties_destroyed */
3723 0, /* todo_flags_start */
3724 TODO_dump_func
3725 | TODO_update_ssa
3726 | TODO_ggc_collect
3727 | TODO_verify_ssa /* todo_flags_finish */
3728 }
3729 };
3730
3731 struct gimple_opt_pass pass_sra =
3732 {
3733 {
3734 GIMPLE_PASS,
3735 "sra", /* name */
3736 gate_sra, /* gate */
3737 tree_sra, /* execute */
3738 NULL, /* sub */
3739 NULL, /* next */
3740 0, /* static_pass_number */
3741 TV_TREE_SRA, /* tv_id */
3742 PROP_cfg | PROP_ssa, /* properties_required */
3743 0, /* properties_provided */
3744 0, /* properties_destroyed */
3745 0, /* todo_flags_start */
3746 TODO_dump_func
3747 | TODO_update_ssa
3748 | TODO_ggc_collect
3749 | TODO_verify_ssa /* todo_flags_finish */
3750 }
3751 };