gcc/cfgexpand.c (git blob 939aa58279d78184008a671370b3479f8598ba46)
1 /* A pass for lowering trees to RTL.
2 Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009
3 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "function.h"
30 #include "expr.h"
31 #include "langhooks.h"
32 #include "tree-flow.h"
33 #include "timevar.h"
34 #include "tree-dump.h"
35 #include "tree-pass.h"
36 #include "except.h"
37 #include "flags.h"
38 #include "diagnostic.h"
39 #include "toplev.h"
40 #include "debug.h"
41 #include "params.h"
42 #include "tree-inline.h"
43 #include "value-prof.h"
44 #include "target.h"
45 #include "ssaexpand.h"
46
47
48 /* This variable holds information helping the rewriting of SSA trees
49 into RTL. */
50 struct ssaexpand SA;
51
52 /* Return an expression tree corresponding to the RHS of GIMPLE
53 statement STMT. */
54
55 tree
56 gimple_assign_rhs_to_tree (gimple stmt)
57 {
58 tree t;
59 enum gimple_rhs_class grhs_class;
60
61 grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt));
62
63 if (grhs_class == GIMPLE_BINARY_RHS)
64 t = build2 (gimple_assign_rhs_code (stmt),
65 TREE_TYPE (gimple_assign_lhs (stmt)),
66 gimple_assign_rhs1 (stmt),
67 gimple_assign_rhs2 (stmt));
68 else if (grhs_class == GIMPLE_UNARY_RHS)
69 t = build1 (gimple_assign_rhs_code (stmt),
70 TREE_TYPE (gimple_assign_lhs (stmt)),
71 gimple_assign_rhs1 (stmt));
72 else if (grhs_class == GIMPLE_SINGLE_RHS)
73 t = gimple_assign_rhs1 (stmt);
74 else
75 gcc_unreachable ();
76
77 return t;
78 }
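/* For illustration: given the binary GIMPLE assignment "x_1 = a_2 + b_3",
   the function above returns the tree PLUS_EXPR <a_2, b_3> with the type
   of x_1; for a unary RHS a single-operand tree is built, and for a
   GIMPLE_SINGLE_RHS the RHS operand itself is returned unchanged.  */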
79
80 /* Return an expression tree corresponding to the PREDICATE of GIMPLE_COND
81 statement STMT. */
82
83 static tree
84 gimple_cond_pred_to_tree (gimple stmt)
85 {
86 /* We're sometimes presented with such code:
87 D.123_1 = x < y;
88 if (D.123_1 != 0)
89 ...
90 This would expand to two comparisons which then later might
91 be cleaned up by combine. But some pattern matchers like if-conversion
92 work better when there's only one compare, so make up for this
93 here as a special exception if TER would have made the same change. */
94 tree lhs = gimple_cond_lhs (stmt);
95 if (SA.values
96 && TREE_CODE (lhs) == SSA_NAME
97 && bitmap_bit_p (SA.values, SSA_NAME_VERSION (lhs)))
98 lhs = gimple_assign_rhs_to_tree (SSA_NAME_DEF_STMT (lhs));
99
100 return build2 (gimple_cond_code (stmt), boolean_type_node,
101 lhs, gimple_cond_rhs (stmt));
102 }
103
104 /* Helper for gimple_to_tree. Set EXPR_LOCATION for every expression
105 inside *TP. DATA is the location to set. */
106
107 static tree
108 set_expr_location_r (tree *tp, int *ws ATTRIBUTE_UNUSED, void *data)
109 {
110 location_t *loc = (location_t *) data;
111 if (EXPR_P (*tp))
112 SET_EXPR_LOCATION (*tp, *loc);
113
114 return NULL_TREE;
115 }
116
117
118 /* RTL expansion has traditionally been done on trees, so the
119 transition to doing it on GIMPLE tuples is very invasive to the RTL
120 expander. To facilitate the transition, this function takes a
121 GIMPLE tuple STMT and returns the same statement in the form of a
122 tree. */
123
124 static tree
125 gimple_to_tree (gimple stmt)
126 {
127 tree t;
128 int rn;
129 tree_ann_common_t ann;
130 location_t loc;
131
132 switch (gimple_code (stmt))
133 {
134 case GIMPLE_ASSIGN:
135 {
136 tree lhs = gimple_assign_lhs (stmt);
137
138 t = gimple_assign_rhs_to_tree (stmt);
139 t = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, t);
140 if (gimple_assign_nontemporal_move_p (stmt))
141 MOVE_NONTEMPORAL (t) = true;
142 }
143 break;
144
145 case GIMPLE_COND:
146 t = gimple_cond_pred_to_tree (stmt);
147 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
148 break;
149
150 case GIMPLE_GOTO:
151 t = build1 (GOTO_EXPR, void_type_node, gimple_goto_dest (stmt));
152 break;
153
154 case GIMPLE_LABEL:
155 t = build1 (LABEL_EXPR, void_type_node, gimple_label_label (stmt));
156 break;
157
158 case GIMPLE_RETURN:
159 {
160 tree retval = gimple_return_retval (stmt);
161
162 if (retval && retval != error_mark_node)
163 {
164 tree result = DECL_RESULT (current_function_decl);
165
166 /* If we are not returning the current function's RESULT_DECL,
167 build an assignment to it. */
168 if (retval != result)
169 {
170 /* I believe that a function's RESULT_DECL is unique. */
171 gcc_assert (TREE_CODE (retval) != RESULT_DECL);
172
173 retval = build2 (MODIFY_EXPR, TREE_TYPE (result),
174 result, retval);
175 }
176 }
177 t = build1 (RETURN_EXPR, void_type_node, retval);
178 }
179 break;
180
181 case GIMPLE_ASM:
182 {
183 size_t i, n;
184 tree out, in, cl;
185 const char *s;
186
187 out = NULL_TREE;
188 n = gimple_asm_noutputs (stmt);
189 if (n > 0)
190 {
191 t = out = gimple_asm_output_op (stmt, 0);
192 for (i = 1; i < n; i++)
193 {
194 TREE_CHAIN (t) = gimple_asm_output_op (stmt, i);
195 t = gimple_asm_output_op (stmt, i);
196 }
197 }
198
199 in = NULL_TREE;
200 n = gimple_asm_ninputs (stmt);
201 if (n > 0)
202 {
203 t = in = gimple_asm_input_op (stmt, 0);
204 for (i = 1; i < n; i++)
205 {
206 TREE_CHAIN (t) = gimple_asm_input_op (stmt, i);
207 t = gimple_asm_input_op (stmt, i);
208 }
209 }
210
211 cl = NULL_TREE;
212 n = gimple_asm_nclobbers (stmt);
213 if (n > 0)
214 {
215 t = cl = gimple_asm_clobber_op (stmt, 0);
216 for (i = 1; i < n; i++)
217 {
218 TREE_CHAIN (t) = gimple_asm_clobber_op (stmt, i);
219 t = gimple_asm_clobber_op (stmt, i);
220 }
221 }
222
223 s = gimple_asm_string (stmt);
224 t = build4 (ASM_EXPR, void_type_node, build_string (strlen (s), s),
225 out, in, cl);
226 ASM_VOLATILE_P (t) = gimple_asm_volatile_p (stmt);
227 ASM_INPUT_P (t) = gimple_asm_input_p (stmt);
228 }
229 break;
230
231 case GIMPLE_CALL:
232 {
233 size_t i;
234 tree fn;
235 tree_ann_common_t ann;
236
237 t = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3);
238
239 CALL_EXPR_FN (t) = gimple_call_fn (stmt);
240 TREE_TYPE (t) = gimple_call_return_type (stmt);
241 CALL_EXPR_STATIC_CHAIN (t) = gimple_call_chain (stmt);
242
243 for (i = 0; i < gimple_call_num_args (stmt); i++)
244 CALL_EXPR_ARG (t, i) = gimple_call_arg (stmt, i);
245
246 if (!(gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE)))
247 TREE_SIDE_EFFECTS (t) = 1;
248
249 if (gimple_call_flags (stmt) & ECF_NOTHROW)
250 TREE_NOTHROW (t) = 1;
251
252 CALL_EXPR_TAILCALL (t) = gimple_call_tail_p (stmt);
253 CALL_EXPR_RETURN_SLOT_OPT (t) = gimple_call_return_slot_opt_p (stmt);
254 CALL_FROM_THUNK_P (t) = gimple_call_from_thunk_p (stmt);
255 CALL_CANNOT_INLINE_P (t) = gimple_call_cannot_inline_p (stmt);
256 CALL_EXPR_VA_ARG_PACK (t) = gimple_call_va_arg_pack_p (stmt);
257
258 /* If the call has a LHS then create a MODIFY_EXPR to hold it. */
259 {
260 tree lhs = gimple_call_lhs (stmt);
261
262 if (lhs)
263 t = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, t);
264 }
265
266 /* Record the original call statement, as it may be used
267 to retrieve profile information during expansion. */
268
269 if ((fn = gimple_call_fndecl (stmt)) != NULL_TREE
270 && DECL_BUILT_IN (fn))
271 {
272 ann = get_tree_common_ann (t);
273 ann->stmt = stmt;
274 }
275 }
276 break;
277
278 case GIMPLE_SWITCH:
279 {
280 tree label_vec;
281 size_t i;
282 tree elt = gimple_switch_label (stmt, 0);
283
284 label_vec = make_tree_vec (gimple_switch_num_labels (stmt));
285
286 if (!CASE_LOW (elt) && !CASE_HIGH (elt))
287 {
288 for (i = 1; i < gimple_switch_num_labels (stmt); i++)
289 TREE_VEC_ELT (label_vec, i - 1) = gimple_switch_label (stmt, i);
290
291 /* The default case in a SWITCH_EXPR must be at the end of
292 the label vector. */
293 TREE_VEC_ELT (label_vec, i - 1) = gimple_switch_label (stmt, 0);
294 }
295 else
296 {
297 for (i = 0; i < gimple_switch_num_labels (stmt); i++)
298 TREE_VEC_ELT (label_vec, i) = gimple_switch_label (stmt, i);
299 }
300
301 t = build3 (SWITCH_EXPR, void_type_node, gimple_switch_index (stmt),
302 NULL, label_vec);
303 }
304 break;
305
306 case GIMPLE_NOP:
307 case GIMPLE_PREDICT:
308 t = build1 (NOP_EXPR, void_type_node, size_zero_node);
309 break;
310
311 case GIMPLE_RESX:
312 t = build_resx (gimple_resx_region (stmt));
313 break;
314
315 default:
316 if (errorcount == 0)
317 {
318 error ("Unrecognized GIMPLE statement during RTL expansion");
319 print_gimple_stmt (stderr, stmt, 4, 0);
320 gcc_unreachable ();
321 }
322 else
323 {
324 /* Ignore any bad gimple codes if we're going to die anyhow,
325 so we can at least set TREE_ASM_WRITTEN and have the rest
326 of compilation advance without sudden ICE death. */
327 t = build1 (NOP_EXPR, void_type_node, size_zero_node);
328 break;
329 }
330 }
331
332 /* If STMT is inside an exception region, record it in the generated
333 expression. */
334 rn = lookup_stmt_eh_region (stmt);
335 if (rn >= 0)
336 {
337 tree call = get_call_expr_in (t);
338
339 ann = get_tree_common_ann (t);
340 ann->rn = rn;
341
342 /* For a CALL_EXPR on the RHS of an assignment, calls.c looks up
343 the CALL_EXPR, not the assignment statement, for the EH region number. */
344 if (call && call != t)
345 {
346 ann = get_tree_common_ann (call);
347 ann->rn = rn;
348 }
349 }
350
351 /* Set EXPR_LOCATION in all the embedded expressions. */
352 loc = gimple_location (stmt);
353 walk_tree (&t, set_expr_location_r, (void *) &loc, NULL);
354
355 TREE_BLOCK (t) = gimple_block (stmt);
356
357 return t;
358 }
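/* For illustration: the GIMPLE_ASSIGN "x = y + z" is turned into
   MODIFY_EXPR <x, PLUS_EXPR <y, z>>, a GIMPLE_CALL with a LHS becomes a
   MODIFY_EXPR wrapping the rebuilt CALL_EXPR, and any statement inside an
   EH region additionally has the region number recorded in the tree's
   annotation so the RTL expanders can recover it.  */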
359
360
361 /* Release back to the GC the memory allocated by gimple_to_tree. */
362
363 static void
364 release_stmt_tree (gimple stmt, tree stmt_tree)
365 {
366 tree_ann_common_t ann;
367
368 switch (gimple_code (stmt))
369 {
370 case GIMPLE_ASSIGN:
371 if (get_gimple_rhs_class (gimple_expr_code (stmt)) != GIMPLE_SINGLE_RHS)
372 ggc_free (TREE_OPERAND (stmt_tree, 1));
373 break;
374 case GIMPLE_COND:
375 ggc_free (COND_EXPR_COND (stmt_tree));
376 break;
377 case GIMPLE_RETURN:
378 if (TREE_OPERAND (stmt_tree, 0)
379 && TREE_CODE (TREE_OPERAND (stmt_tree, 0)) == MODIFY_EXPR)
380 ggc_free (TREE_OPERAND (stmt_tree, 0));
381 break;
382 case GIMPLE_CALL:
383 if (gimple_call_lhs (stmt))
384 {
385 ann = tree_common_ann (TREE_OPERAND (stmt_tree, 1));
386 if (ann)
387 ggc_free (ann);
388 ggc_free (TREE_OPERAND (stmt_tree, 1));
389 }
390 break;
391 default:
392 break;
393 }
394 ann = tree_common_ann (stmt_tree);
395 if (ann)
396 ggc_free (ann);
397 ggc_free (stmt_tree);
398 }
399
400
401 /* Verify that there is exactly one jump instruction emitted since LAST and
402 attach a REG_BR_PROB note specifying the probability.
403 ??? We really ought to pass the probability down to RTL expanders and let it
404 re-distribute it when the conditional expands into multiple conditionals.
405 This is however difficult to do. */
406 void
407 add_reg_br_prob_note (rtx last, int probability)
408 {
409 if (profile_status == PROFILE_ABSENT)
410 return;
411 for (last = NEXT_INSN (last); last && NEXT_INSN (last); last = NEXT_INSN (last))
412 if (JUMP_P (last))
413 {
414 /* It is common to emit a condjump-around-jump sequence when we don't know
415 how to reverse the conditional. Special case this. */
416 if (!any_condjump_p (last)
417 || !JUMP_P (NEXT_INSN (last))
418 || !simplejump_p (NEXT_INSN (last))
419 || !NEXT_INSN (NEXT_INSN (last))
420 || !BARRIER_P (NEXT_INSN (NEXT_INSN (last)))
421 || !NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))
422 || !LABEL_P (NEXT_INSN (NEXT_INSN (NEXT_INSN (last))))
423 || NEXT_INSN (NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))))
424 goto failed;
425 gcc_assert (!find_reg_note (last, REG_BR_PROB, 0));
426 add_reg_note (last, REG_BR_PROB,
427 GEN_INT (REG_BR_PROB_BASE - probability));
428 return;
429 }
430 if (!last || !JUMP_P (last) || !any_condjump_p (last))
431 goto failed;
432 gcc_assert (!find_reg_note (last, REG_BR_PROB, 0));
433 add_reg_note (last, REG_BR_PROB, GEN_INT (probability));
434 return;
435 failed:
436 if (dump_file)
437 fprintf (dump_file, "Failed to add probability note\n");
438 }
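/* For illustration: when the conditional expands to a single conditional
   jump, the note carries PROBABILITY directly; when it expands to a
   conditional jump around an unconditional jump, the conditional jump is
   taken when the original condition is false, so it is annotated with the
   complement REG_BR_PROB_BASE - PROBABILITY.  */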
439
440
441 #ifndef STACK_ALIGNMENT_NEEDED
442 #define STACK_ALIGNMENT_NEEDED 1
443 #endif
444
445 #define SSAVAR(x) (TREE_CODE (x) == SSA_NAME ? SSA_NAME_VAR (x) : x)
446
447 /* Associate declaration T with storage space X. If T is not an
448 SSA name this is exactly SET_DECL_RTL; otherwise make the
449 partition of T associated with X. */
450 static inline void
451 set_rtl (tree t, rtx x)
452 {
453 if (TREE_CODE (t) == SSA_NAME)
454 {
455 SA.partition_to_pseudo[var_to_partition (SA.map, t)] = x;
456 if (x && !MEM_P (x))
457 set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (t), x);
458 }
459 else
460 SET_DECL_RTL (t, x);
461 }
462
463 /* This structure holds data relevant to one variable that will be
464 placed in a stack slot. */
465 struct stack_var
466 {
467 /* The variable. */
468 tree decl;
469
470 /* The offset of the variable. During partitioning, this is the
471 offset relative to the partition. After partitioning, this
472 is relative to the stack frame. */
473 HOST_WIDE_INT offset;
474
475 /* Initially, the size of the variable. Later, the size of the partition,
476 if this variable becomes its partition's representative. */
477 HOST_WIDE_INT size;
478
479 /* The *byte* alignment required for this variable. Or, as with the
480 size, the alignment of this partition. */
481 unsigned int alignb;
482
483 /* The partition representative. */
484 size_t representative;
485
486 /* The next stack variable in the partition, or EOC. */
487 size_t next;
488 };
489
490 #define EOC ((size_t)-1)
491
492 /* We have an array of such objects while deciding allocation. */
493 static struct stack_var *stack_vars;
494 static size_t stack_vars_alloc;
495 static size_t stack_vars_num;
496
497 /* An array of indices such that stack_vars[stack_vars_sorted[i]].size
498 is non-decreasing. */
499 static size_t *stack_vars_sorted;
500
501 /* We have an interference graph between such objects. This graph
502 is lower triangular. */
503 static bool *stack_vars_conflict;
504 static size_t stack_vars_conflict_alloc;
505
506 /* The phase of the stack frame. This is the known misalignment of
507 virtual_stack_vars_rtx from PREFERRED_STACK_BOUNDARY. That is,
508 (frame_offset+frame_phase) % PREFERRED_STACK_BOUNDARY == 0. */
509 static int frame_phase;
510
511 /* Used during expand_used_vars to remember if we saw any decls for
512 which we'd like to enable stack smashing protection. */
513 static bool has_protected_decls;
514
515 /* Used during expand_used_vars. Remember if we saw a character buffer
516 smaller than our cutoff threshold. Used for -Wstack-protector. */
517 static bool has_short_buffer;
518
519 /* Discover the byte alignment to use for DECL. Ignore alignment
520 we can't satisfy given the expected alignment of the stack boundary. */
521
522 static unsigned int
523 get_decl_align_unit (tree decl)
524 {
525 unsigned int align;
526
527 align = LOCAL_DECL_ALIGNMENT (decl);
528
529 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
530 align = MAX_SUPPORTED_STACK_ALIGNMENT;
531
532 if (SUPPORTS_STACK_ALIGNMENT)
533 {
534 if (crtl->stack_alignment_estimated < align)
535 {
536 gcc_assert(!crtl->stack_realign_processed);
537 crtl->stack_alignment_estimated = align;
538 }
539 }
540
541 /* stack_alignment_needed > PREFERRED_STACK_BOUNDARY is permitted.
542 So here we only make sure stack_alignment_needed >= align. */
543 if (crtl->stack_alignment_needed < align)
544 crtl->stack_alignment_needed = align;
545 if (crtl->max_used_stack_slot_alignment < crtl->stack_alignment_needed)
546 crtl->max_used_stack_slot_alignment = crtl->stack_alignment_needed;
547
548 return align / BITS_PER_UNIT;
549 }
550
551 /* Allocate SIZE bytes at byte alignment ALIGN from the stack frame.
552 Return the frame offset. */
553
554 static HOST_WIDE_INT
555 alloc_stack_frame_space (HOST_WIDE_INT size, HOST_WIDE_INT align)
556 {
557 HOST_WIDE_INT offset, new_frame_offset;
558
559 new_frame_offset = frame_offset;
560 if (FRAME_GROWS_DOWNWARD)
561 {
562 new_frame_offset -= size + frame_phase;
563 new_frame_offset &= -align;
564 new_frame_offset += frame_phase;
565 offset = new_frame_offset;
566 }
567 else
568 {
569 new_frame_offset -= frame_phase;
570 new_frame_offset += align - 1;
571 new_frame_offset &= -align;
572 new_frame_offset += frame_phase;
573 offset = new_frame_offset;
574 new_frame_offset += size;
575 }
576 frame_offset = new_frame_offset;
577
578 if (frame_offset_overflow (frame_offset, cfun->decl))
579 frame_offset = offset = 0;
580
581 return offset;
582 }
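/* For illustration: with FRAME_GROWS_DOWNWARD, frame_phase == 0,
   frame_offset == -16, SIZE == 20 and ALIGN == 8, the new frame offset is
   -16 - 20 == -36, which is rounded down to the 8-byte-aligned value -40;
   the function returns -40 and leaves frame_offset there.  When the frame
   grows upward the offset is instead rounded up before the variable's
   SIZE is added.  */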
583
584 /* Accumulate DECL into STACK_VARS. */
585
586 static void
587 add_stack_var (tree decl)
588 {
589 if (stack_vars_num >= stack_vars_alloc)
590 {
591 if (stack_vars_alloc)
592 stack_vars_alloc = stack_vars_alloc * 3 / 2;
593 else
594 stack_vars_alloc = 32;
595 stack_vars
596 = XRESIZEVEC (struct stack_var, stack_vars, stack_vars_alloc);
597 }
598 stack_vars[stack_vars_num].decl = decl;
599 stack_vars[stack_vars_num].offset = 0;
600 stack_vars[stack_vars_num].size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1);
601 stack_vars[stack_vars_num].alignb = get_decl_align_unit (SSAVAR (decl));
602
603 /* All variables are initially in their own partition. */
604 stack_vars[stack_vars_num].representative = stack_vars_num;
605 stack_vars[stack_vars_num].next = EOC;
606
607 /* Ensure that this decl doesn't get put onto the list twice. */
608 set_rtl (decl, pc_rtx);
609
610 stack_vars_num++;
611 }
612
613 /* Compute the linear index of a lower-triangular coordinate (I, J). */
614
615 static size_t
616 triangular_index (size_t i, size_t j)
617 {
618 if (i < j)
619 {
620 size_t t;
621 t = i, i = j, j = t;
622 }
623 return (i * (i + 1)) / 2 + j;
624 }
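/* For illustration: triangular_index (3, 1) == 3 * (3 + 1) / 2 + 1 == 7,
   and because the arguments are swapped whenever I < J,
   triangular_index (1, 3) maps to the same slot.  The lower-triangular
   array therefore acts as a symmetric conflict matrix that stores each
   unordered pair only once.  */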
625
626 /* Ensure that STACK_VARS_CONFLICT is large enough for N objects. */
627
628 static void
629 resize_stack_vars_conflict (size_t n)
630 {
631 size_t size = triangular_index (n-1, n-1) + 1;
632
633 if (size <= stack_vars_conflict_alloc)
634 return;
635
636 stack_vars_conflict = XRESIZEVEC (bool, stack_vars_conflict, size);
637 memset (stack_vars_conflict + stack_vars_conflict_alloc, 0,
638 (size - stack_vars_conflict_alloc) * sizeof (bool));
639 stack_vars_conflict_alloc = size;
640 }
641
642 /* Make the decls associated with luids X and Y conflict. */
643
644 static void
645 add_stack_var_conflict (size_t x, size_t y)
646 {
647 size_t index = triangular_index (x, y);
648 gcc_assert (index < stack_vars_conflict_alloc);
649 stack_vars_conflict[index] = true;
650 }
651
652 /* Check whether the decls associated with luids X and Y conflict. */
653
654 static bool
655 stack_var_conflict_p (size_t x, size_t y)
656 {
657 size_t index = triangular_index (x, y);
658 gcc_assert (index < stack_vars_conflict_alloc);
659 return stack_vars_conflict[index];
660 }
661
662 /* Returns true if TYPE is or contains a union type. */
663
664 static bool
665 aggregate_contains_union_type (tree type)
666 {
667 tree field;
668
669 if (TREE_CODE (type) == UNION_TYPE
670 || TREE_CODE (type) == QUAL_UNION_TYPE)
671 return true;
672 if (TREE_CODE (type) == ARRAY_TYPE)
673 return aggregate_contains_union_type (TREE_TYPE (type));
674 if (TREE_CODE (type) != RECORD_TYPE)
675 return false;
676
677 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
678 if (TREE_CODE (field) == FIELD_DECL)
679 if (aggregate_contains_union_type (TREE_TYPE (field)))
680 return true;
681
682 return false;
683 }
684
685 /* A subroutine of expand_used_vars. If two variables X and Y have alias
686 sets that do not conflict, then we do add a conflict for these variables
687 in the interference graph. We also need to make sure to add conflicts
688 for structures that contain unions. Otherwise RTL alias analysis comes
689 along and, due to type based aliasing rules, decides that for two
690 overlapping union temporaries { short s; int i; } accesses to the same
691 memory through different types may not alias, and happily reorders
692 stores across the life-time boundaries of the temporaries (see PR25654).
693 We also have to mind MEM_IN_STRUCT_P and MEM_SCALAR_P. */
694
695 static void
696 add_alias_set_conflicts (void)
697 {
698 size_t i, j, n = stack_vars_num;
699
700 for (i = 0; i < n; ++i)
701 {
702 tree type_i = TREE_TYPE (stack_vars[i].decl);
703 bool aggr_i = AGGREGATE_TYPE_P (type_i);
704 bool contains_union;
705
706 contains_union = aggregate_contains_union_type (type_i);
707 for (j = 0; j < i; ++j)
708 {
709 tree type_j = TREE_TYPE (stack_vars[j].decl);
710 bool aggr_j = AGGREGATE_TYPE_P (type_j);
711 if (aggr_i != aggr_j
712 /* Either the objects conflict by means of type based
713 aliasing rules, or we need to add a conflict. */
714 || !objects_must_conflict_p (type_i, type_j)
715 /* In case the types do not conflict ensure that access
716 to elements will conflict. In case of unions we have
717 to be careful as type based aliasing rules may say
718 access to the same memory does not conflict. So play
719 safe and add a conflict in this case. */
720 || contains_union)
721 add_stack_var_conflict (i, j);
722 }
723 }
724 }
725
726 /* A subroutine of partition_stack_vars. A comparison function for qsort,
727 sorting an array of indices by the size and type of the object. */
728
729 static int
730 stack_var_size_cmp (const void *a, const void *b)
731 {
732 HOST_WIDE_INT sa = stack_vars[*(const size_t *)a].size;
733 HOST_WIDE_INT sb = stack_vars[*(const size_t *)b].size;
734 tree decla, declb;
735 unsigned int uida, uidb;
736
737 if (sa < sb)
738 return -1;
739 if (sa > sb)
740 return 1;
741 decla = stack_vars[*(const size_t *)a].decl;
742 declb = stack_vars[*(const size_t *)b].decl;
743 /* For stack variables of the same size, use an id of the decls
744 to make the sort stable. Two SSA names are compared by their
745 version, SSA names come before non-SSA names, and two normal
746 decls are compared by their DECL_UID. */
747 if (TREE_CODE (decla) == SSA_NAME)
748 {
749 if (TREE_CODE (declb) == SSA_NAME)
750 uida = SSA_NAME_VERSION (decla), uidb = SSA_NAME_VERSION (declb);
751 else
752 return -1;
753 }
754 else if (TREE_CODE (declb) == SSA_NAME)
755 return 1;
756 else
757 uida = DECL_UID (decla), uidb = DECL_UID (declb);
758 if (uida < uidb)
759 return -1;
760 if (uida > uidb)
761 return 1;
762 return 0;
763 }
764
765 /* A subroutine of partition_stack_vars. The UNION portion of a UNION/FIND
766 partitioning algorithm. Partitions A and B are known to be non-conflicting.
767 Merge them into a single partition A.
768
769 At the same time, add OFFSET to all variables in partition B. At the end
770 of the partitioning process we'll have a nice block that is easy to lay out within
771 the stack frame. */
772
773 static void
774 union_stack_vars (size_t a, size_t b, HOST_WIDE_INT offset)
775 {
776 size_t i, last;
777
778 /* Update each element of partition B with the given offset,
779 and merge them into partition A. */
780 for (last = i = b; i != EOC; last = i, i = stack_vars[i].next)
781 {
782 stack_vars[i].offset += offset;
783 stack_vars[i].representative = a;
784 }
785 stack_vars[last].next = stack_vars[a].next;
786 stack_vars[a].next = b;
787
788 /* Update the required alignment of partition A to account for B. */
789 if (stack_vars[a].alignb < stack_vars[b].alignb)
790 stack_vars[a].alignb = stack_vars[b].alignb;
791
792 /* Update the interference graph and merge the conflicts. */
793 for (last = stack_vars_num, i = 0; i < last; ++i)
794 if (stack_var_conflict_p (b, i))
795 add_stack_var_conflict (a, i);
796 }
797
798 /* A subroutine of expand_used_vars. Binpack the variables into
799 partitions constrained by the interference graph. The overall
800 algorithm used is as follows:
801
802 Sort the objects by size.
803 For each object A {
804 S = size(A)
805 O = 0
806 loop {
807 Look for the largest non-conflicting object B with size <= S.
808 UNION (A, B)
809 offset(B) = O
810 O += size(B)
811 S -= size(B)
812 }
813 }
814 */
815
816 static void
817 partition_stack_vars (void)
818 {
819 size_t si, sj, n = stack_vars_num;
820
821 stack_vars_sorted = XNEWVEC (size_t, stack_vars_num);
822 for (si = 0; si < n; ++si)
823 stack_vars_sorted[si] = si;
824
825 if (n == 1)
826 return;
827
828 qsort (stack_vars_sorted, n, sizeof (size_t), stack_var_size_cmp);
829
830 /* Special case: detect when all variables conflict, and thus we can't
831 do anything during the partitioning loop. It isn't uncommon (with
832 C code at least) to declare all variables at the top of the function,
833 and if we're not inlining, then all variables will be in the same scope.
834 Take advantage of very fast libc routines for this scan. */
835 gcc_assert (sizeof(bool) == sizeof(char));
836 if (memchr (stack_vars_conflict, false, stack_vars_conflict_alloc) == NULL)
837 return;
838
839 for (si = 0; si < n; ++si)
840 {
841 size_t i = stack_vars_sorted[si];
842 HOST_WIDE_INT isize = stack_vars[i].size;
843 HOST_WIDE_INT offset = 0;
844
845 for (sj = si; sj-- > 0; )
846 {
847 size_t j = stack_vars_sorted[sj];
848 HOST_WIDE_INT jsize = stack_vars[j].size;
849 unsigned int jalign = stack_vars[j].alignb;
850
851 /* Ignore objects that aren't partition representatives. */
852 if (stack_vars[j].representative != j)
853 continue;
854
855 /* Ignore objects too large for the remaining space. */
856 if (isize < jsize)
857 continue;
858
859 /* Ignore conflicting objects. */
860 if (stack_var_conflict_p (i, j))
861 continue;
862
863 /* Refine the remaining space check to include alignment. */
864 if (offset & (jalign - 1))
865 {
866 HOST_WIDE_INT toff = offset;
867 toff += jalign - 1;
868 toff &= -(HOST_WIDE_INT)jalign;
869 if (isize - (toff - offset) < jsize)
870 continue;
871
872 isize -= toff - offset;
873 offset = toff;
874 }
875
876 /* UNION the objects, placing J at OFFSET. */
877 union_stack_vars (i, j, offset);
878 offset += jsize;
879 isize -= jsize;
880 if (isize == 0)
881 break;
882 }
883 }
884 }
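/* For illustration: given three mutually non-conflicting variables of
   sizes 32, 8 and 8, the 32-byte object (visited last, as the array is
   sorted by increasing size) becomes the representative; one 8-byte
   object is placed at offset 0 and, assuming 8-byte alignment, the other
   at offset 8 within its 32 bytes, leaving 16 bytes unshared.  An object
   that conflicts with the representative, directly or through a
   previously merged member, is skipped and later forms or joins another
   partition.  */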
885
886 /* A debugging aid for expand_used_vars. Dump the generated partitions. */
887
888 static void
889 dump_stack_var_partition (void)
890 {
891 size_t si, i, j, n = stack_vars_num;
892
893 for (si = 0; si < n; ++si)
894 {
895 i = stack_vars_sorted[si];
896
897 /* Skip variables that aren't partition representatives, for now. */
898 if (stack_vars[i].representative != i)
899 continue;
900
901 fprintf (dump_file, "Partition %lu: size " HOST_WIDE_INT_PRINT_DEC
902 " align %u\n", (unsigned long) i, stack_vars[i].size,
903 stack_vars[i].alignb);
904
905 for (j = i; j != EOC; j = stack_vars[j].next)
906 {
907 fputc ('\t', dump_file);
908 print_generic_expr (dump_file, stack_vars[j].decl, dump_flags);
909 fprintf (dump_file, ", offset " HOST_WIDE_INT_PRINT_DEC "\n",
910 stack_vars[j].offset);
911 }
912 }
913 }
914
915 /* Assign rtl to DECL at frame offset OFFSET. */
916
917 static void
918 expand_one_stack_var_at (tree decl, HOST_WIDE_INT offset)
919 {
920 /* Alignment is unsigned. */
921 unsigned HOST_WIDE_INT align;
922 rtx x;
923
924 /* If this fails, we've overflowed the stack frame. Error nicely? */
925 gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
926
927 x = plus_constant (virtual_stack_vars_rtx, offset);
928 x = gen_rtx_MEM (DECL_MODE (SSAVAR (decl)), x);
929
930 if (TREE_CODE (decl) != SSA_NAME)
931 {
932 /* Set the alignment we actually gave this decl if it isn't an SSA name.
933 If it is, we generate stack slots only accidentally, so it isn't as
934 important; we'll simply use the alignment that is already set. */
935 offset -= frame_phase;
936 align = offset & -offset;
937 align *= BITS_PER_UNIT;
938 if (align == 0)
939 align = STACK_BOUNDARY;
940 else if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
941 align = MAX_SUPPORTED_STACK_ALIGNMENT;
942
943 DECL_ALIGN (decl) = align;
944 DECL_USER_ALIGN (decl) = 0;
945 }
946
947 set_mem_attributes (x, SSAVAR (decl), true);
948 set_rtl (decl, x);
949 }
950
951 /* A subroutine of expand_used_vars. Give each partition representative
952 a unique location within the stack frame. Update each partition member
953 with that location. */
954
955 static void
956 expand_stack_vars (bool (*pred) (tree))
957 {
958 size_t si, i, j, n = stack_vars_num;
959
960 for (si = 0; si < n; ++si)
961 {
962 HOST_WIDE_INT offset;
963
964 i = stack_vars_sorted[si];
965
966 /* Skip variables that aren't partition representatives, for now. */
967 if (stack_vars[i].representative != i)
968 continue;
969
970 /* Skip variables that have already had rtl assigned. See also
971 add_stack_var where we perpetrate this pc_rtx hack. */
972 if ((TREE_CODE (stack_vars[i].decl) == SSA_NAME
973 ? SA.partition_to_pseudo[var_to_partition (SA.map, stack_vars[i].decl)]
974 : DECL_RTL (stack_vars[i].decl)) != pc_rtx)
975 continue;
976
977 /* Check the predicate to see whether this variable should be
978 allocated in this pass. */
979 if (pred && !pred (stack_vars[i].decl))
980 continue;
981
982 offset = alloc_stack_frame_space (stack_vars[i].size,
983 stack_vars[i].alignb);
984
985 /* Create rtl for each variable based on their location within the
986 partition. */
987 for (j = i; j != EOC; j = stack_vars[j].next)
988 {
989 gcc_assert (stack_vars[j].offset <= stack_vars[i].size);
990 expand_one_stack_var_at (stack_vars[j].decl,
991 stack_vars[j].offset + offset);
992 }
993 }
994 }
995
996 /* Account for the sizes of all partitions and reset their DECL_RTLs. */
997 static HOST_WIDE_INT
998 account_stack_vars (void)
999 {
1000 size_t si, j, i, n = stack_vars_num;
1001 HOST_WIDE_INT size = 0;
1002
1003 for (si = 0; si < n; ++si)
1004 {
1005 i = stack_vars_sorted[si];
1006
1007 /* Skip variables that aren't partition representatives, for now. */
1008 if (stack_vars[i].representative != i)
1009 continue;
1010
1011 size += stack_vars[i].size;
1012 for (j = i; j != EOC; j = stack_vars[j].next)
1013 set_rtl (stack_vars[j].decl, NULL);
1014 }
1015 return size;
1016 }
1017
1018 /* A subroutine of expand_one_var. Called to immediately assign rtl
1019 to a variable to be allocated in the stack frame. */
1020
1021 static void
1022 expand_one_stack_var (tree var)
1023 {
1024 HOST_WIDE_INT size, offset, align;
1025
1026 size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (var)), 1);
1027 align = get_decl_align_unit (SSAVAR (var));
1028 offset = alloc_stack_frame_space (size, align);
1029
1030 expand_one_stack_var_at (var, offset);
1031 }
1032
1033 /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL
1034 that will reside in a hard register. */
1035
1036 static void
1037 expand_one_hard_reg_var (tree var)
1038 {
1039 rest_of_decl_compilation (var, 0, 0);
1040 }
1041
1042 /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL
1043 that will reside in a pseudo register. */
1044
1045 static void
1046 expand_one_register_var (tree var)
1047 {
1048 tree decl = SSAVAR (var);
1049 tree type = TREE_TYPE (decl);
1050 int unsignedp = TYPE_UNSIGNED (type);
1051 enum machine_mode reg_mode
1052 = promote_mode (type, DECL_MODE (decl), &unsignedp, 0);
1053 rtx x = gen_reg_rtx (reg_mode);
1054
1055 set_rtl (var, x);
1056
1057 /* Note if the object is a user variable. */
1058 if (!DECL_ARTIFICIAL (decl))
1059 mark_user_reg (x);
1060
1061 if (POINTER_TYPE_P (type))
1062 mark_reg_pointer (x, TYPE_ALIGN (TREE_TYPE (type)));
1063 }
1064
1065 /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL that
1066 has some associated error, e.g. its type is error-mark. We just need
1067 to pick something that won't crash the rest of the compiler. */
1068
1069 static void
1070 expand_one_error_var (tree var)
1071 {
1072 enum machine_mode mode = DECL_MODE (var);
1073 rtx x;
1074
1075 if (mode == BLKmode)
1076 x = gen_rtx_MEM (BLKmode, const0_rtx);
1077 else if (mode == VOIDmode)
1078 x = const0_rtx;
1079 else
1080 x = gen_reg_rtx (mode);
1081
1082 SET_DECL_RTL (var, x);
1083 }
1084
1085 /* A subroutine of expand_one_var. VAR is a variable that will be
1086 allocated to the local stack frame. Return true if we wish to
1087 add VAR to STACK_VARS so that it will be coalesced with other
1088 variables. Return false to allocate VAR immediately.
1089
1090 This function is used to reduce the number of variables considered
1091 for coalescing, which reduces the size of the quadratic problem. */
1092
1093 static bool
1094 defer_stack_allocation (tree var, bool toplevel)
1095 {
1096 /* If stack protection is enabled, *all* stack variables must be deferred,
1097 so that we can re-order the strings to the top of the frame. */
1098 if (flag_stack_protect)
1099 return true;
1100
1101 /* Variables in the outermost scope automatically conflict with
1102 every other variable. The only reason to want to defer them
1103 at all is that, after sorting, we can more efficiently pack
1104 small variables in the stack frame. Continue to defer at -O2. */
1105 if (toplevel && optimize < 2)
1106 return false;
1107
1108 /* Without optimization, *most* variables are allocated from the
1109 stack, which makes the quadratic problem large exactly when we
1110 want compilation to proceed as quickly as possible. On the
1111 other hand, we don't want the function's stack frame size to
1112 get completely out of hand. So we avoid adding scalars and
1113 "small" aggregates to the list at all. */
1114 if (optimize == 0 && tree_low_cst (DECL_SIZE_UNIT (var), 1) < 32)
1115 return false;
1116
1117 return true;
1118 }
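/* For illustration: with -fstack-protector every variable is deferred;
   otherwise variables in the outermost scope are allocated immediately
   below -O2, and without optimization block-scope variables smaller than
   32 bytes are allocated immediately as well, which keeps the quadratic
   packing problem small.  */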
1119
1120 /* A subroutine of expand_used_vars. Expand one variable according to
1121 its flavor. Variables to be placed on the stack are not actually
1122 expanded yet, merely recorded.
1123 When REALLY_EXPAND is false, only add stack variables to be allocated.
1124 Return the amount of stack space this variable is supposed to take.
1125 */
1126
1127 static HOST_WIDE_INT
1128 expand_one_var (tree var, bool toplevel, bool really_expand)
1129 {
1130 tree origvar = var;
1131 var = SSAVAR (var);
1132
1133 if (SUPPORTS_STACK_ALIGNMENT
1134 && TREE_TYPE (var) != error_mark_node
1135 && TREE_CODE (var) == VAR_DECL)
1136 {
1137 unsigned int align;
1138
1139 /* Because we don't know if VAR will be in a register or on the stack,
1140 we conservatively assume it will be on the stack even if VAR is
1141 eventually put into a register after the RA pass. For non-automatic
1142 variables, which won't be on stack, we collect alignment of
1143 type and ignore user specified alignment. */
1144 if (TREE_STATIC (var) || DECL_EXTERNAL (var))
1145 align = TYPE_ALIGN (TREE_TYPE (var));
1146 else
1147 align = DECL_ALIGN (var);
1148
1149 if (crtl->stack_alignment_estimated < align)
1150 {
1151 /* stack_alignment_estimated shouldn't change after stack
1152 realign decision has been made. */
1153 gcc_assert(!crtl->stack_realign_processed);
1154 crtl->stack_alignment_estimated = align;
1155 }
1156 }
1157
1158 if (TREE_CODE (origvar) == SSA_NAME)
1159 {
1160 gcc_assert (TREE_CODE (var) != VAR_DECL
1161 || (!DECL_EXTERNAL (var)
1162 && !DECL_HAS_VALUE_EXPR_P (var)
1163 && !TREE_STATIC (var)
1164 && !DECL_RTL_SET_P (var)
1165 && TREE_TYPE (var) != error_mark_node
1166 && !DECL_HARD_REGISTER (var)
1167 && really_expand));
1168 }
1169 if (TREE_CODE (var) != VAR_DECL && TREE_CODE (origvar) != SSA_NAME)
1170 ;
1171 else if (DECL_EXTERNAL (var))
1172 ;
1173 else if (DECL_HAS_VALUE_EXPR_P (var))
1174 ;
1175 else if (TREE_STATIC (var))
1176 ;
1177 else if (DECL_RTL_SET_P (var))
1178 ;
1179 else if (TREE_TYPE (var) == error_mark_node)
1180 {
1181 if (really_expand)
1182 expand_one_error_var (var);
1183 }
1184 else if (TREE_CODE (var) == VAR_DECL && DECL_HARD_REGISTER (var))
1185 {
1186 if (really_expand)
1187 expand_one_hard_reg_var (var);
1188 }
1189 else if (use_register_for_decl (var))
1190 {
1191 if (really_expand)
1192 expand_one_register_var (origvar);
1193 }
1194 else if (defer_stack_allocation (var, toplevel))
1195 add_stack_var (origvar);
1196 else
1197 {
1198 if (really_expand)
1199 expand_one_stack_var (origvar);
1200 return tree_low_cst (DECL_SIZE_UNIT (var), 1);
1201 }
1202 return 0;
1203 }
1204
1205 /* A subroutine of expand_used_vars. Walk down through the BLOCK tree
1206 expanding variables. Those variables that can be put into registers
1207 are allocated pseudos; those that can't are put on the stack.
1208
1209 TOPLEVEL is true if this is the outermost BLOCK. */
1210
1211 static void
1212 expand_used_vars_for_block (tree block, bool toplevel)
1213 {
1214 size_t i, j, old_sv_num, this_sv_num, new_sv_num;
1215 tree t;
1216
1217 old_sv_num = toplevel ? 0 : stack_vars_num;
1218
1219 /* Expand all variables at this level. */
1220 for (t = BLOCK_VARS (block); t ; t = TREE_CHAIN (t))
1221 if (TREE_USED (t))
1222 expand_one_var (t, toplevel, true);
1223
1224 this_sv_num = stack_vars_num;
1225
1226 /* Expand all variables at containing levels. */
1227 for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
1228 expand_used_vars_for_block (t, false);
1229
1230 /* Since we do not track exact variable lifetimes (which is not even
1231 possible for variables whose address escapes), we mirror the block
1232 tree in the interference graph. Here we cause all variables at this
1233 level, and all sublevels, to conflict. Do make certain that a
1234 variable conflicts with itself. */
1235 if (old_sv_num < this_sv_num)
1236 {
1237 new_sv_num = stack_vars_num;
1238 resize_stack_vars_conflict (new_sv_num);
1239
1240 for (i = old_sv_num; i < new_sv_num; ++i)
1241 for (j = i < this_sv_num ? i+1 : this_sv_num; j-- > old_sv_num ;)
1242 add_stack_var_conflict (i, j);
1243 }
1244 }
1245
1246 /* A subroutine of expand_used_vars. Walk down through the BLOCK tree
1247 and clear TREE_USED on all local variables. */
1248
1249 static void
1250 clear_tree_used (tree block)
1251 {
1252 tree t;
1253
1254 for (t = BLOCK_VARS (block); t ; t = TREE_CHAIN (t))
1255 /* if (!TREE_STATIC (t) && !DECL_EXTERNAL (t)) */
1256 TREE_USED (t) = 0;
1257
1258 for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
1259 clear_tree_used (t);
1260 }
1261
1262 /* Examine TYPE and determine a bit mask of the following features. */
1263
1264 #define SPCT_HAS_LARGE_CHAR_ARRAY 1
1265 #define SPCT_HAS_SMALL_CHAR_ARRAY 2
1266 #define SPCT_HAS_ARRAY 4
1267 #define SPCT_HAS_AGGREGATE 8
1268
1269 static unsigned int
1270 stack_protect_classify_type (tree type)
1271 {
1272 unsigned int ret = 0;
1273 tree t;
1274
1275 switch (TREE_CODE (type))
1276 {
1277 case ARRAY_TYPE:
1278 t = TYPE_MAIN_VARIANT (TREE_TYPE (type));
1279 if (t == char_type_node
1280 || t == signed_char_type_node
1281 || t == unsigned_char_type_node)
1282 {
1283 unsigned HOST_WIDE_INT max = PARAM_VALUE (PARAM_SSP_BUFFER_SIZE);
1284 unsigned HOST_WIDE_INT len;
1285
1286 if (!TYPE_SIZE_UNIT (type)
1287 || !host_integerp (TYPE_SIZE_UNIT (type), 1))
1288 len = max;
1289 else
1290 len = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
1291
1292 if (len < max)
1293 ret = SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY;
1294 else
1295 ret = SPCT_HAS_LARGE_CHAR_ARRAY | SPCT_HAS_ARRAY;
1296 }
1297 else
1298 ret = SPCT_HAS_ARRAY;
1299 break;
1300
1301 case UNION_TYPE:
1302 case QUAL_UNION_TYPE:
1303 case RECORD_TYPE:
1304 ret = SPCT_HAS_AGGREGATE;
1305 for (t = TYPE_FIELDS (type); t ; t = TREE_CHAIN (t))
1306 if (TREE_CODE (t) == FIELD_DECL)
1307 ret |= stack_protect_classify_type (TREE_TYPE (t));
1308 break;
1309
1310 default:
1311 break;
1312 }
1313
1314 return ret;
1315 }
1316
1317 /* Return nonzero if DECL should be segregated into the "vulnerable" upper
1318 part of the local stack frame. Remember if we ever return nonzero for
1319 any variable in this function. The return value is the phase number in
1320 which the variable should be allocated. */
1321
1322 static int
1323 stack_protect_decl_phase (tree decl)
1324 {
1325 unsigned int bits = stack_protect_classify_type (TREE_TYPE (decl));
1326 int ret = 0;
1327
1328 if (bits & SPCT_HAS_SMALL_CHAR_ARRAY)
1329 has_short_buffer = true;
1330
1331 if (flag_stack_protect == 2)
1332 {
1333 if ((bits & (SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_LARGE_CHAR_ARRAY))
1334 && !(bits & SPCT_HAS_AGGREGATE))
1335 ret = 1;
1336 else if (bits & SPCT_HAS_ARRAY)
1337 ret = 2;
1338 }
1339 else
1340 ret = (bits & SPCT_HAS_LARGE_CHAR_ARRAY) != 0;
1341
1342 if (ret)
1343 has_protected_decls = true;
1344
1345 return ret;
1346 }
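/* For illustration, assuming the default ssp-buffer-size of 8: a plain
   "char buf[64]" is phase 1 under both -fstack-protector and
   -fstack-protector-all, a plain "char buf[4]" is phase 1 only under
   -fstack-protector-all, and a structure containing a character array is
   pushed to phase 2 under -fstack-protector-all because
   SPCT_HAS_AGGREGATE is set for it.  */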
1347
1348 /* Two helper routines that check for phase 1 and phase 2. These are used
1349 as callbacks for expand_stack_vars. */
1350
1351 static bool
1352 stack_protect_decl_phase_1 (tree decl)
1353 {
1354 return stack_protect_decl_phase (decl) == 1;
1355 }
1356
1357 static bool
1358 stack_protect_decl_phase_2 (tree decl)
1359 {
1360 return stack_protect_decl_phase (decl) == 2;
1361 }
1362
1363 /* Ensure that variables in different stack protection phases conflict
1364 so that they are not merged and share the same stack slot. */
1365
1366 static void
1367 add_stack_protection_conflicts (void)
1368 {
1369 size_t i, j, n = stack_vars_num;
1370 unsigned char *phase;
1371
1372 phase = XNEWVEC (unsigned char, n);
1373 for (i = 0; i < n; ++i)
1374 phase[i] = stack_protect_decl_phase (stack_vars[i].decl);
1375
1376 for (i = 0; i < n; ++i)
1377 {
1378 unsigned char ph_i = phase[i];
1379 for (j = 0; j < i; ++j)
1380 if (ph_i != phase[j])
1381 add_stack_var_conflict (i, j);
1382 }
1383
1384 XDELETEVEC (phase);
1385 }
1386
1387 /* Create a decl for the guard at the top of the stack frame. */
1388
1389 static void
1390 create_stack_guard (void)
1391 {
1392 tree guard = build_decl (VAR_DECL, NULL, ptr_type_node);
1393 TREE_THIS_VOLATILE (guard) = 1;
1394 TREE_USED (guard) = 1;
1395 expand_one_stack_var (guard);
1396 crtl->stack_protect_guard = guard;
1397 }
1398
1399 /* A subroutine of estimated_stack_frame_size. Walk down through the
1400 BLOCK tree accounting for the stack usage of the variables, without
1401 actually expanding them.
1402
1403 TOPLEVEL is true if this is the outermost BLOCK. */
1404
1405 static HOST_WIDE_INT
1406 account_used_vars_for_block (tree block, bool toplevel)
1407 {
1408 size_t i, j, old_sv_num, this_sv_num, new_sv_num;
1409 tree t;
1410 HOST_WIDE_INT size = 0;
1411
1412 old_sv_num = toplevel ? 0 : stack_vars_num;
1413
1414 /* Account for all variables at this level. */
1415 for (t = BLOCK_VARS (block); t ; t = TREE_CHAIN (t))
1416 if (TREE_USED (t))
1417 size += expand_one_var (t, toplevel, false);
1418
1419 this_sv_num = stack_vars_num;
1420
1421 /* Account for all variables at contained levels. */
1422 for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
1423 size += account_used_vars_for_block (t, false);
1424
1425 /* Since we do not track exact variable lifetimes (which is not even
1426 possible for variables whose address escapes), we mirror the block
1427 tree in the interference graph. Here we cause all variables at this
1428 level, and all sublevels, to conflict. Do make certain that a
1429 variable conflicts with itself. */
1430 if (old_sv_num < this_sv_num)
1431 {
1432 new_sv_num = stack_vars_num;
1433 resize_stack_vars_conflict (new_sv_num);
1434
1435 for (i = old_sv_num; i < new_sv_num; ++i)
1436 for (j = i < this_sv_num ? i+1 : this_sv_num; j-- > old_sv_num ;)
1437 add_stack_var_conflict (i, j);
1438 }
1439 return size;
1440 }
1441
1442 /* Prepare for expanding variables. */
1443 static void
1444 init_vars_expansion (void)
1445 {
1446 tree t;
1447 /* Set TREE_USED on all variables in the local_decls. */
1448 for (t = cfun->local_decls; t; t = TREE_CHAIN (t))
1449 TREE_USED (TREE_VALUE (t)) = 1;
1450
1451 /* Clear TREE_USED on all variables associated with a block scope. */
1452 clear_tree_used (DECL_INITIAL (current_function_decl));
1453
1454 /* Initialize local stack smashing state. */
1455 has_protected_decls = false;
1456 has_short_buffer = false;
1457 }
1458
1459 /* Free up stack variable graph data. */
1460 static void
1461 fini_vars_expansion (void)
1462 {
1463 XDELETEVEC (stack_vars);
1464 XDELETEVEC (stack_vars_sorted);
1465 XDELETEVEC (stack_vars_conflict);
1466 stack_vars = NULL;
1467 stack_vars_alloc = stack_vars_num = 0;
1468 stack_vars_conflict = NULL;
1469 stack_vars_conflict_alloc = 0;
1470 }
1471
1472 /* Make a fair guess for the size of the stack frame of the current
1473 function. This doesn't have to be exact, the result is only used
1474 in the inline heuristics. So we don't want to run the full stack
1475 var packing algorithm (which is quadratic in the number of stack
1476 vars). Instead, we calculate the total size of all stack vars.
1477 This turns out to be a pretty fair estimate -- packing of stack
1478 vars doesn't happen very often. */
1479
1480 HOST_WIDE_INT
1481 estimated_stack_frame_size (void)
1482 {
1483 HOST_WIDE_INT size = 0;
1484 size_t i;
1485 tree t, outer_block = DECL_INITIAL (current_function_decl);
1486
1487 init_vars_expansion ();
1488
1489 for (t = cfun->local_decls; t; t = TREE_CHAIN (t))
1490 {
1491 tree var = TREE_VALUE (t);
1492
1493 if (TREE_USED (var))
1494 size += expand_one_var (var, true, false);
1495 TREE_USED (var) = 1;
1496 }
1497 size += account_used_vars_for_block (outer_block, true);
1498
1499 if (stack_vars_num > 0)
1500 {
1501 /* Fake sorting the stack vars for account_stack_vars (). */
1502 stack_vars_sorted = XNEWVEC (size_t, stack_vars_num);
1503 for (i = 0; i < stack_vars_num; ++i)
1504 stack_vars_sorted[i] = i;
1505 size += account_stack_vars ();
1506 fini_vars_expansion ();
1507 }
1508
1509 return size;
1510 }
1511
1512 /* Expand all variables used in the function. */
1513
1514 static void
1515 expand_used_vars (void)
1516 {
1517 tree t, next, outer_block = DECL_INITIAL (current_function_decl);
1518 unsigned i;
1519
1520 /* Compute the phase of the stack frame for this function. */
1521 {
1522 int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
1523 int off = STARTING_FRAME_OFFSET % align;
1524 frame_phase = off ? align - off : 0;
1525 }
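/* For illustration: with a preferred stack boundary of 128 bits,
   align == 16 above; if STARTING_FRAME_OFFSET were 8 this would give
   off == 8 and frame_phase == 16 - 8 == 8, while the common case of a
   zero starting offset yields frame_phase == 0.  */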
1526
1527 init_vars_expansion ();
1528
1529 for (i = 0; i < SA.map->num_partitions; i++)
1530 {
1531 tree var = partition_to_var (SA.map, i);
1532
1533 gcc_assert (is_gimple_reg (var));
1534 if (TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
1535 expand_one_var (var, true, true);
1536 else
1537 {
1538 /* This is a PARM_DECL or RESULT_DECL. For those partitions that
1539 contain the default def (representing the parm or result itself)
1540 we don't do anything here. But those which don't contain the
1541 default def (representing a temporary based on the parm/result)
1542 we need to allocate space just like for normal VAR_DECLs. */
1543 if (!bitmap_bit_p (SA.partition_has_default_def, i))
1544 {
1545 expand_one_var (var, true, true);
1546 gcc_assert (SA.partition_to_pseudo[i]);
1547 }
1548 }
1549 }
1550
1551 /* At this point all variables on the local_decls with TREE_USED
1552 set are not associated with any block scope. Lay them out. */
1553 t = cfun->local_decls;
1554 cfun->local_decls = NULL_TREE;
1555 for (; t; t = next)
1556 {
1557 tree var = TREE_VALUE (t);
1558 bool expand_now = false;
1559
1560 next = TREE_CHAIN (t);
1561
1562 /* Expanded above already. */
1563 if (is_gimple_reg (var))
1564 ;
1565 /* We didn't set a block for static or extern because it's hard
1566 to tell the difference between a global variable (re)declared
1567 in a local scope, and one that's really declared there to
1568 begin with. And it doesn't really matter much, since we're
1569 not giving them stack space. Expand them now. */
1570 else if (TREE_STATIC (var) || DECL_EXTERNAL (var))
1571 expand_now = true;
1572
1573 /* If the variable is not associated with any block, then it
1574 was created by the optimizers, and could be live anywhere
1575 in the function. */
1576 else if (TREE_USED (var))
1577 expand_now = true;
1578
1579 /* Finally, mark all variables on the list as used. We'll use
1580 this in a moment when we expand those associated with scopes. */
1581 TREE_USED (var) = 1;
1582
1583 if (expand_now)
1584 {
1585 expand_one_var (var, true, true);
1586 if (DECL_ARTIFICIAL (var) && !DECL_IGNORED_P (var))
1587 {
1588 rtx rtl = DECL_RTL_IF_SET (var);
1589
1590 /* Keep artificial non-ignored vars in cfun->local_decls
1591 chain until instantiate_decls. */
1592 if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT))
1593 {
1594 TREE_CHAIN (t) = cfun->local_decls;
1595 cfun->local_decls = t;
1596 continue;
1597 }
1598 }
1599 }
1600
1601 ggc_free (t);
1602 }
1603
1604 /* At this point, all variables within the block tree with TREE_USED
1605 set are actually used by the optimized function. Lay them out. */
1606 expand_used_vars_for_block (outer_block, true);
1607
1608 if (stack_vars_num > 0)
1609 {
1610 /* Due to the way alias sets work, no variables with non-conflicting
1611 alias sets may be assigned the same address. Add conflicts to
1612 reflect this. */
1613 add_alias_set_conflicts ();
1614
1615 /* If stack protection is enabled, we don't share space between
1616 vulnerable data and non-vulnerable data. */
1617 if (flag_stack_protect)
1618 add_stack_protection_conflicts ();
1619
1620 /* Now that we have collected all stack variables, and have computed a
1621 minimal interference graph, attempt to save some stack space. */
1622 partition_stack_vars ();
1623 if (dump_file)
1624 dump_stack_var_partition ();
1625 }
1626
1627 /* There are several conditions under which we should create a
1628 stack guard: protect-all, alloca used, protected decls present. */
1629 if (flag_stack_protect == 2
1630 || (flag_stack_protect
1631 && (cfun->calls_alloca || has_protected_decls)))
1632 create_stack_guard ();
1633
1634 /* Assign rtl to each variable based on these partitions. */
1635 if (stack_vars_num > 0)
1636 {
1637 /* Reorder decls to be protected by iterating over the variables
1638 array multiple times, and allocating out of each phase in turn. */
1639 /* ??? We could probably integrate this into the qsort we did
1640 earlier, such that we naturally see these variables first,
1641 and thus naturally allocate things in the right order. */
1642 if (has_protected_decls)
1643 {
1644 /* Phase 1 contains only character arrays. */
1645 expand_stack_vars (stack_protect_decl_phase_1);
1646
1647 /* Phase 2 contains other kinds of arrays. */
1648 if (flag_stack_protect == 2)
1649 expand_stack_vars (stack_protect_decl_phase_2);
1650 }
1651
1652 expand_stack_vars (NULL);
1653
1654 fini_vars_expansion ();
1655 }
1656
1657 /* If the target requires that FRAME_OFFSET be aligned, do it. */
1658 if (STACK_ALIGNMENT_NEEDED)
1659 {
1660 HOST_WIDE_INT align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
1661 if (!FRAME_GROWS_DOWNWARD)
1662 frame_offset += align - 1;
1663 frame_offset &= -align;
1664 }
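/* For illustration: with a 16-byte preferred boundary and
   FRAME_GROWS_DOWNWARD, a frame_offset of -72 is rounded to -80 by the
   masking above; when the frame grows upward, 72 would first be bumped
   to 87 and then masked down to 80.  */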
1665 }
1666
1667
1668 /* If we need to produce a detailed dump, print the tree representation
1669 for STMT to the dump file. SINCE is the last RTX after which the RTL
1670 generated for STMT should have been appended. */
1671
1672 static void
1673 maybe_dump_rtl_for_gimple_stmt (gimple stmt, rtx since)
1674 {
1675 if (dump_file && (dump_flags & TDF_DETAILS))
1676 {
1677 fprintf (dump_file, "\n;; ");
1678 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1679 fprintf (dump_file, "\n");
1680
1681 print_rtl (dump_file, since ? NEXT_INSN (since) : since);
1682 }
1683 }
1684
1685 /* Maps the blocks that do not contain tree labels to rtx labels. */
1686
1687 static struct pointer_map_t *lab_rtx_for_bb;
1688
1689 /* Returns the label_rtx expression for a label starting basic block BB. */
1690
1691 static rtx
1692 label_rtx_for_bb (basic_block bb ATTRIBUTE_UNUSED)
1693 {
1694 gimple_stmt_iterator gsi;
1695 tree lab;
1696 gimple lab_stmt;
1697 void **elt;
1698
1699 if (bb->flags & BB_RTL)
1700 return block_label (bb);
1701
1702 elt = pointer_map_contains (lab_rtx_for_bb, bb);
1703 if (elt)
1704 return (rtx) *elt;
1705
1706 /* Find the tree label if it is present. */
1707
1708 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1709 {
1710 lab_stmt = gsi_stmt (gsi);
1711 if (gimple_code (lab_stmt) != GIMPLE_LABEL)
1712 break;
1713
1714 lab = gimple_label_label (lab_stmt);
1715 if (DECL_NONLOCAL (lab))
1716 break;
1717
1718 return label_rtx (lab);
1719 }
1720
1721 elt = pointer_map_insert (lab_rtx_for_bb, bb);
1722 *elt = gen_label_rtx ();
1723 return (rtx) *elt;
1724 }
1725
1726
1727 /* A subroutine of expand_gimple_cond. Given E, a fallthrough edge
1728 of a basic block where we just expanded the conditional at the end,
1729 possibly clean up the CFG and instruction sequence. */
1730
1731 static void
1732 maybe_cleanup_end_of_block (edge e)
1733 {
1734 /* Special case: when jumpif decides that the condition is
1735 trivial it emits an unconditional jump (and the necessary
1736 barrier). But we still have two edges, the fallthru one is
1737 wrong. purge_dead_edges would clean this up later. Unfortunately
1738 we have to insert insns (and split edges) before
1739 find_many_sub_basic_blocks and hence before purge_dead_edges.
1740 But splitting edges might create new blocks which depend on the
1741 fact that if there are two edges there's no barrier. So the
1742 barrier would get lost and verify_flow_info would ICE. Instead
1743 of auditing all edge splitters to care for the barrier (which
1744 normally isn't there in a cleaned CFG), fix it here. */
1745 if (BARRIER_P (get_last_insn ()))
1746 {
1747 basic_block bb = e->src;
1748 rtx insn;
1749 remove_edge (e);
1750 /* Now we have a single successor block; if we have insns to
1751 insert on the remaining edge, we will potentially insert
1752 them at the end of this block (if the dest block isn't feasible)
1753 in order to avoid splitting the edge. This insertion will take
1754 place in front of the last jump. But we might have emitted
1755 multiple jumps (conditional and one unconditional) to the
1756 same destination. Inserting in front of the last one then
1757 is a problem. See PR 40021. We fix this by deleting all
1758 jumps except the last unconditional one. */
1759 insn = PREV_INSN (get_last_insn ());
1760 /* Make sure we have an unconditional jump. Otherwise we're
1761 confused. */
1762 gcc_assert (JUMP_P (insn) && !any_condjump_p (insn));
1763 for (insn = PREV_INSN (insn); insn != BB_HEAD (bb);)
1764 {
1765 insn = PREV_INSN (insn);
1766 if (JUMP_P (NEXT_INSN (insn)))
1767 delete_insn (NEXT_INSN (insn));
1768 }
1769 }
1770 }
1771
1772
1773 /* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_COND.
1774 Returns a new basic block if we've terminated the current basic
1775 block and created a new one. */
1776
1777 static basic_block
1778 expand_gimple_cond (basic_block bb, gimple stmt)
1779 {
1780 basic_block new_bb, dest;
1781 edge new_edge;
1782 edge true_edge;
1783 edge false_edge;
1784 tree pred = gimple_cond_pred_to_tree (stmt);
1785 rtx last2, last;
1786
1787 last2 = last = get_last_insn ();
1788
1789 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1790 if (gimple_has_location (stmt))
1791 {
1792 set_curr_insn_source_location (gimple_location (stmt));
1793 set_curr_insn_block (gimple_block (stmt));
1794 }
1795
1796 /* These flags have no purpose in RTL land. */
1797 true_edge->flags &= ~EDGE_TRUE_VALUE;
1798 false_edge->flags &= ~EDGE_FALSE_VALUE;
1799
1800 /* We can either have a pure conditional jump with one fallthru edge or
1801 a two-way jump that needs to be decomposed into two basic blocks. */
1802 if (false_edge->dest == bb->next_bb)
1803 {
1804 jumpif (pred, label_rtx_for_bb (true_edge->dest));
1805 add_reg_br_prob_note (last, true_edge->probability);
1806 maybe_dump_rtl_for_gimple_stmt (stmt, last);
1807 if (true_edge->goto_locus)
1808 {
1809 set_curr_insn_source_location (true_edge->goto_locus);
1810 set_curr_insn_block (true_edge->goto_block);
1811 true_edge->goto_locus = curr_insn_locator ();
1812 }
1813 true_edge->goto_block = NULL;
1814 false_edge->flags |= EDGE_FALLTHRU;
1815 ggc_free (pred);
1816 maybe_cleanup_end_of_block (false_edge);
1817 return NULL;
1818 }
1819 if (true_edge->dest == bb->next_bb)
1820 {
1821 jumpifnot (pred, label_rtx_for_bb (false_edge->dest));
1822 add_reg_br_prob_note (last, false_edge->probability);
1823 maybe_dump_rtl_for_gimple_stmt (stmt, last);
1824 if (false_edge->goto_locus)
1825 {
1826 set_curr_insn_source_location (false_edge->goto_locus);
1827 set_curr_insn_block (false_edge->goto_block);
1828 false_edge->goto_locus = curr_insn_locator ();
1829 }
1830 false_edge->goto_block = NULL;
1831 true_edge->flags |= EDGE_FALLTHRU;
1832 ggc_free (pred);
1833 maybe_cleanup_end_of_block (true_edge);
1834 return NULL;
1835 }
1836
1837 jumpif (pred, label_rtx_for_bb (true_edge->dest));
1838 add_reg_br_prob_note (last, true_edge->probability);
1839 last = get_last_insn ();
1840 if (false_edge->goto_locus)
1841 {
1842 set_curr_insn_source_location (false_edge->goto_locus);
1843 set_curr_insn_block (false_edge->goto_block);
1844 false_edge->goto_locus = curr_insn_locator ();
1845 }
1846 false_edge->goto_block = NULL;
1847 emit_jump (label_rtx_for_bb (false_edge->dest));
1848
1849 BB_END (bb) = last;
1850 if (BARRIER_P (BB_END (bb)))
1851 BB_END (bb) = PREV_INSN (BB_END (bb));
1852 update_bb_for_insn (bb);
1853
1854 new_bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);
1855 dest = false_edge->dest;
1856 redirect_edge_succ (false_edge, new_bb);
1857 false_edge->flags |= EDGE_FALLTHRU;
1858 new_bb->count = false_edge->count;
1859 new_bb->frequency = EDGE_FREQUENCY (false_edge);
1860 new_edge = make_edge (new_bb, dest, 0);
1861 new_edge->probability = REG_BR_PROB_BASE;
1862 new_edge->count = new_bb->count;
1863 if (BARRIER_P (BB_END (new_bb)))
1864 BB_END (new_bb) = PREV_INSN (BB_END (new_bb));
1865 update_bb_for_insn (new_bb);
1866
1867 maybe_dump_rtl_for_gimple_stmt (stmt, last2);
1868
1869 if (true_edge->goto_locus)
1870 {
1871 set_curr_insn_source_location (true_edge->goto_locus);
1872 set_curr_insn_block (true_edge->goto_block);
1873 true_edge->goto_locus = curr_insn_locator ();
1874 }
1875 true_edge->goto_block = NULL;
1876
1877 ggc_free (pred);
1878 return new_bb;
1879 }
1880
1881 /* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_CALL
1882 that has CALL_EXPR_TAILCALL set. Returns non-null if we actually
1883 generated a tail call (something that might be denied by the ABI
1884 rules governing the call; see calls.c).
1885
1886 Sets CAN_FALLTHRU if we generated a *conditional* tail call, and
1887 control can still reach the rest of BB. The case here is __builtin_sqrt,
1888 where the NaN result goes through the external function (with a
1889 tailcall) and the normal result happens via a sqrt instruction. */
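/* Illustrative shape of the conditional case (register and label names
   made up): expanding "return __builtin_sqrt (x)" on a target with a
   sqrt instruction may produce roughly

       r = sqrt-insn (x)
       if (r unordered r)          ; NaN check
         sibcall sqrt              ; the tail call found below
     L:                            ; label reached on the normal path
       ...

   The label following the sibcall is what makes *CAN_FALLTHRU true.  */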
1890
1891 static basic_block
1892 expand_gimple_tailcall (basic_block bb, gimple stmt, bool *can_fallthru)
1893 {
1894 rtx last2, last;
1895 edge e;
1896 edge_iterator ei;
1897 int probability;
1898 gcov_type count;
1899 tree stmt_tree = gimple_to_tree (stmt);
1900
1901 last2 = last = get_last_insn ();
1902
1903 expand_expr_stmt (stmt_tree);
1904
1905 release_stmt_tree (stmt, stmt_tree);
1906
1907 for (last = NEXT_INSN (last); last; last = NEXT_INSN (last))
1908 if (CALL_P (last) && SIBLING_CALL_P (last))
1909 goto found;
1910
1911 maybe_dump_rtl_for_gimple_stmt (stmt, last2);
1912
1913 *can_fallthru = true;
1914 return NULL;
1915
1916 found:
1917 /* ??? Wouldn't it be better to just reset any pending stack adjust?
1918 Any instructions emitted here are about to be deleted. */
1919 do_pending_stack_adjust ();
1920
1921 /* Remove any non-eh, non-abnormal edges that don't go to exit. */
1922 /* ??? I.e. the fallthrough edge. HOWEVER! If there were to be
1923 EH or abnormal edges, we shouldn't have created a tail call in
1924 the first place. So it seems to me we should just be removing
1925 all edges here, or redirecting the existing fallthru edge to
1926 the exit block. */
1927
1928 probability = 0;
1929 count = 0;
1930
1931 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
1932 {
1933 if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
1934 {
1935 if (e->dest != EXIT_BLOCK_PTR)
1936 {
1937 e->dest->count -= e->count;
1938 e->dest->frequency -= EDGE_FREQUENCY (e);
1939 if (e->dest->count < 0)
1940 e->dest->count = 0;
1941 if (e->dest->frequency < 0)
1942 e->dest->frequency = 0;
1943 }
1944 count += e->count;
1945 probability += e->probability;
1946 remove_edge (e);
1947 }
1948 else
1949 ei_next (&ei);
1950 }
1951
1952 /* This is somewhat ugly: the call_expr expander often emits instructions
1953 after the sibcall (to perform the function return). These confuse the
1954 find_many_sub_basic_blocks code, so we need to get rid of them. */
1955 last = NEXT_INSN (last);
1956 gcc_assert (BARRIER_P (last));
1957
1958 *can_fallthru = false;
1959 while (NEXT_INSN (last))
1960 {
1961 /* For instance, a sqrt builtin expander expands an if with a
1962 sibcall in the then-arm and a label for the else-arm. */
1963 if (LABEL_P (NEXT_INSN (last)))
1964 {
1965 *can_fallthru = true;
1966 break;
1967 }
1968 delete_insn (NEXT_INSN (last));
1969 }
1970
1971 e = make_edge (bb, EXIT_BLOCK_PTR, EDGE_ABNORMAL | EDGE_SIBCALL);
1972 e->probability += probability;
1973 e->count += count;
1974 BB_END (bb) = last;
1975 update_bb_for_insn (bb);
1976
1977 if (NEXT_INSN (last))
1978 {
1979 bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);
1980
1981 last = BB_END (bb);
1982 if (BARRIER_P (last))
1983 BB_END (bb) = PREV_INSN (last);
1984 }
1985
1986 maybe_dump_rtl_for_gimple_stmt (stmt, last2);
1987
1988 return bb;
1989 }
1990
1991 /* Expand basic block BB from GIMPLE trees to RTL. */
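/* Roughly, the steps below are: switch the block to RTL mode, drop a
   trailing GIMPLE_RETURN when simply falling through to the exit block
   is equivalent, emit any leading label (and the NOTE_INSN_BASIC_BLOCK
   note), expand each remaining statement -- GIMPLE_COND and tail calls
   are handled by the helpers above -- and finally materialize explicit
   jumps for fallthru edges that no longer reach the next block.  */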
1992
1993 static basic_block
1994 expand_gimple_basic_block (basic_block bb)
1995 {
1996 gimple_stmt_iterator gsi;
1997 gimple_seq stmts;
1998 gimple stmt = NULL;
1999 rtx note, last;
2000 edge e;
2001 edge_iterator ei;
2002 void **elt;
2003
2004 if (dump_file)
2005 fprintf (dump_file, "\n;; Generating RTL for gimple basic block %d\n",
2006 bb->index);
2007
2008 /* Note that since we are now transitioning from GIMPLE to RTL, we
2009 cannot use the gsi_*_bb() routines because they expect the basic
2010 block to be in GIMPLE, instead of RTL. Therefore, we need to
2011 access the BB sequence directly. */
2012 stmts = bb_seq (bb);
2013 bb->il.gimple = NULL;
2014 rtl_profile_for_bb (bb);
2015 init_rtl_bb_info (bb);
2016 bb->flags |= BB_RTL;
2017
2018 /* Remove the RETURN_EXPR if we may fall through to the exit
2019 instead. */
2020 gsi = gsi_last (stmts);
2021 if (!gsi_end_p (gsi)
2022 && gimple_code (gsi_stmt (gsi)) == GIMPLE_RETURN)
2023 {
2024 gimple ret_stmt = gsi_stmt (gsi);
2025
2026 gcc_assert (single_succ_p (bb));
2027 gcc_assert (single_succ (bb) == EXIT_BLOCK_PTR);
2028
2029 if (bb->next_bb == EXIT_BLOCK_PTR
2030 && !gimple_return_retval (ret_stmt))
2031 {
2032 gsi_remove (&gsi, false);
2033 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2034 }
2035 }
2036
2037 gsi = gsi_start (stmts);
2038 if (!gsi_end_p (gsi))
2039 {
2040 stmt = gsi_stmt (gsi);
2041 if (gimple_code (stmt) != GIMPLE_LABEL)
2042 stmt = NULL;
2043 }
2044
2045 elt = pointer_map_contains (lab_rtx_for_bb, bb);
2046
2047 if (stmt || elt)
2048 {
2049 last = get_last_insn ();
2050
2051 if (stmt)
2052 {
2053 tree stmt_tree = gimple_to_tree (stmt);
2054 expand_expr_stmt (stmt_tree);
2055 release_stmt_tree (stmt, stmt_tree);
2056 gsi_next (&gsi);
2057 }
2058
2059 if (elt)
2060 emit_label ((rtx) *elt);
2061
2062 /* Java emits line number notes at the top of labels.
2063 ??? Make this go away once line number notes are obsoleted. */
2064 BB_HEAD (bb) = NEXT_INSN (last);
2065 if (NOTE_P (BB_HEAD (bb)))
2066 BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb));
2067 note = emit_note_after (NOTE_INSN_BASIC_BLOCK, BB_HEAD (bb));
2068
2069 maybe_dump_rtl_for_gimple_stmt (stmt, last);
2070 }
2071 else
2072 note = BB_HEAD (bb) = emit_note (NOTE_INSN_BASIC_BLOCK);
2073
2074 NOTE_BASIC_BLOCK (note) = bb;
2075
2076 for (; !gsi_end_p (gsi); gsi_next (&gsi))
2077 {
2078 gimple stmt = gsi_stmt (gsi);
2079 basic_block new_bb;
2080
2081 /* Expand this statement, then evaluate the resulting RTL and
2082 fixup the CFG accordingly. */
2083 if (gimple_code (stmt) == GIMPLE_COND)
2084 {
2085 new_bb = expand_gimple_cond (bb, stmt);
2086 if (new_bb)
2087 return new_bb;
2088 }
2089 else
2090 {
2091 if (is_gimple_call (stmt) && gimple_call_tail_p (stmt))
2092 {
2093 bool can_fallthru;
2094 new_bb = expand_gimple_tailcall (bb, stmt, &can_fallthru);
2095 if (new_bb)
2096 {
2097 if (can_fallthru)
2098 bb = new_bb;
2099 else
2100 return new_bb;
2101 }
2102 }
2103 else
2104 {
2105 def_operand_p def_p;
2106 tree stmt_tree;
2107 def_p = SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF);
2108
2109 if (def_p != NULL)
2110 {
2111 /* Ignore this stmt if it is in the list of
2112 replaceable expressions. */
2113 if (SA.values
2114 && bitmap_bit_p (SA.values,
2115 SSA_NAME_VERSION (DEF_FROM_PTR (def_p))))
2116 continue;
2117 }
2118 stmt_tree = gimple_to_tree (stmt);
2119 last = get_last_insn ();
2120 expand_expr_stmt (stmt_tree);
2121 maybe_dump_rtl_for_gimple_stmt (stmt, last);
2122 release_stmt_tree (stmt, stmt_tree);
2123 }
2124 }
2125 }
2126
2127 /* Expand implicit goto and convert goto_locus. */
2128 FOR_EACH_EDGE (e, ei, bb->succs)
2129 {
2130 if (e->goto_locus && e->goto_block)
2131 {
2132 set_curr_insn_source_location (e->goto_locus);
2133 set_curr_insn_block (e->goto_block);
2134 e->goto_locus = curr_insn_locator ();
2135 }
2136 e->goto_block = NULL;
2137 if ((e->flags & EDGE_FALLTHRU) && e->dest != bb->next_bb)
2138 {
2139 emit_jump (label_rtx_for_bb (e->dest));
2140 e->flags &= ~EDGE_FALLTHRU;
2141 }
2142 }
2143
2144 do_pending_stack_adjust ();
2145
2146 /* Find the block tail. The last insn in the block is the insn
2147 before a barrier and/or table jump insn. */
2148 last = get_last_insn ();
2149 if (BARRIER_P (last))
2150 last = PREV_INSN (last);
2151 if (JUMP_TABLE_DATA_P (last))
2152 last = PREV_INSN (PREV_INSN (last));
2153 BB_END (bb) = last;
2154
2155 update_bb_for_insn (bb);
2156
2157 return bb;
2158 }
2159
2160
2161 /* Create a basic block for initialization code. */
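/* The init block collects the insns already emitted before the first
   GIMPLE block (parameter setup, the call to __main, the stack
   protector prologue, ...) and, roughly, is spliced between
   ENTRY_BLOCK_PTR and the first real block, jumping explicitly when
   that block is not simply the next one.  */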
2162
2163 static basic_block
2164 construct_init_block (void)
2165 {
2166 basic_block init_block, first_block;
2167 edge e = NULL;
2168 int flags;
2169
2170 /* Multiple entry points not supported yet. */
2171 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1);
2172 init_rtl_bb_info (ENTRY_BLOCK_PTR);
2173 init_rtl_bb_info (EXIT_BLOCK_PTR);
2174 ENTRY_BLOCK_PTR->flags |= BB_RTL;
2175 EXIT_BLOCK_PTR->flags |= BB_RTL;
2176
2177 e = EDGE_SUCC (ENTRY_BLOCK_PTR, 0);
2178
2179 /* When the entry edge points to the first basic block, we don't need a
2180 jump; otherwise we have to jump to the proper target. */
2181 if (e && e->dest != ENTRY_BLOCK_PTR->next_bb)
2182 {
2183 tree label = gimple_block_label (e->dest);
2184
2185 emit_jump (label_rtx (label));
2186 flags = 0;
2187 }
2188 else
2189 flags = EDGE_FALLTHRU;
2190
2191 init_block = create_basic_block (NEXT_INSN (get_insns ()),
2192 get_last_insn (),
2193 ENTRY_BLOCK_PTR);
2194 init_block->frequency = ENTRY_BLOCK_PTR->frequency;
2195 init_block->count = ENTRY_BLOCK_PTR->count;
2196 if (e)
2197 {
2198 first_block = e->dest;
2199 redirect_edge_succ (e, init_block);
2200 e = make_edge (init_block, first_block, flags);
2201 }
2202 else
2203 e = make_edge (init_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
2204 e->probability = REG_BR_PROB_BASE;
2205 e->count = ENTRY_BLOCK_PTR->count;
2206
2207 update_bb_for_insn (init_block);
2208 return init_block;
2209 }
2210
2211 /* For each lexical block, set BLOCK_NUMBER to the depth at which it is
2212 found in the block tree. */
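/* For example (purely illustrative), a scope tree

       A
       +-- B
       +-- C
           +-- D

   gets BLOCK_NUMBER 0 for A, 1 for B and C, and 2 for D: the recursion
   descends into BLOCK_SUBBLOCKS with level + 1 and walks siblings via
   BLOCK_CHAIN at the same level.  */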
2213
2214 static void
2215 set_block_levels (tree block, int level)
2216 {
2217 while (block)
2218 {
2219 BLOCK_NUMBER (block) = level;
2220 set_block_levels (BLOCK_SUBBLOCKS (block), level + 1);
2221 block = BLOCK_CHAIN (block);
2222 }
2223 }
2224
2225 /* Create a block containing landing pads and similar stuff. */
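/* More concretely, it wraps the insns emitted by expand_function_end
   (the return sequence and related cleanup) into a block of their own,
   redirects every non-abnormal predecessor of EXIT_BLOCK_PTR to that
   block, and gives it a single fallthru edge to EXIT_BLOCK_PTR.  */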
2226
2227 static void
2228 construct_exit_block (void)
2229 {
2230 rtx head = get_last_insn ();
2231 rtx end;
2232 basic_block exit_block;
2233 edge e, e2;
2234 unsigned ix;
2235 edge_iterator ei;
2236 rtx orig_end = BB_END (EXIT_BLOCK_PTR->prev_bb);
2237
2238 rtl_profile_for_bb (EXIT_BLOCK_PTR);
2239
2240 /* Make sure the locus is set to the end of the function, so that
2241 epilogue line numbers and warnings are set properly. */
2242 if (cfun->function_end_locus != UNKNOWN_LOCATION)
2243 input_location = cfun->function_end_locus;
2244
2245 /* The following insns belong to the top scope. */
2246 set_curr_insn_block (DECL_INITIAL (current_function_decl));
2247
2248 /* Generate rtl for function exit. */
2249 expand_function_end ();
2250
2251 end = get_last_insn ();
2252 if (head == end)
2253 return;
2254 /* While emitting the function end we may have moved the end of the
2255 last basic block, so restore it. */
2256 BB_END (EXIT_BLOCK_PTR->prev_bb) = orig_end;
2257 while (NEXT_INSN (head) && NOTE_P (NEXT_INSN (head)))
2258 head = NEXT_INSN (head);
2259 exit_block = create_basic_block (NEXT_INSN (head), end,
2260 EXIT_BLOCK_PTR->prev_bb);
2261 exit_block->frequency = EXIT_BLOCK_PTR->frequency;
2262 exit_block->count = EXIT_BLOCK_PTR->count;
2263
2264 ix = 0;
2265 while (ix < EDGE_COUNT (EXIT_BLOCK_PTR->preds))
2266 {
2267 e = EDGE_PRED (EXIT_BLOCK_PTR, ix);
2268 if (!(e->flags & EDGE_ABNORMAL))
2269 redirect_edge_succ (e, exit_block);
2270 else
2271 ix++;
2272 }
2273
2274 e = make_edge (exit_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
2275 e->probability = REG_BR_PROB_BASE;
2276 e->count = EXIT_BLOCK_PTR->count;
2277 FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR->preds)
2278 if (e2 != e)
2279 {
2280 e->count -= e2->count;
2281 exit_block->count -= e2->count;
2282 exit_block->frequency -= EDGE_FREQUENCY (e2);
2283 }
2284 if (e->count < 0)
2285 e->count = 0;
2286 if (exit_block->count < 0)
2287 exit_block->count = 0;
2288 if (exit_block->frequency < 0)
2289 exit_block->frequency = 0;
2290 update_bb_for_insn (exit_block);
2291 }
2292
2293 /* Helper function for discover_nonconstant_array_refs.
2294 Look for ARRAY_REF nodes with non-constant indexes and mark their
2295 base decls addressable. */
2296
2297 static tree
2298 discover_nonconstant_array_refs_r (tree * tp, int *walk_subtrees,
2299 void *data ATTRIBUTE_UNUSED)
2300 {
2301 tree t = *tp;
2302
2303 if (IS_TYPE_OR_DECL_P (t))
2304 *walk_subtrees = 0;
2305 else if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
2306 {
2307 while (((TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
2308 && is_gimple_min_invariant (TREE_OPERAND (t, 1))
2309 && (!TREE_OPERAND (t, 2)
2310 || is_gimple_min_invariant (TREE_OPERAND (t, 2))))
2311 || (TREE_CODE (t) == COMPONENT_REF
2312 && (!TREE_OPERAND (t,2)
2313 || is_gimple_min_invariant (TREE_OPERAND (t, 2))))
2314 || TREE_CODE (t) == BIT_FIELD_REF
2315 || TREE_CODE (t) == REALPART_EXPR
2316 || TREE_CODE (t) == IMAGPART_EXPR
2317 || TREE_CODE (t) == VIEW_CONVERT_EXPR
2318 || CONVERT_EXPR_P (t))
2319 t = TREE_OPERAND (t, 0);
2320
2321 if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
2322 {
2323 t = get_base_address (t);
2324 if (t && DECL_P (t)
2325 && DECL_MODE (t) != BLKmode)
2326 TREE_ADDRESSABLE (t) = 1;
2327 }
2328
2329 *walk_subtrees = 0;
2330 }
2331
2332 return NULL_TREE;
2333 }
2334
2335 /* RTL expansion is not able to compile array references with variable
2336 offsets for arrays stored in a single register. Discover such
2337 expressions and mark the variables as addressable to avoid this
2338 scenario. */
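/* For instance (illustrative), a small array or vector variable whose
   DECL_MODE is not BLKmode could otherwise be promoted to a register,
   leaving no memory address to which the variable index could be
   applied; marking it TREE_ADDRESSABLE forces it into a stack slot.  */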
2339
2340 static void
2341 discover_nonconstant_array_refs (void)
2342 {
2343 basic_block bb;
2344 gimple_stmt_iterator gsi;
2345
2346 FOR_EACH_BB (bb)
2347 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2348 {
2349 gimple stmt = gsi_stmt (gsi);
2350 walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
2351 }
2352 }
2353
2354 /* This function sets crtl->args.internal_arg_pointer to a virtual
2355 register if DRAP is needed. The local register allocator will replace
2356 virtual_incoming_args_rtx with the virtual register. */
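/* Background, for orientation: DRAP, the dynamic realign argument
   pointer, is a register that preserves the address of the incoming
   argument area when the prologue realigns the stack pointer, so that
   argument accesses keep working relative to the pre-realignment
   stack.  */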
2357
2358 static void
2359 expand_stack_alignment (void)
2360 {
2361 rtx drap_rtx;
2362 unsigned int preferred_stack_boundary;
2363
2364 if (! SUPPORTS_STACK_ALIGNMENT)
2365 return;
2366
2367 if (cfun->calls_alloca
2368 || cfun->has_nonlocal_label
2369 || crtl->has_nonlocal_goto)
2370 crtl->need_drap = true;
2371
2372 gcc_assert (crtl->stack_alignment_needed
2373 <= crtl->stack_alignment_estimated);
2374
2375 /* Update crtl->stack_alignment_estimated and use it later to align
2376 the stack. We check PREFERRED_STACK_BOUNDARY if there may be non-call
2377 exceptions, since the callgraph doesn't collect incoming stack alignment
2378 in this case. */
2379 if (flag_non_call_exceptions
2380 && PREFERRED_STACK_BOUNDARY > crtl->preferred_stack_boundary)
2381 preferred_stack_boundary = PREFERRED_STACK_BOUNDARY;
2382 else
2383 preferred_stack_boundary = crtl->preferred_stack_boundary;
2384 if (preferred_stack_boundary > crtl->stack_alignment_estimated)
2385 crtl->stack_alignment_estimated = preferred_stack_boundary;
2386 if (preferred_stack_boundary > crtl->stack_alignment_needed)
2387 crtl->stack_alignment_needed = preferred_stack_boundary;
2388
2389 crtl->stack_realign_needed
2390 = INCOMING_STACK_BOUNDARY < crtl->stack_alignment_estimated;
2391 crtl->stack_realign_tried = crtl->stack_realign_needed;
2392
2393 crtl->stack_realign_processed = true;
2394
2395 /* Target has to redefine TARGET_GET_DRAP_RTX to support stack
2396 alignment. */
2397 gcc_assert (targetm.calls.get_drap_rtx != NULL);
2398 drap_rtx = targetm.calls.get_drap_rtx ();
2399
2400 /* stack_realign_drap and drap_rtx must match. */
2401 gcc_assert ((stack_realign_drap != 0) == (drap_rtx != NULL));
2402
2403 /* Do nothing if NULL is returned, which means DRAP is not needed. */
2404 if (NULL != drap_rtx)
2405 {
2406 crtl->args.internal_arg_pointer = drap_rtx;
2407
2408 /* Call fixup_tail_calls to clean up REG_EQUIV note if DRAP is
2409 needed. */
2410 fixup_tail_calls ();
2411 }
2412 }
2413
2414 /* Translate the intermediate representation contained in the CFG
2415 from GIMPLE trees to RTL.
2416
2417 We do conversion per basic block and preserve/update the tree CFG.
2418 This implies we have to do some magic as the CFG can simultaneously
2419 consist of basic blocks containing RTL and GIMPLE trees. This can
2420 confuse the CFG hooks, so be careful not to manipulate the CFG during
2421 the expansion. */
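/* In outline, the steps below are: rewrite the function out of SSA,
   expand the variables and the function prologue, expand PHI nodes,
   build the init block, expand every basic block, build the exit
   block, convert EH ranges, commit pending edge insertions, rediscover
   sub-basic-blocks, and finally set up stack (re)alignment.  */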
2422
2423 static unsigned int
2424 gimple_expand_cfg (void)
2425 {
2426 basic_block bb, init_block;
2427 sbitmap blocks;
2428 edge_iterator ei;
2429 edge e;
2430 unsigned i;
2431
2432 rewrite_out_of_ssa (&SA);
2433 SA.partition_to_pseudo = (rtx *)xcalloc (SA.map->num_partitions,
2434 sizeof (rtx));
2435
2436 /* Some backends want to know that we are expanding to RTL. */
2437 currently_expanding_to_rtl = 1;
2438
2439 rtl_profile_for_bb (ENTRY_BLOCK_PTR);
2440
2441 insn_locators_alloc ();
2442 if (!DECL_IS_BUILTIN (current_function_decl))
2443 {
2444 /* Eventually, all FEs should explicitly set function_start_locus. */
2445 if (cfun->function_start_locus == UNKNOWN_LOCATION)
2446 set_curr_insn_source_location
2447 (DECL_SOURCE_LOCATION (current_function_decl));
2448 else
2449 set_curr_insn_source_location (cfun->function_start_locus);
2450 }
2451 set_curr_insn_block (DECL_INITIAL (current_function_decl));
2452 prologue_locator = curr_insn_locator ();
2453
2454 /* Make sure the first insn is a note, even if we don't want line numbers.
2455 This ensures the first insn will never be deleted.
2456 Also, final expects a note to appear there. */
2457 emit_note (NOTE_INSN_DELETED);
2458
2459 /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE. */
2460 discover_nonconstant_array_refs ();
2461
2462 targetm.expand_to_rtl_hook ();
2463 crtl->stack_alignment_needed = STACK_BOUNDARY;
2464 crtl->max_used_stack_slot_alignment = STACK_BOUNDARY;
2465 crtl->stack_alignment_estimated = STACK_BOUNDARY;
2466 crtl->preferred_stack_boundary = STACK_BOUNDARY;
2467 cfun->cfg->max_jumptable_ents = 0;
2468
2469
2470 /* Expand the variables recorded during gimple lowering. */
2471 expand_used_vars ();
2472
2473 /* Honor stack protection warnings. */
2474 if (warn_stack_protect)
2475 {
2476 if (cfun->calls_alloca)
2477 warning (OPT_Wstack_protector,
2478 "not protecting local variables: variable length buffer");
2479 if (has_short_buffer && !crtl->stack_protect_guard)
2480 warning (OPT_Wstack_protector,
2481 "not protecting function: no buffer at least %d bytes long",
2482 (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
2483 }
2484
2485 /* Set up parameters and prepare for return, for the function. */
2486 expand_function_start (current_function_decl);
2487
2488 /* Now that we also have the parameter RTXs, copy them over to our
2489 partitions. */
2490 for (i = 0; i < SA.map->num_partitions; i++)
2491 {
2492 tree var = SSA_NAME_VAR (partition_to_var (SA.map, i));
2493
2494 if (TREE_CODE (var) != VAR_DECL
2495 && !SA.partition_to_pseudo[i])
2496 SA.partition_to_pseudo[i] = DECL_RTL_IF_SET (var);
2497 gcc_assert (SA.partition_to_pseudo[i]);
2498 /* Some RTL parts really want to look at DECL_RTL(x) when x
2499 was a decl marked in REG_ATTR or MEM_ATTR. We could use
2500 SET_DECL_RTL here to make this available, but that would mean
2501 selecting one of the potentially many RTLs for one DECL. Instead
2502 of doing that we simply reset the MEM_EXPR of the RTL in question;
2503 then nobody can get at it and hence nobody can call DECL_RTL on it. */
2504 if (!DECL_RTL_SET_P (var))
2505 {
2506 if (MEM_P (SA.partition_to_pseudo[i]))
2507 set_mem_expr (SA.partition_to_pseudo[i], NULL);
2508 }
2509 }
2510
2511 /* If this function is `main', emit a call to `__main'
2512 to run global initializers, etc. */
2513 if (DECL_NAME (current_function_decl)
2514 && MAIN_NAME_P (DECL_NAME (current_function_decl))
2515 && DECL_FILE_SCOPE_P (current_function_decl))
2516 expand_main_function ();
2517
2518 /* Initialize the stack_protect_guard field. This must happen after the
2519 call to __main (if any) so that the external decl is initialized. */
2520 if (crtl->stack_protect_guard)
2521 stack_protect_prologue ();
2522
2523 /* Update stack boundary if needed. */
2524 if (SUPPORTS_STACK_ALIGNMENT)
2525 {
2526 /* Call update_stack_boundary here to update incoming stack
2527 boundary before TARGET_FUNCTION_OK_FOR_SIBCALL is called.
2528 TARGET_FUNCTION_OK_FOR_SIBCALL needs to know the accurate
2529 incoming stack alignment to check if it is OK to perform
2530 sibcall optimization since sibcall optimization will only
2531 align the outgoing stack to incoming stack boundary. */
2532 if (targetm.calls.update_stack_boundary)
2533 targetm.calls.update_stack_boundary ();
2534
2535 /* The incoming stack frame has to be aligned at least at
2536 parm_stack_boundary. */
2537 gcc_assert (crtl->parm_stack_boundary <= INCOMING_STACK_BOUNDARY);
2538 }
2539
2540 expand_phi_nodes (&SA);
2541
2542 /* Register rtl specific functions for cfg. */
2543 rtl_register_cfg_hooks ();
2544
2545 init_block = construct_init_block ();
2546
2547 /* Clear EDGE_EXECUTABLE on the entry edge(s). It is cleared from the
2548 remaining edges later. */
2549 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
2550 e->flags &= ~EDGE_EXECUTABLE;
2551
2552 lab_rtx_for_bb = pointer_map_create ();
2553 FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR, next_bb)
2554 bb = expand_gimple_basic_block (bb);
2555
2556 execute_free_datastructures ();
2557 finish_out_of_ssa (&SA);
2558
2559 /* Expansion is used by optimization passes too; set maybe_hot_insn_p
2560 conservatively to true until they are all profile aware. */
2561 pointer_map_destroy (lab_rtx_for_bb);
2562 free_histograms ();
2563
2564 construct_exit_block ();
2565 set_curr_insn_block (DECL_INITIAL (current_function_decl));
2566 insn_locators_finalize ();
2567
2568 /* Convert tree EH labels to RTL EH labels and zap the tree EH table. */
2569 convert_from_eh_region_ranges ();
2570 set_eh_throw_stmt_table (cfun, NULL);
2571
2572 rebuild_jump_labels (get_insns ());
2573 find_exception_handler_labels ();
2574
2575 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
2576 {
2577 edge e;
2578 edge_iterator ei;
2579 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
2580 {
2581 if (e->insns.r)
2582 commit_one_edge_insertion (e);
2583 else
2584 ei_next (&ei);
2585 }
2586 }
2587
2588 /* We're done expanding trees to RTL. */
2589 currently_expanding_to_rtl = 0;
2590
2591 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
2592 {
2593 edge e;
2594 edge_iterator ei;
2595 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
2596 {
2597 /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */
2598 e->flags &= ~EDGE_EXECUTABLE;
2599
2600 /* At the moment not all abnormal edges match the RTL
2601 representation. It is safe to remove them here as
2602 find_many_sub_basic_blocks will rediscover them.
2603 In the future we should get this fixed properly. */
2604 if ((e->flags & EDGE_ABNORMAL)
2605 && !(e->flags & EDGE_SIBCALL))
2606 remove_edge (e);
2607 else
2608 ei_next (&ei);
2609 }
2610 }
2611
2612 blocks = sbitmap_alloc (last_basic_block);
2613 sbitmap_ones (blocks);
2614 find_many_sub_basic_blocks (blocks);
2615 sbitmap_free (blocks);
2616 purge_all_dead_edges ();
2617
2618 compact_blocks ();
2619
2620 expand_stack_alignment ();
2621
2622 #ifdef ENABLE_CHECKING
2623 verify_flow_info ();
2624 #endif
2625
2626 /* There's no need to defer outputting this function any more; we
2627 know we want to output it. */
2628 DECL_DEFER_OUTPUT (current_function_decl) = 0;
2629
2630 /* Now that we're done expanding trees to RTL, we shouldn't have any
2631 more CONCATs anywhere. */
2632 generating_concat_p = 0;
2633
2634 if (dump_file)
2635 {
2636 fprintf (dump_file,
2637 "\n\n;;\n;; Full RTL generated for this function:\n;;\n");
2638 /* And the pass manager will dump RTL for us. */
2639 }
2640
2641 /* If we're emitting a nested function, make sure its parent gets
2642 emitted as well. Doing otherwise confuses debug info. */
2643 {
2644 tree parent;
2645 for (parent = DECL_CONTEXT (current_function_decl);
2646 parent != NULL_TREE;
2647 parent = get_containing_scope (parent))
2648 if (TREE_CODE (parent) == FUNCTION_DECL)
2649 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (parent)) = 1;
2650 }
2651
2652 /* We are now committed to emitting code for this function. Do any
2653 preparation, such as emitting abstract debug info for the inline
2654 function before it gets mangled by optimization. */
2655 if (cgraph_function_possibly_inlined_p (current_function_decl))
2656 (*debug_hooks->outlining_inline_function) (current_function_decl);
2657
2658 TREE_ASM_WRITTEN (current_function_decl) = 1;
2659
2660 /* After expanding, the return labels are no longer needed. */
2661 return_label = NULL;
2662 naked_return_label = NULL;
2663 /* Tag the blocks with a depth number so that change_scope can find
2664 the common parent easily. */
2665 set_block_levels (DECL_INITIAL (cfun->decl), 0);
2666 default_rtl_profile ();
2667 return 0;
2668 }
2669
2670 struct rtl_opt_pass pass_expand =
2671 {
2672 {
2673 RTL_PASS,
2674 "expand", /* name */
2675 NULL, /* gate */
2676 gimple_expand_cfg, /* execute */
2677 NULL, /* sub */
2678 NULL, /* next */
2679 0, /* static_pass_number */
2680 TV_EXPAND, /* tv_id */
2681 PROP_ssa | PROP_gimple_leh | PROP_cfg,/* properties_required */
2682 PROP_rtl, /* properties_provided */
2683 PROP_ssa | PROP_trees, /* properties_destroyed */
2684 TODO_verify_ssa | TODO_verify_flow
2685 | TODO_verify_stmts, /* todo_flags_start */
2686 TODO_dump_func
2687 | TODO_ggc_collect /* todo_flags_finish */
2688 }
2689 };